summaryrefslogtreecommitdiff
path: root/vendor/hyper
diff options
context:
space:
mode:
authormo khan <mo@mokhan.ca>2025-07-02 18:36:06 -0600
committermo khan <mo@mokhan.ca>2025-07-02 18:36:06 -0600
commit8cdfa445d6629ffef4cb84967ff7017654045bc2 (patch)
tree22f0b0907c024c78d26a731e2e1f5219407d8102 /vendor/hyper
parent4351c74c7c5f97156bc94d3a8549b9940ac80e3f (diff)
chore: add vendor directory
Diffstat (limited to 'vendor/hyper')
-rw-r--r--vendor/hyper/.cargo-checksum.json1
-rw-r--r--vendor/hyper/Cargo.lock738
-rw-r--r--vendor/hyper/Cargo.toml243
-rw-r--r--vendor/hyper/LICENSE19
-rw-r--r--vendor/hyper/src/body/incoming.rs617
-rw-r--r--vendor/hyper/src/body/length.rs129
-rw-r--r--vendor/hyper/src/body/mod.rs50
-rw-r--r--vendor/hyper/src/cfg.rs44
-rw-r--r--vendor/hyper/src/client/conn/http1.rs611
-rw-r--r--vendor/hyper/src/client/conn/http2.rs718
-rw-r--r--vendor/hyper/src/client/conn/mod.rs22
-rw-r--r--vendor/hyper/src/client/dispatch.rs510
-rw-r--r--vendor/hyper/src/client/mod.rs22
-rw-r--r--vendor/hyper/src/client/tests.rs261
-rw-r--r--vendor/hyper/src/common/buf.rs150
-rw-r--r--vendor/hyper/src/common/date.rs138
-rw-r--r--vendor/hyper/src/common/io/compat.rs150
-rw-r--r--vendor/hyper/src/common/io/mod.rs7
-rw-r--r--vendor/hyper/src/common/io/rewind.rs160
-rw-r--r--vendor/hyper/src/common/mod.rs14
-rw-r--r--vendor/hyper/src/common/task.rs9
-rw-r--r--vendor/hyper/src/common/time.rs79
-rw-r--r--vendor/hyper/src/common/watch.rs73
-rw-r--r--vendor/hyper/src/error.rs658
-rw-r--r--vendor/hyper/src/ext/h1_reason_phrase.rs221
-rw-r--r--vendor/hyper/src/ext/informational.rs86
-rw-r--r--vendor/hyper/src/ext/mod.rs246
-rw-r--r--vendor/hyper/src/ffi/body.rs302
-rw-r--r--vendor/hyper/src/ffi/client.rs274
-rw-r--r--vendor/hyper/src/ffi/error.rs96
-rw-r--r--vendor/hyper/src/ffi/http_types.rs703
-rw-r--r--vendor/hyper/src/ffi/io.rs198
-rw-r--r--vendor/hyper/src/ffi/macros.rs53
-rw-r--r--vendor/hyper/src/ffi/mod.rs99
-rw-r--r--vendor/hyper/src/ffi/task.rs549
-rw-r--r--vendor/hyper/src/headers.rs159
-rw-r--r--vendor/hyper/src/lib.rs139
-rw-r--r--vendor/hyper/src/mock.rs235
-rw-r--r--vendor/hyper/src/proto/h1/conn.rs1530
-rw-r--r--vendor/hyper/src/proto/h1/decode.rs1236
-rw-r--r--vendor/hyper/src/proto/h1/dispatch.rs808
-rw-r--r--vendor/hyper/src/proto/h1/encode.rs660
-rw-r--r--vendor/hyper/src/proto/h1/io.rs967
-rw-r--r--vendor/hyper/src/proto/h1/mod.rs113
-rw-r--r--vendor/hyper/src/proto/h1/role.rs3098
-rw-r--r--vendor/hyper/src/proto/h2/client.rs749
-rw-r--r--vendor/hyper/src/proto/h2/mod.rs446
-rw-r--r--vendor/hyper/src/proto/h2/ping.rs509
-rw-r--r--vendor/hyper/src/proto/h2/server.rs545
-rw-r--r--vendor/hyper/src/proto/mod.rs73
-rw-r--r--vendor/hyper/src/rt/bounds.rs109
-rw-r--r--vendor/hyper/src/rt/io.rs405
-rw-r--r--vendor/hyper/src/rt/mod.rs42
-rw-r--r--vendor/hyper/src/rt/timer.rs127
-rw-r--r--vendor/hyper/src/server/conn/http1.rs544
-rw-r--r--vendor/hyper/src/server/conn/http2.rs312
-rw-r--r--vendor/hyper/src/server/conn/mod.rs20
-rw-r--r--vendor/hyper/src/server/mod.rs9
-rw-r--r--vendor/hyper/src/service/http.rs52
-rw-r--r--vendor/hyper/src/service/mod.rs30
-rw-r--r--vendor/hyper/src/service/service.rs100
-rw-r--r--vendor/hyper/src/service/util.rs82
-rw-r--r--vendor/hyper/src/trace.rs128
-rw-r--r--vendor/hyper/src/upgrade.rs407
64 files changed, 21884 insertions, 0 deletions
diff --git a/vendor/hyper/.cargo-checksum.json b/vendor/hyper/.cargo-checksum.json
new file mode 100644
index 00000000..c2876846
--- /dev/null
+++ b/vendor/hyper/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.lock":"8ab67235ffd001087de399d70d81c1cbe33ef55b08ed77944463fa9bc225f47e","Cargo.toml":"2de73fcae59df74cfdad3b4b5400e3702fd85f5fb8c110064f466fd3bc54ea7e","LICENSE":"8f2eee700f56dd8b381d92dc5b136befe09f90e186f2c1c8414a70c4c7534550","src/body/incoming.rs":"6860ea00a996aa54b3745cffecad0d426f83548fbb70215f5c704e656e6fe2e1","src/body/length.rs":"fcf245cd9d46d5742c5745db3f643ac2f4e67a5165aed60550689ed0731d4ffc","src/body/mod.rs":"6e029d258d08b35a2f825235842da2f4d98fe90a9543851516382f019cfa4b8e","src/cfg.rs":"de5fee5bba45a982c10e8f09fc24677be5494bf6da0b39a3a132f0f6eb3fe41e","src/client/conn/http1.rs":"005fbcd3e3fc0548b3110298c968bae169f6d301b5caff037dab91685719ceb2","src/client/conn/http2.rs":"4236005d2b007b4cafdf1be0f847c3348d6e4c09612b7726d25d323aef7345bf","src/client/conn/mod.rs":"9a3a11f287ac5f8a2eb2f27af4d66cf201f92dc580f773f9cb5495f32756ee28","src/client/dispatch.rs":"2acc6a05f897d0a67c7cfc9df351f3d23b48da3f7b69c723f33e3f95846c23f5","src/client/mod.rs":"3428a487d81d2a46742493fb07b0b852d0868acf6136c77cc4a5c1aeeda7c311","src/client/tests.rs":"de0001609ef0b82d34d6e152b20d19f0b2f79ab6708bc4b3748a40acd3325238","src/common/buf.rs":"6ffe7941d14edbdd4d20e18e4c1da225161eb0d89ae807476d7b4282519bac7c","src/common/date.rs":"3fc169163f142a17f3bc883861bec65333710d478275881d1a9b792f832dbf91","src/common/io/compat.rs":"e3e9333b8c1862c61239fef6fc8aae6b39eebcfe85393e8b9906bca8b75326a0","src/common/io/mod.rs":"1f9002411f8a74be746e86d7a6afa5a7c3bdfe7c75943bed5ac752b802b1594d","src/common/io/rewind.rs":"1e15c9b837bd96753bbaf6470681d032a3e324c80ce8bbbab16a8d67b54654ec","src/common/mod.rs":"5827c350a2ba5716ae2a30a3b1658db552c7ff974c9a8e372ebf171e974fb0a4","src/common/task.rs":"09d2f914586f8ad42f92ba8149a8757c8cbd094467a1519f2d26906b977616f7","src/common/time.rs":"cc87655c71eb6ddfaeb90cb3efcbdc3aa08cff1bcbe588a6cfde442b76b7417a","src/common/watch.rs":"eb6db13fbb266ec11200ffa1f5e7a9a132d7e6555373895efde1e2daea428e03","src/error.rs":"24792e9cadc3a6152c786efe2f9cad42d864cc97d3
50e39212d4df89661adac8","src/ext/h1_reason_phrase.rs":"296ecdcb206eaa5b7dd7fa0399fc5e7415aa61da83cce2a27e2d790ba20fbb99","src/ext/informational.rs":"83a9b2a9cd7086f2be65d508d4cf0f45a2977fb90f94926d6a3cfbc98bdba073","src/ext/mod.rs":"1bbf1ce18aca97c56b6ab7a3e9c929e7c9ccdcd1cca79b5b8d2362e510bb4406","src/ffi/body.rs":"614955cfca938025a7b471d9bc91004edfd1ff028b66553ce757b5b0ec040dce","src/ffi/client.rs":"f3e7b519d9725b99ddc18a25ab527014c804d5e112fe130ef3dd539eac6e5a09","src/ffi/error.rs":"dd269cd749cfe2679ba499e9dc0832469cbf4f8101157fd88d00c0fb526868d8","src/ffi/http_types.rs":"a1addfbd00ec8afc03653e04ebaa0f57682cfcc5019bfed90299e87c06a58604","src/ffi/io.rs":"94455b259bdb9952f04b5e12c607dbfba560f4aa42e92aa932406021bf6afe3f","src/ffi/macros.rs":"8e1fe58244295db1d19aceeb0e9a777fe484ccc20194fae88b54208e0cbeb515","src/ffi/mod.rs":"80639b0ff428ff21ebf1e87c7800558002efc5f51424fe21fba6bf57ca8818e6","src/ffi/task.rs":"fd7b8909d2769c3e63f7434736fb4192792662a4658b1d69fc06b38235252dad","src/headers.rs":"43305ee388d59a3712e78e0c108c788999448600d0f5284e9361afc6f11eb679","src/lib.rs":"a3405c478bc3a1b6187e0393745d778441cf416138e082abb047aa0c4c3b3fde","src/mock.rs":"8b455312be74af6c4a27b3beba85a9493c86d43097947c5aad8a5e9dc0dcdbb3","src/proto/h1/conn.rs":"212bff0bccb833420d904bb0307e7cf709dfa5e171c1db3fa2b5a8747cfe54d9","src/proto/h1/decode.rs":"28acab6e991716307b26cb69f1be0ba657c5e2127afac5e96449c6013deb97d4","src/proto/h1/dispatch.rs":"0479a0eea3c1989b0e38f5ed51393c94085b400832e622fed909acb7830dfa65","src/proto/h1/encode.rs":"0ec088e6d766b4d22515117e39d8766ec1f1c474a4dceaee65d9474f876a06ad","src/proto/h1/io.rs":"d92bc4ffb54dc2e94d54052f903a9406aa8bf51f9c79ce231dbb14a0e9a8de72","src/proto/h1/mod.rs":"179bbde1399e5d827175f27157b96fb7bd6da9707965ad7df41fa85f24e80588","src/proto/h1/role.rs":"210ad28ef2e3c35f2268e3dac979e519c02193b4ba6f895de86442b26b802fad","src/proto/h2/client.rs":"930b97657aa74ea54b454d5097e74ad7b172f62466701959a7a378311a9dc51c","src/proto/h2/mod.rs":"bc92681b36a5666208
fb5c4110412b9948ec21ff482c0a3e2d7f049add44efce","src/proto/h2/ping.rs":"2dd4bf7477641cc26bde845d6114baabb58ca964ab27a4e8088a7eed2b8eb046","src/proto/h2/server.rs":"bd5483f72922957c7819a2c90a465ff4e85b2ea2f9de63fe36c23ac6e7570c72","src/proto/mod.rs":"075880551f7ad6daf9a863d6707d116ff22c37bd18bcfa2d272d185c1853a9c3","src/rt/bounds.rs":"3c75b9039a57d31bb92b12c3aa4597beb09273103e1759ed2a28ad133fa72444","src/rt/io.rs":"096573f6f586d70f28bc0e9f084d48b6f7404dbeeb6219f5f6a2dd7ac5985a03","src/rt/mod.rs":"1452a0b001d5895c9a1d3c57002877ba583c988199e54f84a8b3d9cbbc80e5c3","src/rt/timer.rs":"14e28bb4f46d32025666f4738df2550446c8abb22e9d73c4f2efe2a93de9dab9","src/server/conn/http1.rs":"721d60fbd2e129685e53e7a6de771a155c7940499e711f1d66f3c79653e83982","src/server/conn/http2.rs":"11a33c81982a958390c05f65e6164b1cf8404f1e50a18a622d37465ec6934b22","src/server/conn/mod.rs":"b2393dc0d68c09b560f7c9bcc61ed3bf99fce4d746eda9e1ad158891ee56f2be","src/server/mod.rs":"ffe7729eba251a89d24d605d889dfdb32020203f337e88de0bacb19317e0ea9c","src/service/http.rs":"a1cae019c6f542ac7ce84f3324a3fe4de3daee43fda53eca0a7ba3e77e73d13b","src/service/mod.rs":"de143e994e0021c1d54a7214f1748712e166d37a6496e783ee589956677ce034","src/service/service.rs":"0a31d5e0014b09a29cf3c60e1470a536033b4d8b5c21c0f33e65c8705f8198ea","src/service/util.rs":"7d2fcf8701722456979edf7e8431efee235f838c6f70f66c28ce8e3a36d514b6","src/trace.rs":"a766c472433c7569440862e978eceeea78220af170220e9fdc90b71bab731344","src/upgrade.rs":"02107e8607fa0bf76530808ed0d8522f76f7b2ccdddeacccb29ad473d3f55d48"},"package":"cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80"} \ No newline at end of file
diff --git a/vendor/hyper/Cargo.lock b/vendor/hyper/Cargo.lock
new file mode 100644
index 00000000..07db5a9d
--- /dev/null
+++ b/vendor/hyper/Cargo.lock
@@ -0,0 +1,738 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "addr2line"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "async-stream"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
+[[package]]
+name = "autocfg"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
+
+[[package]]
+name = "backtrace"
+version = "0.3.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
+dependencies = [
+ "addr2line",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+ "windows-targets",
+]
+
+[[package]]
+name = "bytes"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "env_logger"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
+dependencies = [
+ "humantime",
+ "is-terminal",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "form_urlencoded"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
+
+[[package]]
+name = "futures-sink"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
+
+[[package]]
+name = "futures-task"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
+
+[[package]]
+name = "futures-util"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "futures-task",
+ "pin-project-lite",
+ "pin-utils",
+]
+
+[[package]]
+name = "gimli"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
+
+[[package]]
+name = "h2"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http",
+ "indexmap",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3"
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
+
+[[package]]
+name = "hermit-abi"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
+
+[[package]]
+name = "http"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "httparse"
+version = "1.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "hyper"
+version = "1.6.0"
+dependencies = [
+ "bytes",
+ "form_urlencoded",
+ "futures-channel",
+ "futures-util",
+ "h2",
+ "http",
+ "http-body",
+ "http-body-util",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "pretty_env_logger",
+ "serde",
+ "serde_json",
+ "smallvec",
+ "spmc",
+ "tokio",
+ "tokio-test",
+ "tokio-util",
+ "tracing",
+ "want",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
+dependencies = [
+ "equivalent",
+ "hashbrown",
+]
+
+[[package]]
+name = "is-terminal"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
+dependencies = [
+ "hermit-abi 0.4.0",
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+[[package]]
+name = "libc"
+version = "0.2.164"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f"
+
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
+dependencies = [
+ "adler2",
+]
+
+[[package]]
+name = "mio"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec"
+dependencies = [
+ "hermit-abi 0.3.9",
+ "libc",
+ "wasi",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "object"
+version = "0.36.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
+
+[[package]]
+name = "percent-encoding"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "pretty_env_logger"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c"
+dependencies = [
+ "env_logger",
+ "log",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.89"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "regex"
+version = "1.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
+
+[[package]]
+name = "ryu"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+
+[[package]]
+name = "serde"
+version = "1.0.215"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.215"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.133"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
+
+[[package]]
+name = "socket2"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "spmc"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02a8428da277a8e3a15271d79943e80ccc2ef254e78813a166a08d65e4c3ece5"
+
+[[package]]
+name = "syn"
+version = "2.0.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "tokio"
+version = "1.41.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33"
+dependencies = [
+ "backtrace",
+ "bytes",
+ "libc",
+ "mio",
+ "pin-project-lite",
+ "socket2",
+ "tokio-macros",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "tokio-macros"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tokio-stream"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-test"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
+dependencies = [
+ "async-stream",
+ "bytes",
+ "futures-core",
+ "tokio",
+ "tokio-stream",
+]
+
+[[package]]
+name = "tokio-util"
+version = "0.7.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "futures-sink",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "tracing"
+version = "0.1.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
+dependencies = [
+ "pin-project-lite",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-core"
+version = "0.1.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
+dependencies = [
+ "once_cell",
+]
+
+[[package]]
+name = "try-lock"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
+
+[[package]]
+name = "want"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"
+dependencies = [
+ "try-lock",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/vendor/hyper/Cargo.toml b/vendor/hyper/Cargo.toml
new file mode 100644
index 00000000..7d0ea3a8
--- /dev/null
+++ b/vendor/hyper/Cargo.toml
@@ -0,0 +1,243 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.63"
+name = "hyper"
+version = "1.6.0"
+authors = ["Sean McArthur <sean@seanmonstar.com>"]
+build = false
+include = [
+ "Cargo.toml",
+ "LICENSE",
+ "src/**/*",
+]
+autolib = false
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
+description = "A protective and efficient HTTP library for all."
+homepage = "https://hyper.rs"
+documentation = "https://docs.rs/hyper"
+readme = "README.md"
+keywords = [
+ "http",
+ "hyper",
+ "hyperium",
+]
+categories = [
+ "network-programming",
+ "web-programming::http-client",
+ "web-programming::http-server",
+]
+license = "MIT"
+repository = "https://github.com/hyperium/hyper"
+
+[package.metadata.capi.header]
+generation = false
+subdirectory = false
+
+[[package.metadata.capi.install.include.asset]]
+from = "capi/include/hyper.h"
+
+[package.metadata.docs.rs]
+features = [
+ "ffi",
+ "full",
+ "tracing",
+]
+rustdoc-args = [
+ "--cfg",
+ "hyper_unstable_ffi",
+ "--cfg",
+ "hyper_unstable_tracing",
+]
+
+[package.metadata.playground]
+features = ["full"]
+
+[features]
+capi = []
+client = [
+ "dep:want",
+ "dep:pin-project-lite",
+ "dep:smallvec",
+]
+default = []
+ffi = [
+ "dep:http-body-util",
+ "futures-util?/alloc",
+]
+full = [
+ "client",
+ "http1",
+ "http2",
+ "server",
+]
+http1 = [
+ "dep:futures-channel",
+ "dep:futures-util",
+ "dep:httparse",
+ "dep:itoa",
+]
+http2 = [
+ "dep:futures-channel",
+ "dep:futures-util",
+ "dep:h2",
+]
+nightly = []
+server = [
+ "dep:httpdate",
+ "dep:pin-project-lite",
+ "dep:smallvec",
+]
+tracing = ["dep:tracing"]
+
+[lib]
+name = "hyper"
+path = "src/lib.rs"
+
+[dependencies.bytes]
+version = "1.2"
+
+[dependencies.futures-channel]
+version = "0.3"
+optional = true
+
+[dependencies.futures-util]
+version = "0.3"
+optional = true
+default-features = false
+
+[dependencies.h2]
+version = "0.4.2"
+optional = true
+
+[dependencies.http]
+version = "1"
+
+[dependencies.http-body]
+version = "1"
+
+[dependencies.http-body-util]
+version = "0.1"
+optional = true
+
+[dependencies.httparse]
+version = "1.9"
+optional = true
+
+[dependencies.httpdate]
+version = "1.0"
+optional = true
+
+[dependencies.itoa]
+version = "1"
+optional = true
+
+[dependencies.pin-project-lite]
+version = "0.2.4"
+optional = true
+
+[dependencies.smallvec]
+version = "1.12"
+features = [
+ "const_generics",
+ "const_new",
+]
+optional = true
+
+[dependencies.tokio]
+version = "1"
+features = ["sync"]
+
+[dependencies.tracing]
+version = "0.1"
+features = ["std"]
+optional = true
+default-features = false
+
+[dependencies.want]
+version = "0.3"
+optional = true
+
+[dev-dependencies.form_urlencoded]
+version = "1"
+
+[dev-dependencies.futures-channel]
+version = "0.3"
+features = ["sink"]
+
+[dev-dependencies.futures-util]
+version = "0.3"
+features = [
+ "alloc",
+ "sink",
+]
+default-features = false
+
+[dev-dependencies.http-body-util]
+version = "0.1"
+
+[dev-dependencies.pin-project-lite]
+version = "0.2.4"
+
+[dev-dependencies.pretty_env_logger]
+version = "0.5"
+
+[dev-dependencies.serde]
+version = "1.0"
+features = ["derive"]
+
+[dev-dependencies.serde_json]
+version = "1.0"
+
+[dev-dependencies.spmc]
+version = "0.3"
+
+[dev-dependencies.tokio]
+version = "1"
+features = [
+ "fs",
+ "macros",
+ "net",
+ "io-std",
+ "io-util",
+ "rt",
+ "rt-multi-thread",
+ "sync",
+ "time",
+ "test-util",
+]
+
+[dev-dependencies.tokio-test]
+version = "0.4"
+
+[dev-dependencies.tokio-util]
+version = "0.7.10"
+
+[lints.rust.unexpected_cfgs]
+level = "warn"
+priority = 0
+check-cfg = [
+ "cfg(hyper_unstable_tracing)",
+ "cfg(hyper_unstable_ffi)",
+]
+
+[profile.bench]
+codegen-units = 1
+incremental = false
+
+[profile.release]
+codegen-units = 1
+incremental = false
diff --git a/vendor/hyper/LICENSE b/vendor/hyper/LICENSE
new file mode 100644
index 00000000..da8b6ec1
--- /dev/null
+++ b/vendor/hyper/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014-2025 Sean McArthur
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/hyper/src/body/incoming.rs b/vendor/hyper/src/body/incoming.rs
new file mode 100644
index 00000000..dcfb71d5
--- /dev/null
+++ b/vendor/hyper/src/body/incoming.rs
@@ -0,0 +1,617 @@
+use std::fmt;
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use bytes::Bytes;
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+use futures_channel::{mpsc, oneshot};
+#[cfg(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+))]
+use futures_util::ready;
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+use futures_util::{stream::FusedStream, Stream}; // for mpsc::Receiver
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+use http::HeaderMap;
+use http_body::{Body, Frame, SizeHint};
+
+#[cfg(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+))]
+use super::DecodedLength;
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+use crate::common::watch;
+#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
+use crate::proto::h2::ping;
+
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+type BodySender = mpsc::Sender<Result<Bytes, crate::Error>>;
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+type TrailersSender = oneshot::Sender<HeaderMap>;
+
+/// A stream of `Bytes`, used when receiving bodies from the network.
+///
+/// Note that users should not instantiate this struct directly. When working with the hyper client,
+/// `Incoming` is returned to you in responses. Similarly, when operating with the hyper server,
+/// it is provided within requests.
+///
+/// # Examples
+///
+/// ```rust,ignore
+/// async fn echo(
+/// req: Request<hyper::body::Incoming>,
+/// ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
+/// //Here, you can process `Incoming`
+/// }
+/// ```
+#[must_use = "streams do nothing unless polled"]
+pub struct Incoming {
+ kind: Kind,
+}
+
+enum Kind {
+ Empty,
+ #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+ Chan {
+ content_length: DecodedLength,
+ want_tx: watch::Sender,
+ data_rx: mpsc::Receiver<Result<Bytes, crate::Error>>,
+ trailers_rx: oneshot::Receiver<HeaderMap>,
+ },
+ #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
+ H2 {
+ content_length: DecodedLength,
+ data_done: bool,
+ ping: ping::Recorder,
+ recv: h2::RecvStream,
+ },
+ #[cfg(feature = "ffi")]
+ Ffi(crate::ffi::UserBody),
+}
+
+/// A sender half created through [`Body::channel()`].
+///
+/// Useful when wanting to stream chunks from another thread.
+///
+/// ## Body Closing
+///
+/// Note that the request body will always be closed normally when the sender is dropped (meaning
+/// that the empty terminating chunk will be sent to the remote). If you desire to close the
+/// connection with an incomplete response (e.g. in the case of an error during asynchronous
+/// processing), call the [`Sender::abort()`] method to abort the body in an abnormal fashion.
+///
+/// [`Body::channel()`]: struct.Body.html#method.channel
+/// [`Sender::abort()`]: struct.Sender.html#method.abort
+#[must_use = "Sender does nothing unless sent on"]
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+pub(crate) struct Sender {
+ want_rx: watch::Receiver,
+ data_tx: BodySender,
+ trailers_tx: Option<TrailersSender>,
+}
+
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+const WANT_PENDING: usize = 1;
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+const WANT_READY: usize = 2;
+
+impl Incoming {
+ /// Create a `Body` stream with an associated sender half.
+ ///
+ /// Useful when wanting to stream chunks from another thread.
+ #[inline]
+ #[cfg(test)]
+ pub(crate) fn channel() -> (Sender, Incoming) {
+ Self::new_channel(DecodedLength::CHUNKED, /*wanter =*/ false)
+ }
+
+ #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+ pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Incoming) {
+ let (data_tx, data_rx) = mpsc::channel(0);
+ let (trailers_tx, trailers_rx) = oneshot::channel();
+
+        // If wanter is true, `Sender::poll_ready()` won't become ready
+ // until the `Body` has been polled for data once.
+ let want = if wanter { WANT_PENDING } else { WANT_READY };
+
+ let (want_tx, want_rx) = watch::channel(want);
+
+ let tx = Sender {
+ want_rx,
+ data_tx,
+ trailers_tx: Some(trailers_tx),
+ };
+ let rx = Incoming::new(Kind::Chan {
+ content_length,
+ want_tx,
+ data_rx,
+ trailers_rx,
+ });
+
+ (tx, rx)
+ }
+
+ fn new(kind: Kind) -> Incoming {
+ Incoming { kind }
+ }
+
+ #[allow(dead_code)]
+ pub(crate) fn empty() -> Incoming {
+ Incoming::new(Kind::Empty)
+ }
+
+ #[cfg(feature = "ffi")]
+ pub(crate) fn ffi() -> Incoming {
+ Incoming::new(Kind::Ffi(crate::ffi::UserBody::new()))
+ }
+
+ #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
+ pub(crate) fn h2(
+ recv: h2::RecvStream,
+ mut content_length: DecodedLength,
+ ping: ping::Recorder,
+ ) -> Self {
+ // If the stream is already EOS, then the "unknown length" is clearly
+ // actually ZERO.
+ if !content_length.is_exact() && recv.is_end_stream() {
+ content_length = DecodedLength::ZERO;
+ }
+
+ Incoming::new(Kind::H2 {
+ data_done: false,
+ ping,
+ content_length,
+ recv,
+ })
+ }
+
+ #[cfg(feature = "ffi")]
+ pub(crate) fn as_ffi_mut(&mut self) -> &mut crate::ffi::UserBody {
+ match self.kind {
+ Kind::Ffi(ref mut body) => return body,
+ _ => {
+ self.kind = Kind::Ffi(crate::ffi::UserBody::new());
+ }
+ }
+
+ match self.kind {
+ Kind::Ffi(ref mut body) => body,
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl Body for Incoming {
+ type Data = Bytes;
+ type Error = crate::Error;
+
+ fn poll_frame(
+ #[cfg_attr(
+ not(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+ )),
+ allow(unused_mut)
+ )]
+ mut self: Pin<&mut Self>,
+ #[cfg_attr(
+ not(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+ )),
+ allow(unused_variables)
+ )]
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
+ match self.kind {
+ Kind::Empty => Poll::Ready(None),
+ #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+ Kind::Chan {
+ content_length: ref mut len,
+ ref mut data_rx,
+ ref mut want_tx,
+ ref mut trailers_rx,
+ } => {
+ want_tx.send(WANT_READY);
+
+ if !data_rx.is_terminated() {
+ if let Some(chunk) = ready!(Pin::new(data_rx).poll_next(cx)?) {
+ len.sub_if(chunk.len() as u64);
+ return Poll::Ready(Some(Ok(Frame::data(chunk))));
+ }
+ }
+
+ // check trailers after data is terminated
+ match ready!(Pin::new(trailers_rx).poll(cx)) {
+ Ok(t) => Poll::Ready(Some(Ok(Frame::trailers(t)))),
+ Err(_) => Poll::Ready(None),
+ }
+ }
+ #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
+ Kind::H2 {
+ ref mut data_done,
+ ref ping,
+ recv: ref mut h2,
+ content_length: ref mut len,
+ } => {
+ if !*data_done {
+ match ready!(h2.poll_data(cx)) {
+ Some(Ok(bytes)) => {
+ let _ = h2.flow_control().release_capacity(bytes.len());
+ len.sub_if(bytes.len() as u64);
+ ping.record_data(bytes.len());
+ return Poll::Ready(Some(Ok(Frame::data(bytes))));
+ }
+ Some(Err(e)) => {
+ return match e.reason() {
+ // These reasons should cause the body reading to stop, but not fail it.
+ // The same logic as for `Read for H2Upgraded` is applied here.
+ Some(h2::Reason::NO_ERROR) | Some(h2::Reason::CANCEL) => {
+ Poll::Ready(None)
+ }
+ _ => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+ };
+ }
+ None => {
+ *data_done = true;
+ // fall through to trailers
+ }
+ }
+ }
+
+ // after data, check trailers
+ match ready!(h2.poll_trailers(cx)) {
+ Ok(t) => {
+ ping.record_non_data();
+ Poll::Ready(Ok(t.map(Frame::trailers)).transpose())
+ }
+ Err(e) => Poll::Ready(Some(Err(crate::Error::new_h2(e)))),
+ }
+ }
+
+ #[cfg(feature = "ffi")]
+ Kind::Ffi(ref mut body) => body.poll_data(cx),
+ }
+ }
+
+ fn is_end_stream(&self) -> bool {
+ match self.kind {
+ Kind::Empty => true,
+ #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+ Kind::Chan { content_length, .. } => content_length == DecodedLength::ZERO,
+ #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
+ Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(),
+ #[cfg(feature = "ffi")]
+ Kind::Ffi(..) => false,
+ }
+ }
+
+ fn size_hint(&self) -> SizeHint {
+ #[cfg(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+ ))]
+ fn opt_len(decoded_length: DecodedLength) -> SizeHint {
+ if let Some(content_length) = decoded_length.into_opt() {
+ SizeHint::with_exact(content_length)
+ } else {
+ SizeHint::default()
+ }
+ }
+
+ match self.kind {
+ Kind::Empty => SizeHint::with_exact(0),
+ #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+ Kind::Chan { content_length, .. } => opt_len(content_length),
+ #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
+ Kind::H2 { content_length, .. } => opt_len(content_length),
+ #[cfg(feature = "ffi")]
+ Kind::Ffi(..) => SizeHint::default(),
+ }
+ }
+}
+
+impl fmt::Debug for Incoming {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ #[cfg(any(
+ all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+ ),
+ feature = "ffi"
+ ))]
+ #[derive(Debug)]
+ struct Streaming;
+ #[derive(Debug)]
+ struct Empty;
+
+ let mut builder = f.debug_tuple("Body");
+ match self.kind {
+ Kind::Empty => builder.field(&Empty),
+ #[cfg(any(
+ all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+ ),
+ feature = "ffi"
+ ))]
+ _ => builder.field(&Streaming),
+ };
+
+ builder.finish()
+ }
+}
+
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+impl Sender {
+ /// Check to see if this `Sender` can send more data.
+ pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ // Check if the receiver end has tried polling for the body yet
+ ready!(self.poll_want(cx)?);
+ self.data_tx
+ .poll_ready(cx)
+ .map_err(|_| crate::Error::new_closed())
+ }
+
+ fn poll_want(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ match self.want_rx.load(cx) {
+ WANT_READY => Poll::Ready(Ok(())),
+ WANT_PENDING => Poll::Pending,
+ watch::CLOSED => Poll::Ready(Err(crate::Error::new_closed())),
+ unexpected => unreachable!("want_rx value: {}", unexpected),
+ }
+ }
+
+ #[cfg(test)]
+ async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
+ /// Send data on data channel when it is ready.
+ #[cfg(test)]
+ #[allow(unused)]
+ pub(crate) async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> {
+ self.ready().await?;
+ self.data_tx
+ .try_send(Ok(chunk))
+ .map_err(|_| crate::Error::new_closed())
+ }
+
+ /// Send trailers on trailers channel.
+ #[allow(unused)]
+ pub(crate) async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> {
+ let tx = match self.trailers_tx.take() {
+ Some(tx) => tx,
+ None => return Err(crate::Error::new_closed()),
+ };
+ tx.send(trailers).map_err(|_| crate::Error::new_closed())
+ }
+
+ /// Try to send data on this channel.
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err(Bytes)` if the channel could not (currently) accept
+ /// another `Bytes`.
+ ///
+ /// # Note
+ ///
+ /// This is mostly useful for when trying to send from some other thread
+ /// that doesn't have an async context. If in an async context, prefer
+ /// `send_data()` instead.
+ #[cfg(feature = "http1")]
+ pub(crate) fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> {
+ self.data_tx
+ .try_send(Ok(chunk))
+ .map_err(|err| err.into_inner().expect("just sent Ok"))
+ }
+
+ #[cfg(feature = "http1")]
+ pub(crate) fn try_send_trailers(
+ &mut self,
+ trailers: HeaderMap,
+ ) -> Result<(), Option<HeaderMap>> {
+ let tx = match self.trailers_tx.take() {
+ Some(tx) => tx,
+ None => return Err(None),
+ };
+
+ tx.send(trailers).map_err(Some)
+ }
+
+ #[cfg(test)]
+ pub(crate) fn abort(mut self) {
+ self.send_error(crate::Error::new_body_write_aborted());
+ }
+
+ pub(crate) fn send_error(&mut self, err: crate::Error) {
+ let _ = self
+ .data_tx
+ // clone so the send works even if buffer is full
+ .clone()
+ .try_send(Err(err));
+ }
+}
+
+#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
+impl fmt::Debug for Sender {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ #[derive(Debug)]
+ struct Open;
+ #[derive(Debug)]
+ struct Closed;
+
+ let mut builder = f.debug_tuple("Sender");
+ match self.want_rx.peek() {
+ watch::CLOSED => builder.field(&Closed),
+ _ => builder.field(&Open),
+ };
+
+ builder.finish()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::mem;
+ use std::task::Poll;
+
+ use super::{Body, DecodedLength, Incoming, Sender, SizeHint};
+ use http_body_util::BodyExt;
+
+ #[test]
+ fn test_size_of() {
+ // These are mostly to help catch *accidentally* increasing
+ // the size by too much.
+
+ let body_size = mem::size_of::<Incoming>();
+ let body_expected_size = mem::size_of::<u64>() * 5;
+ assert!(
+ body_size <= body_expected_size,
+ "Body size = {} <= {}",
+ body_size,
+ body_expected_size,
+ );
+
+ //assert_eq!(body_size, mem::size_of::<Option<Incoming>>(), "Option<Incoming>");
+
+ assert_eq!(
+ mem::size_of::<Sender>(),
+ mem::size_of::<usize>() * 5,
+ "Sender"
+ );
+
+ assert_eq!(
+ mem::size_of::<Sender>(),
+ mem::size_of::<Option<Sender>>(),
+ "Option<Sender>"
+ );
+ }
+
+ #[test]
+ fn size_hint() {
+ fn eq(body: Incoming, b: SizeHint, note: &str) {
+ let a = body.size_hint();
+ assert_eq!(a.lower(), b.lower(), "lower for {:?}", note);
+ assert_eq!(a.upper(), b.upper(), "upper for {:?}", note);
+ }
+
+ eq(Incoming::empty(), SizeHint::with_exact(0), "empty");
+
+ eq(Incoming::channel().1, SizeHint::new(), "channel");
+
+ eq(
+ Incoming::new_channel(DecodedLength::new(4), /*wanter =*/ false).1,
+ SizeHint::with_exact(4),
+ "channel with length",
+ );
+ }
+
+ #[cfg(not(miri))]
+ #[tokio::test]
+ async fn channel_abort() {
+ let (tx, mut rx) = Incoming::channel();
+
+ tx.abort();
+
+ let err = rx.frame().await.unwrap().unwrap_err();
+ assert!(err.is_body_write_aborted(), "{:?}", err);
+ }
+
+ #[cfg(all(not(miri), feature = "http1"))]
+ #[tokio::test]
+ async fn channel_abort_when_buffer_is_full() {
+ let (mut tx, mut rx) = Incoming::channel();
+
+ tx.try_send_data("chunk 1".into()).expect("send 1");
+ // buffer is full, but can still send abort
+ tx.abort();
+
+ let chunk1 = rx
+ .frame()
+ .await
+ .expect("item 1")
+ .expect("chunk 1")
+ .into_data()
+ .unwrap();
+ assert_eq!(chunk1, "chunk 1");
+
+ let err = rx.frame().await.unwrap().unwrap_err();
+ assert!(err.is_body_write_aborted(), "{:?}", err);
+ }
+
+ #[cfg(feature = "http1")]
+ #[test]
+ fn channel_buffers_one() {
+ let (mut tx, _rx) = Incoming::channel();
+
+ tx.try_send_data("chunk 1".into()).expect("send 1");
+
+ // buffer is now full
+ let chunk2 = tx.try_send_data("chunk 2".into()).expect_err("send 2");
+ assert_eq!(chunk2, "chunk 2");
+ }
+
+ #[cfg(not(miri))]
+ #[tokio::test]
+ async fn channel_empty() {
+ let (_, mut rx) = Incoming::channel();
+
+ assert!(rx.frame().await.is_none());
+ }
+
+ #[test]
+ fn channel_ready() {
+ let (mut tx, _rx) = Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ false);
+
+ let mut tx_ready = tokio_test::task::spawn(tx.ready());
+
+ assert!(tx_ready.poll().is_ready(), "tx is ready immediately");
+ }
+
+ #[test]
+ fn channel_wanter() {
+ let (mut tx, mut rx) =
+ Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
+
+ let mut tx_ready = tokio_test::task::spawn(tx.ready());
+ let mut rx_data = tokio_test::task::spawn(rx.frame());
+
+ assert!(
+ tx_ready.poll().is_pending(),
+ "tx isn't ready before rx has been polled"
+ );
+
+ assert!(rx_data.poll().is_pending(), "poll rx.data");
+ assert!(tx_ready.is_woken(), "rx poll wakes tx");
+
+ assert!(
+ tx_ready.poll().is_ready(),
+ "tx is ready after rx has been polled"
+ );
+ }
+
+ #[test]
+ fn channel_notices_closure() {
+ let (mut tx, rx) = Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
+
+ let mut tx_ready = tokio_test::task::spawn(tx.ready());
+
+ assert!(
+ tx_ready.poll().is_pending(),
+ "tx isn't ready before rx has been polled"
+ );
+
+ drop(rx);
+ assert!(tx_ready.is_woken(), "dropping rx wakes tx");
+
+ match tx_ready.poll() {
+ Poll::Ready(Err(ref e)) if e.is_closed() => (),
+ unexpected => panic!("tx poll ready unexpected: {:?}", unexpected),
+ }
+ }
+}
diff --git a/vendor/hyper/src/body/length.rs b/vendor/hyper/src/body/length.rs
new file mode 100644
index 00000000..e5eab744
--- /dev/null
+++ b/vendor/hyper/src/body/length.rs
@@ -0,0 +1,129 @@
+use std::fmt;
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub(crate) struct DecodedLength(u64);
+
+#[cfg(any(feature = "http1", feature = "http2"))]
+impl From<Option<u64>> for DecodedLength {
+ fn from(len: Option<u64>) -> Self {
+ len.and_then(|len| {
+            // If the length is u64::MAX, oh well, just report chunked.
+ Self::checked_new(len).ok()
+ })
+ .unwrap_or(DecodedLength::CHUNKED)
+ }
+}
+
+#[cfg(any(feature = "http1", feature = "http2", test))]
+const MAX_LEN: u64 = u64::MAX - 2;
+
+impl DecodedLength {
+ pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(u64::MAX);
+ pub(crate) const CHUNKED: DecodedLength = DecodedLength(u64::MAX - 1);
+ pub(crate) const ZERO: DecodedLength = DecodedLength(0);
+
+ #[cfg(test)]
+ pub(crate) fn new(len: u64) -> Self {
+ debug_assert!(len <= MAX_LEN);
+ DecodedLength(len)
+ }
+
+ /// Takes the length as a content-length without other checks.
+ ///
+ /// Should only be called if previously confirmed this isn't
+ /// CLOSE_DELIMITED or CHUNKED.
+ #[inline]
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ pub(crate) fn danger_len(self) -> u64 {
+ debug_assert!(self.0 < Self::CHUNKED.0);
+ self.0
+ }
+
+ /// Converts to an Option<u64> representing a Known or Unknown length.
+ #[cfg(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+ ))]
+ pub(crate) fn into_opt(self) -> Option<u64> {
+ match self {
+ DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None,
+ DecodedLength(known) => Some(known),
+ }
+ }
+
+ /// Checks the `u64` is within the maximum allowed for content-length.
+ #[cfg(any(feature = "http1", feature = "http2"))]
+ pub(crate) fn checked_new(len: u64) -> Result<Self, crate::error::Parse> {
+ if len <= MAX_LEN {
+ Ok(DecodedLength(len))
+ } else {
+ warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN);
+ Err(crate::error::Parse::TooLarge)
+ }
+ }
+
+ #[cfg(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+ ))]
+ pub(crate) fn sub_if(&mut self, amt: u64) {
+ match *self {
+ DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (),
+ DecodedLength(ref mut known) => {
+ *known -= amt;
+ }
+ }
+ }
+
+ /// Returns whether this represents an exact length.
+ ///
+ /// This includes 0, which of course is an exact known length.
+ ///
+ /// It would return false if "chunked" or otherwise size-unknown.
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
+ pub(crate) fn is_exact(&self) -> bool {
+ self.0 <= MAX_LEN
+ }
+}
+
+impl fmt::Debug for DecodedLength {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"),
+ DecodedLength::CHUNKED => f.write_str("CHUNKED"),
+ DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(),
+ }
+ }
+}
+
+impl fmt::Display for DecodedLength {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"),
+ DecodedLength::CHUNKED => f.write_str("chunked encoding"),
+ DecodedLength::ZERO => f.write_str("empty"),
+ DecodedLength(n) => write!(f, "content-length ({} bytes)", n),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn sub_if_known() {
+ let mut len = DecodedLength::new(30);
+ len.sub_if(20);
+
+ assert_eq!(len.0, 10);
+ }
+
+ #[test]
+ fn sub_if_chunked() {
+ let mut len = DecodedLength::CHUNKED;
+ len.sub_if(20);
+
+ assert_eq!(len, DecodedLength::CHUNKED);
+ }
+}
diff --git a/vendor/hyper/src/body/mod.rs b/vendor/hyper/src/body/mod.rs
new file mode 100644
index 00000000..7b71d98b
--- /dev/null
+++ b/vendor/hyper/src/body/mod.rs
@@ -0,0 +1,50 @@
+//! Streaming bodies for Requests and Responses
+//!
+//! For both [Clients](crate::client) and [Servers](crate::server), requests and
+//! responses use streaming bodies, instead of complete buffering. This
+//! allows applications to not use memory they don't need, and allows exerting
+//! back-pressure on connections by only reading when asked.
+//!
+//! There are two pieces to this in hyper:
+//!
+//! - **The [`Body`] trait** describes all possible bodies.
+//! hyper allows any body type that implements `Body`, allowing
+//! applications to have fine-grained control over their streaming.
+//! - **The [`Incoming`] concrete type**, which is an implementation
+//! of `Body`, and returned by hyper as a "receive stream" (so, for server
+//! requests and client responses).
+//!
+//! There are additional implementations available in [`http-body-util`][],
+//! such as a `Full` or `Empty` body.
+//!
+//! [`http-body-util`]: https://docs.rs/http-body-util
+
+pub use bytes::{Buf, Bytes};
+pub use http_body::Body;
+pub use http_body::Frame;
+pub use http_body::SizeHint;
+
+pub use self::incoming::Incoming;
+
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+pub(crate) use self::incoming::Sender;
+#[cfg(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+))]
+pub(crate) use self::length::DecodedLength;
+
+mod incoming;
+#[cfg(all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server")
+))]
+mod length;
+
+fn _assert_send_sync() {
+ fn _assert_send<T: Send>() {}
+ fn _assert_sync<T: Sync>() {}
+
+ _assert_send::<Incoming>();
+ _assert_sync::<Incoming>();
+}
diff --git a/vendor/hyper/src/cfg.rs b/vendor/hyper/src/cfg.rs
new file mode 100644
index 00000000..71a5351d
--- /dev/null
+++ b/vendor/hyper/src/cfg.rs
@@ -0,0 +1,44 @@
+macro_rules! cfg_feature {
+ (
+ #![$meta:meta]
+ $($item:item)*
+ ) => {
+ $(
+ #[cfg($meta)]
+ #[cfg_attr(docsrs, doc(cfg($meta)))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_proto {
+ ($($item:item)*) => {
+ cfg_feature! {
+ #![all(
+ any(feature = "http1", feature = "http2"),
+ any(feature = "client", feature = "server"),
+ )]
+ $($item)*
+ }
+ }
+}
+
+cfg_proto! {
+ macro_rules! cfg_client {
+ ($($item:item)*) => {
+ cfg_feature! {
+ #![feature = "client"]
+ $($item)*
+ }
+ }
+ }
+
+ macro_rules! cfg_server {
+ ($($item:item)*) => {
+ cfg_feature! {
+ #![feature = "server"]
+ $($item)*
+ }
+ }
+ }
+}
diff --git a/vendor/hyper/src/client/conn/http1.rs b/vendor/hyper/src/client/conn/http1.rs
new file mode 100644
index 00000000..ecfe6eb8
--- /dev/null
+++ b/vendor/hyper/src/client/conn/http1.rs
@@ -0,0 +1,611 @@
+//! HTTP/1 client connections
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use crate::rt::{Read, Write};
+use bytes::Bytes;
+use futures_util::ready;
+use http::{Request, Response};
+use httparse::ParserConfig;
+
+use super::super::dispatch::{self, TrySendError};
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::proto;
+
+// Shorthand for the HTTP/1 client dispatcher: drives `ClientTransaction`s
+// for body type `B` over IO object `T`.
+type Dispatcher<T, B> =
+ proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;
+
+/// The sender side of an established connection.
+pub struct SendRequest<B> {
+ // Channel half used to hand requests to the connection task and receive
+ // the eventual `Response` (or error) back.
+ dispatch: dispatch::Sender<Request<B>, Response<IncomingBody>>,
+}
+
+/// Deconstructed parts of a `Connection`.
+///
+/// This allows taking apart a `Connection` at a later time, in order to
+/// reclaim the IO object, and additional related pieces.
+///
+/// Obtained via [`Connection::into_parts`] or [`Connection::without_shutdown`].
+#[derive(Debug)]
+#[non_exhaustive]
+pub struct Parts<T> {
+ /// The original IO object used in the handshake.
+ pub io: T,
+ /// A buffer of bytes that have been read but not processed as HTTP.
+ ///
+ /// For instance, if the `Connection` is used for an HTTP upgrade request,
+ /// it is possible the server sent back the first bytes of the new protocol
+ /// along with the response upgrade.
+ ///
+ /// You will want to check for any existing bytes if you plan to continue
+ /// communicating on the IO object.
+ pub read_buf: Bytes,
+}
+
+/// A future that processes all HTTP state for the IO object.
+///
+/// In most cases, this should just be spawned into an executor, so that it
+/// can process incoming and outgoing messages, notice hangups, and the like.
+///
+/// Instances of this type are typically created via the [`handshake`] function
+#[must_use = "futures do nothing unless polled"]
+pub struct Connection<T, B>
+where
+ T: Read + Write,
+ B: Body + 'static,
+{
+ // HTTP/1 dispatcher driving request/response state over the IO object `T`.
+ inner: Dispatcher<T, B>,
+}
+
+impl<T, B> Connection<T, B>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Return the inner IO object, and additional information.
+ ///
+ /// Only works for HTTP/1 connections. HTTP/2 connections will panic.
+ pub fn into_parts(self) -> Parts<T> {
+ let (io, read_buf, _) = self.inner.into_inner();
+ Parts { io, read_buf }
+ }
+
+ /// Poll the connection for completion, but without calling `shutdown`
+ /// on the underlying IO.
+ ///
+ /// This is useful to allow running a connection while doing an HTTP
+ /// upgrade. Once the upgrade is completed, the connection would be "done",
+ /// but it is not desired to actually shutdown the IO object. Instead you
+ /// would take it back using `into_parts`.
+ ///
+ /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
+ /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
+ /// to work with this function; or use the `without_shutdown` wrapper.
+ pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ self.inner.poll_without_shutdown(cx)
+ }
+
+ /// Prevent shutdown of the underlying IO object at the end of servicing the
+ /// request, then reclaim it with `into_parts`. This is a convenience wrapper
+ /// over `poll_without_shutdown`.
+ pub async fn without_shutdown(self) -> crate::Result<Parts<T>> {
+ // `Option` lets the poll closure move `self` out exactly once, after
+ // the connection has finished.
+ let mut conn = Some(self);
+ futures_util::future::poll_fn(move |cx| -> Poll<crate::Result<Parts<T>>> {
+ ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
+ Poll::Ready(Ok(conn.take().unwrap().into_parts()))
+ })
+ .await
+ }
+}
+
+/// A builder to configure an HTTP connection.
+///
+/// After setting options, the builder is used to create a handshake future.
+///
+/// **Note**: The default values of options are *not considered stable*. They
+/// are subject to change at any time.
+#[derive(Clone, Debug)]
+pub struct Builder {
+ // Each field mirrors the setter method of the same (un-prefixed) name
+ // below; see those methods for the semantics and defaults.
+ h09_responses: bool,
+ h1_parser_config: ParserConfig,
+ h1_writev: Option<bool>,
+ h1_title_case_headers: bool,
+ h1_preserve_header_case: bool,
+ h1_max_headers: Option<usize>,
+ #[cfg(feature = "ffi")]
+ h1_preserve_header_order: bool,
+ h1_read_buf_exact_size: Option<usize>,
+ h1_max_buf_size: Option<usize>,
+}
+
+/// Returns a handshake future over some IO.
+///
+/// This is a shortcut for `Builder::new().handshake(io)`.
+/// See [`client::conn`](crate::client::conn) for more.
+///
+/// Uses the default [`Builder`] options; use [`Builder::handshake`] directly
+/// if you need to configure them.
+pub async fn handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ Builder::new().handshake(io).await
+}
+
+// ===== impl SendRequest
+
+impl<B> SendRequest<B> {
+ /// Polls to determine whether this sender can be used yet for a request.
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ self.dispatch.poll_ready(cx)
+ }
+
+ /// Waits until the dispatcher is ready.
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
+ /// Checks if the connection is currently ready to send a request.
+ ///
+ /// # Note
+ ///
+ /// This is mostly a hint. Due to inherent latency of networks, it is
+ /// possible that even after checking this is ready, sending a request
+ /// may still fail because the connection was closed in the meantime.
+ pub fn is_ready(&self) -> bool {
+ self.dispatch.is_ready()
+ }
+
+ /// Checks if the connection side has been closed.
+ pub fn is_closed(&self) -> bool {
+ self.dispatch.is_closed()
+ }
+}
+
+impl<B> SendRequest<B>
+where
+ B: Body + 'static,
+{
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that if successful, yields the `Response`.
+ ///
+ /// `req` must have a `Host` header.
+ ///
+ /// # Uri
+ ///
+ /// The `Uri` of the request is serialized as-is.
+ ///
+ /// - Usually you want origin-form (`/path?query`).
+ /// - For sending to an HTTP proxy, you want to send in absolute-form
+ /// (`https://hyper.rs/guides`).
+ ///
+ /// This is however not enforced or validated and it is up to the user
+ /// of this method to ensure the `Uri` is correct for their intended purpose.
+ pub fn send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
+ let sent = self.dispatch.send(req);
+
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(resp)) => Ok(resp),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_canceled) => panic!("dispatch dropped without returning error"),
+ },
+ Err(_req) => {
+ debug!("connection was not ready");
+ Err(crate::Error::new_canceled().with("connection was not ready"))
+ }
+ }
+ }
+ }
+
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that if successful, yields the `Response`.
+ ///
+ /// # Error
+ ///
+ /// If there was an error before trying to serialize the request to the
+ /// connection, the message will be returned as part of this error.
+ pub fn try_send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = Result<Response<IncomingBody>, TrySendError<Request<B>>>> {
+ let sent = self.dispatch.try_send(req);
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(res)) => Ok(res),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_) => panic!("dispatch dropped without returning error"),
+ },
+ Err(req) => {
+ debug!("connection was not ready");
+ let error = crate::Error::new_canceled().with("connection was not ready");
+ Err(TrySendError {
+ error,
+ message: Some(req),
+ })
+ }
+ }
+ }
+ }
+}
+
+impl<B> fmt::Debug for SendRequest<B> {
+ // Only the type name is printed; the inner dispatch channel has no useful
+ // `Debug` representation.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SendRequest").finish()
+ }
+}
+
+// ===== impl Connection
+
+impl<T, B> Connection<T, B>
+where
+ T: Read + Write + Unpin + Send,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Enable this connection to support higher-level HTTP upgrades.
+ ///
+ /// Note that this requires the IO object to be `Send`, unlike the plain
+ /// [`Connection`] future.
+ ///
+ /// See [the `upgrade` module](crate::upgrade) for more.
+ pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<T, B> {
+ upgrades::UpgradeableConnection { inner: Some(self) }
+ }
+}
+
+impl<T, B> fmt::Debug for Connection<T, B>
+where
+ T: Read + Write + fmt::Debug,
+ B: Body + 'static,
+{
+ // Only the type name is printed; the dispatcher state is internal.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
+impl<T, B> Future for Connection<T, B>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ type Output = crate::Result<()>;
+
+ // Drives the dispatcher to completion; both a clean shutdown and a
+ // requested upgrade resolve this future.
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.inner).poll(cx))? {
+ proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
+ proto::Dispatched::Upgrade(pending) => {
+ // With no `Send` bound on `I`, we can't try to do
+ // upgrades here. In case a user was trying to use
+ // `upgrade` with this API, send a special
+ // error letting them know about that.
+ pending.manual();
+ Poll::Ready(Ok(()))
+ }
+ }
+ }
+}
+
+// ===== impl Builder
+
+impl Builder {
+ /// Creates a new connection builder.
+ #[inline]
+ pub fn new() -> Builder {
+ Builder {
+ h09_responses: false,
+ h1_writev: None,
+ h1_read_buf_exact_size: None,
+ h1_parser_config: Default::default(),
+ h1_title_case_headers: false,
+ h1_preserve_header_case: false,
+ h1_max_headers: None,
+ #[cfg(feature = "ffi")]
+ h1_preserve_header_order: false,
+ h1_max_buf_size: None,
+ }
+ }
+
+ /// Set whether HTTP/0.9 responses should be tolerated.
+ ///
+ /// Default is false.
+ pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h09_responses = enabled;
+ self
+ }
+
+ /// Set whether HTTP/1 connections will accept spaces between header names
+ /// and the colon that follow them in responses.
+ ///
+ /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
+ /// to say about it:
+ ///
+ /// > No whitespace is allowed between the header field-name and colon. In
+ /// > the past, differences in the handling of such whitespace have led to
+ /// > security vulnerabilities in request routing and response handling. A
+ /// > server MUST reject any received request message that contains
+ /// > whitespace between a header field-name and colon with a response code
+ /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
+ /// > response message before forwarding the message downstream.
+ ///
+ /// Default is false.
+ ///
+ /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ pub fn allow_spaces_after_header_name_in_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_parser_config
+ .allow_spaces_after_header_name_in_responses(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections will accept obsolete line folding for
+ /// header values.
+ ///
+ /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
+ /// parsing.
+ ///
+ /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
+ /// to say about it:
+ ///
+ /// > A server that receives an obs-fold in a request message that is not
+ /// > within a message/http container MUST either reject the message by
+ /// > sending a 400 (Bad Request), preferably with a representation
+ /// > explaining that obsolete line folding is unacceptable, or replace
+ /// > each received obs-fold with one or more SP octets prior to
+ /// > interpreting the field value or forwarding the message downstream.
+ ///
+ /// > A proxy or gateway that receives an obs-fold in a response message
+ /// > that is not within a message/http container MUST either discard the
+ /// > message and replace it with a 502 (Bad Gateway) response, preferably
+ /// > with a representation explaining that unacceptable line folding was
+ /// > received, or replace each received obs-fold with one or more SP
+ /// > octets prior to interpreting the field value or forwarding the
+ /// > message downstream.
+ ///
+ /// > A user agent that receives an obs-fold in a response message that is
+ /// > not within a message/http container MUST replace each received
+ /// > obs-fold with one or more SP octets prior to interpreting the field
+ /// > value.
+ ///
+ /// Default is false.
+ ///
+ /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ pub fn allow_obsolete_multiline_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_parser_config
+ .allow_obsolete_multiline_headers_in_responses(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections will silently ignore malformed header lines.
+ ///
+ /// If this is enabled and a header line does not start with a valid header
+ /// name, or does not include a colon at all, the line will be silently ignored
+ /// and no error will be reported.
+ ///
+ /// Default is false.
+ pub fn ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_parser_config
+ .ignore_invalid_headers_in_responses(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections should try to use vectored writes,
+ /// or always flatten into a single buffer.
+ ///
+ /// Note that setting this to false may mean more copies of body data,
+ /// but may also improve performance when an IO transport doesn't
+ /// support vectored writes well, such as most TLS implementations.
+ ///
+ /// Setting this to true will force hyper to use queued strategy
+ /// which may eliminate unnecessary cloning on some TLS backends
+ ///
+ /// Default is `auto`. In this mode hyper will try to guess which
+ /// mode to use
+ pub fn writev(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_writev = Some(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections will write header names as title case at
+ /// the socket level.
+ ///
+ /// Default is false.
+ pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_title_case_headers = enabled;
+ self
+ }
+
+ /// Set whether to support preserving original header cases.
+ ///
+ /// Currently, this will record the original cases received, and store them
+ /// in a private extension on the `Response`. It will also look for and use
+ /// such an extension in any provided `Request`.
+ ///
+ /// Since the relevant extension is still private, there is no way to
+ /// interact with the original cases. The only effect this can have now is
+ /// to forward the cases in a proxy-like fashion.
+ ///
+ /// Default is false.
+ pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_preserve_header_case = enabled;
+ self
+ }
+
+ /// Set the maximum number of headers.
+ ///
+ /// When a response is received, the parser will reserve a buffer to store headers for optimal
+ /// performance.
+ ///
+ /// If client receives more headers than the buffer size, the error "message header too large"
+ /// is returned.
+ ///
+ /// Note that headers is allocated on the stack by default, which has higher performance. After
+ /// setting this value, headers will be allocated in heap memory, that is, heap memory
+ /// allocation will occur for each response, and there will be a performance drop of about 5%.
+ ///
+ /// Default is 100.
+ pub fn max_headers(&mut self, val: usize) -> &mut Self {
+ self.h1_max_headers = Some(val);
+ self
+ }
+
+ /// Set whether to support preserving original header order.
+ ///
+ /// Currently, this will record the order in which headers are received, and store this
+ /// ordering in a private extension on the `Response`. It will also look for and use
+ /// such an extension in any provided `Request`.
+ ///
+ /// Default is false.
+ #[cfg(feature = "ffi")]
+ pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_preserve_header_order = enabled;
+ self
+ }
+
+ /// Sets the exact size of the read buffer to *always* use.
+ ///
+ /// Note that setting this option unsets the `max_buf_size` option.
+ ///
+ /// Default is an adaptive read buffer.
+ pub fn read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
+ self.h1_read_buf_exact_size = sz;
+ self.h1_max_buf_size = None;
+ self
+ }
+
+ /// Set the maximum buffer size for the connection.
+ ///
+ /// Default is ~400kb.
+ ///
+ /// Note that setting this option unsets the `read_buf_exact_size` option.
+ ///
+ /// # Panics
+ ///
+ /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
+ pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
+ assert!(
+ max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
+ "the max_buf_size cannot be smaller than the minimum that h1 specifies."
+ );
+
+ self.h1_max_buf_size = Some(max);
+ self.h1_read_buf_exact_size = None;
+ self
+ }
+
+ /// Constructs a connection with the configured options and IO.
+ /// See [`client::conn`](crate::client::conn) for more.
+ ///
+ /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
+ /// do nothing.
+ pub fn handshake<T, B>(
+ &self,
+ io: T,
+ ) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
+ where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ // Clone the options so the returned future is independent of `&self`.
+ let opts = self.clone();
+
+ async move {
+ trace!("client handshake HTTP/1");
+
+ let (tx, rx) = dispatch::channel();
+ let mut conn = proto::Conn::new(io);
+ conn.set_h1_parser_config(opts.h1_parser_config);
+ if let Some(writev) = opts.h1_writev {
+ if writev {
+ conn.set_write_strategy_queue();
+ } else {
+ conn.set_write_strategy_flatten();
+ }
+ }
+ if opts.h1_title_case_headers {
+ conn.set_title_case_headers();
+ }
+ if opts.h1_preserve_header_case {
+ conn.set_preserve_header_case();
+ }
+ if let Some(max_headers) = opts.h1_max_headers {
+ conn.set_http1_max_headers(max_headers);
+ }
+ #[cfg(feature = "ffi")]
+ if opts.h1_preserve_header_order {
+ conn.set_preserve_header_order();
+ }
+
+ if opts.h09_responses {
+ conn.set_h09_responses();
+ }
+
+ if let Some(sz) = opts.h1_read_buf_exact_size {
+ conn.set_read_buf_exact_size(sz);
+ }
+ if let Some(max) = opts.h1_max_buf_size {
+ conn.set_max_buf_size(max);
+ }
+ let cd = proto::h1::dispatch::Client::new(rx);
+ let proto = proto::h1::Dispatcher::new(cd, conn);
+
+ Ok((SendRequest { dispatch: tx }, Connection { inner: proto }))
+ }
+ }
+}
+
+mod upgrades {
+ use crate::upgrade::Upgraded;
+
+ use super::*;
+
+ // A future binding a connection with a Service with Upgrade support.
+ //
+ // This type is unnameable outside the crate.
+ #[must_use = "futures do nothing unless polled"]
+ #[allow(missing_debug_implementations)]
+ pub struct UpgradeableConnection<T, B>
+ where
+ T: Read + Write + Unpin + Send + 'static,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ // `Option` so `poll` can take the connection out when an upgrade fires.
+ pub(super) inner: Option<Connection<T, B>>,
+ }
+
+ impl<I, B> Future for UpgradeableConnection<I, B>
+ where
+ I: Read + Write + Unpin + Send + 'static,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.inner.as_mut().unwrap().inner).poll(cx)) {
+ Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),
+ Ok(proto::Dispatched::Upgrade(pending)) => {
+ // Take the connection apart so its IO (plus any already-read
+ // bytes) can be handed to the upgrade.
+ let Parts { io, read_buf } = self.inner.take().unwrap().into_parts();
+ pending.fulfill(Upgraded::new(io, read_buf));
+ Poll::Ready(Ok(()))
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+ }
+}
diff --git a/vendor/hyper/src/client/conn/http2.rs b/vendor/hyper/src/client/conn/http2.rs
new file mode 100644
index 00000000..3db28957
--- /dev/null
+++ b/vendor/hyper/src/client/conn/http2.rs
@@ -0,0 +1,718 @@
+//! HTTP/2 client connections
+
+use std::error::Error;
+use std::fmt;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use crate::rt::{Read, Write};
+use futures_util::ready;
+use http::{Request, Response};
+
+use super::super::dispatch::{self, TrySendError};
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::common::time::Time;
+use crate::proto;
+use crate::rt::bounds::Http2ClientConnExec;
+use crate::rt::Timer;
+
+/// The sender side of an established connection.
+pub struct SendRequest<B> {
+ // Unbounded channel half used to hand requests to the connection task.
+ dispatch: dispatch::UnboundedSender<Request<B>, Response<IncomingBody>>,
+}
+
+impl<B> Clone for SendRequest<B> {
+ // Cloning duplicates only the dispatch handle, so every clone sends
+ // requests over the same underlying connection.
+ fn clone(&self) -> SendRequest<B> {
+ SendRequest {
+ dispatch: self.dispatch.clone(),
+ }
+ }
+}
+
+/// A future that processes all HTTP state for the IO object.
+///
+/// In most cases, this should just be spawned into an executor, so that it
+/// can process incoming and outgoing messages, notice hangups, and the like.
+///
+/// Instances of this type are typically created via the [`handshake`] function
+#[must_use = "futures do nothing unless polled"]
+pub struct Connection<T, B, E>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ E: Http2ClientConnExec<B, T> + Unpin,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+{
+ // `PhantomData<T>` records the IO type parameter; the `ClientTask` holds
+ // the actual connection state.
+ inner: (PhantomData<T>, proto::h2::ClientTask<B, E, T>),
+}
+
+/// A builder to configure an HTTP connection.
+///
+/// After setting options, the builder is used to create a handshake future.
+///
+/// **Note**: The default values of options are *not considered stable*. They
+/// are subject to change at any time.
+#[derive(Clone, Debug)]
+pub struct Builder<Ex> {
+ // Executor used to spawn background HTTP/2 tasks.
+ pub(super) exec: Ex,
+ // Timer used for keep-alive and timeout handling.
+ pub(super) timer: Time,
+ h2_builder: proto::h2::client::Config,
+}
+
+/// Returns a handshake future over some IO.
+///
+/// This is a shortcut for `Builder::new(exec).handshake(io)`.
+/// See [`client::conn`](crate::client::conn) for more.
+///
+/// Uses the default [`Builder`] options; use [`Builder::handshake`] directly
+/// if you need to configure them.
+pub async fn handshake<E, T, B>(
+ exec: E,
+ io: T,
+) -> crate::Result<(SendRequest<B>, Connection<T, B, E>)>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ E: Http2ClientConnExec<B, T> + Unpin + Clone,
+{
+ Builder::new(exec).handshake(io).await
+}
+
+// ===== impl SendRequest
+
+impl<B> SendRequest<B> {
+ /// Polls to determine whether this sender can be used yet for a request.
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ // Unlike HTTP/1, there is no per-request readiness to wait on here:
+ // the only check is whether the connection has already closed.
+ if self.is_closed() {
+ Poll::Ready(Err(crate::Error::new_closed()))
+ } else {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ /// Waits until the dispatcher is ready.
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
+ /// Checks if the connection is currently ready to send a request.
+ ///
+ /// # Note
+ ///
+ /// This is mostly a hint. Due to inherent latency of networks, it is
+ /// possible that even after checking this is ready, sending a request
+ /// may still fail because the connection was closed in the meantime.
+ pub fn is_ready(&self) -> bool {
+ self.dispatch.is_ready()
+ }
+
+ /// Checks if the connection side has been closed.
+ pub fn is_closed(&self) -> bool {
+ self.dispatch.is_closed()
+ }
+}
+
+impl<B> SendRequest<B>
+where
+ B: Body + 'static,
+{
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that if successful, yields the `Response`.
+ ///
+ /// `req` must have a `Host` header.
+ ///
+ /// Absolute-form `Uri`s are not required. If received, they will be serialized
+ /// as-is.
+ pub fn send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
+ let sent = self.dispatch.send(req);
+
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(resp)) => Ok(resp),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_canceled) => panic!("dispatch dropped without returning error"),
+ },
+ Err(_req) => {
+ debug!("connection was not ready");
+
+ Err(crate::Error::new_canceled().with("connection was not ready"))
+ }
+ }
+ }
+ }
+
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that if successful, yields the `Response`.
+ ///
+ /// # Error
+ ///
+ /// If there was an error before trying to serialize the request to the
+ /// connection, the message will be returned as part of this error.
+ pub fn try_send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = Result<Response<IncomingBody>, TrySendError<Request<B>>>> {
+ let sent = self.dispatch.try_send(req);
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(res)) => Ok(res),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_) => panic!("dispatch dropped without returning error"),
+ },
+ Err(req) => {
+ debug!("connection was not ready");
+ let error = crate::Error::new_canceled().with("connection was not ready");
+ Err(TrySendError {
+ error,
+ message: Some(req),
+ })
+ }
+ }
+ }
+ }
+}
+
+impl<B> fmt::Debug for SendRequest<B> {
+ // Only the type name is printed; the inner dispatch channel has no useful
+ // `Debug` representation.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SendRequest").finish()
+ }
+}
+
+// ===== impl Connection
+
+impl<T, B, E> Connection<T, B, E>
+where
+ T: Read + Write + Unpin + 'static,
+ B: Body + Unpin + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ E: Http2ClientConnExec<B, T> + Unpin,
+{
+ /// Returns whether the [extended CONNECT protocol][1] is enabled or not.
+ ///
+ /// This setting is configured by the server peer by sending the
+ /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
+ /// This method returns the currently acknowledged value received from the
+ /// remote.
+ ///
+ /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+ /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
+ pub fn is_extended_connect_protocol_enabled(&self) -> bool {
+ // Delegates to the proto-level client task.
+ self.inner.1.is_extended_connect_protocol_enabled()
+ }
+}
+
+impl<T, B, E> fmt::Debug for Connection<T, B, E>
+where
+ T: Read + Write + fmt::Debug + 'static + Unpin,
+ B: Body + 'static,
+ E: Http2ClientConnExec<B, T> + Unpin,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+{
+ // Only the type name is printed; the client task state is internal.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
+impl<T, B, E> Future for Connection<T, B, E>
+where
+ T: Read + Write + Unpin + 'static,
+ B: Body + 'static + Unpin,
+ B::Data: Send,
+ // NOTE(review): this `E: Unpin` bound is duplicated by the combined
+ // `Http2ClientConnExec<B, T> + Unpin` bound below.
+ E: Unpin,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ E: Http2ClientConnExec<B, T> + Unpin,
+{
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.inner.1).poll(cx))? {
+ proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
+ // Upgrades are an HTTP/1-only concept; the enum variant only
+ // exists when the "http1" feature is enabled, hence the cfg.
+ #[cfg(feature = "http1")]
+ proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"),
+ }
+ }
+}
+
+// ===== impl Builder
+
+impl<Ex> Builder<Ex>
+where
+ Ex: Clone,
+{
+ /// Creates a new connection builder.
+ #[inline]
+ pub fn new(exec: Ex) -> Builder<Ex> {
+ Builder {
+ exec,
+ timer: Time::Empty,
+ h2_builder: Default::default(),
+ }
+ }
+
+ /// Provide a timer to execute background HTTP2 tasks.
+ pub fn timer<M>(&mut self, timer: M) -> &mut Builder<Ex>
+ where
+ M: Timer + Send + Sync + 'static,
+ {
+ self.timer = Time::Timer(Arc::new(timer));
+ self
+ }
+
+ /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
+ /// stream-level flow control.
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ ///
+ /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE
+ pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ if let Some(sz) = sz.into() {
+ self.h2_builder.adaptive_window = false;
+ self.h2_builder.initial_stream_window_size = sz;
+ }
+ self
+ }
+
+ /// Sets the max connection-level flow control for HTTP2
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ pub fn initial_connection_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ if let Some(sz) = sz.into() {
+ self.h2_builder.adaptive_window = false;
+ self.h2_builder.initial_conn_window_size = sz;
+ }
+ self
+ }
+
+ /// Sets the initial maximum of locally initiated (send) streams.
+ ///
+ /// This value will be overwritten by the value included in the initial
+ /// SETTINGS frame received from the peer as part of a [connection preface].
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ ///
+ /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface
+ pub fn initial_max_send_streams(&mut self, initial: impl Into<Option<usize>>) -> &mut Self {
+ if let Some(initial) = initial.into() {
+ self.h2_builder.initial_max_send_streams = initial;
+ }
+ self
+ }
+
+ /// Sets whether to use an adaptive flow control.
+ ///
+ /// Enabling this will override the limits set in
+ /// `initial_stream_window_size` and
+ /// `initial_connection_window_size`.
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
+ use proto::h2::SPEC_WINDOW_SIZE;
+
+ self.h2_builder.adaptive_window = enabled;
+ if enabled {
+ self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
+ self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
+ }
+ self
+ }
+
+ /// Sets the maximum frame size to use for HTTP2.
+ ///
+ /// Default is currently 16KB, but can change.
+ pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ self.h2_builder.max_frame_size = sz.into();
+ self
+ }
+
+ /// Sets the max size of received header frames.
+ ///
+ /// Default is currently 16KB, but can change.
+ pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
+ self.h2_builder.max_header_list_size = max;
+ self
+ }
+
+ /// Sets the header table size.
+ ///
+ /// This setting informs the peer of the maximum size of the header compression
+ /// table used to encode header blocks, in octets. The encoder may select any value
+ /// equal to or less than the header table size specified by the sender.
+ ///
+ /// The default value of crate `h2` is 4,096.
+ pub fn header_table_size(&mut self, size: impl Into<Option<u32>>) -> &mut Self {
+ self.h2_builder.header_table_size = size.into();
+ self
+ }
+
+ /// Sets the maximum number of concurrent streams.
+ ///
+ /// The maximum concurrent streams setting only controls the maximum number
+ /// of streams that can be initiated by the remote peer. In other words,
+ /// when this setting is set to 100, this does not limit the number of
+ /// concurrent streams that can be created by the caller.
+ ///
+ /// It is recommended that this value be no smaller than 100, so as to not
+ /// unnecessarily limit parallelism. However, any value is legal, including
+ /// 0. If `max` is set to 0, then the remote will not be permitted to
+ /// initiate streams.
+ ///
+ /// Note that streams in the reserved state, i.e., push promises that have
+ /// been reserved but the stream has not started, do not count against this
+ /// setting.
+ ///
+ /// Also note that if the remote *does* exceed the value set here, it is not
+ /// a protocol level error. Instead, the `h2` library will immediately reset
+ /// the stream.
+ ///
+ /// See [Section 5.1.2] in the HTTP/2 spec for more details.
+ ///
+ /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
+ pub fn max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
+ self.h2_builder.max_concurrent_streams = max.into();
+ self
+ }
+
+ /// Sets an interval at which HTTP2 Ping frames should be sent to keep a
+ /// connection alive.
+ ///
+ /// Pass `None` to disable HTTP2 keep-alive.
+ ///
+ /// Default is currently disabled.
+ pub fn keep_alive_interval(&mut self, interval: impl Into<Option<Duration>>) -> &mut Self {
+ // `None` disables keep-alive pings entirely; applied at handshake().
+ self.h2_builder.keep_alive_interval = interval.into();
+ self
+ }
+
+ /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
+ ///
+ /// If the ping is not acknowledged within the timeout, the connection will
+ /// be closed. Does nothing if `keep_alive_interval` is disabled.
+ ///
+ /// Default is 20 seconds.
+ pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
+ // Stored unconditionally; per the doc above it has no effect while
+ // keep-alive pings are disabled.
+ self.h2_builder.keep_alive_timeout = timeout;
+ self
+ }
+
+ /// Sets whether HTTP2 keep-alive should apply while the connection is idle.
+ ///
+ /// If disabled, keep-alive pings are only sent while there are open
+ /// request/responses streams. If enabled, pings are also sent when no
+ /// streams are active. Does nothing if `keep_alive_interval` is
+ /// disabled.
+ ///
+ /// Default is `false`.
+ pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
+ // Applied at handshake(); no effect unless a keep-alive interval is set.
+ self.h2_builder.keep_alive_while_idle = enabled;
+ self
+ }
+
+ /// Sets the maximum number of HTTP2 concurrent locally reset streams.
+ ///
+ /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more
+ /// details.
+ ///
+ /// The default value is determined by the `h2` crate.
+ ///
+ /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
+ pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
+ // Wrapped in `Some` because the unset state (`None`) means "use the h2 default".
+ self.h2_builder.max_concurrent_reset_streams = Some(max);
+ self
+ }
+
+ /// Set the maximum write buffer size for each HTTP/2 stream.
+ ///
+ /// Default is currently 1MB, but may change.
+ ///
+ /// # Panics
+ ///
+ /// The value must be no larger than `u32::MAX`.
+ pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
+ // Reject values that cannot be represented as u32 (see the Panics doc above).
+ assert!(max <= u32::MAX as usize);
+ self.h2_builder.max_send_buffer_size = max;
+ self
+ }
+
+ /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent.
+ ///
+ /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2).
+ /// As of v0.4.0, it is 20.
+ ///
+ /// See <https://github.com/hyperium/hyper/issues/2877> for more information.
+ pub fn max_pending_accept_reset_streams(&mut self, max: impl Into<Option<usize>>) -> &mut Self {
+ // `None` leaves the `h2` crate's default in effect; applied at handshake().
+ self.h2_builder.max_pending_accept_reset_streams = max.into();
+ self
+ }
+
+ /// Constructs a connection with the configured options and IO.
+ /// See [`client::conn`](crate::client::conn) for more.
+ ///
+ /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
+ /// do nothing.
+ pub fn handshake<T, B>(
+ &self,
+ io: T,
+ ) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B, Ex>)>>
+ where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ Ex: Http2ClientConnExec<B, T> + Unpin,
+ {
+ // Clone the builder config so the returned future owns it (no borrow of
+ // `self`) and the builder remains reusable for further connections.
+ let opts = self.clone();
+
+ async move {
+ trace!("client handshake HTTP/2");
+
+ // Channel carrying (request, callback) pairs from SendRequest to the
+ // connection task driven by `Connection`.
+ let (tx, rx) = dispatch::channel();
+ let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec, opts.timer)
+ .await?;
+ Ok((
+ SendRequest {
+ // HTTP/2 multiplexes streams, so the sender side is unbounded.
+ dispatch: tx.unbound(),
+ },
+ Connection {
+ inner: (PhantomData, h2),
+ },
+ ))
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ // NOTE: every test in this module is `#[ignore]`d; they exist purely as
+ // compile-time checks that `handshake` accepts executors with different
+ // Send/Sync properties, for both `Send` and `!Send` futures.
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_sync_executor_of_non_send_futures() {
+ #[derive(Clone)]
+ struct LocalTokioExecutor;
+
+ impl<F> crate::rt::Executor<F> for LocalTokioExecutor
+ where
+ F: std::future::Future + 'static, // not requiring `Send`
+ {
+ fn execute(&self, fut: F) {
+ // This will spawn into the currently running `LocalSet`.
+ tokio::task::spawn_local(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) {
+ let (_sender, conn) = crate::client::conn::http2::handshake::<
+ _,
+ _,
+ http_body_util::Empty<bytes::Bytes>,
+ >(LocalTokioExecutor, io)
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn not_send_not_sync_executor_of_not_send_futures() {
+ #[derive(Clone)]
+ struct LocalTokioExecutor {
+ _x: std::marker::PhantomData<std::rc::Rc<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for LocalTokioExecutor
+ where
+ F: std::future::Future + 'static, // not requiring `Send`
+ {
+ fn execute(&self, fut: F) {
+ // This will spawn into the currently running `LocalSet`.
+ tokio::task::spawn_local(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ LocalTokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_not_sync_executor_of_not_send_futures() {
+ #[derive(Clone)]
+ struct LocalTokioExecutor {
+ _x: std::marker::PhantomData<std::cell::Cell<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for LocalTokioExecutor
+ where
+ F: std::future::Future + 'static, // not requiring `Send`
+ {
+ fn execute(&self, fut: F) {
+ // This will spawn into the currently running `LocalSet`.
+ tokio::task::spawn_local(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ LocalTokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_sync_executor_of_send_futures() {
+ #[derive(Clone)]
+ struct TokioExecutor;
+
+ impl<F> crate::rt::Executor<F> for TokioExecutor
+ where
+ F: std::future::Future + 'static + Send,
+ F::Output: Send + 'static,
+ {
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) {
+ let (_sender, conn) = crate::client::conn::http2::handshake::<
+ _,
+ _,
+ http_body_util::Empty<bytes::Bytes>,
+ >(TokioExecutor, io)
+ .await
+ .unwrap();
+
+ tokio::task::spawn(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn not_send_not_sync_executor_of_send_futures() {
+ #[derive(Clone)]
+ struct TokioExecutor {
+ // !Send, !Sync
+ _x: std::marker::PhantomData<std::rc::Rc<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for TokioExecutor
+ where
+ F: std::future::Future + 'static + Send,
+ F::Output: Send + 'static,
+ {
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ TokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ // can't use spawn here because when executor is !Send
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_not_sync_executor_of_send_futures() {
+ #[derive(Clone)]
+ struct TokioExecutor {
+ // !Sync
+ _x: std::marker::PhantomData<std::cell::Cell<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for TokioExecutor
+ where
+ F: std::future::Future + 'static + Send,
+ F::Output: Send + 'static,
+ {
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ TokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ // can't use spawn here because when executor is !Send
+ conn.await.unwrap();
+ });
+ }
+ }
+}
diff --git a/vendor/hyper/src/client/conn/mod.rs b/vendor/hyper/src/client/conn/mod.rs
new file mode 100644
index 00000000..f982ae6d
--- /dev/null
+++ b/vendor/hyper/src/client/conn/mod.rs
@@ -0,0 +1,22 @@
+//! Lower-level client connection API.
+//!
+//! The types in this module are to provide a lower-level API based around a
+//! single connection. Connecting to a host, pooling connections, and the like
+//! are not handled at this level. This module provides the building blocks to
+//! customize those things externally.
+//!
+//! If you are looking for a convenient HTTP client, then you may wish to
+//! consider [reqwest](https://github.com/seanmonstar/reqwest) for a high level
+//! client or [`hyper-util`'s client](https://docs.rs/hyper-util/latest/hyper_util/client/index.html)
+//! if you want to keep it more low level / basic.
+//!
+//! ## Example
+//!
+//! See the [client guide](https://hyper.rs/guides/1/client/basic/).
+
+#[cfg(feature = "http1")]
+pub mod http1;
+#[cfg(feature = "http2")]
+pub mod http2;
+
+pub use super::dispatch::TrySendError;
diff --git a/vendor/hyper/src/client/dispatch.rs b/vendor/hyper/src/client/dispatch.rs
new file mode 100644
index 00000000..4ae41c50
--- /dev/null
+++ b/vendor/hyper/src/client/dispatch.rs
@@ -0,0 +1,510 @@
+use std::task::{Context, Poll};
+#[cfg(feature = "http2")]
+use std::{future::Future, pin::Pin};
+
+#[cfg(feature = "http2")]
+use http::{Request, Response};
+#[cfg(feature = "http2")]
+use http_body::Body;
+#[cfg(feature = "http2")]
+use pin_project_lite::pin_project;
+use tokio::sync::{mpsc, oneshot};
+
+#[cfg(feature = "http2")]
+use crate::{body::Incoming, proto::h2::client::ResponseFutMap};
+
+// Resolves with the response, or with the error plus (possibly) the original
+// message so the caller can retry on another connection.
+pub(crate) type RetryPromise<T, U> = oneshot::Receiver<Result<U, TrySendError<T>>>;
+// Resolves with the response or a plain error; the message is not recoverable.
+pub(crate) type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
+
+/// An error when calling `try_send_request`.
+///
+/// There is a possibility of an error occurring on a connection in-between the
+/// time that a request is queued and when it is actually written to the IO
+/// transport. If that happens, it is safe to return the request back to the
+/// caller, as it was never fully sent.
+#[derive(Debug)]
+pub struct TrySendError<T> {
+ // The underlying connection error.
+ pub(crate) error: crate::Error,
+ // The original request, present only when it was never written to the transport.
+ pub(crate) message: Option<T>,
+}
+
+// Creates the paired Sender/Receiver used to dispatch requests to a connection
+// task. The `want` Giver/Taker pair provides the readiness signaling that makes
+// the otherwise-unbounded mpsc channel behave as bounded.
+pub(crate) fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {
+ let (tx, rx) = mpsc::unbounded_channel();
+ let (giver, taker) = want::new();
+ let tx = Sender {
+ #[cfg(feature = "http1")]
+ buffered_once: false,
+ giver,
+ inner: tx,
+ };
+ let rx = Receiver { inner: rx, taker };
+ (tx, rx)
+}
+
+/// A bounded sender of requests and callbacks for when responses are ready.
+///
+/// While the inner sender is unbounded, the Giver is used to determine
+/// if the Receiver is ready for another request.
+pub(crate) struct Sender<T, U> {
+ /// One message is always allowed, even if the Receiver hasn't asked
+ /// for it yet. This boolean keeps track of whether we've sent one
+ /// without notice.
+ #[cfg(feature = "http1")]
+ buffered_once: bool,
+ /// The Giver helps watch that the Receiver side has been polled
+ /// when the queue is empty. This helps us know when a request and
+ /// response have been fully processed, and a connection is ready
+ /// for more.
+ giver: want::Giver,
+ /// Actually bounded by the Giver, plus `buffered_once`.
+ inner: mpsc::UnboundedSender<Envelope<T, U>>,
+}
+
+/// An unbounded version.
+///
+/// Cannot poll the Giver, but can still use it to determine if the Receiver
+/// has been dropped. However, this version can be cloned.
+#[cfg(feature = "http2")]
+pub(crate) struct UnboundedSender<T, U> {
+ /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked.
+ giver: want::SharedGiver,
+ /// Requests flow to the Receiver through this unbounded queue.
+ inner: mpsc::UnboundedSender<Envelope<T, U>>,
+}
+
+impl<T, U> Sender<T, U> {
+ // Readiness here means the Receiver has polled its queue empty and signaled
+ // "want" via the Giver; an error means the Receiver is gone.
+ #[cfg(feature = "http1")]
+ pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ self.giver
+ .poll_want(cx)
+ .map_err(|_| crate::Error::new_closed())
+ }
+
+ #[cfg(feature = "http1")]
+ pub(crate) fn is_ready(&self) -> bool {
+ self.giver.is_wanting()
+ }
+
+ #[cfg(feature = "http1")]
+ pub(crate) fn is_closed(&self) -> bool {
+ self.giver.is_canceled()
+ }
+
+ #[cfg(feature = "http1")]
+ fn can_send(&mut self) -> bool {
+ if self.giver.give() || !self.buffered_once {
+ // If the receiver is ready *now*, then of course we can send.
+ //
+ // If the receiver isn't ready yet, but we don't have anything
+ // in the channel yet, then allow one message.
+ self.buffered_once = true;
+ true
+ } else {
+ false
+ }
+ }
+
+ // On success returns a promise that may carry the request back for retry;
+ // on failure (receiver gone or not ready) the request is returned as Err.
+ #[cfg(feature = "http1")]
+ pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
+ if !self.can_send() {
+ return Err(val);
+ }
+ let (tx, rx) = oneshot::channel();
+ self.inner
+ .send(Envelope(Some((val, Callback::Retry(Some(tx))))))
+ .map(move |_| rx)
+ // Recover the original request out of the un-sent Envelope.
+ .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
+ }
+
+ // Like `try_send`, but the returned promise cannot hand the request back.
+ #[cfg(feature = "http1")]
+ pub(crate) fn send(&mut self, val: T) -> Result<Promise<U>, T> {
+ if !self.can_send() {
+ return Err(val);
+ }
+ let (tx, rx) = oneshot::channel();
+ self.inner
+ .send(Envelope(Some((val, Callback::NoRetry(Some(tx))))))
+ .map(move |_| rx)
+ // Recover the original request out of the un-sent Envelope.
+ .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
+ }
+
+ // Converts into the cloneable, unbounded variant used for HTTP/2 multiplexing.
+ #[cfg(feature = "http2")]
+ pub(crate) fn unbound(self) -> UnboundedSender<T, U> {
+ UnboundedSender {
+ giver: self.giver.shared(),
+ inner: self.inner,
+ }
+ }
+}
+
+#[cfg(feature = "http2")]
+impl<T, U> UnboundedSender<T, U> {
+ // Unlike `Sender::is_ready`, this cannot observe "want"; the only signal
+ // available through the shared giver is whether the Receiver is gone.
+ pub(crate) fn is_ready(&self) -> bool {
+ !self.giver.is_canceled()
+ }
+
+ pub(crate) fn is_closed(&self) -> bool {
+ self.giver.is_canceled()
+ }
+
+ // No readiness gate here: sends always go through unless the Receiver dropped.
+ pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
+ let (tx, rx) = oneshot::channel();
+ self.inner
+ .send(Envelope(Some((val, Callback::Retry(Some(tx))))))
+ .map(move |_| rx)
+ // Recover the original request out of the un-sent Envelope.
+ .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
+ }
+
+ pub(crate) fn send(&mut self, val: T) -> Result<Promise<U>, T> {
+ let (tx, rx) = oneshot::channel();
+ self.inner
+ .send(Envelope(Some((val, Callback::NoRetry(Some(tx))))))
+ .map(move |_| rx)
+ // Recover the original request out of the un-sent Envelope.
+ .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
+ }
+}
+
+#[cfg(feature = "http2")]
+impl<T, U> Clone for UnboundedSender<T, U> {
+ fn clone(&self) -> Self {
+ UnboundedSender {
+ giver: self.giver.clone(),
+ inner: self.inner.clone(),
+ }
+ }
+}
+
+// Connection-task side of the dispatch channel: yields (request, callback)
+// pairs and signals readiness back to the Sender via the `want::Taker`.
+pub(crate) struct Receiver<T, U> {
+ inner: mpsc::UnboundedReceiver<Envelope<T, U>>,
+ taker: want::Taker,
+}
+
+impl<T, U> Receiver<T, U> {
+ pub(crate) fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<(T, Callback<T, U>)>> {
+ match self.inner.poll_recv(cx) {
+ Poll::Ready(item) => {
+ Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped")))
+ }
+ Poll::Pending => {
+ // Queue is empty: tell the Sender side we "want" the next request.
+ self.taker.want();
+ Poll::Pending
+ }
+ }
+ }
+
+ #[cfg(feature = "http1")]
+ pub(crate) fn close(&mut self) {
+ self.taker.cancel();
+ self.inner.close();
+ }
+
+ // Non-blocking receive: returns an item only if one is already queued.
+ #[cfg(feature = "http1")]
+ pub(crate) fn try_recv(&mut self) -> Option<(T, Callback<T, U>)> {
+ use futures_util::FutureExt;
+ match self.inner.recv().now_or_never() {
+ Some(Some(mut env)) => env.0.take(),
+ _ => None,
+ }
+ }
+}
+
+impl<T, U> Drop for Receiver<T, U> {
+ fn drop(&mut self) {
+ // Notify the giver about the closure first, before dropping
+ // the mpsc::Receiver.
+ self.taker.cancel();
+ }
+}
+
+// Wrapper for a queued (request, callback) pair. The Option lets the Receiver
+// take the contents out; if the Envelope is dropped still full (channel closed
+// before delivery), Drop reports cancellation and returns the request.
+struct Envelope<T, U>(Option<(T, Callback<T, U>)>);
+
+impl<T, U> Drop for Envelope<T, U> {
+ fn drop(&mut self) {
+ if let Some((val, cb)) = self.0.take() {
+ // Undelivered: hand the request back to the caller as a canceled send.
+ cb.send(Err(TrySendError {
+ error: crate::Error::new_canceled().with("connection closed"),
+ message: Some(val),
+ }));
+ }
+ }
+}
+
+// Completion channel back to the request's caller. `Retry` can return the
+// original message on failure; `NoRetry` only reports the error. The inner
+// Options are taken by `send`/Drop, never left `None` while alive.
+pub(crate) enum Callback<T, U> {
+ #[allow(unused)]
+ Retry(Option<oneshot::Sender<Result<U, TrySendError<T>>>>),
+ NoRetry(Option<oneshot::Sender<Result<T, crate::Error>>>),
+}
+
+impl<T, U> Drop for Callback<T, U> {
+ fn drop(&mut self) {
+ // If dropped without `send` being called, report that the dispatch task
+ // went away so the waiting caller is not left pending forever.
+ match self {
+ Callback::Retry(tx) => {
+ if let Some(tx) = tx.take() {
+ let _ = tx.send(Err(TrySendError {
+ error: dispatch_gone(),
+ message: None,
+ }));
+ }
+ }
+ Callback::NoRetry(tx) => {
+ if let Some(tx) = tx.take() {
+ let _ = tx.send(Err(dispatch_gone()));
+ }
+ }
+ }
+ }
+}
+
+// Builds the error reported when a Callback is dropped unsent. Marked #[cold]
+// since it only runs on the failure path.
+#[cold]
+fn dispatch_gone() -> crate::Error {
+ // FIXME(nox): What errors do we want here?
+ crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() {
+ "user code panicked"
+ } else {
+ "runtime dropped the dispatch task"
+ })
+}
+
+impl<T, U> Callback<T, U> {
+ #[cfg(feature = "http2")]
+ pub(crate) fn is_canceled(&self) -> bool {
+ match *self {
+ Callback::Retry(Some(ref tx)) => tx.is_closed(),
+ Callback::NoRetry(Some(ref tx)) => tx.is_closed(),
+ // The inner Option is only emptied by `send` (which consumes self)
+ // or Drop, so a live Callback always holds Some.
+ _ => unreachable!(),
+ }
+ }
+
+ pub(crate) fn poll_canceled(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ match *self {
+ Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx),
+ Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx),
+ // See `is_canceled`: a live Callback always holds Some.
+ _ => unreachable!(),
+ }
+ }
+
+ // Delivers the result, consuming self so Drop won't also fire.
+ pub(crate) fn send(mut self, val: Result<U, TrySendError<T>>) {
+ match self {
+ Callback::Retry(ref mut tx) => {
+ let _ = tx.take().unwrap().send(val);
+ }
+ Callback::NoRetry(ref mut tx) => {
+ // NoRetry callers only get the error; the message is discarded.
+ let _ = tx.take().unwrap().send(val.map_err(|e| e.error));
+ }
+ }
+ }
+}
+
+impl<T> TrySendError<T> {
+ /// Take the message from this error.
+ ///
+ /// The message will not always have been recovered. If an error occurs
+ /// after the message has been serialized onto the connection, it will not
+ /// be available here.
+ pub fn take_message(&mut self) -> Option<T> {
+ self.message.take()
+ }
+
+ /// Consumes this to return the inner error.
+ pub fn into_error(self) -> crate::Error {
+ self.error
+ }
+}
+
+// Future that drives an h2 response future (`when`) and completes the caller's
+// Callback with its outcome, or finishes early if the caller cancels.
+#[cfg(feature = "http2")]
+pin_project! {
+ pub struct SendWhen<B>
+ where
+ B: Body,
+ B: 'static,
+ {
+ #[pin]
+ pub(crate) when: ResponseFutMap<B>,
+ // Taken out on each poll and put back if still pending; None after completion.
+ #[pin]
+ pub(crate) call_back: Option<Callback<Request<B>, Response<Incoming>>>,
+ }
+}
+
+#[cfg(feature = "http2")]
+impl<B> Future for SendWhen<B>
+where
+ B: Body + 'static,
+{
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+
+ let mut call_back = this.call_back.take().expect("polled after complete");
+
+ match Pin::new(&mut this.when).poll(cx) {
+ Poll::Ready(Ok(res)) => {
+ call_back.send(Ok(res));
+ Poll::Ready(())
+ }
+ Poll::Pending => {
+ // check if the callback is canceled
+ match call_back.poll_canceled(cx) {
+ Poll::Ready(v) => v,
+ Poll::Pending => {
+ // Move call_back back to struct before return
+ this.call_back.set(Some(call_back));
+ return Poll::Pending;
+ }
+ };
+ trace!("send_when canceled");
+ Poll::Ready(())
+ }
+ Poll::Ready(Err((error, message))) => {
+ // Forward the error plus the (possibly recovered) request for retry.
+ call_back.send(Err(TrySendError { error, message }));
+ Poll::Ready(())
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ // Tests for the want-gated dispatch channel: cancellation on Receiver drop,
+ // the one-message buffering rule, and unbounded-sender behavior. The #[bench]
+ // functions require the "nightly" feature.
+ #[cfg(feature = "nightly")]
+ extern crate test;
+
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+
+ use super::{channel, Callback, Receiver};
+
+ #[derive(Debug)]
+ struct Custom(#[allow(dead_code)] i32);
+
+ impl<T, U> Future for Receiver<T, U> {
+ type Output = Option<(T, Callback<T, U>)>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.poll_recv(cx)
+ }
+ }
+
+ /// Helper to check if the future is ready after polling once.
+ struct PollOnce<'a, F>(&'a mut F);
+
+ impl<F, T> Future for PollOnce<'_, F>
+ where
+ F: Future<Output = T> + Unpin,
+ {
+ type Output = Option<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match Pin::new(&mut self.0).poll(cx) {
+ Poll::Ready(_) => Poll::Ready(Some(())),
+ Poll::Pending => Poll::Ready(None),
+ }
+ }
+ }
+
+ #[cfg(not(miri))]
+ #[tokio::test]
+ async fn drop_receiver_sends_cancel_errors() {
+ let _ = pretty_env_logger::try_init();
+
+ let (mut tx, mut rx) = channel::<Custom, ()>();
+
+ // must poll once for try_send to succeed
+ assert!(PollOnce(&mut rx).await.is_none(), "rx empty");
+
+ let promise = tx.try_send(Custom(43)).unwrap();
+ drop(rx);
+
+ let fulfilled = promise.await;
+ let err = fulfilled
+ .expect("fulfilled")
+ .expect_err("promise should error");
+ match (err.error.is_canceled(), err.message) {
+ (true, Some(_)) => (),
+ e => panic!("expected Error::Cancel(_), found {:?}", e),
+ }
+ }
+
+ #[cfg(not(miri))]
+ #[tokio::test]
+ async fn sender_checks_for_want_on_send() {
+ let (mut tx, mut rx) = channel::<Custom, ()>();
+
+ // one is allowed to buffer, second is rejected
+ let _ = tx.try_send(Custom(1)).expect("1 buffered");
+ tx.try_send(Custom(2)).expect_err("2 not ready");
+
+ assert!(PollOnce(&mut rx).await.is_some(), "rx once");
+
+ // Even though 1 has been popped, only 1 could be buffered for the
+ // lifetime of the channel.
+ tx.try_send(Custom(2)).expect_err("2 still not ready");
+
+ assert!(PollOnce(&mut rx).await.is_none(), "rx empty");
+
+ let _ = tx.try_send(Custom(2)).expect("2 ready");
+ }
+
+ #[cfg(feature = "http2")]
+ #[test]
+ fn unbounded_sender_doesnt_bound_on_want() {
+ let (tx, rx) = channel::<Custom, ()>();
+ let mut tx = tx.unbound();
+
+ let _ = tx.try_send(Custom(1)).unwrap();
+ let _ = tx.try_send(Custom(2)).unwrap();
+ let _ = tx.try_send(Custom(3)).unwrap();
+
+ drop(rx);
+
+ let _ = tx.try_send(Custom(4)).unwrap_err();
+ }
+
+ #[cfg(feature = "nightly")]
+ #[bench]
+ fn giver_queue_throughput(b: &mut test::Bencher) {
+ use crate::{body::Incoming, Request, Response};
+
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+ let (mut tx, mut rx) = channel::<Request<Incoming>, Response<Incoming>>();
+
+ b.iter(move || {
+ let _ = tx.send(Request::new(Incoming::empty())).unwrap();
+ rt.block_on(async {
+ loop {
+ let poll_once = PollOnce(&mut rx);
+ let opt = poll_once.await;
+ if opt.is_none() {
+ break;
+ }
+ }
+ });
+ })
+ }
+
+ #[cfg(feature = "nightly")]
+ #[bench]
+ fn giver_queue_not_ready(b: &mut test::Bencher) {
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+ let (_tx, mut rx) = channel::<i32, ()>();
+ b.iter(move || {
+ rt.block_on(async {
+ let poll_once = PollOnce(&mut rx);
+ assert!(poll_once.await.is_none());
+ });
+ })
+ }
+
+ #[cfg(feature = "nightly")]
+ #[bench]
+ fn giver_queue_cancel(b: &mut test::Bencher) {
+ let (_tx, mut rx) = channel::<i32, ()>();
+
+ b.iter(move || {
+ rx.taker.cancel();
+ })
+ }
+}
diff --git a/vendor/hyper/src/client/mod.rs b/vendor/hyper/src/client/mod.rs
new file mode 100644
index 00000000..86e38973
--- /dev/null
+++ b/vendor/hyper/src/client/mod.rs
@@ -0,0 +1,22 @@
+//! HTTP Client
+//!
+//! hyper provides HTTP over a single connection. See the [`conn`] module.
+//!
+//! ## Examples
+//!
+//! * [`client`] - A simple CLI http client that requests the url passed in parameters and outputs the response content and details to the stdout, reading content chunk-by-chunk.
+//!
+//! * [`client_json`] - A simple program that GETs some json, reads the body asynchronously, parses it with serde and outputs the result.
+//!
+//! [`client`]: https://github.com/hyperium/hyper/blob/master/examples/client.rs
+//! [`client_json`]: https://github.com/hyperium/hyper/blob/master/examples/client_json.rs
+
+#[cfg(test)]
+mod tests;
+
+cfg_feature! {
+ #![any(feature = "http1", feature = "http2")]
+
+ pub mod conn;
+ pub(super) mod dispatch;
+}
diff --git a/vendor/hyper/src/client/tests.rs b/vendor/hyper/src/client/tests.rs
new file mode 100644
index 00000000..144349e5
--- /dev/null
+++ b/vendor/hyper/src/client/tests.rs
@@ -0,0 +1,261 @@
+/*
+// FIXME: re-implement tests with `async/await`
+#[test]
+fn retryable_request() {
+ let _ = pretty_env_logger::try_init();
+
+ let mut rt = Runtime::new().expect("new rt");
+ let mut connector = MockConnector::new();
+
+ let sock1 = connector.mock("http://mock.local");
+ let sock2 = connector.mock("http://mock.local");
+
+ let client = Client::builder()
+ .build::<_, crate::Body>(connector);
+
+ client.pool.no_timer();
+
+ {
+
+ let req = Request::builder()
+ .uri("http://mock.local/a")
+ .body(Default::default())
+ .unwrap();
+ let res1 = client.request(req);
+ let srv1 = poll_fn(|| {
+ try_ready!(sock1.read(&mut [0u8; 512]));
+ try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
+ rt.block_on(res1.join(srv1)).expect("res1");
+ }
+ drop(sock1);
+
+ let req = Request::builder()
+ .uri("http://mock.local/b")
+ .body(Default::default())
+ .unwrap();
+ let res2 = client.request(req)
+ .map(|res| {
+ assert_eq!(res.status().as_u16(), 222);
+ });
+ let srv2 = poll_fn(|| {
+ try_ready!(sock2.read(&mut [0u8; 512]));
+ try_ready!(sock2.write(b"HTTP/1.1 222 OK\r\nContent-Length: 0\r\n\r\n"));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e));
+
+ rt.block_on(res2.join(srv2)).expect("res2");
+}
+
+#[test]
+fn conn_reset_after_write() {
+ let _ = pretty_env_logger::try_init();
+
+ let mut rt = Runtime::new().expect("new rt");
+ let mut connector = MockConnector::new();
+
+ let sock1 = connector.mock("http://mock.local");
+
+ let client = Client::builder()
+ .build::<_, crate::Body>(connector);
+
+ client.pool.no_timer();
+
+ {
+ let req = Request::builder()
+ .uri("http://mock.local/a")
+ .body(Default::default())
+ .unwrap();
+ let res1 = client.request(req);
+ let srv1 = poll_fn(|| {
+ try_ready!(sock1.read(&mut [0u8; 512]));
+ try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
+ rt.block_on(res1.join(srv1)).expect("res1");
+ }
+
+ let req = Request::builder()
+ .uri("http://mock.local/a")
+ .body(Default::default())
+ .unwrap();
+ let res2 = client.request(req);
+ let mut sock1 = Some(sock1);
+ let srv2 = poll_fn(|| {
+ // We purposefully keep the socket open until the client
+ // has written the second request, and THEN disconnect.
+ //
+ // Not because we expect servers to be jerks, but to trigger
+ // state where we write on an assumedly good connection, and
+ // only reset the close AFTER we wrote bytes.
+ try_ready!(sock1.as_mut().unwrap().read(&mut [0u8; 512]));
+ sock1.take();
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e));
+ let err = rt.block_on(res2.join(srv2)).expect_err("res2");
+ assert!(err.is_incomplete_message(), "{:?}", err);
+}
+
+#[test]
+fn checkout_win_allows_connect_future_to_be_pooled() {
+ let _ = pretty_env_logger::try_init();
+
+ let mut rt = Runtime::new().expect("new rt");
+ let mut connector = MockConnector::new();
+
+
+ let (tx, rx) = oneshot::channel::<()>();
+ let sock1 = connector.mock("http://mock.local");
+ let sock2 = connector.mock_fut("http://mock.local", rx);
+
+ let client = Client::builder()
+ .build::<_, crate::Body>(connector);
+
+ client.pool.no_timer();
+
+ let uri = "http://mock.local/a".parse::<crate::Uri>().expect("uri parse");
+
+ // First request just sets us up to have a connection able to be put
+ // back in the pool. *However*, it doesn't insert immediately. The
+ // body has 1 pending byte, and we will only drain in request 2, once
+ // the connect future has been started.
+ let mut body = {
+ let res1 = client.get(uri.clone())
+ .map(|res| res.into_body().concat2());
+ let srv1 = poll_fn(|| {
+ try_ready!(sock1.read(&mut [0u8; 512]));
+ // Chunked is used so as to force 2 body reads.
+ try_ready!(sock1.write(b"\
+ HTTP/1.1 200 OK\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ 1\r\nx\r\n\
+ 0\r\n\r\n\
+ "));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
+
+ rt.block_on(res1.join(srv1)).expect("res1").0
+ };
+
+
+ // The second request triggers the only mocked connect future, but then
+ // the drained body allows the first socket to go back to the pool,
+ // "winning" the checkout race.
+ {
+ let res2 = client.get(uri.clone());
+ let drain = poll_fn(move || {
+ body.poll()
+ });
+ let srv2 = poll_fn(|| {
+ try_ready!(sock1.read(&mut [0u8; 512]));
+ try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nx"));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e));
+
+ rt.block_on(res2.join(drain).join(srv2)).expect("res2");
+ }
+
+ // "Release" the mocked connect future, and let the runtime spin once so
+ // it's all setup...
+ {
+ let mut tx = Some(tx);
+ let client = &client;
+ let key = client.pool.h1_key("http://mock.local");
+ let mut tick_cnt = 0;
+ let fut = poll_fn(move || {
+ tx.take();
+
+ if client.pool.idle_count(&key) == 0 {
+ tick_cnt += 1;
+ assert!(tick_cnt < 10, "ticked too many times waiting for idle");
+ trace!("no idle yet; tick count: {}", tick_cnt);
+ ::futures::task::current().notify();
+ Ok(Async::NotReady)
+ } else {
+ Ok::<_, ()>(Async::Ready(()))
+ }
+ });
+ rt.block_on(fut).unwrap();
+ }
+
+ // Third request just tests out that the "loser" connection was pooled. If
+ // it isn't, this will panic since the MockConnector doesn't have any more
+ // mocks to give out.
+ {
+ let res3 = client.get(uri);
+ let srv3 = poll_fn(|| {
+ try_ready!(sock2.read(&mut [0u8; 512]));
+ try_ready!(sock2.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv3 poll_fn error: {}", e));
+
+ rt.block_on(res3.join(srv3)).expect("res3");
+ }
+}
+
+#[cfg(feature = "nightly")]
+#[bench]
+fn bench_http1_get_0b(b: &mut test::Bencher) {
+ let _ = pretty_env_logger::try_init();
+
+ let mut rt = Runtime::new().expect("new rt");
+ let mut connector = MockConnector::new();
+
+
+ let client = Client::builder()
+ .build::<_, crate::Body>(connector.clone());
+
+ client.pool.no_timer();
+
+ let uri = Uri::from_static("http://mock.local/a");
+
+ b.iter(move || {
+ let sock1 = connector.mock("http://mock.local");
+ let res1 = client
+ .get(uri.clone())
+ .and_then(|res| {
+ res.into_body().for_each(|_| Ok(()))
+ });
+ let srv1 = poll_fn(|| {
+ try_ready!(sock1.read(&mut [0u8; 512]));
+ try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
+ rt.block_on(res1.join(srv1)).expect("res1");
+ });
+}
+
+#[cfg(feature = "nightly")]
+#[bench]
+fn bench_http1_get_10b(b: &mut test::Bencher) {
+ let _ = pretty_env_logger::try_init();
+
+ let mut rt = Runtime::new().expect("new rt");
+ let mut connector = MockConnector::new();
+
+
+ let client = Client::builder()
+ .build::<_, crate::Body>(connector.clone());
+
+ client.pool.no_timer();
+
+ let uri = Uri::from_static("http://mock.local/a");
+
+ b.iter(move || {
+ let sock1 = connector.mock("http://mock.local");
+ let res1 = client
+ .get(uri.clone())
+ .and_then(|res| {
+ res.into_body().for_each(|_| Ok(()))
+ });
+ let srv1 = poll_fn(|| {
+ try_ready!(sock1.read(&mut [0u8; 512]));
+ try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n0123456789"));
+ Ok(Async::Ready(()))
+ }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
+ rt.block_on(res1.join(srv1)).expect("res1");
+ });
+}
+*/
diff --git a/vendor/hyper/src/common/buf.rs b/vendor/hyper/src/common/buf.rs
new file mode 100644
index 00000000..d0007155
--- /dev/null
+++ b/vendor/hyper/src/common/buf.rs
@@ -0,0 +1,150 @@
+use std::collections::VecDeque;
+use std::io::IoSlice;
+
+use bytes::{Buf, BufMut, Bytes, BytesMut};
+
+pub(crate) struct BufList<T> {
+ bufs: VecDeque<T>,
+}
+
+impl<T: Buf> BufList<T> {
+ pub(crate) fn new() -> BufList<T> {
+ BufList {
+ bufs: VecDeque::new(),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn push(&mut self, buf: T) {
+ debug_assert!(buf.has_remaining());
+ self.bufs.push_back(buf);
+ }
+
+ #[inline]
+ pub(crate) fn bufs_cnt(&self) -> usize {
+ self.bufs.len()
+ }
+}
+
+impl<T: Buf> Buf for BufList<T> {
+ #[inline]
+ fn remaining(&self) -> usize {
+ self.bufs.iter().map(|buf| buf.remaining()).sum()
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ self.bufs.front().map(Buf::chunk).unwrap_or_default()
+ }
+
+ #[inline]
+ fn advance(&mut self, mut cnt: usize) {
+ while cnt > 0 {
+ {
+ let front = &mut self.bufs[0];
+ let rem = front.remaining();
+ if rem > cnt {
+ front.advance(cnt);
+ return;
+ } else {
+ front.advance(rem);
+ cnt -= rem;
+ }
+ }
+ self.bufs.pop_front();
+ }
+ }
+
+ #[inline]
+ fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
+ if dst.is_empty() {
+ return 0;
+ }
+ let mut vecs = 0;
+ for buf in &self.bufs {
+ vecs += buf.chunks_vectored(&mut dst[vecs..]);
+ if vecs == dst.len() {
+ break;
+ }
+ }
+ vecs
+ }
+
+ #[inline]
+ fn copy_to_bytes(&mut self, len: usize) -> Bytes {
+ // Our inner buffer may have an optimized version of copy_to_bytes, and if the whole
+ // request can be fulfilled by the front buffer, we can take advantage.
+ match self.bufs.front_mut() {
+ Some(front) if front.remaining() == len => {
+ let b = front.copy_to_bytes(len);
+ self.bufs.pop_front();
+ b
+ }
+ Some(front) if front.remaining() > len => front.copy_to_bytes(len),
+ _ => {
+ assert!(len <= self.remaining(), "`len` greater than remaining");
+ let mut bm = BytesMut::with_capacity(len);
+ bm.put(self.take(len));
+ bm.freeze()
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::ptr;
+
+ use super::*;
+
+ fn hello_world_buf() -> BufList<Bytes> {
+ BufList {
+ bufs: vec![Bytes::from("Hello"), Bytes::from(" "), Bytes::from("World")].into(),
+ }
+ }
+
+ #[test]
+ fn to_bytes_shorter() {
+ let mut bufs = hello_world_buf();
+ let old_ptr = bufs.chunk().as_ptr();
+ let start = bufs.copy_to_bytes(4);
+ assert_eq!(start, "Hell");
+ assert!(ptr::eq(old_ptr, start.as_ptr()));
+ assert_eq!(bufs.chunk(), b"o");
+ assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr()));
+ assert_eq!(bufs.remaining(), 7);
+ }
+
+ #[test]
+ fn to_bytes_eq() {
+ let mut bufs = hello_world_buf();
+ let old_ptr = bufs.chunk().as_ptr();
+ let start = bufs.copy_to_bytes(5);
+ assert_eq!(start, "Hello");
+ assert!(ptr::eq(old_ptr, start.as_ptr()));
+ assert_eq!(bufs.chunk(), b" ");
+ assert_eq!(bufs.remaining(), 6);
+ }
+
+ #[test]
+ fn to_bytes_longer() {
+ let mut bufs = hello_world_buf();
+ let start = bufs.copy_to_bytes(7);
+ assert_eq!(start, "Hello W");
+ assert_eq!(bufs.remaining(), 4);
+ }
+
+ #[test]
+ fn one_long_buf_to_bytes() {
+ let mut buf = BufList::new();
+ buf.push(b"Hello World" as &[_]);
+ assert_eq!(buf.copy_to_bytes(5), "Hello");
+ assert_eq!(buf.chunk(), b" World");
+ }
+
+ #[test]
+ #[should_panic(expected = "`len` greater than remaining")]
+ fn buf_to_bytes_too_many() {
+ hello_world_buf().copy_to_bytes(42);
+ }
+}
diff --git a/vendor/hyper/src/common/date.rs b/vendor/hyper/src/common/date.rs
new file mode 100644
index 00000000..6eae6746
--- /dev/null
+++ b/vendor/hyper/src/common/date.rs
@@ -0,0 +1,138 @@
+use std::cell::RefCell;
+use std::fmt::{self, Write};
+use std::str;
+use std::time::{Duration, SystemTime};
+
+#[cfg(feature = "http2")]
+use http::header::HeaderValue;
+use httpdate::HttpDate;
+
+// "Sun, 06 Nov 1994 08:49:37 GMT".len()
+pub(crate) const DATE_VALUE_LENGTH: usize = 29;
+
+#[cfg(feature = "http1")]
+pub(crate) fn extend(dst: &mut Vec<u8>) {
+ CACHED.with(|cache| {
+ dst.extend_from_slice(cache.borrow().buffer());
+ })
+}
+
+#[cfg(feature = "http1")]
+pub(crate) fn update() {
+ CACHED.with(|cache| {
+ cache.borrow_mut().check();
+ })
+}
+
+#[cfg(feature = "http2")]
+pub(crate) fn update_and_header_value() -> HeaderValue {
+ CACHED.with(|cache| {
+ let mut cache = cache.borrow_mut();
+ cache.check();
+ cache.header_value.clone()
+ })
+}
+
+struct CachedDate {
+ bytes: [u8; DATE_VALUE_LENGTH],
+ pos: usize,
+ #[cfg(feature = "http2")]
+ header_value: HeaderValue,
+ next_update: SystemTime,
+}
+
+thread_local!(static CACHED: RefCell<CachedDate> = RefCell::new(CachedDate::new()));
+
+impl CachedDate {
+ fn new() -> Self {
+ let mut cache = CachedDate {
+ bytes: [0; DATE_VALUE_LENGTH],
+ pos: 0,
+ #[cfg(feature = "http2")]
+ header_value: HeaderValue::from_static(""),
+ next_update: SystemTime::now(),
+ };
+ cache.update(cache.next_update);
+ cache
+ }
+
+ fn buffer(&self) -> &[u8] {
+ &self.bytes[..]
+ }
+
+ fn check(&mut self) {
+ let now = SystemTime::now();
+ if now > self.next_update {
+ self.update(now);
+ }
+ }
+
+ fn update(&mut self, now: SystemTime) {
+ self.render(now);
+ self.next_update = now + Duration::new(1, 0);
+ }
+
+ fn render(&mut self, now: SystemTime) {
+ self.pos = 0;
+ let _ = write!(self, "{}", HttpDate::from(now));
+ debug_assert!(self.pos == DATE_VALUE_LENGTH);
+ self.render_http2();
+ }
+
+ #[cfg(feature = "http2")]
+ fn render_http2(&mut self) {
+ self.header_value = HeaderValue::from_bytes(self.buffer())
+ .expect("Date format should be valid HeaderValue");
+ }
+
+ #[cfg(not(feature = "http2"))]
+ fn render_http2(&mut self) {}
+}
+
+impl fmt::Write for CachedDate {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ let len = s.len();
+ self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
+ self.pos += len;
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[cfg(feature = "nightly")]
+ use test::Bencher;
+
+ #[test]
+ fn test_date_len() {
+ assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
+ }
+
+ #[cfg(feature = "nightly")]
+ #[bench]
+ fn bench_date_check(b: &mut Bencher) {
+ let mut date = CachedDate::new();
+ // cache the first update
+ date.check();
+
+ b.iter(|| {
+ date.check();
+ });
+ }
+
+ #[cfg(feature = "nightly")]
+ #[bench]
+ fn bench_date_render(b: &mut Bencher) {
+ let mut date = CachedDate::new();
+ let now = SystemTime::now();
+ date.render(now);
+ b.bytes = date.buffer().len() as u64;
+
+ b.iter(|| {
+ date.render(now);
+ test::black_box(&date);
+ });
+ }
+}
diff --git a/vendor/hyper/src/common/io/compat.rs b/vendor/hyper/src/common/io/compat.rs
new file mode 100644
index 00000000..d026b6d3
--- /dev/null
+++ b/vendor/hyper/src/common/io/compat.rs
@@ -0,0 +1,150 @@
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// This adapts from `hyper` IO traits to the ones in Tokio.
+///
+/// This is currently used by `h2`, and by hyper internal unit tests.
+#[derive(Debug)]
+pub(crate) struct Compat<T>(pub(crate) T);
+
+impl<T> Compat<T> {
+ pub(crate) fn new(io: T) -> Self {
+ Compat(io)
+ }
+
+ fn p(self: Pin<&mut Self>) -> Pin<&mut T> {
+ // SAFETY: The simplest of projections. This is just
+ // a wrapper, we don't do anything that would undo the projection.
+ unsafe { self.map_unchecked_mut(|me| &mut me.0) }
+ }
+}
+
+impl<T> tokio::io::AsyncRead for Compat<T>
+where
+ T: crate::rt::Read,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ tbuf: &mut tokio::io::ReadBuf<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ let init = tbuf.initialized().len();
+ let filled = tbuf.filled().len();
+ let (new_init, new_filled) = unsafe {
+ let mut buf = crate::rt::ReadBuf::uninit(tbuf.inner_mut());
+ buf.set_init(init);
+ buf.set_filled(filled);
+
+ match crate::rt::Read::poll_read(self.p(), cx, buf.unfilled()) {
+ Poll::Ready(Ok(())) => (buf.init_len(), buf.len()),
+ other => return other,
+ }
+ };
+
+ let n_init = new_init - init;
+ unsafe {
+ tbuf.assume_init(n_init);
+ tbuf.set_filled(new_filled);
+ }
+
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<T> tokio::io::AsyncWrite for Compat<T>
+where
+ T: crate::rt::Write,
+{
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ crate::rt::Write::poll_write(self.p(), cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
+ crate::rt::Write::poll_flush(self.p(), cx)
+ }
+
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ crate::rt::Write::poll_shutdown(self.p(), cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ crate::rt::Write::is_write_vectored(&self.0)
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ crate::rt::Write::poll_write_vectored(self.p(), cx, bufs)
+ }
+}
+
+#[cfg(test)]
+impl<T> crate::rt::Read for Compat<T>
+where
+ T: tokio::io::AsyncRead,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ mut buf: crate::rt::ReadBufCursor<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ let n = unsafe {
+ let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
+ match tokio::io::AsyncRead::poll_read(self.p(), cx, &mut tbuf) {
+ Poll::Ready(Ok(())) => tbuf.filled().len(),
+ other => return other,
+ }
+ };
+
+ unsafe {
+ buf.advance(n);
+ }
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[cfg(test)]
+impl<T> crate::rt::Write for Compat<T>
+where
+ T: tokio::io::AsyncWrite,
+{
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ tokio::io::AsyncWrite::poll_write(self.p(), cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
+ tokio::io::AsyncWrite::poll_flush(self.p(), cx)
+ }
+
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ tokio::io::AsyncWrite::poll_shutdown(self.p(), cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ tokio::io::AsyncWrite::is_write_vectored(&self.0)
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ tokio::io::AsyncWrite::poll_write_vectored(self.p(), cx, bufs)
+ }
+}
diff --git a/vendor/hyper/src/common/io/mod.rs b/vendor/hyper/src/common/io/mod.rs
new file mode 100644
index 00000000..98c297ca
--- /dev/null
+++ b/vendor/hyper/src/common/io/mod.rs
@@ -0,0 +1,7 @@
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
+mod compat;
+mod rewind;
+
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
+pub(crate) use self::compat::Compat;
+pub(crate) use self::rewind::Rewind;
diff --git a/vendor/hyper/src/common/io/rewind.rs b/vendor/hyper/src/common/io/rewind.rs
new file mode 100644
index 00000000..c2556f01
--- /dev/null
+++ b/vendor/hyper/src/common/io/rewind.rs
@@ -0,0 +1,160 @@
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::{cmp, io};
+
+use bytes::{Buf, Bytes};
+
+use crate::rt::{Read, ReadBufCursor, Write};
+
+/// Combine a buffer with an IO, rewinding reads to use the buffer.
+#[derive(Debug)]
+pub(crate) struct Rewind<T> {
+ pre: Option<Bytes>,
+ inner: T,
+}
+
+impl<T> Rewind<T> {
+ #[cfg(test)]
+ pub(crate) fn new(io: T) -> Self {
+ Rewind {
+ pre: None,
+ inner: io,
+ }
+ }
+
+ pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self {
+ Rewind {
+ pre: Some(buf),
+ inner: io,
+ }
+ }
+
+ #[cfg(test)]
+ pub(crate) fn rewind(&mut self, bs: Bytes) {
+ debug_assert!(self.pre.is_none());
+ self.pre = Some(bs);
+ }
+
+ pub(crate) fn into_inner(self) -> (T, Bytes) {
+ (self.inner, self.pre.unwrap_or_default())
+ }
+
+ // pub(crate) fn get_mut(&mut self) -> &mut T {
+ // &mut self.inner
+ // }
+}
+
+impl<T> Read for Rewind<T>
+where
+ T: Read + Unpin,
+{
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ mut buf: ReadBufCursor<'_>,
+ ) -> Poll<io::Result<()>> {
+ if let Some(mut prefix) = self.pre.take() {
+ // If there are no remaining bytes, let the bytes get dropped.
+ if !prefix.is_empty() {
+ let copy_len = cmp::min(prefix.len(), buf.remaining());
+ // TODO: There should be a way to do following two lines cleaner...
+ buf.put_slice(&prefix[..copy_len]);
+ prefix.advance(copy_len);
+ // Put back what's left
+ if !prefix.is_empty() {
+ self.pre = Some(prefix);
+ }
+
+ return Poll::Ready(Ok(()));
+ }
+ }
+ Pin::new(&mut self.inner).poll_read(cx, buf)
+ }
+}
+
+impl<T> Write for Rewind<T>
+where
+ T: Write + Unpin,
+{
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.inner).poll_write(cx, buf)
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[io::IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_shutdown(cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+}
+
+#[cfg(all(
+ any(feature = "client", feature = "server"),
+ any(feature = "http1", feature = "http2"),
+))]
+#[cfg(test)]
+mod tests {
+ use super::super::Compat;
+ use super::Rewind;
+ use bytes::Bytes;
+ use tokio::io::AsyncReadExt;
+
+ #[cfg(not(miri))]
+ #[tokio::test]
+ async fn partial_rewind() {
+ let underlying = [104, 101, 108, 108, 111];
+
+ let mock = tokio_test::io::Builder::new().read(&underlying).build();
+
+ let mut stream = Compat::new(Rewind::new(Compat::new(mock)));
+
+ // Read off some bytes, ensure we filled o1
+ let mut buf = [0; 2];
+ stream.read_exact(&mut buf).await.expect("read1");
+
+ // Rewind the stream so that it is as if we never read in the first place.
+ stream.0.rewind(Bytes::copy_from_slice(&buf[..]));
+
+ let mut buf = [0; 5];
+ stream.read_exact(&mut buf).await.expect("read1");
+
+ // At this point we should have read everything that was in the MockStream
+ assert_eq!(&buf, &underlying);
+ }
+
+ #[cfg(not(miri))]
+ #[tokio::test]
+ async fn full_rewind() {
+ let underlying = [104, 101, 108, 108, 111];
+
+ let mock = tokio_test::io::Builder::new().read(&underlying).build();
+
+ let mut stream = Compat::new(Rewind::new(Compat::new(mock)));
+
+ let mut buf = [0; 5];
+ stream.read_exact(&mut buf).await.expect("read1");
+
+ // Rewind the stream so that it is as if we never read in the first place.
+ stream.0.rewind(Bytes::copy_from_slice(&buf[..]));
+
+ let mut buf = [0; 5];
+ stream.read_exact(&mut buf).await.expect("read1");
+ }
+}
diff --git a/vendor/hyper/src/common/mod.rs b/vendor/hyper/src/common/mod.rs
new file mode 100644
index 00000000..a0c71385
--- /dev/null
+++ b/vendor/hyper/src/common/mod.rs
@@ -0,0 +1,14 @@
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+pub(crate) mod buf;
+#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
+pub(crate) mod date;
+pub(crate) mod io;
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+pub(crate) mod task;
+#[cfg(any(
+ all(feature = "server", feature = "http1"),
+ all(any(feature = "client", feature = "server"), feature = "http2"),
+))]
+pub(crate) mod time;
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+pub(crate) mod watch;
diff --git a/vendor/hyper/src/common/task.rs b/vendor/hyper/src/common/task.rs
new file mode 100644
index 00000000..41671b14
--- /dev/null
+++ b/vendor/hyper/src/common/task.rs
@@ -0,0 +1,9 @@
+use std::task::{Context, Poll};
+
+/// A function to help "yield" a future, such that it is re-scheduled immediately.
+///
+/// Useful for spin counts, so a future doesn't hog too much time.
+pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll<std::convert::Infallible> {
+ cx.waker().wake_by_ref();
+ Poll::Pending
+}
diff --git a/vendor/hyper/src/common/time.rs b/vendor/hyper/src/common/time.rs
new file mode 100644
index 00000000..a8d3cc9c
--- /dev/null
+++ b/vendor/hyper/src/common/time.rs
@@ -0,0 +1,79 @@
+#[cfg(any(
+ all(any(feature = "client", feature = "server"), feature = "http2"),
+ all(feature = "server", feature = "http1"),
+))]
+use std::time::Duration;
+use std::{fmt, sync::Arc};
+use std::{pin::Pin, time::Instant};
+
+use crate::rt::Sleep;
+use crate::rt::Timer;
+
+/// A user-provided timer to time background tasks.
+#[derive(Clone)]
+pub(crate) enum Time {
+ Timer(Arc<dyn Timer + Send + Sync>),
+ Empty,
+}
+
+#[cfg(all(feature = "server", feature = "http1"))]
+#[derive(Clone, Copy, Debug)]
+pub(crate) enum Dur {
+ Default(Option<Duration>),
+ Configured(Option<Duration>),
+}
+
+impl fmt::Debug for Time {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Time").finish()
+ }
+}
+
+impl Time {
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
+ pub(crate) fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
+ match *self {
+ Time::Empty => {
+ panic!("You must supply a timer.")
+ }
+ Time::Timer(ref t) => t.sleep(duration),
+ }
+ }
+
+ #[cfg(feature = "http1")]
+ pub(crate) fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
+ match *self {
+ Time::Empty => {
+ panic!("You must supply a timer.")
+ }
+ Time::Timer(ref t) => t.sleep_until(deadline),
+ }
+ }
+
+ pub(crate) fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+ match *self {
+ Time::Empty => {
+ panic!("You must supply a timer.")
+ }
+ Time::Timer(ref t) => t.reset(sleep, new_deadline),
+ }
+ }
+
+ #[cfg(all(feature = "server", feature = "http1"))]
+ pub(crate) fn check(&self, dur: Dur, name: &'static str) -> Option<Duration> {
+ match dur {
+ Dur::Default(Some(dur)) => match self {
+ Time::Empty => {
+ warn!("timeout `{}` has default, but no timer set", name,);
+ None
+ }
+ Time::Timer(..) => Some(dur),
+ },
+ Dur::Configured(Some(dur)) => match self {
+ Time::Empty => panic!("timeout `{}` set, but no timer set", name,),
+ Time::Timer(..) => Some(dur),
+ },
+ Dur::Default(None) | Dur::Configured(None) => None,
+ }
+ }
+}
diff --git a/vendor/hyper/src/common/watch.rs b/vendor/hyper/src/common/watch.rs
new file mode 100644
index 00000000..ba17d551
--- /dev/null
+++ b/vendor/hyper/src/common/watch.rs
@@ -0,0 +1,73 @@
+//! An SPSC broadcast channel.
+//!
+//! - The value can only be a `usize`.
+//! - The consumer is only notified if the value is different.
+//! - The value `0` is reserved for closed.
+
+use futures_util::task::AtomicWaker;
+use std::sync::{
+ atomic::{AtomicUsize, Ordering},
+ Arc,
+};
+use std::task;
+
+type Value = usize;
+
+pub(crate) const CLOSED: usize = 0;
+
+pub(crate) fn channel(initial: Value) -> (Sender, Receiver) {
+ debug_assert!(
+ initial != CLOSED,
+ "watch::channel initial state of 0 is reserved"
+ );
+
+ let shared = Arc::new(Shared {
+ value: AtomicUsize::new(initial),
+ waker: AtomicWaker::new(),
+ });
+
+ (
+ Sender {
+ shared: shared.clone(),
+ },
+ Receiver { shared },
+ )
+}
+
+pub(crate) struct Sender {
+ shared: Arc<Shared>,
+}
+
+pub(crate) struct Receiver {
+ shared: Arc<Shared>,
+}
+
+struct Shared {
+ value: AtomicUsize,
+ waker: AtomicWaker,
+}
+
+impl Sender {
+ pub(crate) fn send(&mut self, value: Value) {
+ if self.shared.value.swap(value, Ordering::SeqCst) != value {
+ self.shared.waker.wake();
+ }
+ }
+}
+
+impl Drop for Sender {
+ fn drop(&mut self) {
+ self.send(CLOSED);
+ }
+}
+
+impl Receiver {
+ pub(crate) fn load(&mut self, cx: &mut task::Context<'_>) -> Value {
+ self.shared.waker.register(cx.waker());
+ self.shared.value.load(Ordering::SeqCst)
+ }
+
+ pub(crate) fn peek(&self) -> Value {
+ self.shared.value.load(Ordering::Relaxed)
+ }
+}
diff --git a/vendor/hyper/src/error.rs b/vendor/hyper/src/error.rs
new file mode 100644
index 00000000..48917db9
--- /dev/null
+++ b/vendor/hyper/src/error.rs
@@ -0,0 +1,658 @@
+//! Error and Result module.
+use std::error::Error as StdError;
+use std::fmt;
+
+/// Result type often returned from methods that can have hyper `Error`s.
+pub type Result<T> = std::result::Result<T, Error>;
+
+type Cause = Box<dyn StdError + Send + Sync>;
+
+/// Represents errors that can occur handling HTTP streams.
+///
+/// # Formatting
+///
+/// The `Display` implementation of this type will only print the details of
+/// this level of error, even though it may have been caused by another error
+/// and contain that error in its source. To print all the relevant
+/// information, including the source chain, using something like
+/// `std::error::Report`, or equivalent 3rd party types.
+///
+/// The contents of the formatted error message of this specific `Error` type
+/// is unspecified. **You must not depend on it.** The wording and details may
+/// change in any version, with the goal of improving error messages.
+///
+/// # Source
+///
+/// A `hyper::Error` may be caused by another error. To aid in debugging,
+/// those are exposed in `Error::source()` as erased types. While it is
+/// possible to check the exact type of the sources, they **can not be depended
+/// on**. They may come from private internal dependencies, and are subject to
+/// change at any moment.
+pub struct Error {
+ inner: Box<ErrorImpl>,
+}
+
+struct ErrorImpl {
+ kind: Kind,
+ cause: Option<Cause>,
+}
+
+#[derive(Debug)]
+pub(super) enum Kind {
+ Parse(Parse),
+ User(User),
+ /// A message reached EOF, but is not complete.
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ IncompleteMessage,
+ /// A connection received a message (or bytes) when not waiting for one.
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ UnexpectedMessage,
+ /// A pending item was dropped before ever being processed.
+ Canceled,
+ /// Indicates a channel (client or body sender) is closed.
+ #[cfg(any(
+ all(feature = "http1", any(feature = "client", feature = "server")),
+ all(feature = "http2", feature = "client")
+ ))]
+ ChannelClosed,
+ /// An `io::Error` that occurred while trying to read or write to a network stream.
+ #[cfg(all(
+ any(feature = "client", feature = "server"),
+ any(feature = "http1", feature = "http2")
+ ))]
+ Io,
+ /// User took too long to send headers
+ #[cfg(all(feature = "http1", feature = "server"))]
+ HeaderTimeout,
+ /// Error while reading a body from connection.
+ #[cfg(all(
+ any(feature = "client", feature = "server"),
+ any(feature = "http1", feature = "http2")
+ ))]
+ Body,
+ /// Error while writing a body to connection.
+ #[cfg(all(
+ any(feature = "client", feature = "server"),
+ any(feature = "http1", feature = "http2")
+ ))]
+ BodyWrite,
+ /// Error calling AsyncWrite::shutdown()
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ Shutdown,
+
+ /// A general error from h2.
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
+ Http2,
+}
+
+#[derive(Debug)]
+pub(super) enum Parse {
+ Method,
+ #[cfg(feature = "http1")]
+ Version,
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ VersionH2,
+ Uri,
+ #[cfg(all(feature = "http1", feature = "server"))]
+ UriTooLong,
+ #[cfg(feature = "http1")]
+ Header(Header),
+ #[cfg(any(feature = "http1", feature = "http2"))]
+ #[cfg_attr(feature = "http2", allow(unused))]
+ TooLarge,
+ Status,
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ Internal,
+}
+
+#[derive(Debug)]
+#[cfg(feature = "http1")]
+pub(super) enum Header {
+ Token,
+ #[cfg(any(feature = "client", feature = "server"))]
+ ContentLengthInvalid,
+ #[cfg(feature = "server")]
+ TransferEncodingInvalid,
+ #[cfg(any(feature = "client", feature = "server"))]
+ TransferEncodingUnexpected,
+}
+
+#[derive(Debug)]
+pub(super) enum User {
+ /// Error calling user's Body::poll_data().
+ #[cfg(all(
+ any(feature = "client", feature = "server"),
+ any(feature = "http1", feature = "http2")
+ ))]
+ Body,
+ /// The user aborted writing of the outgoing body.
+ #[cfg(any(
+ all(feature = "http1", any(feature = "client", feature = "server")),
+ feature = "ffi"
+ ))]
+ BodyWriteAborted,
+ /// Error from future of user's Service.
+ #[cfg(any(
+ all(any(feature = "client", feature = "server"), feature = "http1"),
+ all(feature = "server", feature = "http2")
+ ))]
+ Service,
+ /// User tried to send a certain header in an unexpected context.
+ ///
+ /// For example, sending both `content-length` and `transfer-encoding`.
+ #[cfg(any(feature = "http1", feature = "http2"))]
+ #[cfg(feature = "server")]
+ UnexpectedHeader,
+ /// User tried to respond with a 1xx (not 101) response code.
+ #[cfg(feature = "http1")]
+ #[cfg(feature = "server")]
+ UnsupportedStatusCode,
+
+ /// User tried polling for an upgrade that doesn't exist.
+ NoUpgrade,
+
+ /// User polled for an upgrade, but low-level API is not using upgrades.
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ ManualUpgrade,
+
+ /// The dispatch task is gone.
+ #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
+ DispatchGone,
+
+ /// User aborted in an FFI callback.
+ #[cfg(feature = "ffi")]
+ AbortedByCallback,
+}
+
+// Sentinel type to indicate the error was caused by a timeout.
+#[derive(Debug)]
+pub(super) struct TimedOut;
+
+impl Error {
+ /// Returns true if this was an HTTP parse error.
+ pub fn is_parse(&self) -> bool {
+ matches!(self.inner.kind, Kind::Parse(_))
+ }
+
+ /// Returns true if this was an HTTP parse error caused by a message that was too large.
+ #[cfg(all(feature = "http1", feature = "server"))]
+ pub fn is_parse_too_large(&self) -> bool {
+ matches!(
+ self.inner.kind,
+ Kind::Parse(Parse::TooLarge) | Kind::Parse(Parse::UriTooLong)
+ )
+ }
+
+ /// Returns true if this was an HTTP parse error caused by an invalid response status code or
+ /// reason phrase.
+ pub fn is_parse_status(&self) -> bool {
+ matches!(self.inner.kind, Kind::Parse(Parse::Status))
+ }
+
+ /// Returns true if this error was caused by user code.
+ pub fn is_user(&self) -> bool {
+ matches!(self.inner.kind, Kind::User(_))
+ }
+
+ /// Returns true if this was about a `Request` that was canceled.
+ pub fn is_canceled(&self) -> bool {
+ matches!(self.inner.kind, Kind::Canceled)
+ }
+
+ /// Returns true if a sender's channel is closed.
+ pub fn is_closed(&self) -> bool {
+ #[cfg(not(any(
+ all(feature = "http1", any(feature = "client", feature = "server")),
+ all(feature = "http2", feature = "client")
+ )))]
+ return false;
+
+ #[cfg(any(
+ all(feature = "http1", any(feature = "client", feature = "server")),
+ all(feature = "http2", feature = "client")
+ ))]
+ matches!(self.inner.kind, Kind::ChannelClosed)
+ }
+
+ /// Returns true if the connection closed before a message could complete.
+ pub fn is_incomplete_message(&self) -> bool {
+ #[cfg(not(all(any(feature = "client", feature = "server"), feature = "http1")))]
+ return false;
+
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ matches!(self.inner.kind, Kind::IncompleteMessage)
+ }
+
+ /// Returns true if the body write was aborted.
+ pub fn is_body_write_aborted(&self) -> bool {
+ #[cfg(not(any(
+ all(feature = "http1", any(feature = "client", feature = "server")),
+ feature = "ffi"
+ )))]
+ return false;
+
+ #[cfg(any(
+ all(feature = "http1", any(feature = "client", feature = "server")),
+ feature = "ffi"
+ ))]
+ matches!(self.inner.kind, Kind::User(User::BodyWriteAborted))
+ }
+
+ /// Returns true if the error was caused by a timeout.
+ pub fn is_timeout(&self) -> bool {
+ #[cfg(all(feature = "http1", feature = "server"))]
+ if matches!(self.inner.kind, Kind::HeaderTimeout) {
+ return true;
+ }
+ self.find_source::<TimedOut>().is_some()
+ }
+
+ pub(super) fn new(kind: Kind) -> Error {
+ Error {
+ inner: Box::new(ErrorImpl { kind, cause: None }),
+ }
+ }
+
+ pub(super) fn with<C: Into<Cause>>(mut self, cause: C) -> Error {
+ self.inner.cause = Some(cause.into());
+ self
+ }
+
+ #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))]
+ pub(super) fn kind(&self) -> &Kind {
+ &self.inner.kind
+ }
+
+ pub(crate) fn find_source<E: StdError + 'static>(&self) -> Option<&E> {
+ let mut cause = self.source();
+ while let Some(err) = cause {
+ if let Some(typed) = err.downcast_ref() {
+ return Some(typed);
+ }
+ cause = err.source();
+ }
+
+ // else
+ None
+ }
+
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
+ pub(super) fn h2_reason(&self) -> h2::Reason {
+ // Find an h2::Reason somewhere in the cause stack, if it exists,
+ // otherwise assume an INTERNAL_ERROR.
+ self.find_source::<h2::Error>()
+ .and_then(|h2_err| h2_err.reason())
+ .unwrap_or(h2::Reason::INTERNAL_ERROR)
+ }
+
+ pub(super) fn new_canceled() -> Error {
+ Error::new(Kind::Canceled)
+ }
+
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ pub(super) fn new_incomplete() -> Error {
+ Error::new(Kind::IncompleteMessage)
+ }
+
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ pub(super) fn new_too_large() -> Error {
+ Error::new(Kind::Parse(Parse::TooLarge))
+ }
+
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ pub(super) fn new_version_h2() -> Error {
+ Error::new(Kind::Parse(Parse::VersionH2))
+ }
+
+ #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+ pub(super) fn new_unexpected_message() -> Error {
+ Error::new(Kind::UnexpectedMessage)
+ }
+
+ #[cfg(all(
+ any(feature = "client", feature = "server"),
+ any(feature = "http1", feature = "http2")
+ ))]
+ pub(super) fn new_io(cause: std::io::Error) -> Error {
+ Error::new(Kind::Io).with(cause)
+ }
+
+ #[cfg(any(
+ all(feature = "http1", any(feature = "client", feature = "server")),
+ all(feature = "http2", feature = "client")
+ ))]
    // NOTE: the `cfg` gates on each constructor mirror the feature
    // combinations under which the corresponding `Kind`/`User` variant is
    // used (compare with the matching arms of `description` below).

    /// A channel was closed.
    pub(super) fn new_closed() -> Error {
        Error::new(Kind::ChannelClosed)
    }

    /// Error reading a body from the connection, with the underlying cause.
    #[cfg(all(
        any(feature = "client", feature = "server"),
        any(feature = "http1", feature = "http2")
    ))]
    pub(super) fn new_body<E: Into<Cause>>(cause: E) -> Error {
        Error::new(Kind::Body).with(cause)
    }

    /// Error writing a body to the connection, with the underlying cause.
    #[cfg(all(
        any(feature = "client", feature = "server"),
        any(feature = "http1", feature = "http2")
    ))]
    pub(super) fn new_body_write<E: Into<Cause>>(cause: E) -> Error {
        Error::new(Kind::BodyWrite).with(cause)
    }

    /// The user aborted writing of the outgoing body.
    #[cfg(any(
        all(feature = "http1", any(feature = "client", feature = "server")),
        feature = "ffi"
    ))]
    pub(super) fn new_body_write_aborted() -> Error {
        Error::new(Kind::User(User::BodyWriteAborted))
    }

    /// Shorthand for wrapping a `User` variant in `Kind::User`.
    fn new_user(user: User) -> Error {
        Error::new(Kind::User(user))
    }

    /// The user sent an unexpected header.
    #[cfg(any(feature = "http1", feature = "http2"))]
    #[cfg(feature = "server")]
    pub(super) fn new_user_header() -> Error {
        Error::new_user(User::UnexpectedHeader)
    }

    /// Timed out reading the request head from the client.
    #[cfg(all(feature = "http1", feature = "server"))]
    pub(super) fn new_header_timeout() -> Error {
        Error::new(Kind::HeaderTimeout)
    }

    /// The response has a 1xx status code, not supported by the server.
    #[cfg(feature = "http1")]
    #[cfg(feature = "server")]
    pub(super) fn new_user_unsupported_status_code() -> Error {
        Error::new_user(User::UnsupportedStatusCode)
    }

    /// No upgrade was available.
    pub(super) fn new_user_no_upgrade() -> Error {
        Error::new_user(User::NoUpgrade)
    }

    /// An upgrade was expected but the low-level API is in use.
    #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
    pub(super) fn new_user_manual_upgrade() -> Error {
        Error::new_user(User::ManualUpgrade)
    }

    /// Error returned by the user's `Service`, with the underlying cause.
    #[cfg(any(
        all(any(feature = "client", feature = "server"), feature = "http1"),
        all(feature = "server", feature = "http2")
    ))]
    pub(super) fn new_user_service<E: Into<Cause>>(cause: E) -> Error {
        Error::new_user(User::Service).with(cause)
    }

    /// Error from the user's body stream, with the underlying cause.
    #[cfg(all(
        any(feature = "client", feature = "server"),
        any(feature = "http1", feature = "http2")
    ))]
    pub(super) fn new_user_body<E: Into<Cause>>(cause: E) -> Error {
        Error::new_user(User::Body).with(cause)
    }

    /// I/O error while shutting down the connection.
    #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
    pub(super) fn new_shutdown(cause: std::io::Error) -> Error {
        Error::new(Kind::Shutdown).with(cause)
    }

    /// The operation was aborted by an application callback (FFI only).
    #[cfg(feature = "ffi")]
    pub(super) fn new_user_aborted_by_callback() -> Error {
        Error::new_user(User::AbortedByCallback)
    }

    /// The dispatch task is gone.
    #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
    pub(super) fn new_user_dispatch_gone() -> Error {
        Error::new(Kind::User(User::DispatchGone))
    }

    /// Wraps an `h2::Error`; io-flavored h2 errors are unwrapped into an
    /// io error instead of `Kind::Http2`.
    #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
    pub(super) fn new_h2(cause: ::h2::Error) -> Error {
        if cause.is_io() {
            Error::new_io(cause.into_io().expect("h2::Error::is_io"))
        } else {
            Error::new(Kind::Http2).with(cause)
        }
    }
+
    /// Returns a static, human-readable description for this error's kind.
    ///
    /// This is the string written by the `Display` impl; the underlying
    /// cause (if any) is only exposed through `source()`.
    fn description(&self) -> &str {
        match self.inner.kind {
            Kind::Parse(Parse::Method) => "invalid HTTP method parsed",
            #[cfg(feature = "http1")]
            Kind::Parse(Parse::Version) => "invalid HTTP version parsed",
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
            Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)",
            Kind::Parse(Parse::Uri) => "invalid URI",
            #[cfg(all(feature = "http1", feature = "server"))]
            Kind::Parse(Parse::UriTooLong) => "URI too long",
            #[cfg(feature = "http1")]
            Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed",
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
            Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => {
                "invalid content-length parsed"
            }
            #[cfg(all(feature = "http1", feature = "server"))]
            Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => {
                "invalid transfer-encoding parsed"
            }
            #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))]
            Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => {
                "unexpected transfer-encoding parsed"
            }
            #[cfg(any(feature = "http1", feature = "http2"))]
            Kind::Parse(Parse::TooLarge) => "message head is too large",
            Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed",
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
            Kind::Parse(Parse::Internal) => {
                "internal error inside Hyper and/or its dependencies, please report"
            }
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
            Kind::IncompleteMessage => "connection closed before message completed",
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
            Kind::UnexpectedMessage => "received unexpected message from connection",
            #[cfg(any(
                all(feature = "http1", any(feature = "client", feature = "server")),
                all(feature = "http2", feature = "client")
            ))]
            Kind::ChannelClosed => "channel closed",
            Kind::Canceled => "operation was canceled",
            #[cfg(all(feature = "http1", feature = "server"))]
            Kind::HeaderTimeout => "read header from client timeout",
            #[cfg(all(
                any(feature = "client", feature = "server"),
                any(feature = "http1", feature = "http2")
            ))]
            Kind::Body => "error reading a body from connection",
            #[cfg(all(
                any(feature = "client", feature = "server"),
                any(feature = "http1", feature = "http2")
            ))]
            Kind::BodyWrite => "error writing a body to connection",
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
            Kind::Shutdown => "error shutting down connection",
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
            Kind::Http2 => "http2 error",
            #[cfg(all(
                any(feature = "client", feature = "server"),
                any(feature = "http1", feature = "http2")
            ))]
            Kind::Io => "connection error",

            // ----- user-caused errors (`Kind::User`) -----
            #[cfg(all(
                any(feature = "client", feature = "server"),
                any(feature = "http1", feature = "http2")
            ))]
            Kind::User(User::Body) => "error from user's Body stream",
            #[cfg(any(
                all(feature = "http1", any(feature = "client", feature = "server")),
                feature = "ffi"
            ))]
            Kind::User(User::BodyWriteAborted) => "user body write aborted",
            #[cfg(any(
                all(any(feature = "client", feature = "server"), feature = "http1"),
                all(feature = "server", feature = "http2")
            ))]
            Kind::User(User::Service) => "error from user's Service",
            #[cfg(any(feature = "http1", feature = "http2"))]
            #[cfg(feature = "server")]
            Kind::User(User::UnexpectedHeader) => "user sent unexpected header",
            #[cfg(feature = "http1")]
            #[cfg(feature = "server")]
            Kind::User(User::UnsupportedStatusCode) => {
                "response has 1xx status code, not supported by server"
            }
            Kind::User(User::NoUpgrade) => "no upgrade available",
            #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
            Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use",
            #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
            Kind::User(User::DispatchGone) => "dispatch task is gone",
            #[cfg(feature = "ffi")]
            Kind::User(User::AbortedByCallback) => "operation aborted by an application callback",
        }
    }
+}
+
+impl fmt::Debug for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut f = f.debug_tuple("hyper::Error");
+ f.field(&self.inner.kind);
+ if let Some(ref cause) = self.inner.cause {
+ f.field(cause);
+ }
+ f.finish()
+ }
+}
+
impl fmt::Display for Error {
    /// Writes the static `description()` string; the cause (if any) is not
    /// repeated here — it is reachable through `source()`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.description())
    }
}

impl StdError for Error {
    fn source(&self) -> Option<&(dyn StdError + 'static)> {
        // Re-borrow the boxed cause as a plain `dyn StdError + 'static`
        // to match the `source()` signature.
        self.inner
            .cause
            .as_ref()
            .map(|cause| &**cause as &(dyn StdError + 'static))
    }
}

// Conversion from the internal `Parse` kind; hidden from rustdoc.
#[doc(hidden)]
impl From<Parse> for Error {
    fn from(err: Parse) -> Error {
        Error::new(Kind::Parse(err))
    }
}
+
#[cfg(feature = "http1")]
impl Parse {
    /// Shorthand for `Parse::Header(Header::ContentLengthInvalid)`.
    #[cfg(any(feature = "client", feature = "server"))]
    pub(crate) fn content_length_invalid() -> Self {
        Parse::Header(Header::ContentLengthInvalid)
    }

    /// Shorthand for `Parse::Header(Header::TransferEncodingInvalid)`.
    #[cfg(feature = "server")]
    pub(crate) fn transfer_encoding_invalid() -> Self {
        Parse::Header(Header::TransferEncodingInvalid)
    }

    /// Shorthand for `Parse::Header(Header::TransferEncodingUnexpected)`.
    #[cfg(any(feature = "client", feature = "server"))]
    pub(crate) fn transfer_encoding_unexpected() -> Self {
        Parse::Header(Header::TransferEncodingUnexpected)
    }
}
+
#[cfg(feature = "http1")]
impl From<httparse::Error> for Parse {
    fn from(err: httparse::Error) -> Parse {
        match err {
            // All header-shaped syntax errors collapse into a single
            // `Header::Token` parse error.
            httparse::Error::HeaderName
            | httparse::Error::HeaderValue
            | httparse::Error::NewLine
            | httparse::Error::Token => Parse::Header(Header::Token),
            httparse::Error::Status => Parse::Status,
            // Exceeding the header-count limit reports as "too large".
            httparse::Error::TooManyHeaders => Parse::TooLarge,
            httparse::Error::Version => Parse::Version,
        }
    }
}
+
// Conversions from `http` crate parse errors into the matching `Parse`
// variant. The original error detail is discarded (`_` pattern): only the
// kind of failure is retained.

impl From<http::method::InvalidMethod> for Parse {
    fn from(_: http::method::InvalidMethod) -> Parse {
        Parse::Method
    }
}

impl From<http::status::InvalidStatusCode> for Parse {
    fn from(_: http::status::InvalidStatusCode) -> Parse {
        Parse::Status
    }
}

impl From<http::uri::InvalidUri> for Parse {
    fn from(_: http::uri::InvalidUri) -> Parse {
        Parse::Uri
    }
}

impl From<http::uri::InvalidUriParts> for Parse {
    fn from(_: http::uri::InvalidUriParts) -> Parse {
        Parse::Uri
    }
}
+
+// ===== impl TimedOut ====
+
+impl fmt::Display for TimedOut {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("operation timed out")
+ }
+}
+
+impl StdError for TimedOut {}
+
#[cfg(test)]
mod tests {
    use super::*;
    use std::mem;

    // Compiles only if `T: Send + Sync + 'static`.
    fn assert_send_sync<T: Send + Sync + 'static>() {}

    // `Error` must be shareable and sendable across threads.
    #[test]
    fn error_satisfies_send_sync() {
        assert_send_sync::<Error>()
    }

    // `Error` is expected to stay a single pointer wide.
    #[test]
    fn error_size_of() {
        assert_eq!(mem::size_of::<Error>(), mem::size_of::<usize>());
    }

    // An error with no h2 cause reports INTERNAL_ERROR.
    #[cfg(feature = "http2")]
    #[test]
    fn h2_reason_unknown() {
        let closed = Error::new_closed();
        assert_eq!(closed.h2_reason(), h2::Reason::INTERNAL_ERROR);
    }

    // The h2 reason is found when the h2 error is the direct cause.
    #[cfg(feature = "http2")]
    #[test]
    fn h2_reason_one_level() {
        let body_err = Error::new_user_body(h2::Error::from(h2::Reason::ENHANCE_YOUR_CALM));
        assert_eq!(body_err.h2_reason(), h2::Reason::ENHANCE_YOUR_CALM);
    }

    // The h2 reason is found even when nested behind another hyper::Error.
    #[cfg(feature = "http2")]
    #[test]
    fn h2_reason_nested() {
        let recvd = Error::new_h2(h2::Error::from(h2::Reason::HTTP_1_1_REQUIRED));
        // Suppose a user were proxying the received error
        let svc_err = Error::new_user_service(recvd);
        assert_eq!(svc_err.h2_reason(), h2::Reason::HTTP_1_1_REQUIRED);
    }
}
diff --git a/vendor/hyper/src/ext/h1_reason_phrase.rs b/vendor/hyper/src/ext/h1_reason_phrase.rs
new file mode 100644
index 00000000..adb43636
--- /dev/null
+++ b/vendor/hyper/src/ext/h1_reason_phrase.rs
@@ -0,0 +1,221 @@
+use bytes::Bytes;
+
/// A reason phrase in an HTTP/1 response.
///
/// # Clients
///
/// For clients, a `ReasonPhrase` will be present in the extensions of the `http::Response` returned
/// for a request if the reason phrase is different from the canonical reason phrase for the
/// response's status code. For example, if a server returns `HTTP/1.1 200 Awesome`, the
/// `ReasonPhrase` will be present and contain `Awesome`, but if a server returns `HTTP/1.1 200 OK`,
/// the response will not contain a `ReasonPhrase`.
///
/// ```no_run
/// # #[cfg(all(feature = "tcp", feature = "client", feature = "http1"))]
/// # async fn fake_fetch() -> hyper::Result<()> {
/// use hyper::{Client, Uri};
/// use hyper::ext::ReasonPhrase;
///
/// let res = Client::new().get(Uri::from_static("http://example.com/non_canonical_reason")).await?;
///
/// // Print out the non-canonical reason phrase, if it has one...
/// if let Some(reason) = res.extensions().get::<ReasonPhrase>() {
///     println!("non-canonical reason: {}", std::str::from_utf8(reason.as_bytes()).unwrap());
/// }
/// # Ok(())
/// # }
/// ```
///
/// # Servers
///
/// When a `ReasonPhrase` is present in the extensions of the `http::Response` written by a server,
/// its contents will be written in place of the canonical reason phrase when responding via HTTP/1.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
// Invariant: the inner bytes were validated by `find_invalid_byte`, except
// when supplied through the crate-internal `from_bytes_unchecked`.
pub struct ReasonPhrase(Bytes);
+
impl ReasonPhrase {
    /// Gets the reason phrase as bytes.
    pub fn as_bytes(&self) -> &[u8] {
        &self.0
    }

    /// Converts a static byte slice to a reason phrase.
    ///
    /// # Panics
    ///
    /// Panics — at compile time when evaluated in a `const` context — if
    /// `reason` contains a byte that is not allowed in a reason phrase.
    pub const fn from_static(reason: &'static [u8]) -> Self {
        // Panicking inside a `const fn` requires Rust >= 1.57.0.
        if find_invalid_byte(reason).is_some() {
            panic!("invalid byte in static reason phrase");
        }
        Self(Bytes::from_static(reason))
    }

    // Not public on purpose.
    /// Converts a `Bytes` directly into a `ReasonPhrase` without validating.
    ///
    /// Use with care; invalid bytes in a reason phrase can cause serious security problems if
    /// emitted in a response.
    #[cfg(feature = "client")]
    pub(crate) fn from_bytes_unchecked(reason: Bytes) -> Self {
        Self(reason)
    }
}
+
+impl TryFrom<&[u8]> for ReasonPhrase {
+ type Error = InvalidReasonPhrase;
+
+ fn try_from(reason: &[u8]) -> Result<Self, Self::Error> {
+ if let Some(bad_byte) = find_invalid_byte(reason) {
+ Err(InvalidReasonPhrase { bad_byte })
+ } else {
+ Ok(Self(Bytes::copy_from_slice(reason)))
+ }
+ }
+}
+
+impl TryFrom<Vec<u8>> for ReasonPhrase {
+ type Error = InvalidReasonPhrase;
+
+ fn try_from(reason: Vec<u8>) -> Result<Self, Self::Error> {
+ if let Some(bad_byte) = find_invalid_byte(&reason) {
+ Err(InvalidReasonPhrase { bad_byte })
+ } else {
+ Ok(Self(Bytes::from(reason)))
+ }
+ }
+}
+
+impl TryFrom<String> for ReasonPhrase {
+ type Error = InvalidReasonPhrase;
+
+ fn try_from(reason: String) -> Result<Self, Self::Error> {
+ if let Some(bad_byte) = find_invalid_byte(reason.as_bytes()) {
+ Err(InvalidReasonPhrase { bad_byte })
+ } else {
+ Ok(Self(Bytes::from(reason)))
+ }
+ }
+}
+
+impl TryFrom<Bytes> for ReasonPhrase {
+ type Error = InvalidReasonPhrase;
+
+ fn try_from(reason: Bytes) -> Result<Self, Self::Error> {
+ if let Some(bad_byte) = find_invalid_byte(&reason) {
+ Err(InvalidReasonPhrase { bad_byte })
+ } else {
+ Ok(Self(reason))
+ }
+ }
+}
+
impl From<ReasonPhrase> for Bytes {
    /// Extracts the inner `Bytes` without copying.
    fn from(reason: ReasonPhrase) -> Self {
        reason.0
    }
}

impl AsRef<[u8]> for ReasonPhrase {
    /// Borrows the phrase as raw bytes (same view as `as_bytes`).
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

/// Error indicating an invalid byte when constructing a `ReasonPhrase`.
///
/// See [the spec][spec] for details on allowed bytes.
///
/// [spec]: https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7
#[derive(Debug)]
pub struct InvalidReasonPhrase {
    // The first offending byte found during validation.
    bad_byte: u8,
}

impl std::fmt::Display for InvalidReasonPhrase {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Invalid byte in reason phrase: {}", self.bad_byte)
    }
}

impl std::error::Error for InvalidReasonPhrase {}
+
/// Returns whether `b` may appear in an HTTP/1 reason phrase.
///
/// Per https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7
/// a reason phrase is `*( HTAB / SP / VCHAR / obs-text )`, where:
///
/// - VCHAR    = 0x21..=0x7E (https://www.rfc-editor.org/rfc/rfc5234.html#appendix-B.1)
/// - obs-text = 0x80..=0xFF
///   (https://httpwg.org/http-core/draft-ietf-httpbis-semantics-latest.html#fields.values)
const fn is_valid_byte(b: u8) -> bool {
    matches!(b, b'\t' | b' ' | 0x21..=0x7E | 0x80..=0xFF)
}

/// Scans `bytes` and returns the first byte not allowed in a reason
/// phrase, or `None` when every byte is valid.
///
/// Written as an index-based `while` loop because iterators cannot be
/// used in a `const fn`.
const fn find_invalid_byte(bytes: &[u8]) -> Option<u8> {
    let mut idx = 0;
    while idx < bytes.len() {
        let byte = bytes[idx];
        if !is_valid_byte(byte) {
            return Some(byte);
        }
        idx += 1;
    }
    None
}
+
#[cfg(test)]
mod tests {
    use super::*;

    // Ordinary token bytes are accepted by both construction paths.
    #[test]
    fn basic_valid() {
        const PHRASE: &[u8] = b"OK";
        assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
        assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
    }

    // The empty phrase is valid.
    #[test]
    fn empty_valid() {
        const PHRASE: &[u8] = b"";
        assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
        assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
    }

    // obs-text bytes (0x80..=0xFF) are valid.
    #[test]
    fn obs_text_valid() {
        const PHRASE: &[u8] = b"hyp\xe9r";
        assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
        assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
    }

    const NEWLINE_PHRASE: &[u8] = b"hyp\ner";

    // `from_static` panics on a control byte (newline).
    #[test]
    #[should_panic]
    fn newline_invalid_panic() {
        ReasonPhrase::from_static(NEWLINE_PHRASE);
    }

    // The fallible path rejects the same input with an error.
    #[test]
    fn newline_invalid_err() {
        assert!(ReasonPhrase::try_from(NEWLINE_PHRASE).is_err());
    }

    const CR_PHRASE: &[u8] = b"hyp\rer";

    // `from_static` panics on a carriage return.
    #[test]
    #[should_panic]
    fn cr_invalid_panic() {
        ReasonPhrase::from_static(CR_PHRASE);
    }

    // The fallible path rejects a carriage return with an error.
    #[test]
    fn cr_invalid_err() {
        assert!(ReasonPhrase::try_from(CR_PHRASE).is_err());
    }
}
diff --git a/vendor/hyper/src/ext/informational.rs b/vendor/hyper/src/ext/informational.rs
new file mode 100644
index 00000000..e728580f
--- /dev/null
+++ b/vendor/hyper/src/ext/informational.rs
@@ -0,0 +1,86 @@
+use std::sync::Arc;
+
// Type-erased, clonable handle to the user's informational-response
// callback; stored in a request's extensions by `on_informational_raw`.
#[derive(Clone)]
pub(crate) struct OnInformational(Arc<dyn OnInformationalCallback + Send + Sync>);
+
/// Add a callback for 1xx informational responses.
///
/// # Example
///
/// ```
/// # let some_body = ();
/// let mut req = hyper::Request::new(some_body);
///
/// hyper::ext::on_informational(&mut req, |res| {
///     println!("informational: {:?}", res.status());
/// });
///
/// // send request on a client connection...
/// ```
pub fn on_informational<B, F>(req: &mut http::Request<B>, callback: F)
where
    F: Fn(Response<'_>) + Send + Sync + 'static,
{
    // Wrap the closure so it implements the sealed callback trait.
    on_informational_raw(req, OnInformationalClosure(callback));
}

/// Inserts a raw `OnInformationalCallback` implementation into the
/// request's extensions.
///
/// This is the untyped backend of `on_informational`; it is also
/// re-exported for the `ffi` feature (see `ext/mod.rs`).
pub(crate) fn on_informational_raw<B, C>(req: &mut http::Request<B>, callback: C)
where
    C: OnInformationalCallback + Send + Sync + 'static,
{
    req.extensions_mut()
        .insert(OnInformational(Arc::new(callback)));
}
+
// Sealed, not actually nameable bounds
pub(crate) trait OnInformationalCallback {
    /// Called with each 1xx (informational) response as it is received.
    fn on_informational(&self, res: http::Response<()>);
}

impl OnInformational {
    /// Invokes the stored callback with `res`.
    pub(crate) fn call(&self, res: http::Response<()>) {
        self.0.on_informational(res);
    }
}
+
// Adapter that lets a plain closure implement `OnInformationalCallback`.
struct OnInformationalClosure<F>(F);

impl<F> OnInformationalCallback for OnInformationalClosure<F>
where
    F: Fn(Response<'_>) + Send + Sync + 'static,
{
    fn on_informational(&self, res: http::Response<()>) {
        // Hand the closure a borrowed facade rather than the owned response
        // (see the `Response` type below for why).
        let res = Response(&res);
        (self.0)(res);
    }
}
+
// A facade over http::Response.
//
// It purposefully hides being able to move the response out of the closure,
// while also not being able to expect it to be a reference `&Response`.
// (Otherwise, a closure can be written as `|res: &_|`, and then be broken if
// we make the closure take ownership.)
//
// With the type not being nameable, we could change from being a facade to
// being either a real reference, or moving the http::Response into the closure,
// in a backwards-compatible change in the future.
#[derive(Debug)]
pub struct Response<'a>(&'a http::Response<()>);

impl Response<'_> {
    /// Returns the status code of the informational response.
    #[inline]
    pub fn status(&self) -> http::StatusCode {
        self.0.status()
    }

    /// Returns the HTTP version of the informational response.
    #[inline]
    pub fn version(&self) -> http::Version {
        self.0.version()
    }

    /// Returns the headers of the informational response.
    #[inline]
    pub fn headers(&self) -> &http::HeaderMap {
        self.0.headers()
    }
}
diff --git a/vendor/hyper/src/ext/mod.rs b/vendor/hyper/src/ext/mod.rs
new file mode 100644
index 00000000..da28da64
--- /dev/null
+++ b/vendor/hyper/src/ext/mod.rs
@@ -0,0 +1,246 @@
+//! HTTP extensions.
+
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+use bytes::Bytes;
+#[cfg(any(
+ all(any(feature = "client", feature = "server"), feature = "http1"),
+ feature = "ffi"
+))]
+use http::header::HeaderName;
+#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
+use http::header::{HeaderMap, IntoHeaderName, ValueIter};
+#[cfg(feature = "ffi")]
+use std::collections::HashMap;
+#[cfg(feature = "http2")]
+use std::fmt;
+
+#[cfg(any(feature = "http1", feature = "ffi"))]
+mod h1_reason_phrase;
+#[cfg(any(feature = "http1", feature = "ffi"))]
+pub use h1_reason_phrase::ReasonPhrase;
+
+#[cfg(all(feature = "http1", feature = "client"))]
+mod informational;
+#[cfg(all(feature = "http1", feature = "client"))]
+pub use informational::on_informational;
+#[cfg(all(feature = "http1", feature = "client"))]
+pub(crate) use informational::OnInformational;
+#[cfg(all(feature = "http1", feature = "client", feature = "ffi"))]
+pub(crate) use informational::{on_informational_raw, OnInformationalCallback};
+
#[cfg(feature = "http2")]
/// Represents the `:protocol` pseudo-header used by
/// the [Extended CONNECT Protocol].
///
/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
#[derive(Clone, Eq, PartialEq)]
pub struct Protocol {
    // Thin wrapper over the `h2` crate's protocol type.
    inner: h2::ext::Protocol,
}

#[cfg(feature = "http2")]
impl Protocol {
    /// Converts a static string to a protocol name.
    pub const fn from_static(value: &'static str) -> Self {
        Self {
            inner: h2::ext::Protocol::from_static(value),
        }
    }

    /// Returns a str representation of the header.
    pub fn as_str(&self) -> &str {
        self.inner.as_str()
    }

    /// Wraps an `h2` protocol value (compiled for the `server` feature only).
    #[cfg(feature = "server")]
    pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self {
        Self { inner }
    }

    /// Unwraps back into the `h2` protocol value (`client` feature only).
    #[cfg(all(feature = "client", feature = "http2"))]
    pub(crate) fn into_inner(self) -> h2::ext::Protocol {
        self.inner
    }
}

#[cfg(feature = "http2")]
impl<'a> From<&'a str> for Protocol {
    fn from(value: &'a str) -> Self {
        Self {
            inner: h2::ext::Protocol::from(value),
        }
    }
}

#[cfg(feature = "http2")]
impl AsRef<[u8]> for Protocol {
    fn as_ref(&self) -> &[u8] {
        self.inner.as_ref()
    }
}

#[cfg(feature = "http2")]
impl fmt::Debug for Protocol {
    // Delegates to the inner `h2` Debug representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}
+
/// A map from header names to their original casing as received in an HTTP message.
///
/// If an HTTP/1 response `res` is parsed on a connection whose option
/// [`preserve_header_case`] was set to true and the response included
/// the following headers:
///
/// ```ignore
/// x-Bread: Baguette
/// X-BREAD: Pain
/// x-bread: Ficelle
/// ```
///
/// Then `res.extensions().get::<HeaderCaseMap>()` will return a map with:
///
/// ```ignore
/// HeaderCaseMap({
///     "x-bread": ["x-Bread", "X-BREAD", "x-bread"],
/// })
/// ```
///
/// [`preserve_header_case`]: /client/struct.Client.html#method.preserve_header_case
#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
#[derive(Clone, Debug)]
pub(crate) struct HeaderCaseMap(HeaderMap<Bytes>);

#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
impl HeaderCaseMap {
    /// Returns a view of all spellings associated with that header name,
    /// in the order they were found.
    //
    // Wrapper that hides the concrete `ValueIter` type behind `impl Iterator`.
    #[cfg(feature = "client")]
    pub(crate) fn get_all<'a>(
        &'a self,
        name: &HeaderName,
    ) -> impl Iterator<Item = impl AsRef<[u8]> + 'a> + 'a {
        self.get_all_internal(name)
    }

    /// Returns a view of all spellings associated with that header name,
    /// in the order they were found.
    #[cfg(any(feature = "client", feature = "server"))]
    pub(crate) fn get_all_internal(&self, name: &HeaderName) -> ValueIter<'_, Bytes> {
        self.0.get_all(name).into_iter()
    }

    /// Creates an empty map (inherent `default`, not the `Default` trait).
    #[cfg(any(feature = "client", feature = "server"))]
    pub(crate) fn default() -> Self {
        Self(Default::default())
    }

    /// Replaces the recorded casing for `name` (tests and `ffi` only).
    #[cfg(any(test, feature = "ffi"))]
    pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) {
        self.0.insert(name, orig);
    }

    /// Records an additional original spelling (`orig`) for `name`.
    #[cfg(any(feature = "client", feature = "server"))]
    pub(crate) fn append<N>(&mut self, name: N, orig: Bytes)
    where
        N: IntoHeaderName,
    {
        self.0.append(name, orig);
    }
}
+
#[cfg(feature = "ffi")]
#[derive(Clone, Debug)]
/// Records the original wire order of header names (`ffi` feature only).
///
/// Conceptually a count of how many entries each header name maps to, plus
/// the ordered list of `(name, index)` pairs as the headers arrived.
pub(crate) struct OriginalHeaderOrder {
    /// Stores how many entries a Headername maps to. This is used
    /// for accounting.
    num_entries: HashMap<HeaderName, usize>,
    /// Stores the ordering of the headers. ex: `vec[i] = (headerName, idx)`,
    /// The vector is ordered such that the ith element
    /// represents the ith header that came in off the line.
    /// The `HeaderName` and `idx` are then used elsewhere to index into
    /// the multi map that stores the header values.
    entry_order: Vec<(HeaderName, usize)>,
}
+
#[cfg(all(feature = "http1", feature = "ffi"))]
impl OriginalHeaderOrder {
    /// Creates an empty order map (inherent `default`, not the `Default`
    /// trait).
    pub(crate) fn default() -> Self {
        OriginalHeaderOrder {
            num_entries: HashMap::new(),
            entry_order: Vec::new(),
        }
    }

    /// Records `name`, but only the first time it is encountered.
    pub(crate) fn insert(&mut self, name: HeaderName) {
        if !self.num_entries.contains_key(&name) {
            let idx = 0;
            self.num_entries.insert(name.clone(), 1);
            self.entry_order.push((name, idx));
        }
        // Replacing an already existing element does not
        // change ordering, so we only care if its the first
        // header name encountered
    }

    /// Records another occurrence of `name`, assigning it the next index
    /// for that header name.
    pub(crate) fn append<N>(&mut self, name: N)
    where
        N: IntoHeaderName + Into<HeaderName> + Clone,
    {
        let name: HeaderName = name.into();
        // Single map lookup in the common (repeated-header) case; the name
        // is only cloned the first time it is seen.
        let idx = if let Some(count) = self.num_entries.get_mut(&name) {
            let idx = *count;
            *count += 1;
            idx
        } else {
            self.num_entries.insert(name.clone(), 1);
            0
        };
        self.entry_order.push((name, idx));
    }

    // No doc test is run here because `RUSTFLAGS='--cfg hyper_unstable_ffi'`
    // is needed to compile. Once ffi is stabilized `no_run` should be removed
    // here.
    /// This returns an iterator that provides header names and indexes
    /// in the original order received.
    ///
    /// # Examples
    /// ```no_run
    /// use hyper::ext::OriginalHeaderOrder;
    /// use hyper::header::{HeaderName, HeaderValue, HeaderMap};
    ///
    /// let mut h_order = OriginalHeaderOrder::default();
    /// let mut h_map = HeaderMap::new();
    ///
    /// let name1 = HeaderName::from_static("set-cookie");
    /// let value1 = HeaderValue::from_static("a=b");
    /// h_map.append(name1.clone(), value1);
    /// h_order.append(name1);
    ///
    /// let name2 = HeaderName::from_static("content-encoding");
    /// let value2 = HeaderValue::from_static("gzip");
    /// h_map.append(name2.clone(), value2);
    /// h_order.append(name2);
    ///
    /// let name3 = HeaderName::from_static("set-cookie");
    /// let value3 = HeaderValue::from_static("c=d");
    /// h_map.append(name3.clone(), value3);
    /// h_order.append(name3);
    ///
    /// let mut iter = h_order.get_in_order();
    ///
    /// let (name, idx) = iter.next().unwrap();
    /// assert_eq!(h_map.get_all(name).iter().nth(*idx).unwrap().as_bytes(), b"a=b");
    ///
    /// let (name, idx) = iter.next().unwrap();
    /// assert_eq!(h_map.get_all(name).iter().nth(*idx).unwrap().as_bytes(), b"gzip");
    ///
    /// let (name, idx) = iter.next().unwrap();
    /// assert_eq!(h_map.get_all(name).iter().nth(*idx).unwrap().as_bytes(), b"c=d");
    /// ```
    pub(crate) fn get_in_order(&self) -> impl Iterator<Item = &(HeaderName, usize)> {
        self.entry_order.iter()
    }
}
diff --git a/vendor/hyper/src/ffi/body.rs b/vendor/hyper/src/ffi/body.rs
new file mode 100644
index 00000000..e5a09e57
--- /dev/null
+++ b/vendor/hyper/src/ffi/body.rs
@@ -0,0 +1,302 @@
+use std::ffi::{c_int, c_void};
+use std::mem::ManuallyDrop;
+use std::ptr;
+use std::task::{Context, Poll};
+
+use http_body_util::BodyExt as _;
+
+use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType};
+use super::{UserDataPointer, HYPER_ITER_CONTINUE};
+use crate::body::{Bytes, Frame, Incoming as IncomingBody};
+use crate::ffi::size_t;
+
/// A streaming HTTP body.
///
/// This is used both for sending requests (with `hyper_request_set_body`) and
/// for receiving responses (with `hyper_response_body`).
///
/// For outgoing request bodies, call `hyper_body_set_data_func` to provide the
/// data.
///
/// For incoming response bodies, call `hyper_body_data` to get a task that will
/// yield a chunk of data each time it is polled. That task must be then be
/// added to the executor with `hyper_executor_push`.
///
/// Methods:
///
/// - hyper_body_new:           Create a new “empty” body.
/// - hyper_body_set_userdata:  Set userdata on this body, which will be passed to callback functions.
/// - hyper_body_set_data_func: Set the data callback for this body.
/// - hyper_body_data:          Creates a task that will poll a response body for the next buffer of data.
/// - hyper_body_foreach:       Creates a task to execute the callback with each body chunk received.
/// - hyper_body_free:          Free a body.
// Newtype over hyper's `Incoming` body so cbindgen sees an opaque type.
pub struct hyper_body(pub(super) IncomingBody);

/// A buffer of bytes that is sent or received on a `hyper_body`.
///
/// Obtain one of these in the callback of `hyper_body_foreach` or by receiving
/// a task of type `HYPER_TASK_BUF` from `hyper_executor_poll` (after calling
/// `hyper_body_data` and pushing the resulting task).
///
/// Methods:
///
/// - hyper_buf_bytes: Get a pointer to the bytes in this buffer.
/// - hyper_buf_copy:  Create a new hyper_buf * by copying the provided bytes.
/// - hyper_buf_free:  Free this buffer.
/// - hyper_buf_len:   Get the length of the bytes this buffer contains.
pub struct hyper_buf(pub(crate) Bytes);

/// The user-provided half of an outgoing FFI body.
pub(crate) struct UserBody {
    // Callback polled for the next chunk of outgoing body data.
    data_func: hyper_body_data_callback,
    // Opaque pointer handed back to `data_func` on every invocation.
    userdata: *mut c_void,
}

// ===== Body =====

// Callback invoked by `hyper_body_foreach` once per received data chunk.
type hyper_body_foreach_callback = extern "C" fn(*mut c_void, *const hyper_buf) -> c_int;

// Callback polled by `UserBody::poll_data` for outgoing body data.
type hyper_body_data_callback =
    extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut *mut hyper_buf) -> c_int;
+
ffi_fn! {
    /// Creates a new "empty" body.
    ///
    /// If not configured, this body acts as an empty payload.
    ///
    /// To avoid a memory leak, the body must eventually be consumed by
    /// `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`.
    fn hyper_body_new() -> *mut hyper_body {
        // Ownership is transferred to the caller as a raw pointer.
        Box::into_raw(Box::new(hyper_body(IncomingBody::ffi())))
    } ?= ptr::null_mut()
}

ffi_fn! {
    /// Free a body.
    ///
    /// This should only be used if the request isn't consumed by
    /// `hyper_body_foreach` or `hyper_request_set_body`.
    fn hyper_body_free(body: *mut hyper_body) {
        // Reclaims the Box allocated in `hyper_body_new` and drops it.
        drop(non_null!(Box::from_raw(body) ?= ()));
    }
}
+
ffi_fn! {
    /// Creates a task that will poll a response body for the next buffer of data.
    ///
    /// The task may have different types depending on the outcome:
    ///
    /// - `HYPER_TASK_BUF`: Success, and more data was received.
    /// - `HYPER_TASK_ERROR`: An error retrieving the data.
    /// - `HYPER_TASK_EMPTY`: The body has finished streaming data.
    ///
    /// When the application receives the task from `hyper_executor_poll`,
    /// if the task type is `HYPER_TASK_BUF`, it should cast the task to
    /// `hyper_buf *` and consume all the bytes in the buffer. Then
    /// the application should call `hyper_body_data` again for the same
    /// `hyper_body *`, to create a task for the next buffer of data.
    /// Repeat until the polled task type is `HYPER_TASK_ERROR` or
    /// `HYPER_TASK_EMPTY`.
    ///
    /// To avoid a memory leak, the task must eventually be consumed by
    /// `hyper_task_free`, or taken ownership of by `hyper_executor_push`
    /// without subsequently being given back by `hyper_executor_poll`.
    ///
    /// This does not consume the `hyper_body *`, so it may be used again.
    /// However, the `hyper_body *` MUST NOT be used or freed until the
    /// related task is returned from `hyper_executor_poll`.
    ///
    /// For a more convenient method, see also `hyper_body_foreach`.
    fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task {
        // This doesn't take ownership of the Body, so don't allow destructor
        let mut body = ManuallyDrop::new(non_null!(Box::from_raw(body) ?= ptr::null_mut()));

        Box::into_raw(hyper_task::boxed(async move {
            loop {
                match body.0.frame().await {
                    Some(Ok(frame)) => {
                        if let Ok(data) = frame.into_data() {
                            return Ok(Some(hyper_buf(data)));
                        } else {
                            // Non-data frames (e.g. trailers) are skipped.
                            continue;
                        }
                    },
                    Some(Err(e)) => return Err(e),
                    None => return Ok(None),
                }
            }
        }))
    } ?= ptr::null_mut()
}
+
ffi_fn! {
    /// Creates a task to execute the callback with each body chunk received.
    ///
    /// To avoid a memory leak, the task must eventually be consumed by
    /// `hyper_task_free`, or taken ownership of by `hyper_executor_push`
    /// without subsequently being given back by `hyper_executor_poll`.
    ///
    /// The `hyper_buf` pointer is only a borrowed reference. It cannot live outside
    /// the execution of the callback. You must make a copy of the bytes to retain them.
    ///
    /// The callback should return `HYPER_ITER_CONTINUE` to continue iterating
    /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. Each
    /// invocation of the callback must consume all the bytes it is provided.
    /// There is no mechanism to signal to Hyper that only a subset of bytes were
    /// consumed.
    ///
    /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it.
    fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut hyper_task {
        let mut body = non_null!(Box::from_raw(body) ?= ptr::null_mut());
        let userdata = UserDataPointer(userdata);

        Box::into_raw(hyper_task::boxed(async move {
            // NOTE(review): this no-op borrow appears to force the whole
            // `UserDataPointer` wrapper (not just its raw-pointer field) to
            // be captured by the `async move` block — confirm against the
            // wrapper's Send/Sync impls.
            let _ = &userdata;
            while let Some(item) = body.0.frame().await {
                let frame = item?;
                if let Ok(chunk) = frame.into_data() {
                    if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) {
                        return Err(crate::Error::new_user_aborted_by_callback());
                    }
                }
            }
            Ok(())
        }))
    } ?= ptr::null_mut()
}
+
ffi_fn! {
    /// Set userdata on this body, which will be passed to callback functions.
    fn hyper_body_set_userdata(body: *mut hyper_body, userdata: *mut c_void) {
        let b = non_null!(&mut *body ?= ());
        // Stored on the body's `UserBody` half; handed to `data_func` later.
        b.0.as_ffi_mut().userdata = userdata;
    }
}

ffi_fn! {
    /// Set the outgoing data callback for this body.
    ///
    /// The callback is called each time hyper needs to send more data for the
    /// body. It is passed the value from `hyper_body_set_userdata`.
    ///
    /// If there is data available, the `hyper_buf **` argument should be set
    /// to a `hyper_buf *` containing the data, and `HYPER_POLL_READY` should
    /// be returned.
    ///
    /// Returning `HYPER_POLL_READY` while the `hyper_buf **` argument points
    /// to `NULL` will indicate the body has completed all data.
    ///
    /// If there is more data to send, but it isn't yet available, a
    /// `hyper_waker` should be saved from the `hyper_context *` argument, and
    /// `HYPER_POLL_PENDING` should be returned. You must wake the saved waker
    /// to signal the task when data is available.
    ///
    /// If some error has occurred, you can return `HYPER_POLL_ERROR` to abort
    /// the body.
    fn hyper_body_set_data_func(body: *mut hyper_body, func: hyper_body_data_callback) {
        let b = non_null!{ &mut *body ?= () };
        b.0.as_ffi_mut().data_func = func;
    }
}
+
// ===== impl UserBody =====

impl UserBody {
    /// Creates a `UserBody` whose default callback immediately reports
    /// end-of-body (see `data_noop`).
    pub(crate) fn new() -> UserBody {
        UserBody {
            data_func: data_noop,
            userdata: std::ptr::null_mut(),
        }
    }

    /// Polls the user's data callback for the next outgoing body frame.
    ///
    /// Return-code mapping:
    /// - `HYPER_POLL_READY` + null `out`     => end of body (`None`)
    /// - `HYPER_POLL_READY` + non-null `out` => one data frame
    /// - `HYPER_POLL_PENDING`                => `Poll::Pending`
    /// - `HYPER_POLL_ERROR`                  => body-write-aborted error
    /// - anything else                       => body-write error
    pub(crate) fn poll_data(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Option<crate::Result<Frame<Bytes>>>> {
        let mut out = std::ptr::null_mut();
        match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) {
            super::task::HYPER_POLL_READY => {
                if out.is_null() {
                    Poll::Ready(None)
                } else {
                    // SAFETY: NOTE(review) — relies on the FFI contract that a
                    // non-null `out` points to a heap-allocated `hyper_buf`
                    // (e.g. from `hyper_buf_copy`) whose ownership is
                    // transferred to hyper here; confirm against the FFI docs.
                    let buf = unsafe { Box::from_raw(out) };
                    Poll::Ready(Some(Ok(Frame::data(buf.0))))
                }
            }
            super::task::HYPER_POLL_PENDING => Poll::Pending,
            super::task::HYPER_POLL_ERROR => {
                Poll::Ready(Some(Err(crate::Error::new_body_write_aborted())))
            }
            unexpected => Poll::Ready(Some(Err(crate::Error::new_body_write(format!(
                "unexpected hyper_body_data_func return code {}",
                unexpected
            ))))),
        }
    }
}
+
// Default `data_func`: returns READY while leaving `*out` null, which
// `poll_data` interprets as end-of-body (an empty payload).
/// cbindgen:ignore
extern "C" fn data_noop(
    _userdata: *mut c_void,
    _: *mut hyper_context<'_>,
    _: *mut *mut hyper_buf,
) -> c_int {
    super::task::HYPER_POLL_READY
}

// SAFETY: NOTE(review) — `UserBody` holds a raw `userdata` pointer and a C
// function pointer; these impls assert that the user's callback and userdata
// may be used from any thread, per hyper's FFI contract. Confirm against the
// FFI documentation.
unsafe impl Send for UserBody {}
unsafe impl Sync for UserBody {}
+
// ===== Bytes =====

ffi_fn! {
    /// Create a new `hyper_buf *` by copying the provided bytes.
    ///
    /// This makes an owned copy of the bytes, so the `buf` argument can be
    /// freed (with `hyper_buf_free`) or changed afterwards.
    ///
    /// To avoid a memory leak, the copy must eventually be consumed by
    /// `hyper_buf_free`.
    ///
    /// This returns `NULL` if allocating a new buffer fails.
    fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf {
        // SAFETY: relies on the caller passing a readable `buf`/`len` pair,
        // per the FFI contract.
        let slice = unsafe {
            std::slice::from_raw_parts(buf, len)
        };
        Box::into_raw(Box::new(hyper_buf(Bytes::copy_from_slice(slice))))
    } ?= ptr::null_mut()
}

ffi_fn! {
    /// Get a pointer to the bytes in this buffer.
    ///
    /// This should be used in conjunction with `hyper_buf_len` to get the length
    /// of the bytes data.
    ///
    /// This pointer is borrowed data, and not valid once the `hyper_buf` is
    /// consumed/freed.
    fn hyper_buf_bytes(buf: *const hyper_buf) -> *const u8 {
        // SAFETY: relies on the caller passing a valid, live `hyper_buf`.
        unsafe { (*buf).0.as_ptr() }
    } ?= ptr::null()
}

ffi_fn! {
    /// Get the length of the bytes this buffer contains.
    fn hyper_buf_len(buf: *const hyper_buf) -> size_t {
        // SAFETY: relies on the caller passing a valid, live `hyper_buf`.
        unsafe { (*buf).0.len() }
    }
}

ffi_fn! {
    /// Free this buffer.
    ///
    /// This should be used for any buffer once it is no longer needed.
    fn hyper_buf_free(buf: *mut hyper_buf) {
        // Reclaims the Box created by `hyper_buf_copy` (or by hyper) and
        // drops it.
        drop(unsafe { Box::from_raw(buf) });
    }
}
+
// Identifies a boxed `hyper_buf` task result as `HYPER_TASK_BUF` for the
// FFI task-type dispatch.
unsafe impl AsTaskType for hyper_buf {
    fn as_task_type(&self) -> hyper_task_return_type {
        hyper_task_return_type::HYPER_TASK_BUF
    }
}
diff --git a/vendor/hyper/src/ffi/client.rs b/vendor/hyper/src/ffi/client.rs
new file mode 100644
index 00000000..63b03d87
--- /dev/null
+++ b/vendor/hyper/src/ffi/client.rs
@@ -0,0 +1,274 @@
+use std::ffi::c_int;
+use std::ptr;
+use std::sync::Arc;
+
+use crate::client::conn;
+use crate::rt::Executor as _;
+
+use super::error::hyper_code;
+use super::http_types::{hyper_request, hyper_response};
+use super::io::hyper_io;
+use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType, WeakExec};
+
+/// An options builder to configure an HTTP client connection.
+///
+/// Methods:
+///
+/// - hyper_clientconn_options_new: Creates a new set of HTTP clientconn options to be used in a handshake.
+/// - hyper_clientconn_options_exec: Set the client background task executor.
+/// - hyper_clientconn_options_http2: Set whether to use HTTP2.
+/// - hyper_clientconn_options_set_preserve_header_case: Set whether header case is preserved.
+/// - hyper_clientconn_options_set_preserve_header_order: Set whether header order is preserved.
+/// - hyper_clientconn_options_http1_allow_multiline_headers: Set whether HTTP/1 connections accept obsolete line folding for header values.
+/// - hyper_clientconn_options_free: Free a set of HTTP clientconn options.
+pub struct hyper_clientconn_options {
+    // Forwarded to `conn::http1::Builder::allow_obsolete_multiline_headers_in_responses`.
+    http1_allow_obsolete_multiline_headers_in_responses: bool,
+    // Forwarded to `conn::http1::Builder::preserve_header_case`.
+    http1_preserve_header_case: bool,
+    // Forwarded to `conn::http1::Builder::preserve_header_order`.
+    http1_preserve_header_order: bool,
+    // When true (and the `http2` feature is on), handshake as HTTP/2.
+    http2: bool,
+    /// Use a `Weak` to prevent cycles.
+    exec: WeakExec,
+}
+
+/// An HTTP client connection handle.
+///
+/// These are used to send one or more requests on a single connection.
+///
+/// It's possible to send multiple requests on a single connection, such
+/// as when HTTP/1 keep-alive or HTTP/2 is used.
+///
+/// To create a `hyper_clientconn`:
+///
+/// 1. Create a `hyper_io` with `hyper_io_new`.
+/// 2. Create a `hyper_clientconn_options` with `hyper_clientconn_options_new`.
+/// 3. Call `hyper_clientconn_handshake` with the `hyper_io` and `hyper_clientconn_options`.
+///    This creates a `hyper_task`.
+/// 4. Call `hyper_task_set_userdata` to assign an application-specific pointer to the task.
+///    This allows keeping track of multiple connections that may be handshaking
+///    simultaneously.
+/// 5. Add the `hyper_task` to an executor with `hyper_executor_push`.
+/// 6. Poll that executor until it yields a task of type `HYPER_TASK_CLIENTCONN`.
+/// 7. Extract the `hyper_clientconn` from the task with `hyper_task_value`.
+///    This will require a cast from `void *` to `hyper_clientconn *`.
+///
+/// This process results in a `hyper_clientconn` that permanently owns the
+/// `hyper_io`. Because the `hyper_io` in turn owns a TCP or TLS connection, that means
+/// the `hyper_clientconn` owns the connection for both the clientconn's lifetime
+/// and the connection's lifetime.
+///
+/// In other words, each connection (`hyper_io`) must have exactly one `hyper_clientconn`
+/// associated with it. That's because `hyper_clientconn_handshake` sends the
+/// [HTTP/2 Connection Preface] (for HTTP/2 connections). Since that preface can't
+/// be sent twice, handshake can't be called twice.
+///
+/// [HTTP/2 Connection Preface]: https://datatracker.ietf.org/doc/html/rfc9113#name-http-2-connection-preface
+///
+/// Methods:
+///
+/// - hyper_clientconn_handshake: Creates an HTTP client handshake task.
+/// - hyper_clientconn_send: Creates a task to send a request on the client connection.
+/// - hyper_clientconn_free: Free a hyper_clientconn *.
+pub struct hyper_clientconn {
+    tx: Tx,
+}
+
+/// The per-protocol request sender produced by the handshake.
+enum Tx {
+    #[cfg(feature = "http1")]
+    Http1(conn::http1::SendRequest<crate::body::Incoming>),
+    #[cfg(feature = "http2")]
+    Http2(conn::http2::SendRequest<crate::body::Incoming>),
+}
+
+// ===== impl hyper_clientconn =====
+
+ffi_fn! {
+    /// Creates an HTTP client handshake task.
+    ///
+    /// Both the `io` and the `options` are consumed in this function call.
+    /// They should not be used or freed afterwards.
+    ///
+    /// The returned task must be polled with an executor until the handshake
+    /// completes, at which point the value can be taken.
+    ///
+    /// To avoid a memory leak, the task must eventually be consumed by
+    /// `hyper_task_free`, or taken ownership of by `hyper_executor_push`
+    /// without subsequently being given back by `hyper_executor_poll`.
+    fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task {
+        // Reclaim ownership of both heap allocations up front; the API
+        // documents them as consumed by this call.
+        let options = non_null! { Box::from_raw(options) ?= ptr::null_mut() };
+        let io = non_null! { Box::from_raw(io) ?= ptr::null_mut() };
+
+        Box::into_raw(hyper_task::boxed(async move {
+            // Prefer HTTP/2 when requested and compiled in; otherwise fall
+            // through to the HTTP/1 handshake below.
+            #[cfg(feature = "http2")]
+            {
+                if options.http2 {
+                    return conn::http2::Builder::new(options.exec.clone())
+                        .handshake::<_, crate::body::Incoming>(io)
+                        .await
+                        .map(|(tx, conn)| {
+                            // Drive the connection future on the caller's
+                            // executor; only the send handle is returned.
+                            options.exec.execute(Box::pin(async move {
+                                let _ = conn.await;
+                            }));
+                            hyper_clientconn { tx: Tx::Http2(tx) }
+                        });
+                }
+            }
+
+            conn::http1::Builder::new()
+                .allow_obsolete_multiline_headers_in_responses(options.http1_allow_obsolete_multiline_headers_in_responses)
+                .preserve_header_case(options.http1_preserve_header_case)
+                .preserve_header_order(options.http1_preserve_header_order)
+                .handshake::<_, crate::body::Incoming>(io)
+                .await
+                .map(|(tx, conn)| {
+                    // Same pattern as HTTP/2: spawn the connection driver,
+                    // hand back only the request sender.
+                    options.exec.execute(Box::pin(async move {
+                        let _ = conn.await;
+                    }));
+                    hyper_clientconn { tx: Tx::Http1(tx) }
+                })
+        }))
+    } ?= std::ptr::null_mut()
+}
+
+ffi_fn! {
+    /// Creates a task to send a request on the client connection.
+    ///
+    /// This consumes the request. You should not use or free the request
+    /// afterwards.
+    ///
+    /// Returns a task that needs to be polled until it is ready. When ready, the
+    /// task yields a `hyper_response *`.
+    ///
+    /// To avoid a memory leak, the task must eventually be consumed by
+    /// `hyper_task_free`, or taken ownership of by `hyper_executor_push`
+    /// without subsequently being given back by `hyper_executor_poll`.
+    fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task {
+        let mut req = non_null! { Box::from_raw(req) ?= ptr::null_mut() };
+
+        // Update request with original-case map of headers
+        req.finalize_request();
+
+        // `Either` unifies the distinct HTTP/1 and HTTP/2 future types so one
+        // task type can wrap both protocols.
+        let fut = match non_null! { &mut *conn ?= ptr::null_mut() }.tx {
+            Tx::Http1(ref mut tx) => futures_util::future::Either::Left(tx.send_request(req.0)),
+            Tx::Http2(ref mut tx) => futures_util::future::Either::Right(tx.send_request(req.0)),
+        };
+
+        let fut = async move {
+            fut.await.map(hyper_response::wrap)
+        };
+
+        Box::into_raw(hyper_task::boxed(fut))
+    } ?= std::ptr::null_mut()
+}
+
+ffi_fn! {
+    /// Free a `hyper_clientconn *`.
+    ///
+    /// This should be used for any connection once it is no longer needed.
+    fn hyper_clientconn_free(conn: *mut hyper_clientconn) {
+        // Reclaim the Box (NULL-checked) and drop it, closing the send handle.
+        drop(non_null! { Box::from_raw(conn) ?= () });
+    }
+}
+
+// Marks a `hyper_clientconn` as a value a `hyper_task` may yield, tagged
+// `HYPER_TASK_CLIENTCONN` for C-side downcasting.
+unsafe impl AsTaskType for hyper_clientconn {
+    fn as_task_type(&self) -> hyper_task_return_type {
+        hyper_task_return_type::HYPER_TASK_CLIENTCONN
+    }
+}
+
+// ===== impl hyper_clientconn_options =====
+
+ffi_fn! {
+    /// Creates a new set of HTTP clientconn options to be used in a handshake.
+    ///
+    /// Every option starts out disabled and no executor is set.
+    ///
+    /// To avoid a memory leak, the options must eventually be consumed by
+    /// `hyper_clientconn_options_free` or `hyper_clientconn_handshake`.
+    fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options {
+        let defaults = hyper_clientconn_options {
+            http1_allow_obsolete_multiline_headers_in_responses: false,
+            http1_preserve_header_case: false,
+            http1_preserve_header_order: false,
+            http2: false,
+            exec: WeakExec::new(),
+        };
+        Box::into_raw(Box::new(defaults))
+    } ?= std::ptr::null_mut()
+}
+
+ffi_fn! {
+    /// Set whether header case is preserved.
+    ///
+    /// Pass `0` to allow lowercase normalization (default), `1` to retain original case.
+    fn hyper_clientconn_options_set_preserve_header_case(opts: *mut hyper_clientconn_options, enabled: c_int) {
+        let preserve = enabled != 0;
+        let opts = non_null! { &mut *opts ?= () };
+        opts.http1_preserve_header_case = preserve;
+    }
+}
+
+ffi_fn! {
+    /// Set whether header order is preserved.
+    ///
+    /// Pass `0` to allow reordering (default), `1` to retain original ordering.
+    fn hyper_clientconn_options_set_preserve_header_order(opts: *mut hyper_clientconn_options, enabled: c_int) {
+        let preserve = enabled != 0;
+        let opts = non_null! { &mut *opts ?= () };
+        opts.http1_preserve_header_order = preserve;
+    }
+}
+
+ffi_fn! {
+    /// Free a set of HTTP clientconn options.
+    ///
+    /// This should only be used if the options aren't consumed by
+    /// `hyper_clientconn_handshake`.
+    fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) {
+        let boxed = non_null! { Box::from_raw(opts) ?= () };
+        drop(boxed);
+    }
+}
+
+ffi_fn! {
+    /// Set the client background task executor.
+    ///
+    /// This does not consume the `options` or the `exec`.
+    fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const hyper_executor) {
+        let opts = non_null! { &mut *opts ?= () };
+
+        // Borrow the caller's Arc just long enough to downgrade it:
+        // reconstruct it from the raw pointer, take a Weak, then `forget` it
+        // so the caller's reference count is left untouched.
+        let exec = non_null! { Arc::from_raw(exec) ?= () };
+        let weak_exec = hyper_executor::downgrade(&exec);
+        std::mem::forget(exec);
+
+        opts.exec = weak_exec;
+    }
+}
+
+ffi_fn! {
+    /// Set whether to use HTTP2.
+    ///
+    /// Pass `0` to disable, `1` to enable.
+    fn hyper_clientconn_options_http2(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code {
+        #[cfg(feature = "http2")]
+        {
+            let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG };
+            opts.http2 = enabled != 0;
+            hyper_code::HYPERE_OK
+        }
+
+        // Without the feature, report the failure without touching the
+        // arguments (the drops silence unused-variable warnings).
+        #[cfg(not(feature = "http2"))]
+        {
+            drop(opts);
+            drop(enabled);
+            hyper_code::HYPERE_FEATURE_NOT_ENABLED
+        }
+    }
+}
+
+ffi_fn! {
+    /// Set whether HTTP/1 connections accept obsolete line folding for header values.
+    ///
+    /// Newline codepoints (\r and \n) will be transformed to spaces when parsing.
+    ///
+    /// Pass `0` to disable, `1` to enable.
+    ///
+    fn hyper_clientconn_options_http1_allow_multiline_headers(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code {
+        let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG };
+        opts.http1_allow_obsolete_multiline_headers_in_responses = enabled != 0;
+        hyper_code::HYPERE_OK
+    }
+}
diff --git a/vendor/hyper/src/ffi/error.rs b/vendor/hyper/src/ffi/error.rs
new file mode 100644
index 00000000..cc289ed7
--- /dev/null
+++ b/vendor/hyper/src/ffi/error.rs
@@ -0,0 +1,96 @@
+use crate::ffi::size_t;
+
+/// A more detailed error object returned by some hyper functions.
+///
+/// Compare with `hyper_code`, which is a simpler error returned from
+/// some hyper functions.
+///
+/// Methods:
+///
+/// - hyper_error_code: Get an equivalent hyper_code from this error.
+/// - hyper_error_print: Print the details of this error to a buffer.
+/// - hyper_error_free: Frees a hyper_error.
+pub struct hyper_error(crate::Error);
+
+/// A return code for many of hyper's methods.
+#[repr(C)]
+pub enum hyper_code {
+    /// All is well.
+    HYPERE_OK,
+    /// General error, details in the `hyper_error *`.
+    HYPERE_ERROR,
+    /// A function argument was invalid.
+    HYPERE_INVALID_ARG,
+    /// The IO transport returned an EOF when one wasn't expected.
+    ///
+    /// This typically means an HTTP request or response was expected, but the
+    /// connection closed cleanly without sending (all of) it.
+    HYPERE_UNEXPECTED_EOF,
+    /// Aborted by a user supplied callback.
+    HYPERE_ABORTED_BY_CALLBACK,
+    /// An optional hyper feature was not enabled.
+    // With `http2` enabled the only code path producing this variant
+    // (`hyper_clientconn_options_http2`'s cfg(not(http2)) branch) is compiled
+    // out, so suppress the unused lint in that configuration.
+    #[cfg_attr(feature = "http2", allow(unused))]
+    HYPERE_FEATURE_NOT_ENABLED,
+    /// The peer sent an HTTP message that could not be parsed.
+    HYPERE_INVALID_PEER_MESSAGE,
+}
+
+// ===== impl hyper_error =====
+
+impl hyper_error {
+    /// Map this error onto the closest matching `hyper_code` variant.
+    fn code(&self) -> hyper_code {
+        use crate::error::Kind as ErrorKind;
+        use crate::error::User;
+
+        match self.0.kind() {
+            ErrorKind::Parse(_) => hyper_code::HYPERE_INVALID_PEER_MESSAGE,
+            ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF,
+            ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK,
+            // TODO: add more variants
+            _ => hyper_code::HYPERE_ERROR,
+        }
+    }
+
+    /// Write this error's `Display` text into `dst`, returning the number of
+    /// bytes written (possibly truncated, possibly zero).
+    fn print_to(&self, dst: &mut [u8]) -> usize {
+        use std::io::Write;
+
+        let mut cursor = std::io::Cursor::new(dst);
+
+        // Ignore the io::Result from write!: when the buffer fills up, as much
+        // as fit has been written and the cursor position records exactly how
+        // much that was (even if it is zero).
+        let _ = write!(cursor, "{}", &self.0);
+        cursor.position() as usize
+    }
+}
+
+ffi_fn! {
+    /// Frees a `hyper_error`.
+    ///
+    /// This should be used for any error once it is no longer needed.
+    fn hyper_error_free(err: *mut hyper_error) {
+        let boxed = non_null!(Box::from_raw(err) ?= ());
+        drop(boxed);
+    }
+}
+
+ffi_fn! {
+    /// Get an equivalent `hyper_code` from this error.
+    fn hyper_error_code(err: *const hyper_error) -> hyper_code {
+        let err = non_null!(&*err ?= hyper_code::HYPERE_INVALID_ARG);
+        err.code()
+    }
+}
+
+ffi_fn! {
+    /// Print the details of this error to a buffer.
+    ///
+    /// The `dst_len` value must be the maximum length that the buffer can
+    /// store.
+    ///
+    /// The return value is number of bytes that were written to `dst`.
+    ///
+    /// Returns `0` (writing nothing) if either `err` or `dst` is `NULL`.
+    fn hyper_error_print(err: *const hyper_error, dst: *mut u8, dst_len: size_t) -> size_t {
+        // Validate both pointers before building the slice: the previous code
+        // called `from_raw_parts_mut` on `dst` unconditionally, which is
+        // undefined behavior when `dst` is NULL.
+        let err = non_null!(&*err ?= 0);
+        if dst.is_null() {
+            return 0;
+        }
+        let dst = unsafe {
+            std::slice::from_raw_parts_mut(dst, dst_len)
+        };
+        err.print_to(dst)
+    }
+}
diff --git a/vendor/hyper/src/ffi/http_types.rs b/vendor/hyper/src/ffi/http_types.rs
new file mode 100644
index 00000000..3dc4a254
--- /dev/null
+++ b/vendor/hyper/src/ffi/http_types.rs
@@ -0,0 +1,703 @@
+use std::ffi::{c_int, c_void};
+
+use bytes::Bytes;
+
+use super::body::hyper_body;
+use super::error::hyper_code;
+use super::task::{hyper_task_return_type, AsTaskType};
+use super::{UserDataPointer, HYPER_ITER_CONTINUE};
+use crate::body::Incoming as IncomingBody;
+use crate::ext::{HeaderCaseMap, OriginalHeaderOrder, ReasonPhrase};
+use crate::ffi::size_t;
+use crate::header::{HeaderName, HeaderValue};
+use crate::{HeaderMap, Method, Request, Response, Uri};
+
+/// An HTTP request.
+///
+/// Once you've finished constructing a request, you can send it with
+/// `hyper_clientconn_send`.
+///
+/// Methods:
+///
+/// - hyper_request_new: Construct a new HTTP request.
+/// - hyper_request_headers: Gets a mutable reference to the HTTP headers of this request
+/// - hyper_request_set_body: Set the body of the request.
+/// - hyper_request_set_method: Set the HTTP Method of the request.
+/// - hyper_request_set_uri: Set the URI of the request.
+/// - hyper_request_set_uri_parts: Set the URI of the request with separate scheme, authority, and path/query strings.
+/// - hyper_request_set_version: Set the preferred HTTP version of the request.
+/// - hyper_request_on_informational: Set an informational (1xx) response callback.
+/// - hyper_request_free: Free an HTTP request.
+pub struct hyper_request(pub(super) Request<IncomingBody>);
+
+/// An HTTP response.
+///
+/// Obtain one of these by making a request with `hyper_clientconn_send`, then
+/// polling the executor until you get a `hyper_task` of type
+/// `HYPER_TASK_RESPONSE`. To figure out which request this response
+/// corresponds to, check the userdata of the task, which you should
+/// previously have set to an application-specific identifier for the
+/// request.
+///
+/// Methods:
+///
+/// - hyper_response_status: Get the HTTP-Status code of this response.
+/// - hyper_response_version: Get the HTTP version used by this response.
+/// - hyper_response_reason_phrase: Get a pointer to the reason-phrase of this response.
+/// - hyper_response_reason_phrase_len: Get the length of the reason-phrase of this response.
+/// - hyper_response_headers: Gets a reference to the HTTP headers of this response.
+/// - hyper_response_body: Take ownership of the body of this response.
+/// - hyper_response_free: Free an HTTP response.
+pub struct hyper_response(pub(super) Response<IncomingBody>);
+
+/// An HTTP header map.
+///
+/// These can be part of a request or response.
+///
+/// Obtain a pointer to read or modify these from `hyper_request_headers`
+/// or `hyper_response_headers`.
+///
+/// Methods:
+///
+/// - hyper_headers_add: Adds the provided value to the list of the provided name.
+/// - hyper_headers_foreach: Iterates the headers passing each name and value pair to the callback.
+/// - hyper_headers_set: Sets the header with the provided name to the provided value.
+#[derive(Clone)]
+pub struct hyper_headers {
+    pub(super) headers: HeaderMap,
+    // The exact byte spelling each header name was added with, so iteration
+    // can report names in their original casing.
+    orig_casing: HeaderCaseMap,
+    // The order header names were added in, so iteration can replay it.
+    orig_order: OriginalHeaderOrder,
+}
+
+/// A user-registered callback (plus its opaque userdata) invoked for 1xx
+/// informational responses on a request.
+#[derive(Clone)]
+struct OnInformational {
+    func: hyper_request_on_informational_callback,
+    data: UserDataPointer,
+}
+
+/// C signature: `void callback(void *userdata, hyper_response *resp)`.
+type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut hyper_response);
+
+// ===== impl hyper_request =====
+
+ffi_fn! {
+    /// Construct a new HTTP request.
+    ///
+    /// The default request has an empty body. To send a body, call `hyper_request_set_body`.
+    ///
+    /// To avoid a memory leak, the request must eventually be consumed by
+    /// `hyper_request_free` or `hyper_clientconn_send`.
+    fn hyper_request_new() -> *mut hyper_request {
+        let req = Request::new(IncomingBody::empty());
+        Box::into_raw(Box::new(hyper_request(req)))
+    } ?= std::ptr::null_mut()
+}
+
+ffi_fn! {
+    /// Free an HTTP request.
+    ///
+    /// This should only be used if the request isn't consumed by
+    /// `hyper_clientconn_send`.
+    fn hyper_request_free(req: *mut hyper_request) {
+        let boxed = non_null!(Box::from_raw(req) ?= ());
+        drop(boxed);
+    }
+}
+
+ffi_fn! {
+    /// Set the HTTP Method of the request.
+    ///
+    /// Returns `HYPERE_INVALID_ARG` if the bytes are not a valid method.
+    fn hyper_request_set_method(req: *mut hyper_request, method: *const u8, method_len: size_t) -> hyper_code {
+        let bytes = unsafe {
+            std::slice::from_raw_parts(method, method_len as usize)
+        };
+        let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG);
+        if let Ok(parsed) = Method::from_bytes(bytes) {
+            *req.0.method_mut() = parsed;
+            hyper_code::HYPERE_OK
+        } else {
+            hyper_code::HYPERE_INVALID_ARG
+        }
+    }
+}
+
+ffi_fn! {
+    /// Set the URI of the request.
+    ///
+    /// The request's URI is best described as the `request-target` from the RFCs. So in HTTP/1,
+    /// whatever is set will get sent as-is in the first line (GET $uri HTTP/1.1). It
+    /// supports the 4 defined variants, origin-form, absolute-form, authority-form, and
+    /// asterisk-form.
+    ///
+    /// The underlying type was built to efficiently support HTTP/2 where the request-target is
+    /// split over :scheme, :authority, and :path. As such, each part can be set explicitly, or the
+    /// type can parse a single contiguous string and if a scheme is found, that slot is "set". If
+    /// the string just starts with a path, only the path portion is set. All pseudo headers that
+    /// have been parsed/set are sent when the connection type is HTTP/2.
+    ///
+    /// To set each slot explicitly, use `hyper_request_set_uri_parts`.
+    fn hyper_request_set_uri(req: *mut hyper_request, uri: *const u8, uri_len: size_t) -> hyper_code {
+        let bytes = unsafe {
+            std::slice::from_raw_parts(uri, uri_len as usize)
+        };
+        let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG);
+        if let Ok(parsed) = Uri::from_maybe_shared(bytes) {
+            *req.0.uri_mut() = parsed;
+            hyper_code::HYPERE_OK
+        } else {
+            hyper_code::HYPERE_INVALID_ARG
+        }
+    }
+}
+
+ffi_fn! {
+    /// Set the URI of the request with separate scheme, authority, and
+    /// path/query strings.
+    ///
+    /// Each of `scheme`, `authority`, and `path_and_query` should either be
+    /// null, to skip providing a component, or point to a UTF-8 encoded
+    /// string. If any string pointer argument is non-null, its corresponding
+    /// `len` parameter must be set to the string's length.
+    ///
+    /// Returns `HYPERE_INVALID_ARG` if `req` is `NULL` or the parts do not
+    /// form a valid URI.
+    fn hyper_request_set_uri_parts(
+        req: *mut hyper_request,
+        scheme: *const u8,
+        scheme_len: size_t,
+        authority: *const u8,
+        authority_len: size_t,
+        path_and_query: *const u8,
+        path_and_query_len: size_t
+    ) -> hyper_code {
+        // Check `req` up front with `non_null!`, consistent with the other
+        // setters; the previous code dereferenced it unchecked at the end.
+        let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG);
+        // Only the components whose pointers are non-null are set; the rest
+        // stay absent in the built URI.
+        let mut builder = Uri::builder();
+        if !scheme.is_null() {
+            let scheme_bytes = unsafe {
+                std::slice::from_raw_parts(scheme, scheme_len as usize)
+            };
+            builder = builder.scheme(scheme_bytes);
+        }
+        if !authority.is_null() {
+            let authority_bytes = unsafe {
+                std::slice::from_raw_parts(authority, authority_len as usize)
+            };
+            builder = builder.authority(authority_bytes);
+        }
+        if !path_and_query.is_null() {
+            let path_and_query_bytes = unsafe {
+                std::slice::from_raw_parts(path_and_query, path_and_query_len as usize)
+            };
+            builder = builder.path_and_query(path_and_query_bytes);
+        }
+        match builder.build() {
+            Ok(u) => {
+                *req.0.uri_mut() = u;
+                hyper_code::HYPERE_OK
+            },
+            Err(_) => {
+                hyper_code::HYPERE_INVALID_ARG
+            }
+        }
+    }
+}
+
+ffi_fn! {
+    /// Set the preferred HTTP version of the request.
+    ///
+    /// The version value should be one of the `HYPER_HTTP_VERSION_` constants.
+    ///
+    /// Note that this won't change the major HTTP version of the connection,
+    /// since that is determined at the handshake step.
+    fn hyper_request_set_version(req: *mut hyper_request, version: c_int) -> hyper_code {
+        use http::Version;
+
+        let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG);
+        let version = match version {
+            // "NONE" is treated as a preference for HTTP/1.1.
+            super::HYPER_HTTP_VERSION_NONE | super::HYPER_HTTP_VERSION_1_1 => Version::HTTP_11,
+            super::HYPER_HTTP_VERSION_1_0 => Version::HTTP_10,
+            super::HYPER_HTTP_VERSION_2 => Version::HTTP_2,
+            // We don't know this version
+            _ => return hyper_code::HYPERE_INVALID_ARG,
+        };
+        *req.0.version_mut() = version;
+        hyper_code::HYPERE_OK
+    }
+}
+
+ffi_fn! {
+    /// Gets a mutable reference to the HTTP headers of this request
+    ///
+    /// This is not an owned reference, so it should not be accessed after the
+    /// `hyper_request` has been consumed.
+    ///
+    /// Returns `NULL` if `req` is `NULL`.
+    fn hyper_request_headers(req: *mut hyper_request) -> *mut hyper_headers {
+        // Guard against NULL instead of blindly dereferencing (was UB),
+        // consistent with the other request accessors.
+        let req = non_null!(&mut *req ?= std::ptr::null_mut());
+        hyper_headers::get_or_default(req.0.extensions_mut())
+    } ?= std::ptr::null_mut()
+}
+
+ffi_fn! {
+    /// Set the body of the request.
+    ///
+    /// You can get a `hyper_body` by calling `hyper_body_new`.
+    ///
+    /// This takes ownership of the `hyper_body *`, you must not use it or
+    /// free it after setting it on the request.
+    fn hyper_request_set_body(req: *mut hyper_request, body: *mut hyper_body) -> hyper_code {
+        // The body Box is reclaimed first: even when `req` turns out to be
+        // NULL, ownership of the body was taken and it is dropped here.
+        let boxed_body = non_null!(Box::from_raw(body) ?= hyper_code::HYPERE_INVALID_ARG);
+        let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG);
+        let hyper_body(inner) = *boxed_body;
+        *req.0.body_mut() = inner;
+        hyper_code::HYPERE_OK
+    }
+}
+
+ffi_fn! {
+    /// Set an informational (1xx) response callback.
+    ///
+    /// The callback is called each time hyper receives an informational (1xx)
+    /// response for this request.
+    ///
+    /// The third argument is an opaque user data pointer, which is passed to
+    /// the callback each time.
+    ///
+    /// The callback is passed the `void *` data pointer, and a
+    /// `hyper_response *` which can be inspected as any other response. The
+    /// body of the response will always be empty.
+    ///
+    /// NOTE: The `hyper_response *` is just borrowed data, and will not
+    /// be valid after the callback finishes. You must copy any data you wish
+    /// to persist.
+    fn hyper_request_on_informational(req: *mut hyper_request, callback: hyper_request_on_informational_callback, data: *mut c_void) -> hyper_code {
+        #[cfg(feature = "client")]
+        {
+            // Store callback + userdata as a request extension; hyper's client
+            // invokes it via the OnInformationalCallback impl below.
+            let ext = OnInformational {
+                func: callback,
+                data: UserDataPointer(data),
+            };
+            let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG);
+            crate::ext::on_informational_raw(&mut req.0, ext);
+            hyper_code::HYPERE_OK
+        }
+        // Without the client feature there is nowhere to hook the callback;
+        // the drops silence unused-argument warnings.
+        #[cfg(not(feature = "client"))]
+        {
+            drop((req, callback, data));
+            hyper_code::HYPERE_FEATURE_NOT_ENABLED
+        }
+    }
+}
+
+impl hyper_request {
+    /// Move any FFI-accumulated `hyper_headers` extension back into the real
+    /// header map, re-attaching the original casing and ordering extensions so
+    /// the HTTP/1 serializer can use them.
+    pub(super) fn finalize_request(&mut self) {
+        let staged = self.0.extensions_mut().remove::<hyper_headers>();
+        if let Some(hyper_headers { headers, orig_casing, orig_order }) = staged {
+            *self.0.headers_mut() = headers;
+            self.0.extensions_mut().insert(orig_casing);
+            self.0.extensions_mut().insert(orig_order);
+        }
+    }
+}
+
+// ===== impl hyper_response =====
+
+ffi_fn! {
+    /// Free an HTTP response.
+    ///
+    /// This should be used for any response once it is no longer needed.
+    fn hyper_response_free(resp: *mut hyper_response) {
+        let boxed = non_null!(Box::from_raw(resp) ?= ());
+        drop(boxed);
+    }
+}
+
+ffi_fn! {
+    /// Get the HTTP-Status code of this response.
+    ///
+    /// It will always be within the range of 100-599.
+    fn hyper_response_status(resp: *const hyper_response) -> u16 {
+        let resp = non_null!(&*resp ?= 0);
+        resp.0.status().as_u16()
+    }
+}
+
+ffi_fn! {
+    /// Get a pointer to the reason-phrase of this response.
+    ///
+    /// This buffer is not null-terminated.
+    ///
+    /// This buffer is owned by the response, and should not be used after
+    /// the response has been freed.
+    ///
+    /// Use `hyper_response_reason_phrase_len()` to get the length of this
+    /// buffer.
+    fn hyper_response_reason_phrase(resp: *const hyper_response) -> *const u8 {
+        let resp = non_null!(&*resp ?= std::ptr::null());
+        resp.reason_phrase().as_ptr()
+    } ?= std::ptr::null()
+}
+
+ffi_fn! {
+    /// Get the length of the reason-phrase of this response.
+    ///
+    /// Use `hyper_response_reason_phrase()` to get the buffer pointer.
+    fn hyper_response_reason_phrase_len(resp: *const hyper_response) -> size_t {
+        let resp = non_null!(&*resp ?= 0);
+        resp.reason_phrase().len()
+    }
+}
+
+ffi_fn! {
+    /// Get the HTTP version used by this response.
+    ///
+    /// The returned value could be:
+    ///
+    /// - `HYPER_HTTP_VERSION_1_0`
+    /// - `HYPER_HTTP_VERSION_1_1`
+    /// - `HYPER_HTTP_VERSION_2`
+    /// - `HYPER_HTTP_VERSION_NONE` if newer (or older).
+    fn hyper_response_version(resp: *const hyper_response) -> c_int {
+        use http::Version;
+
+        let resp = non_null!(&*resp ?= 0);
+        match resp.0.version() {
+            Version::HTTP_10 => super::HYPER_HTTP_VERSION_1_0,
+            Version::HTTP_11 => super::HYPER_HTTP_VERSION_1_1,
+            Version::HTTP_2 => super::HYPER_HTTP_VERSION_2,
+            _ => super::HYPER_HTTP_VERSION_NONE,
+        }
+    }
+}
+
+ffi_fn! {
+    /// Gets a reference to the HTTP headers of this response.
+    ///
+    /// This is not an owned reference, so it should not be accessed after the
+    /// `hyper_response` has been freed.
+    ///
+    /// Returns `NULL` if `resp` is `NULL`.
+    fn hyper_response_headers(resp: *mut hyper_response) -> *mut hyper_headers {
+        // Guard against NULL instead of blindly dereferencing (was UB),
+        // consistent with the other response accessors.
+        let resp = non_null!(&mut *resp ?= std::ptr::null_mut());
+        hyper_headers::get_or_default(resp.0.extensions_mut())
+    } ?= std::ptr::null_mut()
+}
+
+ffi_fn! {
+    /// Take ownership of the body of this response.
+    ///
+    /// It is safe to free the response even after taking ownership of its body.
+    ///
+    /// To avoid a memory leak, the body must eventually be consumed by
+    /// `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`.
+    fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body {
+        let resp = non_null!(&mut *resp ?= std::ptr::null_mut());
+        // Swap an empty body into the response so it stays freeable on its own.
+        let taken = std::mem::replace(resp.0.body_mut(), IncomingBody::empty());
+        Box::into_raw(Box::new(hyper_body(taken)))
+    } ?= std::ptr::null_mut()
+}
+
+impl hyper_response {
+    /// Package a hyper response for C consumption: its headers (plus any
+    /// recorded original casing/ordering) are moved into a `hyper_headers`
+    /// extension so `hyper_response_headers` can hand them out.
+    pub(super) fn wrap(mut resp: Response<IncomingBody>) -> hyper_response {
+        let headers = std::mem::take(resp.headers_mut());
+        let orig_casing = resp
+            .extensions_mut()
+            .remove::<HeaderCaseMap>()
+            .unwrap_or_default();
+        let orig_order = resp
+            .extensions_mut()
+            .remove::<OriginalHeaderOrder>()
+            .unwrap_or_default();
+        let staged = hyper_headers {
+            headers,
+            orig_casing,
+            orig_order,
+        };
+        resp.extensions_mut().insert(staged);
+
+        hyper_response(resp)
+    }
+
+    /// The reason phrase: a non-standard one recorded from the peer if
+    /// present, else the canonical phrase for the status code, else empty.
+    fn reason_phrase(&self) -> &[u8] {
+        self.0
+            .extensions()
+            .get::<ReasonPhrase>()
+            .map(|reason| reason.as_bytes())
+            .or_else(|| self.0.status().canonical_reason().map(str::as_bytes))
+            .unwrap_or(&[])
+    }
+}
+
+// Marks a `hyper_response` as a value a `hyper_task` may yield, tagged
+// `HYPER_TASK_RESPONSE` so C callers can downcast the `void *` task value.
+unsafe impl AsTaskType for hyper_response {
+    fn as_task_type(&self) -> hyper_task_return_type {
+        hyper_task_return_type::HYPER_TASK_RESPONSE
+    }
+}
+
+// ===== impl Headers =====
+
+/// C signature: `int cb(void *userdata, const uint8_t *name, size_t name_len,
+/// const uint8_t *value, size_t value_len)`.
+type hyper_headers_foreach_callback =
+    extern "C" fn(*mut c_void, *const u8, size_t, *const u8, size_t) -> c_int;
+
+impl hyper_headers {
+    /// Fetch the `hyper_headers` stored in `ext`, inserting a default one
+    /// first if none is present yet.
+    pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers {
+        // `is_none()` replaces the non-idiomatic `if let None = …`
+        // (clippy::redundant_pattern_matching); behavior is unchanged.
+        if ext.get_mut::<hyper_headers>().is_none() {
+            ext.insert(hyper_headers::default());
+        }
+
+        ext.get_mut::<hyper_headers>().unwrap()
+    }
+}
+
+ffi_fn! {
+    /// Iterates the headers passing each name and value pair to the callback.
+    ///
+    /// The `userdata` pointer is also passed to the callback.
+    ///
+    /// The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or
+    /// `HYPER_ITER_BREAK` to stop.
+    fn hyper_headers_foreach(headers: *const hyper_headers, func: hyper_headers_foreach_callback, userdata: *mut c_void) {
+        let headers = non_null!(&*headers ?= ());
+        // For each header name/value pair, there may be a value in the casemap
+        // that corresponds to the HeaderValue. So, we iterator all the keys,
+        // and for each one, try to pair the originally cased name with the value.
+        //
+        // TODO: consider adding http::HeaderMap::entries() iterator
+        //
+        // Two paths: when an original insertion order was recorded, replay it;
+        // otherwise fall back to the HeaderMap's own key grouping.
+        let mut ordered_iter = headers.orig_order.get_in_order().peekable();
+        if ordered_iter.peek().is_some() {
+            for (name, idx) in ordered_iter {
+                // Prefer the original casing recorded for this occurrence;
+                // fall back to the canonical (lowercase) name bytes.
+                let (name_ptr, name_len) = if let Some(orig_name) = headers.orig_casing.get_all(name).nth(*idx) {
+                    (orig_name.as_ref().as_ptr(), orig_name.as_ref().len())
+                } else {
+                    (
+                        name.as_str().as_bytes().as_ptr(),
+                        name.as_str().as_bytes().len(),
+                    )
+                };
+
+                let val_ptr;
+                let val_len;
+                if let Some(value) = headers.headers.get_all(name).iter().nth(*idx) {
+                    val_ptr = value.as_bytes().as_ptr();
+                    val_len = value.as_bytes().len();
+                } else {
+                    // Stop iterating, something has gone wrong.
+                    return;
+                }
+
+                if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) {
+                    return;
+                }
+            }
+        } else {
+            for name in headers.headers.keys() {
+                // Pair each value of this name with the matching recorded
+                // casing, in order; fall back to the canonical name bytes.
+                let mut names = headers.orig_casing.get_all(name);
+
+                for value in headers.headers.get_all(name) {
+                    let (name_ptr, name_len) = if let Some(orig_name) = names.next() {
+                        (orig_name.as_ref().as_ptr(), orig_name.as_ref().len())
+                    } else {
+                        (
+                            name.as_str().as_bytes().as_ptr(),
+                            name.as_str().as_bytes().len(),
+                        )
+                    };
+
+                    let val_ptr = value.as_bytes().as_ptr();
+                    let val_len = value.as_bytes().len();
+
+                    if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) {
+                        return;
+                    }
+                }
+            }
+        }
+    }
+}
+
+ffi_fn! {
+    /// Sets the header with the provided name to the provided value.
+    ///
+    /// This overwrites any previous value set for the header.
+    fn hyper_headers_set(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code {
+        let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG);
+        match unsafe { raw_name_value(name, name_len, value, value_len) } {
+            Ok((name, value, orig_name)) => {
+                headers.headers.insert(&name, value);
+                // `orig_name` is moved here — the previous `.clone()` was a
+                // redundant extra handle, since it is never used again.
+                headers.orig_casing.insert(name.clone(), orig_name);
+                headers.orig_order.insert(name);
+                hyper_code::HYPERE_OK
+            }
+            Err(code) => code,
+        }
+    }
+}
+
+ffi_fn! {
+    /// Adds the provided value to the list of the provided name.
+    ///
+    /// If there were already existing values for the name, this will append the
+    /// new value to the internal list.
+    fn hyper_headers_add(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code {
+        let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG);
+
+        match unsafe { raw_name_value(name, name_len, value, value_len) } {
+            Ok((name, value, orig_name)) => {
+                headers.headers.append(&name, value);
+                // `orig_name` is moved here — the previous `.clone()` was a
+                // redundant extra handle, since it is never used again.
+                headers.orig_casing.append(&name, orig_name);
+                headers.orig_order.append(name);
+                hyper_code::HYPERE_OK
+            }
+            Err(code) => code,
+        }
+    }
+}
+
+impl Default for hyper_headers {
+    /// An empty header map with no recorded casing or ordering.
+    fn default() -> Self {
+        hyper_headers {
+            headers: HeaderMap::default(),
+            orig_casing: Default::default(),
+            orig_order: Default::default(),
+        }
+    }
+}
+
+/// Parse a raw (pointer, length) name/value pair into typed header parts,
+/// also returning the name bytes exactly as given so the original casing can
+/// be recorded.
+///
+/// # Safety
+///
+/// `name` and `value` must be valid for reads of `name_len` and `value_len`
+/// bytes respectively.
+unsafe fn raw_name_value(
+    name: *const u8,
+    name_len: size_t,
+    value: *const u8,
+    value_len: size_t,
+) -> Result<(HeaderName, HeaderValue, Bytes), hyper_code> {
+    let raw_name = std::slice::from_raw_parts(name, name_len);
+    let orig_name = Bytes::copy_from_slice(raw_name);
+    let name =
+        HeaderName::from_bytes(raw_name).map_err(|_| hyper_code::HYPERE_INVALID_ARG)?;
+    let raw_value = std::slice::from_raw_parts(value, value_len);
+    let value =
+        HeaderValue::from_bytes(raw_value).map_err(|_| hyper_code::HYPERE_INVALID_ARG)?;
+
+    Ok((name, value, orig_name))
+}
+
+// ===== impl OnInformational =====
+
+#[cfg(feature = "client")]
+impl crate::ext::OnInformationalCallback for OnInformational {
+    // Called by hyper for each 1xx response: wrap it like a normal response
+    // (with an empty body) and hand a borrowed pointer to the C callback. The
+    // pointer is only valid for the duration of the call.
+    fn on_informational(&self, res: http::Response<()>) {
+        let res = res.map(|()| IncomingBody::empty());
+        let mut res = hyper_response::wrap(res);
+        (self.func)(self.data.0, &mut res);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Two differently-cased spellings of the same header must each come back
+    // out of `hyper_headers_foreach` with their original casing intact.
+    #[test]
+    fn test_headers_foreach_cases_preserved() {
+        let mut headers = hyper_headers::default();
+
+        let name1 = b"Set-CookiE";
+        let value1 = b"a=b";
+        hyper_headers_add(
+            &mut headers,
+            name1.as_ptr(),
+            name1.len(),
+            value1.as_ptr(),
+            value1.len(),
+        );
+
+        let name2 = b"SET-COOKIE";
+        let value2 = b"c=d";
+        hyper_headers_add(
+            &mut headers,
+            name2.as_ptr(),
+            name2.len(),
+            value2.as_ptr(),
+            value2.len(),
+        );
+
+        let mut vec = Vec::<u8>::new();
+        hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void);
+
+        assert_eq!(vec, b"Set-CookiE: a=b\r\nSET-COOKIE: c=d\r\n");
+
+        // Callback that renders each pair as "Name: value\r\n" into the Vec
+        // passed through the userdata pointer.
+        extern "C" fn concat(
+            vec: *mut c_void,
+            name: *const u8,
+            name_len: usize,
+            value: *const u8,
+            value_len: usize,
+        ) -> c_int {
+            unsafe {
+                let vec = &mut *(vec as *mut Vec<u8>);
+                let name = std::slice::from_raw_parts(name, name_len);
+                let value = std::slice::from_raw_parts(value, value_len);
+                vec.extend(name);
+                vec.extend(b": ");
+                vec.extend(value);
+                vec.extend(b"\r\n");
+            }
+            HYPER_ITER_CONTINUE
+        }
+    }
+
+    // With insertion-order tracking enabled, iteration must replay headers in
+    // the exact order they were added, interleaving repeated names.
+    #[cfg(all(feature = "http1", feature = "ffi"))]
+    #[test]
+    fn test_headers_foreach_order_preserved() {
+        let mut headers = hyper_headers::default();
+
+        let name1 = b"Set-CookiE";
+        let value1 = b"a=b";
+        hyper_headers_add(
+            &mut headers,
+            name1.as_ptr(),
+            name1.len(),
+            value1.as_ptr(),
+            value1.len(),
+        );
+
+        let name2 = b"Content-Encoding";
+        let value2 = b"gzip";
+        hyper_headers_add(
+            &mut headers,
+            name2.as_ptr(),
+            name2.len(),
+            value2.as_ptr(),
+            value2.len(),
+        );
+
+        let name3 = b"SET-COOKIE";
+        let value3 = b"c=d";
+        hyper_headers_add(
+            &mut headers,
+            name3.as_ptr(),
+            name3.len(),
+            value3.as_ptr(),
+            value3.len(),
+        );
+
+        let mut vec = Vec::<u8>::new();
+        hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void);
+
+        println!("{}", std::str::from_utf8(&vec).unwrap());
+        assert_eq!(
+            vec,
+            b"Set-CookiE: a=b\r\nContent-Encoding: gzip\r\nSET-COOKIE: c=d\r\n"
+        );
+
+        // Same rendering callback as above.
+        extern "C" fn concat(
+            vec: *mut c_void,
+            name: *const u8,
+            name_len: usize,
+            value: *const u8,
+            value_len: usize,
+        ) -> c_int {
+            unsafe {
+                let vec = &mut *(vec as *mut Vec<u8>);
+                let name = std::slice::from_raw_parts(name, name_len);
+                let value = std::slice::from_raw_parts(value, value_len);
+                vec.extend(name);
+                vec.extend(b": ");
+                vec.extend(value);
+                vec.extend(b"\r\n");
+            }
+            HYPER_ITER_CONTINUE
+        }
+    }
+}
diff --git a/vendor/hyper/src/ffi/io.rs b/vendor/hyper/src/ffi/io.rs
new file mode 100644
index 00000000..89978b9e
--- /dev/null
+++ b/vendor/hyper/src/ffi/io.rs
@@ -0,0 +1,198 @@
+use std::ffi::c_void;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use super::task::hyper_context;
+use crate::ffi::size_t;
+use crate::rt::{Read, Write};
+
/// Sentinel value to return from a read or write callback that the operation
/// is pending.
///
/// NOTE(review): on 32-bit targets this equals `usize::MAX`; callbacks must
/// never report a genuine transfer of this many bytes.
pub const HYPER_IO_PENDING: size_t = 0xFFFFFFFF;
/// Sentinel value to return from a read or write callback that the operation
/// has errored.
pub const HYPER_IO_ERROR: size_t = 0xFFFFFFFE;

// User-supplied read callback:
// (userdata, task context, destination buffer, buffer capacity)
//   -> bytes read, or one of the HYPER_IO_* sentinels above.
type hyper_io_read_callback =
    extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut u8, size_t) -> size_t;
// User-supplied write callback:
// (userdata, task context, source buffer, buffer length)
//   -> bytes written, or one of the HYPER_IO_* sentinels above.
type hyper_io_write_callback =
    extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t;
+
/// A read/write handle for a specific connection.
///
/// This owns a specific TCP or TLS connection for the lifetime of
/// that connection. It contains a read and write callback, as well as a
/// void *userdata. Typically the userdata will point to a struct
/// containing a file descriptor and a TLS context.
///
/// Methods:
///
/// - hyper_io_new: Create a new IO type used to represent a transport.
/// - hyper_io_set_read: Set the read function for this IO transport.
/// - hyper_io_set_write: Set the write function for this IO transport.
/// - hyper_io_set_userdata: Set the user data pointer for this IO to some value.
/// - hyper_io_free: Free an IO handle.
pub struct hyper_io {
    // Filled by hyper_io_set_read; defaults to read_noop (always 0 bytes).
    read: hyper_io_read_callback,
    // Filled by hyper_io_set_write; defaults to write_noop (always 0 bytes).
    write: hyper_io_write_callback,
    // Opaque application pointer handed to every read/write callback.
    userdata: *mut c_void,
}
+
ffi_fn! {
    /// Create a new IO type used to represent a transport.
    ///
    /// The read and write functions of this transport should be set with
    /// `hyper_io_set_read` and `hyper_io_set_write`.
    ///
    /// It is expected that the underlying transport is non-blocking. When
    /// a read or write callback can't make progress because there is no
    /// data available yet, it should use the `hyper_waker` mechanism to
    /// arrange to be called again when data is available.
    ///
    /// To avoid a memory leak, the IO handle must eventually be consumed by
    /// `hyper_io_free` or `hyper_clientconn_handshake`.
    fn hyper_io_new() -> *mut hyper_io {
        // Callbacks start as no-ops so an unconfigured handle is still safe
        // to poll; ownership passes to the C caller via the raw pointer.
        Box::into_raw(Box::new(hyper_io {
            read: read_noop,
            write: write_noop,
            userdata: std::ptr::null_mut(),
        }))
    } ?= std::ptr::null_mut()
}

ffi_fn! {
    /// Free an IO handle.
    ///
    /// This should only be used if the request isn't consumed by
    /// `hyper_clientconn_handshake`.
    fn hyper_io_free(io: *mut hyper_io) {
        // Reclaim the Box created in hyper_io_new; a null pointer is a no-op.
        drop(non_null!(Box::from_raw(io) ?= ()));
    }
}
+
ffi_fn! {
    /// Set the user data pointer for this IO to some value.
    ///
    /// This value is passed as an argument to the read and write callbacks.
    fn hyper_io_set_userdata(io: *mut hyper_io, data: *mut c_void) {
        non_null!(&mut *io ?= ()).userdata = data;
    }
}

ffi_fn! {
    /// Set the read function for this IO transport.
    ///
    /// Data that is read from the transport should be put in the `buf` pointer,
    /// up to `buf_len` bytes. The number of bytes read should be the return value.
    ///
    /// It is undefined behavior to try to access the bytes in the `buf` pointer,
    /// unless you have already written them yourself. It is also undefined behavior
    /// to return that more bytes have been written than actually set on the `buf`.
    ///
    /// If there is no data currently available, the callback should create a
    /// `hyper_waker` from its `hyper_context` argument and register the waker
    /// with whatever polling mechanism is used to signal when data is available
    /// later on. The return value should be `HYPER_IO_PENDING`. See the
    /// documentation for `hyper_waker`.
    ///
    /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR`
    /// should be the return value.
    fn hyper_io_set_read(io: *mut hyper_io, func: hyper_io_read_callback) {
        non_null!(&mut *io ?= ()).read = func;
    }
}

ffi_fn! {
    /// Set the write function for this IO transport.
    ///
    /// Data from the `buf` pointer should be written to the transport, up to
    /// `buf_len` bytes. The number of bytes written should be the return value.
    ///
    /// If there is no data currently available, the callback should create a
    /// `hyper_waker` from its `hyper_context` argument and register the waker
    /// with whatever polling mechanism is used to signal when data is available
    /// later on. The return value should be `HYPER_IO_PENDING`. See the documentation
    /// for `hyper_waker`.
    ///
    /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR`
    /// should be the return value.
    fn hyper_io_set_write(io: *mut hyper_io, func: hyper_io_write_callback) {
        non_null!(&mut *io ?= ()).write = func;
    }
}
+
/// cbindgen:ignore
// Default read callback used before hyper_io_set_read is called: reports
// 0 bytes read, which the transport layer treats as EOF.
extern "C" fn read_noop(
    _userdata: *mut c_void,
    _: *mut hyper_context<'_>,
    _buf: *mut u8,
    _buf_len: size_t,
) -> size_t {
    0
}

/// cbindgen:ignore
// Default write callback used before hyper_io_set_write is called: reports
// 0 bytes written.
extern "C" fn write_noop(
    _userdata: *mut c_void,
    _: *mut hyper_context<'_>,
    _buf: *const u8,
    _buf_len: size_t,
) -> size_t {
    0
}
+
// Bridge hyper's async Read trait onto the C read callback.
impl Read for hyper_io {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: crate::rt::ReadBufCursor<'_>,
    ) -> Poll<std::io::Result<()>> {
        // SAFETY: only the pointer/length of the (possibly uninitialized)
        // region are taken here; the bytes are never read on this side, the
        // callback is expected to fill them.
        let buf_ptr = unsafe { buf.as_mut() }.as_mut_ptr() as *mut u8;
        let buf_len = buf.remaining();

        // Translate the callback's sentinel return values into Poll states.
        match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) {
            HYPER_IO_PENDING => Poll::Pending,
            HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "io error",
            ))),
            ok => {
                // We have to trust that the user's read callback actually
                // filled in that many bytes... :(
                unsafe { buf.advance(ok) };
                Poll::Ready(Ok(()))
            }
        }
    }
}
+
// Bridge hyper's async Write trait onto the C write callback.
impl Write for hyper_io {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        let buf_ptr = buf.as_ptr();
        let buf_len = buf.len();

        // Translate the callback's sentinel return values into Poll states;
        // any other value is taken as the byte count actually written.
        match (self.write)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) {
            HYPER_IO_PENDING => Poll::Pending,
            HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "io error",
            ))),
            ok => Poll::Ready(Ok(ok)),
        }
    }

    // The C API has no flush concept; writes are assumed to be pushed by
    // the callback itself.
    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    // Likewise, shutdown of the underlying transport is left to the C side.
    fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Poll::Ready(Ok(()))
    }
}

// SAFETY: the raw userdata pointer and callbacks come from the application,
// which the C API contract obligates to be usable from any thread.
// NOTE(review): this soundness rests entirely on caller discipline — there
// is nothing hyper can check here.
unsafe impl Send for hyper_io {}
unsafe impl Sync for hyper_io {}
diff --git a/vendor/hyper/src/ffi/macros.rs b/vendor/hyper/src/ffi/macros.rs
new file mode 100644
index 00000000..022711ba
--- /dev/null
+++ b/vendor/hyper/src/ffi/macros.rs
@@ -0,0 +1,53 @@
// Declares a `#[no_mangle] pub extern` FFI entry point whose body is wrapped
// in `catch_unwind`, since unwinding across the C boundary is undefined
// behavior. The `?= expr` suffix names the value returned if the body
// panics; without it, a panic aborts the process.
macro_rules! ffi_fn {
    // Variant with an explicit panic fallback value.
    ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block ?= $default:expr) => {
        $(#[$doc])*
        #[no_mangle]
        pub extern fn $name($($arg: $arg_ty),*) -> $ret {
            use std::panic::{self, AssertUnwindSafe};

            match panic::catch_unwind(AssertUnwindSafe(move || $body)) {
                Ok(v) => v,
                Err(_) => {
                    $default
                }
            }
        }
    };

    // No fallback provided: panicking here is unrecoverable, so abort.
    ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => {
        ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> $ret $body ?= {
            eprintln!("panic unwind caught, aborting");
            std::process::abort()
        });
    };

    // Unit-returning variants delegate to the `-> ()` forms above.
    ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block ?= $default:expr) => {
        ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body ?= $default);
    };

    ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block) => {
        ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body);
    };
}
+
// Guards an FFI pointer dereference: debug-asserts the pointer is non-null,
// returns `$err` from the enclosing function if it is null at runtime, and
// otherwise evaluates the unsafe expression. The shorthand arms cover the
// common deref / Box / Arc reclamation patterns used by the ffi_fn! bodies.
macro_rules! non_null {
    ($ptr:ident, $eval:expr, $err:expr) => {{
        debug_assert!(!$ptr.is_null(), "{:?} must not be null", stringify!($ptr));
        if $ptr.is_null() {
            return $err;
        }
        // SAFETY of the caller-provided $eval still rests on the pointer
        // actually pointing at a live, correctly-typed value.
        unsafe { $eval }
    }};
    (&*$ptr:ident ?= $err:expr) => {{
        non_null!($ptr, &*$ptr, $err)
    }};
    (&mut *$ptr:ident ?= $err:expr) => {{
        non_null!($ptr, &mut *$ptr, $err)
    }};
    (Box::from_raw($ptr:ident) ?= $err:expr) => {{
        non_null!($ptr, Box::from_raw($ptr), $err)
    }};
    (Arc::from_raw($ptr:ident) ?= $err:expr) => {{
        non_null!($ptr, Arc::from_raw($ptr), $err)
    }};
}
diff --git a/vendor/hyper/src/ffi/mod.rs b/vendor/hyper/src/ffi/mod.rs
new file mode 100644
index 00000000..cdcbc482
--- /dev/null
+++ b/vendor/hyper/src/ffi/mod.rs
@@ -0,0 +1,99 @@
+// We have a lot of c-types in here, stop warning about their names!
+#![allow(non_camel_case_types)]
+// fmt::Debug isn't helpful on FFI types
+#![allow(missing_debug_implementations)]
+// unreachable_pub warns `#[no_mangle] pub extern fn` in private mod.
+#![allow(unreachable_pub)]
+
+//! # hyper C API
+//!
+//! This part of the documentation describes the C API for hyper. That is, how
+//! to *use* the hyper library in C code. This is **not** a regular Rust
+//! module, and thus it is not accessible in Rust.
+//!
+//! ## Unstable
+//!
+//! The C API of hyper is currently **unstable**, which means it's not part of
+//! the semver contract as the rest of the Rust API is. Because of that, it's
+//! only accessible if `--cfg hyper_unstable_ffi` is passed to `rustc` when
+//! compiling. The easiest way to do that is setting the `RUSTFLAGS`
+//! environment variable.
+//!
+//! ## Building
+//!
+//! The C API is part of the Rust library, but isn't compiled by default. Using
+//! `cargo`, starting with `1.64.0`, it can be compiled with the following command:
+//!
+//! ```notrust
+//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo rustc --crate-type cdylib --features client,http1,http2,ffi
+//! ```
+
+// We may eventually allow the FFI to be enabled without `client` or `http1`,
+// that is why we don't auto enable them as `ffi = ["client", "http1"]` in
+// the `Cargo.toml`.
+//
+// But for now, give a clear message that this compile error is expected.
+#[cfg(not(all(feature = "client", feature = "http1")))]
+compile_error!("The `ffi` feature currently requires the `client` and `http1` features.");
+
+#[cfg(not(hyper_unstable_ffi))]
+compile_error!(
+ "\
+ The `ffi` feature is unstable, and requires the \
+ `RUSTFLAGS='--cfg hyper_unstable_ffi'` environment variable to be set.\
+"
+);
+
+#[macro_use]
+mod macros;
+
+mod body;
+mod client;
+mod error;
+mod http_types;
+mod io;
+mod task;
+
+pub use self::body::*;
+pub use self::client::*;
+pub use self::error::*;
+pub use self::http_types::*;
+pub use self::io::*;
+pub use self::task::*;
+
/// Return in iter functions to continue iterating.
pub const HYPER_ITER_CONTINUE: std::ffi::c_int = 0;
/// Return in iter functions to stop iterating.
#[allow(unused)]
pub const HYPER_ITER_BREAK: std::ffi::c_int = 1;

/// An HTTP Version that is unspecified.
pub const HYPER_HTTP_VERSION_NONE: std::ffi::c_int = 0;
/// The HTTP/1.0 version.
pub const HYPER_HTTP_VERSION_1_0: std::ffi::c_int = 10;
/// The HTTP/1.1 version.
pub const HYPER_HTTP_VERSION_1_1: std::ffi::c_int = 11;
/// The HTTP/2 version.
pub const HYPER_HTTP_VERSION_2: std::ffi::c_int = 20;

// Newtype wrapper so opaque application pointers can be stored inside
// Send/Sync Rust types (tasks, headers) without the compiler objecting.
#[derive(Clone)]
struct UserDataPointer(*mut std::ffi::c_void);

// We don't actually know anything about this pointer, it's up to the user
// to do the right thing.
unsafe impl Send for UserDataPointer {}
unsafe impl Sync for UserDataPointer {}

/// cbindgen:ignore
// NUL-terminated so it can be handed to C as-is by hyper_version.
static VERSION_CSTR: &str = concat!(env!("CARGO_PKG_VERSION"), "\0");

// `core::ffi::c_size_t` is a nightly-only experimental API.
// https://github.com/rust-lang/rust/issues/88345
type size_t = usize;

ffi_fn! {
    /// Returns a static ASCII (null terminated) string of the hyper version.
    fn hyper_version() -> *const std::ffi::c_char {
        VERSION_CSTR.as_ptr() as _
    } ?= std::ptr::null()
}
diff --git a/vendor/hyper/src/ffi/task.rs b/vendor/hyper/src/ffi/task.rs
new file mode 100644
index 00000000..5b33d42b
--- /dev/null
+++ b/vendor/hyper/src/ffi/task.rs
@@ -0,0 +1,549 @@
+use std::ffi::{c_int, c_void};
+use std::future::Future;
+use std::pin::Pin;
+use std::ptr;
+use std::sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, Mutex, Weak,
+};
+use std::task::{Context, Poll};
+
+use futures_util::stream::{FuturesUnordered, Stream};
+
+use super::error::hyper_code;
+use super::UserDataPointer;
+
+type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;
+type BoxAny = Box<dyn AsTaskType + Send + Sync>;
+
/// Return in a poll function to indicate it was ready.
pub const HYPER_POLL_READY: c_int = 0;
/// Return in a poll function to indicate it is still pending.
///
/// The passed in `hyper_waker` should be registered to wake up the task at
/// some later point.
pub const HYPER_POLL_PENDING: c_int = 1;
/// Return in a poll function indicate an error.
// NOTE(review): the value 2 is deliberately not used here — presumably
// reserved in the C ABI; confirm against the published hyper.h before
// adding a new poll constant.
pub const HYPER_POLL_ERROR: c_int = 3;
+
/// A task executor for `hyper_task`s.
///
/// A task is a unit of work that may be blocked on IO, and can be polled to
/// make progress on that work.
///
/// An executor can hold many tasks, included from unrelated HTTP connections.
/// An executor is single threaded. Typically you might have one executor per
/// thread. Or, for simplicity, you may choose one executor per connection.
///
/// Progress on tasks happens only when `hyper_executor_poll` is called, and only
/// on tasks whose corresponding `hyper_waker` has been called to indicate they
/// are ready to make progress (for instance, because the OS has indicated there
/// is more data to read or more buffer space available to write).
///
/// Deadlock potential: `hyper_executor_poll` must not be called from within a task's
/// callback. Doing so will result in a deadlock.
///
/// Methods:
///
/// - hyper_executor_new: Creates a new task executor.
/// - hyper_executor_push: Push a task onto the executor.
/// - hyper_executor_poll: Polls the executor, trying to make progress on any tasks that have notified that they are ready again.
/// - hyper_executor_free: Frees an executor and any incomplete tasks still part of it.
pub struct hyper_executor {
    /// The executor of all task futures.
    ///
    /// There should never be contention on the mutex, as it is only locked
    /// to drive the futures. However, we cannot guarantee proper usage from
    /// `hyper_executor_poll()`, which in C could potentially be called inside
    /// one of the stored futures. The mutex isn't re-entrant, so doing so
    /// would result in a deadlock, but that's better than data corruption.
    driver: Mutex<FuturesUnordered<TaskFuture>>,

    /// The queue of futures that need to be pushed into the `driver`.
    ///
    /// This has a separate mutex since `spawn` could be called from inside
    /// a future, which would mean the driver's mutex is already locked.
    spawn_queue: Mutex<Vec<TaskFuture>>,

    /// This is used to track when a future calls `wake` while we are within
    /// `hyper_executor::poll_next`.
    is_woken: Arc<ExecWaker>,
}

/// Non-owning handle to an executor, cloned into connection internals so
/// they can spawn follow-up tasks without keeping the executor alive.
#[derive(Clone)]
pub(crate) struct WeakExec(Weak<hyper_executor>);

/// Flag-style waker: waking merely sets the bool, which `poll_next` checks
/// after each pass to decide whether to poll again immediately.
struct ExecWaker(AtomicBool);
+
/// An async task.
///
/// A task represents a chunk of work that will eventually yield exactly one
/// `hyper_task_value`. Tasks are pushed onto an executor, and that executor is
/// responsible for calling the necessary private functions on the task to make
/// progress. In most cases those private functions will eventually cause read
/// or write callbacks on a `hyper_io` object to be called.
///
/// Tasks are created by various functions:
///
/// - hyper_clientconn_handshake: Creates an HTTP client handshake task.
/// - hyper_clientconn_send: Creates a task to send a request on the client connection.
/// - hyper_body_data: Creates a task that will poll a response body for the next buffer of data.
/// - hyper_body_foreach: Creates a task to execute the callback with each body chunk received.
///
/// Tasks then have a userdata associated with them using `hyper_task_set_userdata`. This
/// is important, for instance, to associate a request id with a given request. When multiple
/// tasks are running on the same executor, this allows distinguishing tasks for different
/// requests.
///
/// Tasks are then pushed onto an executor, and eventually yielded from hyper_executor_poll:
///
/// - hyper_executor_push: Push a task onto the executor.
/// - hyper_executor_poll: Polls the executor, trying to make progress on any tasks that have notified that they are ready again.
///
/// Once a task is yielded from poll, retrieve its userdata, check its type,
/// and extract its value. This will require a cast from void* to the appropriate type.
///
/// Methods on hyper_task:
///
/// - hyper_task_type: Query the return type of this task.
/// - hyper_task_value: Takes the output value of this task.
/// - hyper_task_set_userdata: Set a user data pointer to be associated with this task.
/// - hyper_task_userdata: Retrieve the userdata that has been set via hyper_task_set_userdata.
/// - hyper_task_free: Free a task.
pub struct hyper_task {
    // The type-erased future; resolves to a type-erased output value.
    future: BoxFuture<BoxAny>,
    // Set once the future completes; taken by hyper_task_value.
    output: Option<BoxAny>,
    // Opaque application pointer, returned by hyper_task_userdata.
    userdata: UserDataPointer,
}

// Adapter letting FuturesUnordered drive a boxed hyper_task and yield the
// whole task back (with its output stored) once it completes. `task` is
// `None` only after completion.
struct TaskFuture {
    task: Option<Box<hyper_task>>,
}
+
/// An async context for a task that contains the related waker.
///
/// This is provided to `hyper_io`'s read and write callbacks. Currently
/// its only purpose is to provide access to the waker. See `hyper_waker`.
///
/// Corresponding Rust type: <https://doc.rust-lang.org/std/task/struct.Context.html>
// Single-field newtype so a &mut Context can be exposed to C as an opaque
// pointer (see hyper_context::wrap).
pub struct hyper_context<'a>(Context<'a>);

/// A waker that is saved and used to waken a pending task.
///
/// This is provided to `hyper_io`'s read and write callbacks via `hyper_context`
/// and `hyper_context_waker`.
///
/// When nonblocking I/O in one of those callbacks can't make progress (returns
/// `EAGAIN` or `EWOULDBLOCK`), the callback has to return to avoid blocking the
/// executor. But it also has to arrange to get called in the future when more
/// data is available. That's the role of the async context and the waker. The
/// waker can be used to tell the executor "this task is ready to make progress."
///
/// The read or write callback, upon finding it can't make progress, must get a
/// waker from the context (`hyper_context_waker`), arrange for that waker to be
/// called in the future, and then return `HYPER_POLL_PENDING`.
///
/// The arrangements for the waker to be called in the future are up to the
/// application, but usually it will involve one big `select(2)` loop that checks which
/// FDs are ready, and a correspondence between FDs and waker objects. For each
/// FD that is ready, the corresponding waker must be called. Then `hyper_executor_poll`
/// must be called. That will cause the executor to attempt to make progress on each
/// woken task.
///
/// Corresponding Rust type: <https://doc.rust-lang.org/std/task/struct.Waker.html>
pub struct hyper_waker {
    waker: std::task::Waker,
}

/// A descriptor for what type a `hyper_task` value is.
#[repr(C)]
pub enum hyper_task_return_type {
    /// The value of this task is null (does not imply an error).
    HYPER_TASK_EMPTY,
    /// The value of this task is `hyper_error *`.
    HYPER_TASK_ERROR,
    /// The value of this task is `hyper_clientconn *`.
    HYPER_TASK_CLIENTCONN,
    /// The value of this task is `hyper_response *`.
    HYPER_TASK_RESPONSE,
    /// The value of this task is `hyper_buf *`.
    HYPER_TASK_BUF,
}

// Unsafe contract: implementors must report the tag matching their concrete
// type, because hyper_task_value hands C a type-erased pointer that will be
// cast based on this tag.
pub(crate) unsafe trait AsTaskType {
    fn as_task_type(&self) -> hyper_task_return_type;
}

// Converts a task's output into the boxed, type-erased form stored in
// hyper_task.output.
pub(crate) trait IntoDynTaskType {
    fn into_dyn_task_type(self) -> BoxAny;
}
+
+// ===== impl hyper_executor =====
+
impl hyper_executor {
    /// Creates an empty executor behind an `Arc` (the FFI hands out the
    /// raw `Arc` pointer as the C handle).
    fn new() -> Arc<hyper_executor> {
        Arc::new(hyper_executor {
            driver: Mutex::new(FuturesUnordered::new()),
            spawn_queue: Mutex::new(Vec::new()),
            is_woken: Arc::new(ExecWaker(AtomicBool::new(false))),
        })
    }

    /// Produces a non-owning handle for connection internals.
    pub(crate) fn downgrade(exec: &Arc<hyper_executor>) -> WeakExec {
        WeakExec(Arc::downgrade(exec))
    }

    /// Queues a task. It goes onto `spawn_queue` (not `driver`) so this is
    /// safe to call from inside a future already being driven.
    fn spawn(&self, task: Box<hyper_task>) {
        self.spawn_queue
            .lock()
            .unwrap()
            .push(TaskFuture { task: Some(task) });
    }

    /// Drives the stored futures until one completes (returning it) or no
    /// further progress is possible (returning `None`).
    fn poll_next(&self) -> Option<Box<hyper_task>> {
        // Drain the queue first.
        self.drain_queue();

        let waker = futures_util::task::waker_ref(&self.is_woken);
        let mut cx = Context::from_waker(&waker);

        loop {
            {
                // Scope the lock on the driver to ensure it is dropped before
                // calling drain_queue below.
                let mut driver = self.driver.lock().unwrap();
                match Pin::new(&mut *driver).poll_next(&mut cx) {
                    Poll::Ready(val) => return val,
                    Poll::Pending => {}
                };
            }

            // poll_next returned Pending.
            // Check if any of the pending tasks tried to spawn
            // some new tasks. If so, drain into the driver and loop.
            if self.drain_queue() {
                continue;
            }

            // If the driver called `wake` while we were polling,
            // we should poll again immediately!
            if self.is_woken.0.swap(false, Ordering::SeqCst) {
                continue;
            }

            return None;
        }
    }

    /// drain_queue locks both self.spawn_queue and self.driver, so it requires
    /// that neither of them be locked already.
    fn drain_queue(&self) -> bool {
        let mut queue = self.spawn_queue.lock().unwrap();
        if queue.is_empty() {
            return false;
        }

        // FuturesUnordered::push takes &self, so a shared guard suffices.
        let driver = self.driver.lock().unwrap();

        for task in queue.drain(..) {
            driver.push(task);
        }

        true
    }
}
+
// Waking just raises the flag; poll_next reads (and clears) it to decide
// whether to keep polling.
impl futures_util::task::ArcWake for ExecWaker {
    fn wake_by_ref(me: &Arc<ExecWaker>) {
        me.0.store(true, Ordering::SeqCst);
    }
}

// ===== impl WeakExec =====

impl WeakExec {
    /// A handle that points at no executor; `execute` on it is a no-op.
    pub(crate) fn new() -> Self {
        WeakExec(Weak::new())
    }
}
+
// Lets hyper's connection machinery spawn futures onto the C-visible
// executor. If the executor has already been freed, the future is
// silently dropped.
impl<F> crate::rt::Executor<F> for WeakExec
where
    F: Future + Send + 'static,
    F::Output: Send + Sync + AsTaskType,
{
    fn execute(&self, fut: F) {
        if let Some(exec) = self.0.upgrade() {
            exec.spawn(hyper_task::boxed(fut));
        }
    }
}
+
ffi_fn! {
    /// Creates a new task executor.
    ///
    /// To avoid a memory leak, the executor must eventually be consumed by
    /// `hyper_executor_free`.
    fn hyper_executor_new() -> *const hyper_executor {
        // The C handle is the raw Arc pointer; hyper_executor_free reclaims it.
        Arc::into_raw(hyper_executor::new())
    } ?= ptr::null()
}

ffi_fn! {
    /// Frees an executor and any incomplete tasks still part of it.
    ///
    /// This should be used for any executor once it is no longer needed.
    fn hyper_executor_free(exec: *const hyper_executor) {
        drop(non_null!(Arc::from_raw(exec) ?= ()));
    }
}

ffi_fn! {
    /// Push a task onto the executor.
    ///
    /// The executor takes ownership of the task, which must not be accessed
    /// again.
    ///
    /// Ownership of the task will eventually be returned to the user from
    /// `hyper_executor_poll`.
    ///
    /// To distinguish multiple tasks running on the same executor, use
    /// hyper_task_set_userdata.
    fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code {
        let exec = non_null!(&*exec ?= hyper_code::HYPERE_INVALID_ARG);
        let task = non_null!(Box::from_raw(task) ?= hyper_code::HYPERE_INVALID_ARG);
        exec.spawn(task);
        hyper_code::HYPERE_OK
    }
}

ffi_fn! {
    /// Polls the executor, trying to make progress on any tasks that can do so.
    ///
    /// If any task from the executor is ready, returns one of them. The way
    /// tasks signal being finished is internal to Hyper. The order in which tasks
    /// are returned is not guaranteed. Use userdata to distinguish between tasks.
    ///
    /// To avoid a memory leak, the task must eventually be consumed by
    /// `hyper_task_free`.
    ///
    /// If there are no ready tasks, this returns `NULL`.
    fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task {
        let exec = non_null!(&*exec ?= ptr::null_mut());
        match exec.poll_next() {
            // Ownership of the completed task transfers back to the caller.
            Some(task) => Box::into_raw(task),
            None => ptr::null_mut(),
        }
    } ?= ptr::null_mut()
}
+
+// ===== impl hyper_task =====
+
impl hyper_task {
    /// Wraps any suitable future into a boxed task, type-erasing its output
    /// so C only sees a tag (`hyper_task_return_type`) plus a `void *`.
    pub(crate) fn boxed<F>(fut: F) -> Box<hyper_task>
    where
        F: Future + Send + 'static,
        F::Output: IntoDynTaskType + Send + Sync + 'static,
    {
        Box::new(hyper_task {
            future: Box::pin(async move { fut.await.into_dyn_task_type() }),
            output: None,
            userdata: UserDataPointer(ptr::null_mut()),
        })
    }

    /// Tag describing the stored output; EMPTY both before completion and
    /// after the value has been taken.
    fn output_type(&self) -> hyper_task_return_type {
        match self.output {
            None => hyper_task_return_type::HYPER_TASK_EMPTY,
            Some(ref val) => val.as_task_type(),
        }
    }
}

impl Future for TaskFuture {
    type Output = Box<hyper_task>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `task` is always Some until this future resolves, after which the
        // executor never polls it again (FuturesUnordered drops it).
        match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) {
            Poll::Ready(val) => {
                let mut task = self.task.take().unwrap();
                task.output = Some(val);
                Poll::Ready(task)
            }
            Poll::Pending => Poll::Pending,
        }
    }
}
+
ffi_fn! {
    /// Free a task.
    ///
    /// This should only be used if the task isn't consumed by
    /// `hyper_clientconn_handshake` or taken ownership of by
    /// `hyper_executor_push`.
    fn hyper_task_free(task: *mut hyper_task) {
        drop(non_null!(Box::from_raw(task) ?= ()));
    }
}

ffi_fn! {
    /// Takes the output value of this task.
    ///
    /// This must only be called once polling the task on an executor has finished
    /// this task.
    ///
    /// Use `hyper_task_type` to determine the type of the `void *` return value.
    ///
    /// To avoid a memory leak, a non-empty return value must eventually be
    /// consumed by a function appropriate for its type, one of
    /// `hyper_error_free`, `hyper_clientconn_free`, `hyper_response_free`, or
    /// `hyper_buf_free`.
    fn hyper_task_value(task: *mut hyper_task) -> *mut c_void {
        let task = non_null!(&mut *task ?= ptr::null_mut());

        if let Some(val) = task.output.take() {
            let p = Box::into_raw(val) as *mut c_void;
            // protect from returning fake pointers to empty types
            // (Box<()> is a dangling non-null pointer that C must not see).
            if p == std::ptr::NonNull::<c_void>::dangling().as_ptr() {
                ptr::null_mut()
            } else {
                p
            }
        } else {
            ptr::null_mut()
        }
    } ?= ptr::null_mut()
}

ffi_fn! {
    /// Query the return type of this task.
    fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type {
        // instead of blowing up spectacularly, just say this null task
        // doesn't have a value to retrieve.
        non_null!(&*task ?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type()
    }
}

ffi_fn! {
    /// Set a user data pointer to be associated with this task.
    ///
    /// This value will be passed to task callbacks, and can be checked later
    /// with `hyper_task_userdata`.
    ///
    /// This is useful for telling apart tasks for different requests that are
    /// running on the same executor.
    fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) {
        // Manual null check (rather than non_null!) so a null task is a
        // silent no-op even in debug builds.
        if task.is_null() {
            return;
        }

        unsafe { (*task).userdata = UserDataPointer(userdata) };
    }
}

ffi_fn! {
    /// Retrieve the userdata that has been set via `hyper_task_set_userdata`.
    fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void {
        non_null!(&*task ?= ptr::null_mut()).userdata.0
    } ?= ptr::null_mut()
}
+
+// ===== impl AsTaskType =====
+
// SAFETY: () carries no value, so EMPTY is the accurate tag.
unsafe impl AsTaskType for () {
    fn as_task_type(&self) -> hyper_task_return_type {
        hyper_task_return_type::HYPER_TASK_EMPTY
    }
}

// SAFETY: a crate::Error is what C frees via hyper_error_free.
unsafe impl AsTaskType for crate::Error {
    fn as_task_type(&self) -> hyper_task_return_type {
        hyper_task_return_type::HYPER_TASK_ERROR
    }
}

// Any taggable value can be boxed directly.
impl<T> IntoDynTaskType for T
where
    T: AsTaskType + Send + Sync + 'static,
{
    fn into_dyn_task_type(self) -> BoxAny {
        Box::new(self)
    }
}

// Results collapse into either the success value or the boxed error; the
// task's type tag (ERROR vs. the success tag) tells C which it got.
impl<T> IntoDynTaskType for crate::Result<T>
where
    T: IntoDynTaskType + Send + Sync + 'static,
{
    fn into_dyn_task_type(self) -> BoxAny {
        match self {
            Ok(val) => val.into_dyn_task_type(),
            Err(err) => Box::new(err),
        }
    }
}

// None is surfaced to C as an EMPTY task value (a null pointer).
impl<T> IntoDynTaskType for Option<T>
where
    T: IntoDynTaskType + Send + Sync + 'static,
{
    fn into_dyn_task_type(self) -> BoxAny {
        match self {
            Some(val) => val.into_dyn_task_type(),
            None => ().into_dyn_task_type(),
        }
    }
}
+
+// ===== impl hyper_context =====
+
impl hyper_context<'_> {
    /// Reinterprets a `&mut Context` as the FFI-opaque `&mut hyper_context`.
    pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> {
        // SAFETY: hyper_context is a single-field tuple struct around
        // Context. A struct with only one field has the same layout as that
        // field.
        // NOTE(review): this relies on single-field layout equivalence for a
        // repr(Rust) type; a #[repr(transparent)] on hyper_context would
        // make the guarantee explicit.
        unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) }
    }
}

ffi_fn! {
    /// Creates a waker associated with the task context.
    ///
    /// The waker can be used to inform the task's executor that the task is
    /// ready to make progress (using `hyper_waker_wake`).
    ///
    /// Typically this only needs to be called once, but it can be called
    /// multiple times, returning a new waker each time.
    ///
    /// To avoid a memory leak, the waker must eventually be consumed by
    /// `hyper_waker_free` or `hyper_waker_wake`.
    fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker {
        let waker = non_null!(&mut *cx ?= ptr::null_mut()).0.waker().clone();
        Box::into_raw(Box::new(hyper_waker { waker }))
    } ?= ptr::null_mut()
}
+
+// ===== impl hyper_waker =====
+
ffi_fn! {
    /// Free a waker.
    ///
    /// This should only be used if the request isn't consumed by
    /// `hyper_waker_wake`.
    fn hyper_waker_free(waker: *mut hyper_waker) {
        drop(non_null!(Box::from_raw(waker) ?= ()));
    }
}

ffi_fn! {
    /// Wake up the task associated with a waker.
    ///
    /// This does not do work towards associated task. Instead, it signals
    /// to the task's executor that the task is ready to make progress. The
    /// application is responsible for calling hyper_executor_poll, which
    /// will in turn do work on all tasks that are ready to make progress.
    ///
    /// NOTE: This consumes the waker. You should not use or free the waker afterwards.
    fn hyper_waker_wake(waker: *mut hyper_waker) {
        // wake() consumes the std Waker, matching the documented
        // consume-on-wake C contract.
        let waker = non_null!(Box::from_raw(waker) ?= ());
        waker.waker.wake();
    }
}
diff --git a/vendor/hyper/src/headers.rs b/vendor/hyper/src/headers.rs
new file mode 100644
index 00000000..8bebdb9b
--- /dev/null
+++ b/vendor/hyper/src/headers.rs
@@ -0,0 +1,159 @@
+#[cfg(all(feature = "client", feature = "http1"))]
+use bytes::BytesMut;
+use http::header::HeaderValue;
+#[cfg(all(feature = "http2", feature = "client"))]
+use http::Method;
+#[cfg(any(feature = "client", all(feature = "server", feature = "http2")))]
+use http::{
+ header::{ValueIter, CONTENT_LENGTH},
+ HeaderMap,
+};
+
+#[cfg(feature = "http1")]
+/// Returns `true` if this `Connection` header value lists the
+/// `keep-alive` option.
+pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool {
+    connection_has(value, "keep-alive")
+}
+
+#[cfg(feature = "http1")]
+/// Returns `true` if this `Connection` header value lists the
+/// `close` option.
+pub(super) fn connection_close(value: &HeaderValue) -> bool {
+    connection_has(value, "close")
+}
+
+#[cfg(feature = "http1")]
+/// Checks whether a comma-separated `Connection` header value contains
+/// `needle`, comparing each option trimmed and ASCII case-insensitively.
+/// A value that is not valid visible-ASCII never matches.
+fn connection_has(value: &HeaderValue, needle: &str) -> bool {
+    value.to_str().map_or(false, |line| {
+        line.split(',')
+            .any(|option| option.trim().eq_ignore_ascii_case(needle))
+    })
+}
+
+#[cfg(all(feature = "http1", feature = "server"))]
+/// Parses a single `Content-Length` header value as an unsigned decimal
+/// length; returns `None` for anything that is not all ASCII digits.
+pub(super) fn content_length_parse(value: &HeaderValue) -> Option<u64> {
+    from_digits(value.as_bytes())
+}
+
+#[cfg(any(feature = "client", all(feature = "server", feature = "http2")))]
+/// Parses every `Content-Length` header in the map; see
+/// `content_length_parse_all_values` for the agreement rules.
+pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option<u64> {
+    let values = headers.get_all(CONTENT_LENGTH).into_iter();
+    content_length_parse_all_values(values)
+}
+
+#[cfg(any(feature = "client", all(feature = "server", feature = "http2")))]
+/// Folds all `Content-Length` values (each possibly comma-separated) into
+/// one length.
+///
+/// Multiple occurrences are tolerated only when every occurrence parses
+/// and all agree on the same number; any parse failure or disagreement
+/// yields `None`. No occurrences at all also yields `None`.
+pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option<u64> {
+    let mut agreed: Option<u64> = None;
+    for value in values {
+        // Non-ASCII header values cannot be a valid Content-Length.
+        let line = value.to_str().ok()?;
+        for part in line.split(',') {
+            let n = from_digits(part.trim().as_bytes())?;
+            match agreed {
+                None => agreed = Some(n),
+                Some(prev) if prev == n => {}
+                // Conflicting lengths: treat the whole set as invalid.
+                Some(_) => return None,
+            }
+        }
+    }
+    agreed
+}
+
+/// Parses an all-ASCII-digit byte slice into a `u64`.
+///
+/// Unlike `u64::from_str`, this rejects sign prefixes (`+`/`-`). An empty
+/// slice, any non-digit byte, or arithmetic overflow yields `None`.
+fn from_digits(bytes: &[u8]) -> Option<u64> {
+    if bytes.is_empty() {
+        return None;
+    }
+
+    // Fold digit-by-digit with checked arithmetic; `?` propagates overflow
+    // out of the closure as `None`, short-circuiting the fold.
+    bytes.iter().try_fold(0u64, |acc, &b| {
+        if b.is_ascii_digit() {
+            acc.checked_mul(10)?.checked_add(u64::from(b - b'0'))
+        } else {
+            // Not a DIGIT: the whole value is invalid.
+            None
+        }
+    })
+}
+
+#[cfg(all(feature = "http2", feature = "client"))]
+/// Whether a request with this method has defined payload semantics.
+///
+/// Bodies on GET/HEAD/DELETE/CONNECT requests have no defined meaning,
+/// so hyper avoids advertising one for them.
+pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool {
+    match *method {
+        Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false,
+        _ => true,
+    }
+}
+
+#[cfg(feature = "http2")]
+/// Inserts `Content-Length: len` unless the map already carries one;
+/// an existing value is left untouched.
+pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) {
+    if let http::header::Entry::Vacant(entry) = headers.entry(CONTENT_LENGTH) {
+        entry.insert(HeaderValue::from(len));
+    }
+}
+
+#[cfg(all(feature = "client", feature = "http1"))]
+/// Returns `true` if the map's `Transfer-Encoding` headers end with the
+/// `chunked` coding.
+pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool {
+    let encodings = headers.get_all(http::header::TRANSFER_ENCODING).into_iter();
+    is_chunked(encodings)
+}
+
+#[cfg(all(feature = "client", feature = "http1"))]
+/// Checks whether the final `Transfer-Encoding` value names `chunked`.
+///
+/// Per spec, `chunked` must always be the last applied coding, so only
+/// the last header line needs inspecting.
+pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool {
+    encodings.next_back().map_or(false, is_chunked_)
+}
+
+#[cfg(feature = "http1")]
+/// Returns `true` if the last coding in this single header value is
+/// `chunked` (trimmed, ASCII case-insensitive).
+///
+/// `chunked` must always be the last encoding, according to spec, so only
+/// the segment after the final comma matters. Non-ASCII values never match.
+pub(super) fn is_chunked_(value: &HeaderValue) -> bool {
+    value
+        .to_str()
+        .ok()
+        .and_then(|s| s.rsplit(',').next())
+        .map_or(false, |encoding| encoding.trim().eq_ignore_ascii_case("chunked"))
+}
+
+#[cfg(all(feature = "client", feature = "http1"))]
+/// Appends the `chunked` coding to an existing `Transfer-Encoding` entry,
+/// or inserts `chunked` as the whole value if the entry's iterator yields
+/// nothing. `chunked` is appended last, matching the spec requirement that
+/// it be the final coding.
+pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) {
+    const CHUNKED: &str = "chunked";
+
+    // Only the last header line needs amending; earlier lines keep their
+    // codings unchanged.
+    if let Some(line) = entry.iter_mut().next_back() {
+        // + 2 for ", "
+        let new_cap = line.as_bytes().len() + CHUNKED.len() + 2;
+        let mut buf = BytesMut::with_capacity(new_cap);
+        buf.extend_from_slice(line.as_bytes());
+        buf.extend_from_slice(b", ");
+        buf.extend_from_slice(CHUNKED.as_bytes());
+
+        // The source value was a valid HeaderValue and we appended only
+        // ASCII, so re-validation cannot fail.
+        *line = HeaderValue::from_maybe_shared(buf.freeze())
+            .expect("original header value plus ascii is valid");
+        return;
+    }
+
+    entry.insert(HeaderValue::from_static(CHUNKED));
+}
diff --git a/vendor/hyper/src/lib.rs b/vendor/hyper/src/lib.rs
new file mode 100644
index 00000000..4683fd65
--- /dev/null
+++ b/vendor/hyper/src/lib.rs
@@ -0,0 +1,139 @@
+#![deny(missing_docs)]
+#![deny(missing_debug_implementations)]
+#![cfg_attr(test, deny(rust_2018_idioms))]
+#![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))]
+#![cfg_attr(all(test, feature = "full"), deny(warnings))]
+#![cfg_attr(all(test, feature = "nightly"), feature(test))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+//! # hyper
+//!
+//! hyper is a **fast** and **correct** HTTP implementation written in and for Rust.
+//!
+//! ## Features
+//!
+//! - HTTP/1 and HTTP/2
+//! - Asynchronous design
+//! - Leading in performance
+//! - Tested and **correct**
+//! - Extensive production use
+//! - [Client](client/index.html) and [Server](server/index.html) APIs
+//!
+//! If just starting out, **check out the [Guides](https://hyper.rs/guides/1/)
+//! first.**
+//!
+//! ## "Low-level"
+//!
+//! hyper is a lower-level HTTP library, meant to be a building block
+//! for libraries and applications.
+//!
+//! If looking for just a convenient HTTP client, consider the
+//! [reqwest](https://crates.io/crates/reqwest) crate.
+//!
+//! # Optional Features
+//!
+//! hyper uses a set of [feature flags] to reduce the amount of compiled code.
+//! It is possible to just enable certain features over others. By default,
+//! hyper does not enable any features but allows one to enable a subset for
+//! their use case. Below is a list of the available feature flags. You may
+//! also notice above each function, struct and trait there is listed one or
+//! more feature flags that are required for that item to be used.
+//!
+//! If you are new to hyper it is possible to enable the `full` feature flag
+//! which will enable all public APIs. Beware though that this will pull in
+//! many extra dependencies that you may not need.
+//!
+//! The following optional features are available:
+//!
+//! - `http1`: Enables HTTP/1 support.
+//! - `http2`: Enables HTTP/2 support.
+//! - `client`: Enables the HTTP `client`.
+//! - `server`: Enables the HTTP `server`.
+//!
+//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
+//!
+//! ## Unstable Features
+//!
+//! hyper includes a set of unstable optional features that can be enabled through the use of a
+//! feature flag and a [configuration flag].
+//!
+//! The following is a list of feature flags and their corresponding `RUSTFLAG`:
+//!
+//! - `ffi`: Enables C API for hyper `hyper_unstable_ffi`.
+//! - `tracing`: Enables debug logging with `hyper_unstable_tracing`.
+//!
+//! For example:
+//!
+//! ```notrust
+//! RUSTFLAGS="--cfg hyper_unstable_tracing" cargo build
+//! ```
+//!
+//! [configuration flag]: https://doc.rust-lang.org/reference/conditional-compilation.html
+//!
+//! # Stability
+//!
+//! It's worth talking a bit about the stability of hyper. hyper's API follows
+//! [SemVer](https://semver.org). Breaking changes will only be introduced in
+//! major versions, if ever. New additions to the API, such as new types,
+//! methods, or traits will only be added in minor versions.
+//!
+//! Some parts of hyper are documented as NOT being part of the stable API. The
+//! following is a brief list, you can read more about each one in the relevant
+//! part of the documentation.
+//!
+//! - Downcasting error types from `Error::source()` is not considered stable.
+//! - Private dependencies' use of global variables is not considered stable.
+//! So, if a dependency uses `log` or `tracing`, hyper doesn't promise it
+//! will continue to do so.
+//! - Behavior from default options is not stable. hyper reserves the right to
+//! add new options that are enabled by default which might alter the
+//! behavior, for the purposes of protection. It is also possible to _change_
+//! what the default options are set to, also in efforts to protect the
+//! most people possible.
+#[doc(hidden)]
+pub use http;
+
+#[cfg(all(test, feature = "nightly"))]
+extern crate test;
+
+#[doc(no_inline)]
+pub use http::{header, HeaderMap, Method, Request, Response, StatusCode, Uri, Version};
+
+pub use crate::error::{Error, Result};
+
+#[macro_use]
+mod cfg;
+
+#[macro_use]
+mod trace;
+
+pub mod body;
+mod common;
+mod error;
+pub mod ext;
+#[cfg(test)]
+mod mock;
+pub mod rt;
+pub mod service;
+pub mod upgrade;
+
+#[cfg(feature = "ffi")]
+#[cfg_attr(docsrs, doc(cfg(all(feature = "ffi", hyper_unstable_ffi))))]
+pub mod ffi;
+
+cfg_proto! {
+ mod headers;
+ mod proto;
+}
+
+cfg_feature! {
+ #![feature = "client"]
+
+ pub mod client;
+}
+
+cfg_feature! {
+ #![feature = "server"]
+
+ pub mod server;
+}
diff --git a/vendor/hyper/src/mock.rs b/vendor/hyper/src/mock.rs
new file mode 100644
index 00000000..1dd57de3
--- /dev/null
+++ b/vendor/hyper/src/mock.rs
@@ -0,0 +1,235 @@
+// FIXME: re-implement tests with `async/await`
+/*
+#[cfg(feature = "runtime")]
+use std::collections::HashMap;
+use std::cmp;
+use std::io::{self, Read, Write};
+#[cfg(feature = "runtime")]
+use std::sync::{Arc, Mutex};
+
+use bytes::Buf;
+use futures::{Async, Poll};
+#[cfg(feature = "runtime")]
+use futures::Future;
+use futures::task::{self, Task};
+use tokio_io::{AsyncRead, AsyncWrite};
+
+#[cfg(feature = "runtime")]
+use crate::client::connect::{Connect, Connected, Destination};
+
+
+
+#[cfg(feature = "runtime")]
+pub struct Duplex {
+ inner: Arc<Mutex<DuplexInner>>,
+}
+
+#[cfg(feature = "runtime")]
+struct DuplexInner {
+ handle_read_task: Option<Task>,
+ read: AsyncIo<MockCursor>,
+ write: AsyncIo<MockCursor>,
+}
+
+#[cfg(feature = "runtime")]
+impl Duplex {
+ pub(crate) fn channel() -> (Duplex, DuplexHandle) {
+ let mut inner = DuplexInner {
+ handle_read_task: None,
+ read: AsyncIo::new_buf(Vec::new(), 0),
+ write: AsyncIo::new_buf(Vec::new(), std::usize::MAX),
+ };
+
+ inner.read.park_tasks(true);
+ inner.write.park_tasks(true);
+
+ let inner = Arc::new(Mutex::new(inner));
+
+ let duplex = Duplex {
+ inner: inner.clone(),
+ };
+ let handle = DuplexHandle {
+ inner: inner,
+ };
+
+ (duplex, handle)
+ }
+}
+
+#[cfg(feature = "runtime")]
+impl Read for Duplex {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.lock().unwrap().read.read(buf)
+ }
+}
+
+#[cfg(feature = "runtime")]
+impl Write for Duplex {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let mut inner = self.inner.lock().unwrap();
+ let ret = inner.write.write(buf);
+ if let Some(task) = inner.handle_read_task.take() {
+ trace!("waking DuplexHandle read");
+ task.notify();
+ }
+ ret
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.lock().unwrap().write.flush()
+ }
+}
+
+#[cfg(feature = "runtime")]
+impl AsyncRead for Duplex {
+}
+
+#[cfg(feature = "runtime")]
+impl AsyncWrite for Duplex {
+ fn shutdown(&mut self) -> Poll<(), io::Error> {
+ Ok(().into())
+ }
+
+ fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
+ let mut inner = self.inner.lock().unwrap();
+ if let Some(task) = inner.handle_read_task.take() {
+ task.notify();
+ }
+ inner.write.write_buf(buf)
+ }
+}
+
+#[cfg(feature = "runtime")]
+pub struct DuplexHandle {
+ inner: Arc<Mutex<DuplexInner>>,
+}
+
+#[cfg(feature = "runtime")]
+impl DuplexHandle {
+ pub fn read(&self, buf: &mut [u8]) -> Poll<usize, io::Error> {
+ let mut inner = self.inner.lock().unwrap();
+ assert!(buf.len() >= inner.write.inner.len());
+ if inner.write.inner.is_empty() {
+ trace!("DuplexHandle read parking");
+ inner.handle_read_task = Some(task::current());
+ return Ok(Async::NotReady);
+ }
+ inner.write.read(buf).map(Async::Ready)
+ }
+
+ pub fn write(&self, bytes: &[u8]) -> Poll<usize, io::Error> {
+ let mut inner = self.inner.lock().unwrap();
+ assert_eq!(inner.read.inner.pos, 0);
+ assert_eq!(inner.read.inner.vec.len(), 0, "write but read isn't empty");
+ inner
+ .read
+ .inner
+ .vec
+ .extend(bytes);
+ inner.read.block_in(bytes.len());
+ Ok(Async::Ready(bytes.len()))
+ }
+}
+
+#[cfg(feature = "runtime")]
+impl Drop for DuplexHandle {
+ fn drop(&mut self) {
+ trace!("mock duplex handle drop");
+ if !::std::thread::panicking() {
+ let mut inner = self.inner.lock().unwrap();
+ inner.read.close();
+ inner.write.close();
+ }
+ }
+}
+
+#[cfg(feature = "runtime")]
+type BoxedConnectFut = Box<dyn Future<Item=(Duplex, Connected), Error=io::Error> + Send>;
+
+#[cfg(feature = "runtime")]
+#[derive(Clone)]
+pub struct MockConnector {
+ mocks: Arc<Mutex<MockedConnections>>,
+}
+
+#[cfg(feature = "runtime")]
+struct MockedConnections(HashMap<String, Vec<BoxedConnectFut>>);
+
+#[cfg(feature = "runtime")]
+impl MockConnector {
+ pub fn new() -> MockConnector {
+ MockConnector {
+ mocks: Arc::new(Mutex::new(MockedConnections(HashMap::new()))),
+ }
+ }
+
+ pub fn mock(&mut self, key: &str) -> DuplexHandle {
+ use futures::future;
+ self.mock_fut(key, future::ok::<_, ()>(()))
+ }
+
+ pub fn mock_fut<F>(&mut self, key: &str, fut: F) -> DuplexHandle
+ where
+ F: Future + Send + 'static,
+ {
+ self.mock_opts(key, Connected::new(), fut)
+ }
+
+ pub fn mock_opts<F>(&mut self, key: &str, connected: Connected, fut: F) -> DuplexHandle
+ where
+ F: Future + Send + 'static,
+ {
+ let key = key.to_owned();
+
+ let (duplex, handle) = Duplex::channel();
+
+ let fut = Box::new(fut.then(move |_| {
+ trace!("MockConnector mocked fut ready");
+ Ok((duplex, connected))
+ }));
+ self.mocks.lock().unwrap().0.entry(key)
+ .or_insert(Vec::new())
+ .push(fut);
+
+ handle
+ }
+}
+
+#[cfg(feature = "runtime")]
+impl Connect for MockConnector {
+ type Transport = Duplex;
+ type Error = io::Error;
+ type Future = BoxedConnectFut;
+
+ fn connect(&self, dst: Destination) -> Self::Future {
+ trace!("mock connect: {:?}", dst);
+ let key = format!("{}://{}{}", dst.scheme(), dst.host(), if let Some(port) = dst.port() {
+ format!(":{}", port)
+ } else {
+ "".to_owned()
+ });
+ let mut mocks = self.mocks.lock().unwrap();
+ let mocks = mocks.0.get_mut(&key)
+ .expect(&format!("unknown mocks uri: {}", key));
+ assert!(!mocks.is_empty(), "no additional mocks for {}", key);
+ mocks.remove(0)
+ }
+}
+
+
+#[cfg(feature = "runtime")]
+impl Drop for MockedConnections {
+ fn drop(&mut self) {
+ if !::std::thread::panicking() {
+ for (key, mocks) in self.0.iter() {
+ assert_eq!(
+ mocks.len(),
+ 0,
+ "not all mocked connects for {:?} were used",
+ key,
+ );
+ }
+ }
+ }
+}
+*/
diff --git a/vendor/hyper/src/proto/h1/conn.rs b/vendor/hyper/src/proto/h1/conn.rs
new file mode 100644
index 00000000..bea8faa2
--- /dev/null
+++ b/vendor/hyper/src/proto/h1/conn.rs
@@ -0,0 +1,1530 @@
+use std::fmt;
+#[cfg(feature = "server")]
+use std::future::Future;
+use std::io;
+use std::marker::{PhantomData, Unpin};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+#[cfg(feature = "server")]
+use std::time::{Duration, Instant};
+
+use crate::rt::{Read, Write};
+use bytes::{Buf, Bytes};
+use futures_util::ready;
+use http::header::{HeaderValue, CONNECTION, TE};
+use http::{HeaderMap, Method, Version};
+use http_body::Frame;
+use httparse::ParserConfig;
+
+use super::io::Buffered;
+use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants};
+use crate::body::DecodedLength;
+#[cfg(feature = "server")]
+use crate::common::time::Time;
+use crate::headers;
+use crate::proto::{BodyLength, MessageHead};
+#[cfg(feature = "server")]
+use crate::rt::Sleep;
+
+const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
+
+/// This handles a connection, which will have been established over an
+/// `Read + Write` (like a socket), and will likely include multiple
+/// `Transaction`s over HTTP.
+///
+/// The connection will determine when a message begins and ends as well as
+/// determine if this connection can be kept alive after the message,
+/// or if it is complete.
+pub(crate) struct Conn<I, B, T> {
+    /// Buffered transport wrapping the raw IO, holding encoded write bytes.
+    io: Buffered<I, EncodedBuf<B>>,
+    /// Read/write/keep-alive state for the in-flight transaction.
+    state: State,
+    /// Ties the client/server `Http1Transaction` role `T` to this conn
+    /// without storing a value of it.
+    _marker: PhantomData<fn(T)>,
+}
+
+impl<I, B, T> Conn<I, B, T>
+where
+ I: Read + Write + Unpin,
+ B: Buf,
+ T: Http1Transaction,
+{
+ pub(crate) fn new(io: I) -> Conn<I, B, T> {
+ Conn {
+ io: Buffered::new(io),
+ state: State {
+ allow_half_close: false,
+ cached_headers: None,
+ error: None,
+ keep_alive: KA::Busy,
+ method: None,
+ h1_parser_config: ParserConfig::default(),
+ h1_max_headers: None,
+ #[cfg(feature = "server")]
+ h1_header_read_timeout: None,
+ #[cfg(feature = "server")]
+ h1_header_read_timeout_fut: None,
+ #[cfg(feature = "server")]
+ h1_header_read_timeout_running: false,
+ #[cfg(feature = "server")]
+ date_header: true,
+ #[cfg(feature = "server")]
+ timer: Time::Empty,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ title_case_headers: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: None,
+ notify_read: false,
+ reading: Reading::Init,
+ writing: Writing::Init,
+ upgrade: None,
+ // We assume a modern world where the remote speaks HTTP/1.1.
+ // If they tell us otherwise, we'll downgrade in `read_head`.
+ version: Version::HTTP_11,
+ allow_trailer_fields: false,
+ },
+ _marker: PhantomData,
+ }
+ }
+
+ #[cfg(feature = "server")]
+ pub(crate) fn set_timer(&mut self, timer: Time) {
+ self.state.timer = timer;
+ }
+
+ #[cfg(feature = "server")]
+ pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) {
+ self.io.set_flush_pipeline(enabled);
+ }
+
+ pub(crate) fn set_write_strategy_queue(&mut self) {
+ self.io.set_write_strategy_queue();
+ }
+
+ pub(crate) fn set_max_buf_size(&mut self, max: usize) {
+ self.io.set_max_buf_size(max);
+ }
+
+ #[cfg(feature = "client")]
+ pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {
+ self.io.set_read_buf_exact_size(sz);
+ }
+
+ pub(crate) fn set_write_strategy_flatten(&mut self) {
+ self.io.set_write_strategy_flatten();
+ }
+
+ pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) {
+ self.state.h1_parser_config = parser_config;
+ }
+
+ pub(crate) fn set_title_case_headers(&mut self) {
+ self.state.title_case_headers = true;
+ }
+
+ pub(crate) fn set_preserve_header_case(&mut self) {
+ self.state.preserve_header_case = true;
+ }
+
+ #[cfg(feature = "ffi")]
+ pub(crate) fn set_preserve_header_order(&mut self) {
+ self.state.preserve_header_order = true;
+ }
+
+ #[cfg(feature = "client")]
+ pub(crate) fn set_h09_responses(&mut self) {
+ self.state.h09_responses = true;
+ }
+
+ pub(crate) fn set_http1_max_headers(&mut self, val: usize) {
+ self.state.h1_max_headers = Some(val);
+ }
+
+ #[cfg(feature = "server")]
+ pub(crate) fn set_http1_header_read_timeout(&mut self, val: Duration) {
+ self.state.h1_header_read_timeout = Some(val);
+ }
+
+ #[cfg(feature = "server")]
+ pub(crate) fn set_allow_half_close(&mut self) {
+ self.state.allow_half_close = true;
+ }
+
+ #[cfg(feature = "server")]
+ pub(crate) fn disable_date_header(&mut self) {
+ self.state.date_header = false;
+ }
+
+ pub(crate) fn into_inner(self) -> (I, Bytes) {
+ self.io.into_inner()
+ }
+
+ pub(crate) fn pending_upgrade(&mut self) -> Option<crate::upgrade::Pending> {
+ self.state.upgrade.take()
+ }
+
+ pub(crate) fn is_read_closed(&self) -> bool {
+ self.state.is_read_closed()
+ }
+
+ pub(crate) fn is_write_closed(&self) -> bool {
+ self.state.is_write_closed()
+ }
+
+ pub(crate) fn can_read_head(&self) -> bool {
+ if !matches!(self.state.reading, Reading::Init) {
+ return false;
+ }
+
+ if T::should_read_first() {
+ return true;
+ }
+
+ !matches!(self.state.writing, Writing::Init)
+ }
+
+ pub(crate) fn can_read_body(&self) -> bool {
+ matches!(
+ self.state.reading,
+ Reading::Body(..) | Reading::Continue(..)
+ )
+ }
+
+ #[cfg(feature = "server")]
+ pub(crate) fn has_initial_read_write_state(&self) -> bool {
+ matches!(self.state.reading, Reading::Init)
+ && matches!(self.state.writing, Writing::Init)
+ && self.io.read_buf().is_empty()
+ }
+
+ fn should_error_on_eof(&self) -> bool {
+ // If we're idle, it's probably just the connection closing gracefully.
+ T::should_error_on_parse_eof() && !self.state.is_idle()
+ }
+
+ fn has_h2_prefix(&self) -> bool {
+ let read_buf = self.io.read_buf();
+ read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE
+ }
+
+ pub(super) fn poll_read_head(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<crate::Result<(MessageHead<T::Incoming>, DecodedLength, Wants)>>> {
+ debug_assert!(self.can_read_head());
+ trace!("Conn::read_head");
+
+ #[cfg(feature = "server")]
+ if !self.state.h1_header_read_timeout_running {
+ if let Some(h1_header_read_timeout) = self.state.h1_header_read_timeout {
+ let deadline = Instant::now() + h1_header_read_timeout;
+ self.state.h1_header_read_timeout_running = true;
+ match self.state.h1_header_read_timeout_fut {
+ Some(ref mut h1_header_read_timeout_fut) => {
+ trace!("resetting h1 header read timeout timer");
+ self.state.timer.reset(h1_header_read_timeout_fut, deadline);
+ }
+ None => {
+ trace!("setting h1 header read timeout timer");
+ self.state.h1_header_read_timeout_fut =
+ Some(self.state.timer.sleep_until(deadline));
+ }
+ }
+ }
+ }
+
+ let msg = match self.io.parse::<T>(
+ cx,
+ ParseContext {
+ cached_headers: &mut self.state.cached_headers,
+ req_method: &mut self.state.method,
+ h1_parser_config: self.state.h1_parser_config.clone(),
+ h1_max_headers: self.state.h1_max_headers,
+ preserve_header_case: self.state.preserve_header_case,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: self.state.preserve_header_order,
+ h09_responses: self.state.h09_responses,
+ #[cfg(feature = "client")]
+ on_informational: &mut self.state.on_informational,
+ },
+ ) {
+ Poll::Ready(Ok(msg)) => msg,
+ Poll::Ready(Err(e)) => return self.on_read_head_error(e),
+ Poll::Pending => {
+ #[cfg(feature = "server")]
+ if self.state.h1_header_read_timeout_running {
+ if let Some(ref mut h1_header_read_timeout_fut) =
+ self.state.h1_header_read_timeout_fut
+ {
+ if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() {
+ self.state.h1_header_read_timeout_running = false;
+
+ warn!("read header from client timeout");
+ return Poll::Ready(Some(Err(crate::Error::new_header_timeout())));
+ }
+ }
+ }
+
+ return Poll::Pending;
+ }
+ };
+
+ #[cfg(feature = "server")]
+ {
+ self.state.h1_header_read_timeout_running = false;
+ self.state.h1_header_read_timeout_fut = None;
+ }
+
+ // Note: don't deconstruct `msg` into local variables, it appears
+ // the optimizer doesn't remove the extra copies.
+
+ debug!("incoming body is {}", msg.decode);
+
+ // Prevent accepting HTTP/0.9 responses after the initial one, if any.
+ self.state.h09_responses = false;
+
+ // Drop any OnInformational callbacks, we're done there!
+ #[cfg(feature = "client")]
+ {
+ self.state.on_informational = None;
+ }
+
+ self.state.busy();
+ self.state.keep_alive &= msg.keep_alive;
+ self.state.version = msg.head.version;
+
+ let mut wants = if msg.wants_upgrade {
+ Wants::UPGRADE
+ } else {
+ Wants::EMPTY
+ };
+
+ if msg.decode == DecodedLength::ZERO {
+ if msg.expect_continue {
+ debug!("ignoring expect-continue since body is empty");
+ }
+ self.state.reading = Reading::KeepAlive;
+ if !T::should_read_first() {
+ self.try_keep_alive(cx);
+ }
+ } else if msg.expect_continue && msg.head.version.gt(&Version::HTTP_10) {
+ let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support
+ self.state.reading = Reading::Continue(Decoder::new(
+ msg.decode,
+ self.state.h1_max_headers,
+ h1_max_header_size,
+ ));
+ wants = wants.add(Wants::EXPECT);
+ } else {
+ let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support
+ self.state.reading = Reading::Body(Decoder::new(
+ msg.decode,
+ self.state.h1_max_headers,
+ h1_max_header_size,
+ ));
+ }
+
+ self.state.allow_trailer_fields = msg
+ .head
+ .headers
+ .get(TE)
+ .map_or(false, |te_header| te_header == "trailers");
+
+ Poll::Ready(Some(Ok((msg.head, msg.decode, wants))))
+ }
+
+ fn on_read_head_error<Z>(&mut self, e: crate::Error) -> Poll<Option<crate::Result<Z>>> {
+ // If we are currently waiting on a message, then an empty
+ // message should be reported as an error. If not, it is just
+ // the connection closing gracefully.
+ let must_error = self.should_error_on_eof();
+ self.close_read();
+ self.io.consume_leading_lines();
+ let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty();
+ if was_mid_parse || must_error {
+ // We check if the buf contains the h2 Preface
+ debug!(
+ "parse error ({}) with {} bytes",
+ e,
+ self.io.read_buf().len()
+ );
+ match self.on_parse_error(e) {
+ Ok(()) => Poll::Pending, // XXX: wat?
+ Err(e) => Poll::Ready(Some(Err(e))),
+ }
+ } else {
+ debug!("read eof");
+ self.close_write();
+ Poll::Ready(None)
+ }
+ }
+
+ pub(crate) fn poll_read_body(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<io::Result<Frame<Bytes>>>> {
+ debug_assert!(self.can_read_body());
+
+ let (reading, ret) = match self.state.reading {
+ Reading::Body(ref mut decoder) => {
+ match ready!(decoder.decode(cx, &mut self.io)) {
+ Ok(frame) => {
+ if frame.is_data() {
+ let slice = frame.data_ref().unwrap_or_else(|| unreachable!());
+ let (reading, maybe_frame) = if decoder.is_eof() {
+ debug!("incoming body completed");
+ (
+ Reading::KeepAlive,
+ if !slice.is_empty() {
+ Some(Ok(frame))
+ } else {
+ None
+ },
+ )
+ } else if slice.is_empty() {
+ error!("incoming body unexpectedly ended");
+ // This should be unreachable, since all 3 decoders
+ // either set eof=true or return an Err when reading
+ // an empty slice...
+ (Reading::Closed, None)
+ } else {
+ return Poll::Ready(Some(Ok(frame)));
+ };
+ (reading, Poll::Ready(maybe_frame))
+ } else if frame.is_trailers() {
+ (Reading::Closed, Poll::Ready(Some(Ok(frame))))
+ } else {
+ trace!("discarding unknown frame");
+ (Reading::Closed, Poll::Ready(None))
+ }
+ }
+ Err(e) => {
+ debug!("incoming body decode error: {}", e);
+ (Reading::Closed, Poll::Ready(Some(Err(e))))
+ }
+ }
+ }
+ Reading::Continue(ref decoder) => {
+ // Write the 100 Continue if not already responded...
+ if let Writing::Init = self.state.writing {
+ trace!("automatically sending 100 Continue");
+ let cont = b"HTTP/1.1 100 Continue\r\n\r\n";
+ self.io.headers_buf().extend_from_slice(cont);
+ }
+
+ // And now recurse once in the Reading::Body state...
+ self.state.reading = Reading::Body(decoder.clone());
+ return self.poll_read_body(cx);
+ }
+ _ => unreachable!("poll_read_body invalid state: {:?}", self.state.reading),
+ };
+
+ self.state.reading = reading;
+ self.try_keep_alive(cx);
+ ret
+ }
+
+ pub(crate) fn wants_read_again(&mut self) -> bool {
+ let ret = self.state.notify_read;
+ self.state.notify_read = false;
+ ret
+ }
+
+ pub(crate) fn poll_read_keep_alive(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ debug_assert!(!self.can_read_head() && !self.can_read_body());
+
+ if self.is_read_closed() {
+ Poll::Pending
+ } else if self.is_mid_message() {
+ self.mid_message_detect_eof(cx)
+ } else {
+ self.require_empty_read(cx)
+ }
+ }
+
+ fn is_mid_message(&self) -> bool {
+ !matches!(
+ (&self.state.reading, &self.state.writing),
+ (&Reading::Init, &Writing::Init)
+ )
+ }
+
+ // This will check to make sure the io object read is empty.
+ //
+ // This should only be called for Clients wanting to enter the idle
+ // state.
+ fn require_empty_read(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed());
+ debug_assert!(!self.is_mid_message());
+ debug_assert!(T::is_client());
+
+ if !self.io.read_buf().is_empty() {
+ debug!("received an unexpected {} bytes", self.io.read_buf().len());
+ return Poll::Ready(Err(crate::Error::new_unexpected_message()));
+ }
+
+ let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?;
+
+ if num_read == 0 {
+ let ret = if self.should_error_on_eof() {
+ trace!("found unexpected EOF on busy connection: {:?}", self.state);
+ Poll::Ready(Err(crate::Error::new_incomplete()))
+ } else {
+ trace!("found EOF on idle connection, closing");
+ Poll::Ready(Ok(()))
+ };
+
+ // order is important: should_error needs state BEFORE close_read
+ self.state.close_read();
+ return ret;
+ }
+
+ debug!(
+ "received unexpected {} bytes on an idle connection",
+ num_read
+ );
+ Poll::Ready(Err(crate::Error::new_unexpected_message()))
+ }
+
+ fn mid_message_detect_eof(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed());
+ debug_assert!(self.is_mid_message());
+
+ if self.state.allow_half_close || !self.io.read_buf().is_empty() {
+ return Poll::Pending;
+ }
+
+ let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?;
+
+ if num_read == 0 {
+ trace!("found unexpected EOF on busy connection: {:?}", self.state);
+ self.state.close_read();
+ Poll::Ready(Err(crate::Error::new_incomplete()))
+ } else {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ fn force_io_read(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+ debug_assert!(!self.state.is_read_closed());
+
+ let result = ready!(self.io.poll_read_from_io(cx));
+ Poll::Ready(result.map_err(|e| {
+ trace!(error = %e, "force_io_read; io error");
+ self.state.close();
+ e
+ }))
+ }
+
+ fn maybe_notify(&mut self, cx: &mut Context<'_>) {
+ // its possible that we returned NotReady from poll() without having
+ // exhausted the underlying Io. We would have done this when we
+ // determined we couldn't keep reading until we knew how writing
+ // would finish.
+
+ match self.state.reading {
+ Reading::Continue(..) | Reading::Body(..) | Reading::KeepAlive | Reading::Closed => {
+ return
+ }
+ Reading::Init => (),
+ };
+
+ match self.state.writing {
+ Writing::Body(..) => return,
+ Writing::Init | Writing::KeepAlive | Writing::Closed => (),
+ }
+
+ if !self.io.is_read_blocked() {
+ if self.io.read_buf().is_empty() {
+ match self.io.poll_read_from_io(cx) {
+ Poll::Ready(Ok(n)) => {
+ if n == 0 {
+ trace!("maybe_notify; read eof");
+ if self.state.is_idle() {
+ self.state.close();
+ } else {
+ self.close_read()
+ }
+ return;
+ }
+ }
+ Poll::Pending => {
+ trace!("maybe_notify; read_from_io blocked");
+ return;
+ }
+ Poll::Ready(Err(e)) => {
+ trace!("maybe_notify; read_from_io error: {}", e);
+ self.state.close();
+ self.state.error = Some(crate::Error::new_io(e));
+ }
+ }
+ }
+ self.state.notify_read = true;
+ }
+ }
+
+ fn try_keep_alive(&mut self, cx: &mut Context<'_>) {
+ self.state.try_keep_alive::<T>();
+ self.maybe_notify(cx);
+ }
+
+ pub(crate) fn can_write_head(&self) -> bool {
+ if !T::should_read_first() && matches!(self.state.reading, Reading::Closed) {
+ return false;
+ }
+
+ match self.state.writing {
+ Writing::Init => self.io.can_headers_buf(),
+ _ => false,
+ }
+ }
+
+ pub(crate) fn can_write_body(&self) -> bool {
+ match self.state.writing {
+ Writing::Body(..) => true,
+ Writing::Init | Writing::KeepAlive | Writing::Closed => false,
+ }
+ }
+
+ pub(crate) fn can_buffer_body(&self) -> bool {
+ self.io.can_buffer()
+ }
+
+ pub(crate) fn write_head(&mut self, head: MessageHead<T::Outgoing>, body: Option<BodyLength>) {
+ if let Some(encoder) = self.encode_head(head, body) {
+ self.state.writing = if !encoder.is_eof() {
+ Writing::Body(encoder)
+ } else if encoder.is_last() {
+ Writing::Closed
+ } else {
+ Writing::KeepAlive
+ };
+ }
+ }
+
    /// Serializes `head` into the headers buffer, returning the body
    /// `Encoder` to use for subsequent `write_body` calls.
    ///
    /// On failure the error is stashed in `self.state.error` and the
    /// write side is closed; `None` is returned.
    fn encode_head(
        &mut self,
        mut head: MessageHead<T::Outgoing>,
        body: Option<BodyLength>,
    ) -> Option<Encoder> {
        debug_assert!(self.can_write_head());

        if !T::should_read_first() {
            // Client role: sending the request marks the exchange busy.
            self.state.busy();
        }

        // Downgrade/patch the head for an HTTP/1.0 peer if needed.
        self.enforce_version(&mut head);

        let buf = self.io.headers_buf();
        match super::role::encode_headers::<T>(
            Encode {
                head: &mut head,
                body,
                #[cfg(feature = "server")]
                keep_alive: self.state.wants_keep_alive(),
                req_method: &mut self.state.method,
                title_case_headers: self.state.title_case_headers,
                #[cfg(feature = "server")]
                date_header: self.state.date_header,
            },
            buf,
        ) {
            Ok(encoder) => {
                debug_assert!(self.state.cached_headers.is_none());
                debug_assert!(head.headers.is_empty());
                // Encoding drained the header map; cache it so its
                // allocation can be reused for the next message.
                self.state.cached_headers = Some(head.headers);

                #[cfg(feature = "client")]
                {
                    self.state.on_informational =
                        head.extensions.remove::<crate::ext::OnInformational>();
                }

                Some(encoder)
            }
            Err(err) => {
                self.state.error = Some(err);
                self.state.writing = Writing::Closed;
                None
            }
        }
    }
+
    // Fix keep-alive when Connection: keep-alive header is not present.
    // Only called for HTTP/1.0 peers (see `enforce_version`), where the
    // default is close-after-response unless keep-alive is explicit.
    fn fix_keep_alive(&mut self, head: &mut MessageHead<T::Outgoing>) {
        let outgoing_is_keep_alive = head
            .headers
            .get(CONNECTION)
            .map_or(false, headers::connection_keep_alive);

        if !outgoing_is_keep_alive {
            match head.version {
                // If response is version 1.0 and keep-alive is not present in the response,
                // disable keep-alive so the server closes the connection
                Version::HTTP_10 => self.state.disable_keep_alive(),
                // If response is version 1.1 and keep-alive is wanted, add
                // Connection: keep-alive header when not present
                Version::HTTP_11 => {
                    if self.state.wants_keep_alive() {
                        head.headers
                            .insert(CONNECTION, HeaderValue::from_static("keep-alive"));
                    }
                }
                _ => (),
            }
        }
    }
+
    // If we know the remote speaks an older version, we try to fix up any messages
    // to work with our older peer.
    fn enforce_version(&mut self, head: &mut MessageHead<T::Outgoing>) {
        match self.state.version {
            Version::HTTP_10 => {
                // Fixes response or connection when keep-alive header is not present
                self.fix_keep_alive(head);
                // If the remote only knows HTTP/1.0, we should force ourselves
                // to only speak HTTP/1.0 as well.
                head.version = Version::HTTP_10;
            }
            Version::HTTP_11 => {
                // Keep-alive was disabled locally: advertise the close so
                // the peer doesn't expect connection reuse.
                if let KA::Disabled = self.state.keep_alive.status() {
                    head.headers
                        .insert(CONNECTION, HeaderValue::from_static("close"));
                }
            }
            _ => (),
        }
        // If the remote speaks HTTP/1.1, then it *should* be fine with
        // both HTTP/1.0 and HTTP/1.1 from us. So again, we just let
        // the user's headers be.
    }
+
    /// Encodes and buffers a non-empty body chunk.
    ///
    /// If the encoder reports EOF after this chunk, the writing state
    /// advances to `Closed` (last message) or `KeepAlive`; otherwise the
    /// `Body` state is kept for more chunks.
    pub(crate) fn write_body(&mut self, chunk: B) {
        debug_assert!(self.can_write_body() && self.can_buffer_body());
        // empty chunks should be discarded at Dispatcher level
        debug_assert!(chunk.remaining() != 0);

        let state = match self.state.writing {
            Writing::Body(ref mut encoder) => {
                self.io.buffer(encoder.encode(chunk));

                if !encoder.is_eof() {
                    // More body expected; stay in Writing::Body.
                    return;
                }

                if encoder.is_last() {
                    Writing::Closed
                } else {
                    Writing::KeepAlive
                }
            }
            _ => unreachable!("write_body invalid state: {:?}", self.state.writing),
        };

        self.state.writing = state;
    }
+
    /// Encodes and buffers trailer fields for the outgoing body.
    ///
    /// Servers silently drop trailers when `allow_trailer_fields` is
    /// false. When the encoder declines to encode them (returns `None` —
    /// presumably a non-chunked body; confirm in `Encoder`), the writing
    /// state is left unchanged.
    pub(crate) fn write_trailers(&mut self, trailers: HeaderMap) {
        if T::is_server() && !self.state.allow_trailer_fields {
            debug!("trailers not allowed to be sent");
            return;
        }
        debug_assert!(self.can_write_body() && self.can_buffer_body());

        match self.state.writing {
            Writing::Body(ref encoder) => {
                if let Some(enc_buf) =
                    encoder.encode_trailers(trailers, self.state.title_case_headers)
                {
                    self.io.buffer(enc_buf);

                    self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {
                        Writing::Closed
                    } else {
                        Writing::KeepAlive
                    };
                }
            }
            _ => unreachable!("write_trailers invalid state: {:?}", self.state.writing),
        }
    }
+
+ pub(crate) fn write_body_and_end(&mut self, chunk: B) {
+ debug_assert!(self.can_write_body() && self.can_buffer_body());
+ // empty chunks should be discarded at Dispatcher level
+ debug_assert!(chunk.remaining() != 0);
+
+ let state = match self.state.writing {
+ Writing::Body(ref encoder) => {
+ let can_keep_alive = encoder.encode_and_end(chunk, self.io.write_buf());
+ if can_keep_alive {
+ Writing::KeepAlive
+ } else {
+ Writing::Closed
+ }
+ }
+ _ => unreachable!("write_body invalid state: {:?}", self.state.writing),
+ };
+
+ self.state.writing = state;
+ }
+
    /// Signals the end of the outgoing body, buffering any terminating
    /// bytes the encoder produces (e.g. the final chunk marker) and
    /// advancing the write state.
    ///
    /// Returns an error — and closes the write side — if the encoder
    /// reports the body ended prematurely.
    pub(crate) fn end_body(&mut self) -> crate::Result<()> {
        debug_assert!(self.can_write_body());

        let encoder = match self.state.writing {
            Writing::Body(ref mut enc) => enc,
            // Not writing a body; nothing to finish.
            _ => return Ok(()),
        };

        // end of stream, that means we should try to eof
        match encoder.end() {
            Ok(end) => {
                if let Some(end) = end {
                    self.io.buffer(end);
                }

                self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {
                    Writing::Closed
                } else {
                    Writing::KeepAlive
                };

                Ok(())
            }
            Err(not_eof) => {
                self.state.writing = Writing::Closed;
                Err(crate::Error::new_body_write_aborted().with(not_eof))
            }
        }
    }
+
    // When we get a parse error, depending on what side we are, we might be able
    // to write a response before closing the connection.
    //
    // - Client: there is nothing we can do
    // - Server: if Response hasn't been written yet, we can send a 4xx response
    fn on_parse_error(&mut self, err: crate::Error) -> crate::Result<()> {
        if let Writing::Init = self.state.writing {
            if self.has_h2_prefix() {
                // The bytes look like an HTTP/2 preface on an HTTP/1
                // connection; report a version error instead.
                return Err(crate::Error::new_version_h2());
            }
            if let Some(msg) = T::on_error(&err) {
                // Drop the cached headers so as to not trigger a debug
                // assert in `write_head`...
                self.state.cached_headers.take();
                self.write_head(msg, None);
                // Stash the original error; it surfaces later via `take_error`.
                self.state.error = Some(err);
                return Ok(());
            }
        }

        // fallback is pass the error back up
        Err(err)
    }
+
    /// Flushes buffered output to the underlying IO; once the flush
    /// completes, re-evaluates keep-alive and read-notification state.
    pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        ready!(Pin::new(&mut self.io).poll_flush(cx))?;
        self.try_keep_alive(cx);
        trace!("flushed({}): {:?}", T::LOG, self.state);
        Poll::Ready(Ok(()))
    }
+
+ pub(crate) fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) {
+ Ok(()) => {
+ trace!("shut down IO complete");
+ Poll::Ready(Ok(()))
+ }
+ Err(e) => {
+ debug!("error shutting down IO: {}", e);
+ Poll::Ready(Err(e))
+ }
+ }
+ }
+
    /// If the read side can be cheaply drained, do so. Otherwise, close.
    pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut Context<'_>) {
        if let Reading::Continue(ref decoder) = self.state.reading {
            // skip sending the 100-continue
            // just move forward to a read, in case a tiny body was included
            self.state.reading = Reading::Body(decoder.clone());
        }

        // Attempt one body read; the state transitions it causes tell us
        // whether the body was fully consumed.
        let _ = self.poll_read_body(cx);

        // If still in Reading::Body, just give up
        match self.state.reading {
            Reading::Init | Reading::KeepAlive => {
                trace!("body drained")
            }
            _ => self.close_read(),
        }
    }
+
    /// Closes the read half of the connection (see `State::close_read`).
    pub(crate) fn close_read(&mut self) {
        self.state.close_read();
    }
+
    /// Closes the write half of the connection (see `State::close_write`).
    pub(crate) fn close_write(&mut self) {
        self.state.close_write();
    }
+
    /// Disables keep-alive: an idle connection is closed immediately,
    /// while an in-flight exchange is allowed to finish before the
    /// connection shuts down.
    #[cfg(feature = "server")]
    pub(crate) fn disable_keep_alive(&mut self) {
        if self.state.is_idle() {
            trace!("disable_keep_alive; closing idle connection");
            self.state.close();
        } else {
            trace!("disable_keep_alive; in-progress connection");
            self.state.disable_keep_alive();
        }
    }
+
+ pub(crate) fn take_error(&mut self) -> crate::Result<()> {
+ if let Some(err) = self.state.error.take() {
+ Err(err)
+ } else {
+ Ok(())
+ }
+ }
+
    /// Prepares a possible HTTP upgrade, returning the user-facing handle
    /// that resolves if/when the upgrade happens.
    pub(super) fn on_upgrade(&mut self) -> crate::upgrade::OnUpgrade {
        trace!("{}: prepare possible HTTP upgrade", T::LOG);
        self.state.prepare_upgrade()
    }
+}
+
+impl<I, B: Buf, T> fmt::Debug for Conn<I, B, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Conn")
+ .field("state", &self.state)
+ .field("io", &self.io)
+ .finish()
+ }
+}
+
// B and T are never pinned (they are never stored behind a Pin here), so
// Conn is Unpin whenever the IO type itself is Unpin.
impl<I: Unpin, B, T> Unpin for Conn<I, B, T> {}
+
struct State {
    /// Whether reading may continue after the peer half-closes.
    /// NOTE(review): presumably set from builder configuration — confirm.
    allow_half_close: bool,
    /// Re-usable HeaderMap to reduce allocating new ones.
    cached_headers: Option<HeaderMap>,
    /// If an error occurs when there wasn't a direct way to return it
    /// back to the user, this is set.
    error: Option<crate::Error>,
    /// Current keep-alive status.
    keep_alive: KA,
    /// If mid-message, the HTTP Method that started it.
    ///
    /// This is used to know things such as if the message can include
    /// a body or not.
    method: Option<Method>,
    /// Parser configuration options for reading heads.
    h1_parser_config: ParserConfig,
    /// Optional cap on the number of headers parsed per message.
    h1_max_headers: Option<usize>,
    /// Optional time budget for reading a request head.
    #[cfg(feature = "server")]
    h1_header_read_timeout: Option<Duration>,
    /// The in-flight sleep future backing the header read timeout.
    #[cfg(feature = "server")]
    h1_header_read_timeout_fut: Option<Pin<Box<dyn Sleep>>>,
    /// Whether the header read timeout is currently armed.
    #[cfg(feature = "server")]
    h1_header_read_timeout_running: bool,
    /// Whether a Date header is emitted on responses (passed to the
    /// header encoder — see `encode_head`).
    #[cfg(feature = "server")]
    date_header: bool,
    /// Timer used to create the header-read-timeout sleep.
    #[cfg(feature = "server")]
    timer: Time,
    /// Preserve the received casing of header names.
    preserve_header_case: bool,
    #[cfg(feature = "ffi")]
    preserve_header_order: bool,
    /// Write header names in Title-Case (passed to the header encoder).
    title_case_headers: bool,
    /// Option for tolerating HTTP/0.9-style responses.
    h09_responses: bool,
    /// If set, called with each 1xx informational response received for
    /// the current request. MUST be unset after a non-1xx response is
    /// received.
    #[cfg(feature = "client")]
    on_informational: Option<crate::ext::OnInformational>,
    /// Set to true when the Dispatcher should poll read operations
    /// again. See the `maybe_notify` method for more.
    notify_read: bool,
    /// State of allowed reads
    reading: Reading,
    /// State of allowed writes
    writing: Writing,
    /// An expected pending HTTP upgrade.
    upgrade: Option<crate::upgrade::Pending>,
    /// Either HTTP/1.0 or 1.1 connection
    version: Version,
    /// Flag to track if trailer fields are allowed to be sent
    allow_trailer_fields: bool,
}
+
/// Read-side state machine for a single message exchange.
#[derive(Debug)]
enum Reading {
    /// No message head has been read yet.
    Init,
    /// Head read; a 100-continue is pending before the body is read
    /// (see `poll_drain_or_close_read`, which skips straight to `Body`).
    Continue(Decoder),
    /// Reading a message body with the given decoder.
    Body(Decoder),
    /// Message fully read; the read side may be reused.
    KeepAlive,
    /// Read side is done; no further reads are allowed.
    Closed,
}
+
/// Write-side state machine for a single message exchange.
enum Writing {
    /// No message head has been written yet.
    Init,
    /// Writing a message body with the given encoder.
    Body(Encoder),
    /// Message fully written; the write side may be reused.
    KeepAlive,
    /// Write side is done; no further writes are allowed.
    Closed,
}
+
// Hand-written Debug: keeps the output focused on the state machine and
// hides the many config fields.
impl fmt::Debug for State {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("State");
        builder
            .field("reading", &self.reading)
            .field("writing", &self.writing)
            .field("keep_alive", &self.keep_alive);

        // Only show error field if it's interesting...
        if let Some(ref error) = self.error {
            builder.field("error", error);
        }

        if self.allow_half_close {
            builder.field("allow_half_close", &true);
        }

        // Purposefully leaving off other fields..

        builder.finish()
    }
}
+
+impl fmt::Debug for Writing {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Writing::Init => f.write_str("Init"),
+ Writing::Body(ref enc) => f.debug_tuple("Body").field(enc).finish(),
+ Writing::KeepAlive => f.write_str("KeepAlive"),
+ Writing::Closed => f.write_str("Closed"),
+ }
+ }
+}
+
// `ka &= enabled` — the parsed message can only ever turn keep-alive OFF
// this way; `true` leaves the current status untouched (Disabled is sticky).
impl std::ops::BitAndAssign<bool> for KA {
    fn bitand_assign(&mut self, enabled: bool) {
        if !enabled {
            trace!("remote disabling keep-alive");
            *self = KA::Disabled;
        }
    }
}
+
/// Keep-alive status of the connection.
#[derive(Clone, Copy, Debug, Default)]
enum KA {
    /// No message exchange in flight; the connection may be reused.
    Idle,
    /// A message exchange is in progress (the default for a new conn).
    #[default]
    Busy,
    /// Keep-alive has been turned off; close when the exchange finishes.
    Disabled,
}
+
impl KA {
    /// Marks the connection idle (exchange complete).
    fn idle(&mut self) {
        *self = KA::Idle;
    }

    /// Marks the connection busy (exchange in progress).
    fn busy(&mut self) {
        *self = KA::Busy;
    }

    /// Turns keep-alive off permanently for this connection.
    fn disable(&mut self) {
        *self = KA::Disabled;
    }

    /// Returns a copy of the current status.
    fn status(&self) -> KA {
        *self
    }
}
+
impl State {
    /// Closes both directions and disables keep-alive.
    fn close(&mut self) {
        trace!("State::close()");
        self.reading = Reading::Closed;
        self.writing = Writing::Closed;
        self.keep_alive.disable();
    }

    /// Closes only the read side; keep-alive is disabled because the
    /// connection can no longer be reused.
    fn close_read(&mut self) {
        trace!("State::close_read()");
        self.reading = Reading::Closed;
        self.keep_alive.disable();
    }

    /// Closes only the write side; keep-alive is disabled because the
    /// connection can no longer be reused.
    fn close_write(&mut self) {
        trace!("State::close_write()");
        self.writing = Writing::Closed;
        self.keep_alive.disable();
    }

    /// Whether keep-alive has not been explicitly disabled.
    fn wants_keep_alive(&self) -> bool {
        !matches!(self.keep_alive.status(), KA::Disabled)
    }

    /// Transitions to idle (ready for the next exchange) when both sides
    /// reached `KeepAlive` and keep-alive is still wanted; otherwise
    /// closes the connection.
    fn try_keep_alive<T: Http1Transaction>(&mut self) {
        match (&self.reading, &self.writing) {
            (&Reading::KeepAlive, &Writing::KeepAlive) => {
                if let KA::Busy = self.keep_alive.status() {
                    self.idle::<T>();
                } else {
                    trace!(
                        "try_keep_alive({}): could keep-alive, but status = {:?}",
                        T::LOG,
                        self.keep_alive
                    );
                    self.close();
                }
            }
            // One side finished cleanly but the other is closed: nothing
            // more can be exchanged, so shut the whole connection down.
            (&Reading::Closed, &Writing::KeepAlive) | (&Reading::KeepAlive, &Writing::Closed) => {
                self.close()
            }
            _ => (),
        }
    }

    fn disable_keep_alive(&mut self) {
        self.keep_alive.disable()
    }

    /// Marks the connection busy — unless keep-alive was already
    /// disabled, which must stay sticky and not be overwritten.
    fn busy(&mut self) {
        if let KA::Disabled = self.keep_alive.status() {
            return;
        }
        self.keep_alive.busy();
    }

    /// Resets per-message state so the connection can serve the next
    /// message exchange.
    fn idle<T: Http1Transaction>(&mut self) {
        debug_assert!(!self.is_idle(), "State::idle() called while idle");

        self.method = None;
        self.keep_alive.idle();

        // Defensive re-check before resetting the state machines.
        if !self.is_idle() {
            self.close();
            return;
        }

        self.reading = Reading::Init;
        self.writing = Writing::Init;

        // !T::should_read_first() means Client.
        //
        // If Client connection has just gone idle, the Dispatcher
        // should try the poll loop one more time, so as to poll the
        // pending requests stream.
        if !T::should_read_first() {
            self.notify_read = true;
        }

        #[cfg(feature = "server")]
        if self.h1_header_read_timeout.is_some() {
            // Next read will start and poll the header read timeout,
            // so we can close the connection if another header isn't
            // received in a timely manner.
            self.notify_read = true;
        }
    }

    fn is_idle(&self) -> bool {
        matches!(self.keep_alive.status(), KA::Idle)
    }

    fn is_read_closed(&self) -> bool {
        matches!(self.reading, Reading::Closed)
    }

    fn is_write_closed(&self) -> bool {
        matches!(self.writing, Writing::Closed)
    }

    /// Creates the pending-upgrade pair, keeping the sender half and
    /// returning the receiver for the user to await.
    fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade {
        let (tx, rx) = crate::upgrade::pending();
        self.upgrade = Some(tx);
        rx
    }
}
+
+#[cfg(test)]
+mod tests {
    // Benchmarks parsing a minimal GET request head straight out of the
    // read buffer; the underlying IO object is empty and never read.
    #[cfg(all(feature = "nightly", not(miri)))]
    #[bench]
    fn bench_read_head_short(b: &mut ::test::Bencher) {
        use super::*;
        use crate::common::io::Compat;
        let s = b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n";
        let len = s.len();
        b.bytes = len as u64;

        // an empty IO, we'll be skipping and using the read buffer anyways
        let io = Compat(tokio_test::io::Builder::new().build());
        let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io);
        *conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
        conn.state.cached_headers = Some(HeaderMap::with_capacity(2));

        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();

        b.iter(|| {
            rt.block_on(futures_util::future::poll_fn(|cx| {
                match conn.poll_read_head(cx) {
                    Poll::Ready(Some(Ok(x))) => {
                        ::test::black_box(&x);
                        let mut headers = x.0.headers;
                        headers.clear();
                        conn.state.cached_headers = Some(headers);
                    }
                    f => panic!("expected Ready(Some(Ok(..))): {:?}", f),
                }

                // Restore the buffer and reading state so the next
                // iteration re-parses the same request bytes.
                conn.io.read_buf_mut().reserve(1);
                // SAFETY: bytes 0..len were initialized above; the parser
                // only consumed them logically, so re-extending the length
                // re-exposes initialized memory.
                unsafe {
                    conn.io.read_buf_mut().set_len(len);
                }
                conn.state.reading = Reading::Init;
                Poll::Ready(())
            }));
        });
    }
+
+ /*
+ //TODO: rewrite these using dispatch... someday...
+ use futures::{Async, Future, Stream, Sink};
+ use futures::future;
+
+ use proto::{self, ClientTransaction, MessageHead, ServerTransaction};
+ use super::super::Encoder;
+ use mock::AsyncIo;
+
+ use super::{Conn, Decoder, Reading, Writing};
+ use ::uri::Uri;
+
+ use std::str::FromStr;
+
+ #[test]
+ fn test_conn_init_read() {
+ let good_message = b"GET / HTTP/1.1\r\n\r\n".to_vec();
+ let len = good_message.len();
+ let io = AsyncIo::new_buf(good_message, len);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+
+ match conn.poll().unwrap() {
+ Async::Ready(Some(Frame::Message { message, body: false })) => {
+ assert_eq!(message, MessageHead {
+ subject: ::proto::RequestLine(::Get, Uri::from_str("/").unwrap()),
+ .. MessageHead::default()
+ })
+ },
+ f => panic!("frame is not Frame::Message: {:?}", f)
+ }
+ }
+
+ #[test]
+ fn test_conn_parse_partial() {
+ let _: Result<(), ()> = future::lazy(|| {
+ let good_message = b"GET / HTTP/1.1\r\nHost: foo.bar\r\n\r\n".to_vec();
+ let io = AsyncIo::new_buf(good_message, 10);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ assert!(conn.poll().unwrap().is_not_ready());
+ conn.io.io_mut().block_in(50);
+ let async = conn.poll().unwrap();
+ assert!(async.is_ready());
+ match async {
+ Async::Ready(Some(Frame::Message { .. })) => (),
+ f => panic!("frame is not Message: {:?}", f),
+ }
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_init_read_eof_idle() {
+ let io = AsyncIo::new_buf(vec![], 1);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.idle();
+
+ match conn.poll().unwrap() {
+ Async::Ready(None) => {},
+ other => panic!("frame is not None: {:?}", other)
+ }
+ }
+
+ #[test]
+ fn test_conn_init_read_eof_idle_partial_parse() {
+ let io = AsyncIo::new_buf(b"GET / HTTP/1.1".to_vec(), 100);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.idle();
+
+ match conn.poll() {
+ Err(ref err) if err.kind() == std::io::ErrorKind::UnexpectedEof => {},
+ other => panic!("unexpected frame: {:?}", other)
+ }
+ }
+
+ #[test]
+ fn test_conn_init_read_eof_busy() {
+ let _: Result<(), ()> = future::lazy(|| {
+ // server ignores
+ let io = AsyncIo::new_eof();
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.busy();
+
+ match conn.poll().unwrap() {
+ Async::Ready(None) => {},
+ other => panic!("unexpected frame: {:?}", other)
+ }
+
+ // client
+ let io = AsyncIo::new_eof();
+ let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io);
+ conn.state.busy();
+
+ match conn.poll() {
+ Err(ref err) if err.kind() == std::io::ErrorKind::UnexpectedEof => {},
+ other => panic!("unexpected frame: {:?}", other)
+ }
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_body_finish_read_eof() {
+ let _: Result<(), ()> = future::lazy(|| {
+ let io = AsyncIo::new_eof();
+ let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io);
+ conn.state.busy();
+ conn.state.writing = Writing::KeepAlive;
+ conn.state.reading = Reading::Body(Decoder::length(0));
+
+ match conn.poll() {
+ Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+
+ // conn eofs, but tokio-proto will call poll() again, before calling flush()
+ // the conn eof in this case is perfectly fine
+
+ match conn.poll() {
+ Ok(Async::Ready(None)) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_message_empty_body_read_eof() {
+ let _: Result<(), ()> = future::lazy(|| {
+ let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec(), 1024);
+ let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io);
+ conn.state.busy();
+ conn.state.writing = Writing::KeepAlive;
+
+ match conn.poll() {
+ Ok(Async::Ready(Some(Frame::Message { body: false, .. }))) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+
+ // conn eofs, but tokio-proto will call poll() again, before calling flush()
+ // the conn eof in this case is perfectly fine
+
+ match conn.poll() {
+ Ok(Async::Ready(None)) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_read_body_end() {
+ let _: Result<(), ()> = future::lazy(|| {
+ let io = AsyncIo::new_buf(b"POST / HTTP/1.1\r\nContent-Length: 5\r\n\r\n12345".to_vec(), 1024);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.busy();
+
+ match conn.poll() {
+ Ok(Async::Ready(Some(Frame::Message { body: true, .. }))) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+
+ match conn.poll() {
+ Ok(Async::Ready(Some(Frame::Body { chunk: Some(_) }))) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+
+ // When the body is done, `poll` MUST return a `Body` frame with chunk set to `None`
+ match conn.poll() {
+ Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+
+ match conn.poll() {
+ Ok(Async::NotReady) => (),
+ other => panic!("unexpected frame: {:?}", other)
+ }
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_closed_read() {
+ let io = AsyncIo::new_buf(vec![], 0);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.close();
+
+ match conn.poll().unwrap() {
+ Async::Ready(None) => {},
+ other => panic!("frame is not None: {:?}", other)
+ }
+ }
+
+ #[test]
+ fn test_conn_body_write_length() {
+ let _ = pretty_env_logger::try_init();
+ let _: Result<(), ()> = future::lazy(|| {
+ let io = AsyncIo::new_buf(vec![], 0);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ let max = super::super::io::DEFAULT_MAX_BUFFER_SIZE + 4096;
+ conn.state.writing = Writing::Body(Encoder::length((max * 2) as u64));
+
+ assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; max].into()) }).unwrap().is_ready());
+ assert!(!conn.can_buffer_body());
+
+ assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; 1024 * 8].into()) }).unwrap().is_not_ready());
+
+ conn.io.io_mut().block_in(1024 * 3);
+ assert!(conn.poll_complete().unwrap().is_not_ready());
+ conn.io.io_mut().block_in(1024 * 3);
+ assert!(conn.poll_complete().unwrap().is_not_ready());
+ conn.io.io_mut().block_in(max * 2);
+ assert!(conn.poll_complete().unwrap().is_ready());
+
+ assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'c'; 1024 * 8].into()) }).unwrap().is_ready());
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_body_write_chunked() {
+ let _: Result<(), ()> = future::lazy(|| {
+ let io = AsyncIo::new_buf(vec![], 4096);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.writing = Writing::Body(Encoder::chunked());
+
+ assert!(conn.start_send(Frame::Body { chunk: Some("headers".into()) }).unwrap().is_ready());
+ assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'x'; 8192].into()) }).unwrap().is_ready());
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_body_flush() {
+ let _: Result<(), ()> = future::lazy(|| {
+ let io = AsyncIo::new_buf(vec![], 1024 * 1024 * 5);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.writing = Writing::Body(Encoder::length(1024 * 1024));
+ assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 1024].into()) }).unwrap().is_ready());
+ assert!(!conn.can_buffer_body());
+ conn.io.io_mut().block_in(1024 * 1024 * 5);
+ assert!(conn.poll_complete().unwrap().is_ready());
+ assert!(conn.can_buffer_body());
+ assert!(conn.io.io_mut().flushed());
+
+ Ok(())
+ }).wait();
+ }
+
+ #[test]
+ fn test_conn_parking() {
+ use std::sync::Arc;
+ use futures::executor::Notify;
+ use futures::executor::NotifyHandle;
+
+ struct Car {
+ permit: bool,
+ }
+ impl Notify for Car {
+ fn notify(&self, _id: usize) {
+ assert!(self.permit, "unparked without permit");
+ }
+ }
+
+ fn car(permit: bool) -> NotifyHandle {
+ Arc::new(Car {
+ permit: permit,
+ }).into()
+ }
+
+ // test that once writing is done, unparks
+ let f = future::lazy(|| {
+ let io = AsyncIo::new_buf(vec![], 4096);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.reading = Reading::KeepAlive;
+ assert!(conn.poll().unwrap().is_not_ready());
+
+ conn.state.writing = Writing::KeepAlive;
+ assert!(conn.poll_complete().unwrap().is_ready());
+ Ok::<(), ()>(())
+ });
+ ::futures::executor::spawn(f).poll_future_notify(&car(true), 0).unwrap();
+
+
+ // test that flushing when not waiting on read doesn't unpark
+ let f = future::lazy(|| {
+ let io = AsyncIo::new_buf(vec![], 4096);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.writing = Writing::KeepAlive;
+ assert!(conn.poll_complete().unwrap().is_ready());
+ Ok::<(), ()>(())
+ });
+ ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap();
+
+
+ // test that flushing and writing isn't done doesn't unpark
+ let f = future::lazy(|| {
+ let io = AsyncIo::new_buf(vec![], 4096);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.reading = Reading::KeepAlive;
+ assert!(conn.poll().unwrap().is_not_ready());
+ conn.state.writing = Writing::Body(Encoder::length(5_000));
+ assert!(conn.poll_complete().unwrap().is_ready());
+ Ok::<(), ()>(())
+ });
+ ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap();
+ }
+
+ #[test]
+ fn test_conn_closed_write() {
+ let io = AsyncIo::new_buf(vec![], 0);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.close();
+
+ match conn.start_send(Frame::Body { chunk: Some(b"foobar".to_vec().into()) }) {
+ Err(_e) => {},
+ other => panic!("did not return Err: {:?}", other)
+ }
+
+ assert!(conn.state.is_write_closed());
+ }
+
+ #[test]
+ fn test_conn_write_empty_chunk() {
+ let io = AsyncIo::new_buf(vec![], 0);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
+ conn.state.writing = Writing::KeepAlive;
+
+ assert!(conn.start_send(Frame::Body { chunk: None }).unwrap().is_ready());
+ assert!(conn.start_send(Frame::Body { chunk: Some(Vec::new().into()) }).unwrap().is_ready());
+ conn.start_send(Frame::Body { chunk: Some(vec![b'a'].into()) }).unwrap_err();
+ }
+ */
+}
diff --git a/vendor/hyper/src/proto/h1/decode.rs b/vendor/hyper/src/proto/h1/decode.rs
new file mode 100644
index 00000000..dd293e12
--- /dev/null
+++ b/vendor/hyper/src/proto/h1/decode.rs
@@ -0,0 +1,1236 @@
+use std::error::Error as StdError;
+use std::fmt;
+use std::io;
+use std::task::{Context, Poll};
+
+use bytes::{BufMut, Bytes, BytesMut};
+use futures_util::ready;
+use http::{HeaderMap, HeaderName, HeaderValue};
+use http_body::Frame;
+
+use super::io::MemRead;
+use super::role::DEFAULT_MAX_HEADERS;
+use super::DecodedLength;
+
+use self::Kind::{Chunked, Eof, Length};
+
+/// Maximum amount of bytes allowed in chunked extensions.
+///
/// This limit is currently applied for the entire body, not per chunk.
+const CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16;
+
+/// Maximum number of bytes allowed for all trailer fields.
+///
+/// TODO: remove this when we land h1_max_header_size support
+const TRAILER_LIMIT: usize = 1024 * 16;
+
/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Clone, PartialEq)]
pub(crate) struct Decoder {
    /// Which framing strategy governs this body (length, chunked, or EOF).
    kind: Kind,
}
+
#[derive(Debug, Clone, PartialEq)]
enum Kind {
    /// A Reader used when a Content-Length header is passed with a positive integer.
    Length(u64),
    /// A Reader used when Transfer-Encoding is `chunked`.
    Chunked {
        /// Current position in the chunked-parsing state machine.
        state: ChunkedState,
        /// Remaining data bytes of the chunk currently being read.
        chunk_len: u64,
        /// Total chunk-extension bytes seen so far (the limit applies to
        /// the whole body, not per chunk).
        extensions_cnt: u64,
        /// Raw trailer bytes accumulated after the last chunk, if any.
        trailers_buf: Option<BytesMut>,
        /// Number of trailer fields seen so far.
        trailers_cnt: usize,
        /// Optional cap on trailer field count (defaults to DEFAULT_MAX_HEADERS).
        h1_max_headers: Option<usize>,
        /// Optional cap on total trailer bytes (defaults to TRAILER_LIMIT).
        h1_max_header_size: Option<usize>,
    },
    /// A Reader used for responses that don't indicate a length or chunked.
    ///
    /// The bool tracks when EOF is seen on the transport.
    ///
    /// Note: This should only used for `Response`s. It is illegal for a
    /// `Request` to be made with both `Content-Length` and
    /// `Transfer-Encoding: chunked` missing, as explained from the spec:
    ///
    /// > If a Transfer-Encoding header field is present in a response and
    /// > the chunked transfer coding is not the final encoding, the
    /// > message body length is determined by reading the connection until
    /// > it is closed by the server. If a Transfer-Encoding header field
    /// > is present in a request and the chunked transfer coding is not
    /// > the final encoding, the message body length cannot be determined
    /// > reliably; the server MUST respond with the 400 (Bad Request)
    /// > status code and then close the connection.
    Eof(bool),
}
+
/// Sub-states for parsing a `chunked` body, advanced one step at a time
/// by `ChunkedState::step` (size line → data → CRLF, then trailers/end).
#[derive(Debug, PartialEq, Clone, Copy)]
enum ChunkedState {
    Start,
    Size,
    SizeLws,
    Extension,
    SizeLf,
    Body,
    BodyCr,
    BodyLf,
    Trailer,
    TrailerLf,
    EndCr,
    EndLf,
    End,
}
+
impl Decoder {
    // constructors

    /// A decoder for a body framed by a Content-Length of `x` bytes.
    pub(crate) fn length(x: u64) -> Decoder {
        Decoder {
            kind: Kind::Length(x),
        }
    }

    /// A decoder for a `Transfer-Encoding: chunked` body, with optional
    /// limits on trailer field count and total trailer size.
    pub(crate) fn chunked(
        h1_max_headers: Option<usize>,
        h1_max_header_size: Option<usize>,
    ) -> Decoder {
        Decoder {
            kind: Kind::Chunked {
                state: ChunkedState::new(),
                chunk_len: 0,
                extensions_cnt: 0,
                trailers_buf: None,
                trailers_cnt: 0,
                h1_max_headers,
                h1_max_header_size,
            },
        }
    }

    /// A decoder for a close-delimited body: read until transport EOF.
    pub(crate) fn eof() -> Decoder {
        Decoder {
            kind: Kind::Eof(false),
        }
    }

    /// Selects the appropriate decoder for a parsed `DecodedLength`.
    pub(super) fn new(
        len: DecodedLength,
        h1_max_headers: Option<usize>,
        h1_max_header_size: Option<usize>,
    ) -> Self {
        match len {
            DecodedLength::CHUNKED => Decoder::chunked(h1_max_headers, h1_max_header_size),
            DecodedLength::CLOSE_DELIMITED => Decoder::eof(),
            length => Decoder::length(length.danger_len()),
        }
    }

    // methods

    /// Whether the body has been fully decoded.
    pub(crate) fn is_eof(&self) -> bool {
        matches!(
            self.kind,
            Length(0)
                | Chunked {
                    state: ChunkedState::End,
                    ..
                }
                | Eof(true)
        )
    }

    /// Polls the next body frame from `body`.
    ///
    /// Yields data frames (an empty data frame signals end-of-body), or a
    /// trailers frame when a chunked body carried trailer fields. Errors
    /// on premature EOF, oversized chunk metadata/trailers, or malformed
    /// trailers.
    pub(crate) fn decode<R: MemRead>(
        &mut self,
        cx: &mut Context<'_>,
        body: &mut R,
    ) -> Poll<Result<Frame<Bytes>, io::Error>> {
        trace!("decode; state={:?}", self.kind);
        match self.kind {
            Length(ref mut remaining) => {
                if *remaining == 0 {
                    Poll::Ready(Ok(Frame::data(Bytes::new())))
                } else {
                    let to_read = *remaining as usize;
                    let buf = ready!(body.read_mem(cx, to_read))?;
                    let num = buf.as_ref().len() as u64;
                    if num > *remaining {
                        *remaining = 0;
                    } else if num == 0 {
                        // Transport EOF before the declared Content-Length
                        // was fully read.
                        return Poll::Ready(Err(io::Error::new(
                            io::ErrorKind::UnexpectedEof,
                            IncompleteBody,
                        )));
                    } else {
                        *remaining -= num;
                    }
                    Poll::Ready(Ok(Frame::data(buf)))
                }
            }
            Chunked {
                ref mut state,
                ref mut chunk_len,
                ref mut extensions_cnt,
                ref mut trailers_buf,
                ref mut trailers_cnt,
                ref h1_max_headers,
                ref h1_max_header_size,
            } => {
                let h1_max_headers = h1_max_headers.unwrap_or(DEFAULT_MAX_HEADERS);
                let h1_max_header_size = h1_max_header_size.unwrap_or(TRAILER_LIMIT);
                loop {
                    let mut buf = None;
                    // advances the chunked state
                    *state = ready!(state.step(
                        cx,
                        body,
                        chunk_len,
                        extensions_cnt,
                        &mut buf,
                        trailers_buf,
                        trailers_cnt,
                        h1_max_headers,
                        h1_max_header_size
                    ))?;
                    if *state == ChunkedState::End {
                        trace!("end of chunked");

                        if trailers_buf.is_some() {
                            trace!("found possible trailers");

                            // decoder enforces that trailers count will not exceed h1_max_headers
                            if *trailers_cnt >= h1_max_headers {
                                return Poll::Ready(Err(io::Error::new(
                                    io::ErrorKind::InvalidData,
                                    "chunk trailers count overflow",
                                )));
                            }
                            match decode_trailers(
                                &mut trailers_buf.take().expect("Trailer is None"),
                                *trailers_cnt,
                            ) {
                                Ok(headers) => {
                                    return Poll::Ready(Ok(Frame::trailers(headers)));
                                }
                                Err(e) => {
                                    return Poll::Ready(Err(e));
                                }
                            }
                        }

                        return Poll::Ready(Ok(Frame::data(Bytes::new())));
                    }
                    if let Some(buf) = buf {
                        return Poll::Ready(Ok(Frame::data(buf)));
                    }
                }
            }
            Eof(ref mut is_eof) => {
                if *is_eof {
                    Poll::Ready(Ok(Frame::data(Bytes::new())))
                } else {
                    // 8192 chosen because its about 2 packets, there probably
                    // won't be that much available, so don't have MemReaders
                    // allocate buffers too big
                    body.read_mem(cx, 8192).map_ok(|slice| {
                        *is_eof = slice.is_empty();
                        Frame::data(slice)
                    })
                }
            }
        }
    }

    // Test-only convenience: awaits a single decode step as a future.
    #[cfg(test)]
    async fn decode_fut<R: MemRead>(&mut self, body: &mut R) -> Result<Frame<Bytes>, io::Error> {
        futures_util::future::poll_fn(move |cx| self.decode(cx, body)).await
    }
}
+
// Transparent Debug: print the inner Kind directly, with no wrapper, so
// formatter flags (e.g. `{:#?}`) pass straight through.
impl fmt::Debug for Decoder {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.kind, f)
    }
}
+
// Reads exactly one byte from `$rdr`, propagating Pending, and converts
// a zero-length read (transport EOF mid-chunk-metadata) into an
// UnexpectedEof error returned from the enclosing poll function.
macro_rules! byte (
    ($rdr:ident, $cx:expr) => ({
        let buf = ready!($rdr.read_mem($cx, 1))?;
        if !buf.is_empty() {
            buf[0]
        } else {
            return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof,
                "unexpected EOF during chunk size line")));
        }
    })
);
+
// Unwraps a checked-arithmetic Option; on None (chunk size overflowed
// u64) returns an InvalidData error from the enclosing poll function.
macro_rules! or_overflow {
    ($e:expr) => (
        match $e {
            Some(val) => val,
            None => return Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid chunk size: overflow",
            ))),
        }
    )
}
+
// Appends one byte to the trailers buffer, returning an InvalidData
// error from the enclosing poll function once the accumulated trailer
// bytes reach `$limit`.
macro_rules! put_u8 {
    ($trailers_buf:expr, $byte:expr, $limit:expr) => {
        $trailers_buf.put_u8($byte);

        if $trailers_buf.len() >= $limit {
            return Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "chunk trailers bytes over limit",
            )));
        }
    };
}
+
+/// State machine for decoding `Transfer-Encoding: chunked` bodies.
+///
+/// Each `read_*` method consumes input for one state and returns the next
+/// state (or an error). `step` dispatches on the current state; the caller
+/// loops over `step` until `End` is reached.
+impl ChunkedState {
+    /// Initial state: expecting the first hex digit of a chunk-size line.
+    fn new() -> ChunkedState {
+        ChunkedState::Start
+    }
+    /// Advances the machine by one state transition, reading from `body` as
+    /// needed. The mutable arguments carry accumulated decode state across
+    /// calls (chunk size, extension byte count, data/trailer buffers, trailer
+    /// field count); the `h1_max_*` values are the configured limits.
+    fn step<R: MemRead>(
+        &self,
+        cx: &mut Context<'_>,
+        body: &mut R,
+        size: &mut u64,
+        extensions_cnt: &mut u64,
+        buf: &mut Option<Bytes>,
+        trailers_buf: &mut Option<BytesMut>,
+        trailers_cnt: &mut usize,
+        h1_max_headers: usize,
+        h1_max_header_size: usize,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        use self::ChunkedState::*;
+        match *self {
+            Start => ChunkedState::read_start(cx, body, size),
+            Size => ChunkedState::read_size(cx, body, size),
+            SizeLws => ChunkedState::read_size_lws(cx, body),
+            Extension => ChunkedState::read_extension(cx, body, extensions_cnt),
+            SizeLf => ChunkedState::read_size_lf(cx, body, *size),
+            Body => ChunkedState::read_body(cx, body, size, buf),
+            BodyCr => ChunkedState::read_body_cr(cx, body),
+            BodyLf => ChunkedState::read_body_lf(cx, body),
+            Trailer => ChunkedState::read_trailer(cx, body, trailers_buf, h1_max_header_size),
+            TrailerLf => ChunkedState::read_trailer_lf(
+                cx,
+                body,
+                trailers_buf,
+                trailers_cnt,
+                h1_max_headers,
+                h1_max_header_size,
+            ),
+            EndCr => ChunkedState::read_end_cr(cx, body, trailers_buf, h1_max_header_size),
+            EndLf => ChunkedState::read_end_lf(cx, body, trailers_buf, h1_max_header_size),
+            End => Poll::Ready(Ok(ChunkedState::End)),
+        }
+    }
+
+    /// Reads the first character of a chunk-size line; it must be a hex
+    /// digit (unlike `read_size`, no delimiter is acceptable yet).
+    fn read_start<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        size: &mut u64,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        trace!("Read chunk start");
+
+        let radix = 16;
+        match byte!(rdr, cx) {
+            b @ b'0'..=b'9' => {
+                *size = or_overflow!(size.checked_mul(radix));
+                *size = or_overflow!(size.checked_add((b - b'0') as u64));
+            }
+            b @ b'a'..=b'f' => {
+                *size = or_overflow!(size.checked_mul(radix));
+                *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64));
+            }
+            b @ b'A'..=b'F' => {
+                *size = or_overflow!(size.checked_mul(radix));
+                *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64));
+            }
+            _ => {
+                return Poll::Ready(Err(io::Error::new(
+                    io::ErrorKind::InvalidInput,
+                    "Invalid chunk size line: missing size digit",
+                )));
+            }
+        }
+
+        Poll::Ready(Ok(ChunkedState::Size))
+    }
+
+    /// Accumulates further hex digits of the chunk size, until whitespace,
+    /// a `;` (extension), or CR ends the size portion of the line.
+    fn read_size<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        size: &mut u64,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        trace!("Read chunk hex size");
+
+        let radix = 16;
+        match byte!(rdr, cx) {
+            b @ b'0'..=b'9' => {
+                *size = or_overflow!(size.checked_mul(radix));
+                *size = or_overflow!(size.checked_add((b - b'0') as u64));
+            }
+            b @ b'a'..=b'f' => {
+                *size = or_overflow!(size.checked_mul(radix));
+                *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64));
+            }
+            b @ b'A'..=b'F' => {
+                *size = or_overflow!(size.checked_mul(radix));
+                *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64));
+            }
+            b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
+            b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
+            b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
+            _ => {
+                return Poll::Ready(Err(io::Error::new(
+                    io::ErrorKind::InvalidInput,
+                    "Invalid chunk size line: Invalid Size",
+                )));
+            }
+        }
+        Poll::Ready(Ok(ChunkedState::Size))
+    }
+    /// Consumes linear whitespace after the chunk size; only more LWS, an
+    /// extension, or the terminating CR may follow.
+    fn read_size_lws<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        trace!("read_size_lws");
+        match byte!(rdr, cx) {
+            // LWS can follow the chunk size, but no more digits can come
+            b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
+            b';' => Poll::Ready(Ok(ChunkedState::Extension)),
+            b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
+            _ => Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "Invalid chunk size linear white space",
+            ))),
+        }
+    }
+    /// Skips over a chunk extension, counting bytes against
+    /// `CHUNKED_EXTENSIONS_LIMIT` to bound attacker-supplied input.
+    fn read_extension<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        extensions_cnt: &mut u64,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        trace!("read_extension");
+        // We don't care about extensions really at all. Just ignore them.
+        // They "end" at the next CRLF.
+        //
+        // However, some implementations may not check for the CR, so to save
+        // them from themselves, we reject extensions containing plain LF as
+        // well.
+        match byte!(rdr, cx) {
+            b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
+            b'\n' => Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "invalid chunk extension contains newline",
+            ))),
+            _ => {
+                *extensions_cnt += 1;
+                if *extensions_cnt >= CHUNKED_EXTENSIONS_LIMIT {
+                    Poll::Ready(Err(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        "chunk extensions over limit",
+                    )))
+                } else {
+                    Poll::Ready(Ok(ChunkedState::Extension))
+                }
+            } // no supported extensions
+        }
+    }
+    /// Expects the LF that ends the chunk-size line. A size of zero marks
+    /// the last chunk, so decoding moves to the trailer/end sequence.
+    fn read_size_lf<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        size: u64,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        trace!("Chunk size is {:?}", size);
+        match byte!(rdr, cx) {
+            b'\n' => {
+                if size == 0 {
+                    Poll::Ready(Ok(ChunkedState::EndCr))
+                } else {
+                    debug!("incoming chunked header: {0:#X} ({0} bytes)", size);
+                    Poll::Ready(Ok(ChunkedState::Body))
+                }
+            }
+            _ => Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "Invalid chunk size LF",
+            ))),
+        }
+    }
+
+    /// Reads chunk payload, up to `rem` remaining bytes, into `buf`.
+    /// An empty read here is a premature EOF (`IncompleteBody`).
+    fn read_body<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        rem: &mut u64,
+        buf: &mut Option<Bytes>,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        trace!("Chunked read, remaining={:?}", rem);
+
+        // cap remaining bytes at the max capacity of usize
+        let rem_cap = match *rem {
+            r if r > usize::MAX as u64 => usize::MAX,
+            r => r as usize,
+        };
+
+        let to_read = rem_cap;
+        let slice = ready!(rdr.read_mem(cx, to_read))?;
+        let count = slice.len();
+
+        if count == 0 {
+            *rem = 0;
+            return Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::UnexpectedEof,
+                IncompleteBody,
+            )));
+        }
+        *buf = Some(slice);
+        *rem -= count as u64;
+
+        if *rem > 0 {
+            Poll::Ready(Ok(ChunkedState::Body))
+        } else {
+            Poll::Ready(Ok(ChunkedState::BodyCr))
+        }
+    }
+    /// Expects the CR of the CRLF that terminates a chunk's payload.
+    fn read_body_cr<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        match byte!(rdr, cx) {
+            b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
+            _ => Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "Invalid chunk body CR",
+            ))),
+        }
+    }
+    /// Expects the LF after a chunk's payload, then loops back to `Start`
+    /// for the next chunk-size line.
+    fn read_body_lf<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        match byte!(rdr, cx) {
+            b'\n' => Poll::Ready(Ok(ChunkedState::Start)),
+            _ => Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "Invalid chunk body LF",
+            ))),
+        }
+    }
+
+    /// Buffers raw trailer bytes (started by `read_end_cr`) until a CR is
+    /// seen, enforcing the max-header-size limit per byte.
+    fn read_trailer<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        trailers_buf: &mut Option<BytesMut>,
+        h1_max_header_size: usize,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        trace!("read_trailer");
+        let byte = byte!(rdr, cx);
+
+        put_u8!(
+            trailers_buf.as_mut().expect("trailers_buf is None"),
+            byte,
+            h1_max_header_size
+        );
+
+        match byte {
+            b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)),
+            _ => Poll::Ready(Ok(ChunkedState::Trailer)),
+        }
+    }
+
+    /// Expects the LF that ends one trailer field, bumping the trailer field
+    /// count and enforcing the configured max number of trailer headers.
+    fn read_trailer_lf<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        trailers_buf: &mut Option<BytesMut>,
+        trailers_cnt: &mut usize,
+        h1_max_headers: usize,
+        h1_max_header_size: usize,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        let byte = byte!(rdr, cx);
+        match byte {
+            b'\n' => {
+                if *trailers_cnt >= h1_max_headers {
+                    return Poll::Ready(Err(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        "chunk trailers count overflow",
+                    )));
+                }
+                *trailers_cnt += 1;
+
+                put_u8!(
+                    trailers_buf.as_mut().expect("trailers_buf is None"),
+                    byte,
+                    h1_max_header_size
+                );
+
+                Poll::Ready(Ok(ChunkedState::EndCr))
+            }
+            _ => Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "Invalid trailer end LF",
+            ))),
+        }
+    }
+
+    /// After the last chunk (or a trailer line): a CR means the final CRLF
+    /// is arriving; any other byte begins a (new) trailer field.
+    fn read_end_cr<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        trailers_buf: &mut Option<BytesMut>,
+        h1_max_header_size: usize,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        let byte = byte!(rdr, cx);
+        match byte {
+            b'\r' => {
+                if let Some(trailers_buf) = trailers_buf {
+                    put_u8!(trailers_buf, byte, h1_max_header_size);
+                }
+                Poll::Ready(Ok(ChunkedState::EndLf))
+            }
+            byte => {
+                match trailers_buf {
+                    None => {
+                        // 64 will fit a single Expires header without reallocating
+                        let mut buf = BytesMut::with_capacity(64);
+                        buf.put_u8(byte);
+                        *trailers_buf = Some(buf);
+                    }
+                    Some(ref mut trailers_buf) => {
+                        put_u8!(trailers_buf, byte, h1_max_header_size);
+                    }
+                }
+
+                Poll::Ready(Ok(ChunkedState::Trailer))
+            }
+        }
+    }
+    /// Expects the final LF of the message; `End` is the terminal state.
+    fn read_end_lf<R: MemRead>(
+        cx: &mut Context<'_>,
+        rdr: &mut R,
+        trailers_buf: &mut Option<BytesMut>,
+        h1_max_header_size: usize,
+    ) -> Poll<Result<ChunkedState, io::Error>> {
+        let byte = byte!(rdr, cx);
+        match byte {
+            b'\n' => {
+                if let Some(trailers_buf) = trailers_buf {
+                    put_u8!(trailers_buf, byte, h1_max_header_size);
+                }
+                Poll::Ready(Ok(ChunkedState::End))
+            }
+            _ => Poll::Ready(Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "Invalid chunk end LF",
+            ))),
+        }
+    }
+}
+
+// TODO: disallow Transfer-Encoding, Content-Length, Trailer, etc in trailers ??
+/// Parses a fully-buffered trailer section (already terminated with the final
+/// CRLF CRLF) into a `HeaderMap`. `count` is the number of trailer fields the
+/// decoder counted, used to size httparse's scratch array; a partial parse or
+/// any invalid name/value becomes an `InvalidInput` error.
+fn decode_trailers(buf: &mut BytesMut, count: usize) -> Result<HeaderMap, io::Error> {
+    let mut trailers = HeaderMap::new();
+    let mut headers = vec![httparse::EMPTY_HEADER; count];
+    let res = httparse::parse_headers(buf, &mut headers);
+    match res {
+        Ok(httparse::Status::Complete((_, headers))) => {
+            for header in headers.iter() {
+                use std::convert::TryFrom;
+                let name = match HeaderName::try_from(header.name) {
+                    Ok(name) => name,
+                    Err(_) => {
+                        return Err(io::Error::new(
+                            io::ErrorKind::InvalidInput,
+                            format!("Invalid header name: {:?}", &header),
+                        ));
+                    }
+                };
+
+                let value = match HeaderValue::from_bytes(header.value) {
+                    Ok(value) => value,
+                    Err(_) => {
+                        return Err(io::Error::new(
+                            io::ErrorKind::InvalidInput,
+                            format!("Invalid header value: {:?}", &header),
+                        ));
+                    }
+                };
+
+                // NOTE: `insert` replaces, so a repeated trailer name keeps
+                // only its last value.
+                trailers.insert(name, value);
+            }
+
+            Ok(trailers)
+        }
+        Ok(httparse::Status::Partial) => Err(io::Error::new(
+            io::ErrorKind::InvalidInput,
+            "Partial header",
+        )),
+        Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
+    }
+}
+
+/// Error used when EOF is reached before the declared body length (sized or
+/// chunked) has been fully read.
+#[derive(Debug)]
+struct IncompleteBody;
+
+impl fmt::Display for IncompleteBody {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "end of file before message length reached")
+    }
+}
+
+impl StdError for IncompleteBody {}
+
+// Unit tests for the length/chunked/eof body decoder.
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::rt::{Read, ReadBuf};
+    use std::pin::Pin;
+    use std::time::Duration;
+
+    // Test-only MemRead over a byte slice: hands out up to `len` bytes per
+    // call and advances the slice in place.
+    impl MemRead for &[u8] {
+        fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
+            let n = std::cmp::min(len, self.len());
+            if n > 0 {
+                let (a, b) = self.split_at(n);
+                let buf = Bytes::copy_from_slice(a);
+                *self = b;
+                Poll::Ready(Ok(buf))
+            } else {
+                Poll::Ready(Ok(Bytes::new()))
+            }
+        }
+    }
+
+    // Test-only MemRead adapter over any async `Read` object (used with
+    // `tokio_test` mock IO for the blocking-read cases below).
+    impl MemRead for &mut (dyn Read + Unpin) {
+        fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
+            let mut v = vec![0; len];
+            let mut buf = ReadBuf::new(&mut v);
+            ready!(Pin::new(self).poll_read(cx, buf.unfilled())?);
+            Poll::Ready(Ok(Bytes::copy_from_slice(buf.filled())))
+        }
+    }
+
+    // Test-only MemRead over `Bytes`, splitting off up to `len` bytes.
+    impl MemRead for Bytes {
+        fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
+            let n = std::cmp::min(len, self.len());
+            let ret = self.split_to(n);
+            Poll::Ready(Ok(ret))
+        }
+    }
+
+    /*
+    use std::io;
+    use std::io::Write;
+    use super::Decoder;
+    use super::ChunkedState;
+    use futures::{Async, Poll};
+    use bytes::{BytesMut, Bytes};
+    use crate::mock::AsyncIo;
+    */
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_chunk_size() {
+        use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof};
+
+        // Drives the state machine over `s` and returns the parsed chunk size.
+        async fn read(s: &str) -> u64 {
+            let mut state = ChunkedState::new();
+            let rdr = &mut s.as_bytes();
+            let mut size = 0;
+            let mut ext_cnt = 0;
+            let mut trailers_cnt = 0;
+            loop {
+                let result = futures_util::future::poll_fn(|cx| {
+                    state.step(
+                        cx,
+                        rdr,
+                        &mut size,
+                        &mut ext_cnt,
+                        &mut None,
+                        &mut None,
+                        &mut trailers_cnt,
+                        DEFAULT_MAX_HEADERS,
+                        TRAILER_LIMIT,
+                    )
+                })
+                .await;
+                let desc = format!("read_size failed for {:?}", s);
+                state = result.expect(&desc);
+                if state == ChunkedState::Body || state == ChunkedState::EndCr {
+                    break;
+                }
+            }
+            size
+        }
+
+        // Drives the state machine over `s` expecting it to fail with
+        // `expected_err`.
+        async fn read_err(s: &str, expected_err: io::ErrorKind) {
+            let mut state = ChunkedState::new();
+            let rdr = &mut s.as_bytes();
+            let mut size = 0;
+            let mut ext_cnt = 0;
+            let mut trailers_cnt = 0;
+            loop {
+                let result = futures_util::future::poll_fn(|cx| {
+                    state.step(
+                        cx,
+                        rdr,
+                        &mut size,
+                        &mut ext_cnt,
+                        &mut None,
+                        &mut None,
+                        &mut trailers_cnt,
+                        DEFAULT_MAX_HEADERS,
+                        TRAILER_LIMIT,
+                    )
+                })
+                .await;
+                state = match result {
+                    Ok(s) => s,
+                    Err(e) => {
+                        assert!(
+                            expected_err == e.kind(),
+                            "Reading {:?}, expected {:?}, but got {:?}",
+                            s,
+                            expected_err,
+                            e.kind()
+                        );
+                        return;
+                    }
+                };
+                if state == ChunkedState::Body || state == ChunkedState::End {
+                    panic!("Was Ok. Expected Err for {:?}", s);
+                }
+            }
+        }
+
+        assert_eq!(1, read("1\r\n").await);
+        assert_eq!(1, read("01\r\n").await);
+        assert_eq!(0, read("0\r\n").await);
+        assert_eq!(0, read("00\r\n").await);
+        assert_eq!(10, read("A\r\n").await);
+        assert_eq!(10, read("a\r\n").await);
+        assert_eq!(255, read("Ff\r\n").await);
+        assert_eq!(255, read("Ff   \r\n").await);
+        // Missing LF or CRLF
+        read_err("F\rF", InvalidInput).await;
+        read_err("F", UnexpectedEof).await;
+        // Missing digit
+        read_err("\r\n\r\n", InvalidInput).await;
+        read_err("\r\n", InvalidInput).await;
+        // Invalid hex digit
+        read_err("X\r\n", InvalidInput).await;
+        read_err("1X\r\n", InvalidInput).await;
+        read_err("-\r\n", InvalidInput).await;
+        read_err("-1\r\n", InvalidInput).await;
+        // Acceptable (if not fully valid) extensions do not influence the size
+        assert_eq!(1, read("1;extension\r\n").await);
+        assert_eq!(10, read("a;ext name=value\r\n").await);
+        assert_eq!(1, read("1;extension;extension2\r\n").await);
+        assert_eq!(1, read("1;;;  ;\r\n").await);
+        assert_eq!(2, read("2; extension...\r\n").await);
+        assert_eq!(3, read("3   ; extension=123\r\n").await);
+        assert_eq!(3, read("3   ;\r\n").await);
+        assert_eq!(3, read("3   ;   \r\n").await);
+        // Invalid extensions cause an error
+        read_err("1 invalid extension\r\n", InvalidInput).await;
+        read_err("1 A\r\n", InvalidInput).await;
+        read_err("1;no CRLF", UnexpectedEof).await;
+        read_err("1;reject\nnewlines\r\n", InvalidData).await;
+        // Overflow
+        read_err("f0000000000000003\r\n", InvalidData).await;
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_sized_early_eof() {
+        let mut bytes = &b"foo bar"[..];
+        let mut decoder = Decoder::length(10);
+        assert_eq!(
+            decoder
+                .decode_fut(&mut bytes)
+                .await
+                .unwrap()
+                .data_ref()
+                .unwrap()
+                .len(),
+            7
+        );
+        let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
+        assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_chunked_early_eof() {
+        let mut bytes = &b"\
+            9\r\n\
+            foo bar\
+        "[..];
+        let mut decoder = Decoder::chunked(None, None);
+        assert_eq!(
+            decoder
+                .decode_fut(&mut bytes)
+                .await
+                .unwrap()
+                .data_ref()
+                .unwrap()
+                .len(),
+            7
+        );
+        let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
+        assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_chunked_single_read() {
+        let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..];
+        let buf = Decoder::chunked(None, None)
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect("decode")
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(16, buf.len());
+        let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
+        assert_eq!("1234567890abcdef", &result);
+    }
+
+    #[tokio::test]
+    async fn test_read_chunked_with_missing_zero_digit() {
+        // After reading a valid chunk, the ending is missing a zero.
+        let mut mock_buf = &b"1\r\nZ\r\n\r\n\r\n"[..];
+        let mut decoder = Decoder::chunked(None, None);
+        let buf = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect("decode")
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!("Z", buf);
+
+        let err = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect_err("decode 2");
+        assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+    }
+
+    #[tokio::test]
+    async fn test_read_chunked_extensions_over_limit() {
+        // construct a chunked body where each individual chunked extension
+        // is totally fine, but combined is over the limit.
+        let per_chunk = super::CHUNKED_EXTENSIONS_LIMIT * 2 / 3;
+        let mut scratch = vec![];
+        for _ in 0..2 {
+            scratch.extend(b"1;");
+            scratch.extend(b"x".repeat(per_chunk as usize));
+            scratch.extend(b"\r\nA\r\n");
+        }
+        scratch.extend(b"0\r\n\r\n");
+        let mut mock_buf = Bytes::from(scratch);
+
+        let mut decoder = Decoder::chunked(None, None);
+        let buf1 = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect("decode1")
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(&buf1[..], b"A");
+
+        let err = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect_err("decode2");
+        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
+        assert_eq!(err.to_string(), "chunk extensions over limit");
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_chunked_trailer_with_missing_lf() {
+        let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..];
+        let mut decoder = Decoder::chunked(None, None);
+        decoder.decode_fut(&mut mock_buf).await.expect("decode");
+        let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err();
+        assert_eq!(e.kind(), io::ErrorKind::InvalidInput);
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_chunked_after_eof() {
+        let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
+        let mut decoder = Decoder::chunked(None, None);
+
+        // normal read
+        let buf = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .unwrap()
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(16, buf.len());
+        let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
+        assert_eq!("1234567890abcdef", &result);
+
+        // eof read
+        let buf = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect("decode")
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(0, buf.len());
+
+        // ensure read after eof also returns eof
+        let buf = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect("decode")
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(0, buf.len());
+    }
+
+    // perform an async read using a custom buffer size and causing a blocking
+    // read at the specified byte
+    async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String {
+        let mut outs = Vec::new();
+
+        let mut ins = crate::common::io::Compat::new(if block_at == 0 {
+            tokio_test::io::Builder::new()
+                .wait(Duration::from_millis(10))
+                .read(content)
+                .build()
+        } else {
+            tokio_test::io::Builder::new()
+                .read(&content[..block_at])
+                .wait(Duration::from_millis(10))
+                .read(&content[block_at..])
+                .build()
+        });
+
+        let mut ins = &mut ins as &mut (dyn Read + Unpin);
+
+        loop {
+            let buf = decoder
+                .decode_fut(&mut ins)
+                .await
+                .expect("unexpected decode error")
+                .into_data()
+                .expect("unexpected frame type");
+            if buf.is_empty() {
+                break; // eof
+            }
+            outs.extend(buf.as_ref());
+        }
+
+        String::from_utf8(outs).expect("decode String")
+    }
+
+    // iterate over the different ways that this async read could go.
+    // tests blocking a read at each byte along the content - The shotgun approach
+    async fn all_async_cases(content: &str, expected: &str, decoder: Decoder) {
+        let content_len = content.len();
+        for block_at in 0..content_len {
+            let actual = read_async(decoder.clone(), content.as_bytes(), block_at).await;
+            assert_eq!(expected, &actual) //, "Failed async. Blocking at {}", block_at);
+        }
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_length_async() {
+        let content = "foobar";
+        all_async_cases(content, content, Decoder::length(content.len() as u64)).await;
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_chunked_async() {
+        let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n";
+        let expected = "foobar";
+        all_async_cases(content, expected, Decoder::chunked(None, None)).await;
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn test_read_eof_async() {
+        let content = "foobar";
+        all_async_cases(content, content, Decoder::eof()).await;
+    }
+
+    #[cfg(all(feature = "nightly", not(miri)))]
+    #[bench]
+    fn bench_decode_chunked_1kb(b: &mut test::Bencher) {
+        let rt = new_runtime();
+
+        const LEN: usize = 1024;
+        let mut vec = Vec::new();
+        vec.extend(format!("{:x}\r\n", LEN).as_bytes());
+        vec.extend(&[0; LEN][..]);
+        vec.extend(b"\r\n");
+        let content = Bytes::from(vec);
+
+        b.bytes = LEN as u64;
+
+        b.iter(|| {
+            let mut decoder = Decoder::chunked(None, None);
+            rt.block_on(async {
+                let mut raw = content.clone();
+                let chunk = decoder
+                    .decode_fut(&mut raw)
+                    .await
+                    .unwrap()
+                    .into_data()
+                    .unwrap();
+                assert_eq!(chunk.len(), LEN);
+            });
+        });
+    }
+
+    #[cfg(all(feature = "nightly", not(miri)))]
+    #[bench]
+    fn bench_decode_length_1kb(b: &mut test::Bencher) {
+        let rt = new_runtime();
+
+        const LEN: usize = 1024;
+        let content = Bytes::from(&[0; LEN][..]);
+        b.bytes = LEN as u64;
+
+        b.iter(|| {
+            let mut decoder = Decoder::length(LEN as u64);
+            rt.block_on(async {
+                let mut raw = content.clone();
+                let chunk = decoder
+                    .decode_fut(&mut raw)
+                    .await
+                    .unwrap()
+                    .into_data()
+                    .unwrap();
+                assert_eq!(chunk.len(), LEN);
+            });
+        });
+    }
+
+    #[cfg(feature = "nightly")]
+    fn new_runtime() -> tokio::runtime::Runtime {
+        tokio::runtime::Builder::new_current_thread()
+            .enable_all()
+            .build()
+            .expect("rt build")
+    }
+
+    #[test]
+    fn test_decode_trailers() {
+        let mut buf = BytesMut::new();
+        buf.extend_from_slice(
+            b"Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\nX-Stream-Error: failed to decode\r\n\r\n",
+        );
+        let headers = decode_trailers(&mut buf, 2).expect("decode_trailers");
+        assert_eq!(headers.len(), 2);
+        assert_eq!(
+            headers.get("Expires").unwrap(),
+            "Wed, 21 Oct 2015 07:28:00 GMT"
+        );
+        assert_eq!(headers.get("X-Stream-Error").unwrap(), "failed to decode");
+    }
+
+    #[tokio::test]
+    async fn test_trailer_max_headers_enforced() {
+        let h1_max_headers = 10;
+        let mut scratch = vec![];
+        scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
+        for i in 0..h1_max_headers {
+            scratch.extend(format!("trailer{}: {}\r\n", i, i).as_bytes());
+        }
+        scratch.extend(b"\r\n");
+        let mut mock_buf = Bytes::from(scratch);
+
+        let mut decoder = Decoder::chunked(Some(h1_max_headers), None);
+
+        // read chunked body
+        let buf = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .unwrap()
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(16, buf.len());
+
+        // trailer read must fail: one field over the configured limit
+        let err = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect_err("trailer fields over limit");
+        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
+    }
+
+    #[tokio::test]
+    async fn test_trailer_max_header_size_huge_trailer() {
+        let max_header_size = 1024;
+        let mut scratch = vec![];
+        scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
+        scratch.extend(format!("huge_trailer: {}\r\n", "x".repeat(max_header_size)).as_bytes());
+        scratch.extend(b"\r\n");
+        let mut mock_buf = Bytes::from(scratch);
+
+        let mut decoder = Decoder::chunked(None, Some(max_header_size));
+
+        // read chunked body
+        let buf = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .unwrap()
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(16, buf.len());
+
+        // trailer read must fail: single field exceeds the byte limit
+        let err = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect_err("trailers over limit");
+        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
+    }
+
+    #[tokio::test]
+    async fn test_trailer_max_header_size_many_small_trailers() {
+        let max_headers = 10;
+        let header_size = 64;
+        let mut scratch = vec![];
+        scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
+
+        for i in 0..max_headers {
+            scratch.extend(format!("trailer{}: {}\r\n", i, "x".repeat(header_size)).as_bytes());
+        }
+
+        scratch.extend(b"\r\n");
+        let mut mock_buf = Bytes::from(scratch);
+
+        let mut decoder = Decoder::chunked(None, Some(max_headers * header_size));
+
+        // read chunked body
+        let buf = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .unwrap()
+            .into_data()
+            .expect("unknown frame type");
+        assert_eq!(16, buf.len());
+
+        // trailer read must fail: combined fields exceed the byte limit
+        let err = decoder
+            .decode_fut(&mut mock_buf)
+            .await
+            .expect_err("trailers over limit");
+        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
+    }
+}
diff --git a/vendor/hyper/src/proto/h1/dispatch.rs b/vendor/hyper/src/proto/h1/dispatch.rs
new file mode 100644
index 00000000..4d921a3b
--- /dev/null
+++ b/vendor/hyper/src/proto/h1/dispatch.rs
@@ -0,0 +1,808 @@
+use std::{
+ error::Error as StdError,
+ future::Future,
+ marker::Unpin,
+ pin::Pin,
+ task::{Context, Poll},
+};
+
+use crate::rt::{Read, Write};
+use bytes::{Buf, Bytes};
+use futures_util::ready;
+use http::Request;
+
+use super::{Http1Transaction, Wants};
+use crate::body::{Body, DecodedLength, Incoming as IncomingBody};
+#[cfg(feature = "client")]
+use crate::client::dispatch::TrySendError;
+use crate::common::task;
+use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead};
+use crate::upgrade::OnUpgrade;
+
+/// Drives an HTTP/1 connection: glue between a `Conn` (framing + IO) and a
+/// `Dispatch` (the client or server message source/sink).
+pub(crate) struct Dispatcher<D, Bs: Body, I, T> {
+    conn: Conn<I, Bs::Data, T>,
+    dispatch: D,
+    // Sender half used to stream a received message body out to the user.
+    body_tx: Option<crate::body::Sender>,
+    // Outgoing body currently being written, pinned for polling.
+    body_rx: Pin<Box<Option<Bs>>>,
+    is_closing: bool,
+}
+
+/// Abstracts the client/server differences for `Dispatcher`: where outgoing
+/// messages come from and where received ones are delivered.
+pub(crate) trait Dispatch {
+    type PollItem;
+    type PollBody;
+    type PollError;
+    type RecvItem;
+    /// Polls for the next outgoing message head and body, if any.
+    fn poll_msg(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>>;
+    /// Delivers a received message head + body (or a read error) to the user.
+    fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>)
+        -> crate::Result<()>;
+    /// Whether this dispatch can accept another received message;
+    /// `Err(())` means it no longer wants any.
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ()>>;
+    /// Whether `poll_msg` should currently be polled for an outgoing message.
+    fn should_poll(&self) -> bool;
+}
+
+cfg_server! {
+    use crate::service::HttpService;
+
+    /// Server-side `Dispatch`: feeds received requests to an `HttpService`
+    /// and tracks the single in-flight response future.
+    pub(crate) struct Server<S: HttpService<B>, B> {
+        in_flight: Pin<Box<Option<S::Future>>>,
+        pub(crate) service: S,
+    }
+}
+
+cfg_client! {
+    pin_project_lite::pin_project! {
+        /// Client-side `Dispatch`: pulls requests from the dispatch channel
+        /// and completes the matching callback with the received response.
+        pub(crate) struct Client<B> {
+            // Callback for the request currently awaiting its response.
+            callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<IncomingBody>>>,
+            #[pin]
+            rx: ClientRx<B>,
+            // Set once the request channel has closed.
+            rx_closed: bool,
+        }
+    }
+
+    type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, http::Response<IncomingBody>>;
+}
+
+impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
+where
+ D: Dispatch<
+ PollItem = MessageHead<T::Outgoing>,
+ PollBody = Bs,
+ RecvItem = MessageHead<T::Incoming>,
+ > + Unpin,
+ D::PollError: Into<Box<dyn StdError + Send + Sync>>,
+ I: Read + Write + Unpin,
+ T: Http1Transaction + Unpin,
+ Bs: Body + 'static,
+ Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ pub(crate) fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
+ Dispatcher {
+ conn,
+ dispatch,
+ body_tx: None,
+ body_rx: Box::pin(None),
+ is_closing: false,
+ }
+ }
+
+ #[cfg(feature = "server")]
+ pub(crate) fn disable_keep_alive(&mut self) {
+ self.conn.disable_keep_alive();
+
+ // If keep alive has been disabled and no read or write has been seen on
+ // the connection yet, we must be in a state where the server is being asked to
+ // shut down before any data has been seen on the connection
+ if self.conn.is_write_closed() || self.conn.has_initial_read_write_state() {
+ self.close();
+ }
+ }
+
+ pub(crate) fn into_inner(self) -> (I, Bytes, D) {
+ let (io, buf) = self.conn.into_inner();
+ (io, buf, self.dispatch)
+ }
+
+ /// Run this dispatcher until HTTP says this connection is done,
+ /// but don't call `Write::shutdown` on the underlying IO.
+ ///
+ /// This is useful for old-style HTTP upgrades, but ignores
+ /// newer-style upgrade API.
+ pub(crate) fn poll_without_shutdown(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<crate::Result<()>> {
+ Pin::new(self).poll_catch(cx, false).map_ok(|ds| {
+ if let Dispatched::Upgrade(pending) = ds {
+ pending.manual();
+ }
+ })
+ }
+
+ fn poll_catch(
+ &mut self,
+ cx: &mut Context<'_>,
+ should_shutdown: bool,
+ ) -> Poll<crate::Result<Dispatched>> {
+ Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| {
+ // Be sure to alert a streaming body of the failure.
+ if let Some(mut body) = self.body_tx.take() {
+ body.send_error(crate::Error::new_body("connection error"));
+ }
+ // An error means we're shutting down either way.
+ // We just try to give the error to the user,
+ // and close the connection with an Ok. If we
+ // cannot give it to the user, then return the Err.
+ self.dispatch.recv_msg(Err(e))?;
+ Ok(Dispatched::Shutdown)
+ }))
+ }
+
+ fn poll_inner(
+ &mut self,
+ cx: &mut Context<'_>,
+ should_shutdown: bool,
+ ) -> Poll<crate::Result<Dispatched>> {
+ T::update_date();
+
+ ready!(self.poll_loop(cx))?;
+
+ if self.is_done() {
+ if let Some(pending) = self.conn.pending_upgrade() {
+ self.conn.take_error()?;
+ return Poll::Ready(Ok(Dispatched::Upgrade(pending)));
+ } else if should_shutdown {
+ ready!(self.conn.poll_shutdown(cx)).map_err(crate::Error::new_shutdown)?;
+ }
+ self.conn.take_error()?;
+ Poll::Ready(Ok(Dispatched::Shutdown))
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn poll_loop(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ // Limit the looping on this connection, in case it is ready far too
+ // often, so that other futures don't starve.
+ //
+ // 16 was chosen arbitrarily, as that is number of pipelined requests
+ // benchmarks often use. Perhaps it should be a config option instead.
+ for _ in 0..16 {
+ let _ = self.poll_read(cx)?;
+ let _ = self.poll_write(cx)?;
+ let _ = self.poll_flush(cx)?;
+
+ // This could happen if reading paused before blocking on IO,
+ // such as getting to the end of a framed message, but then
+ // writing/flushing set the state back to Init. In that case,
+ // if the read buffer still had bytes, we'd want to try poll_read
+ // again, or else we wouldn't ever be woken up again.
+ //
+ // Using this instead of task::current() and notify() inside
+ // the Conn is noticeably faster in pipelined benchmarks.
+ if !self.conn.wants_read_again() {
+ //break;
+ return Poll::Ready(Ok(()));
+ }
+ }
+
+ trace!("poll_loop yielding (self = {:p})", self);
+
+ task::yield_now(cx).map(|never| match never {})
+ }
+
/// Drives the read side of the connection.
///
/// Loops until no more progress can be made: parses a new message head
/// when one can be read, forwards decoded body frames (data and
/// trailers) into the user's body channel, or otherwise polls for
/// keep-alive / connection-close activity.
fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
    loop {
        if self.is_closing {
            return Poll::Ready(Ok(()));
        } else if self.conn.can_read_head() {
            ready!(self.poll_read_head(cx))?;
        } else if let Some(mut body) = self.body_tx.take() {
            if self.conn.can_read_body() {
                // Confirm the user's receiver can accept a frame *before*
                // decoding one, so no decoded frame is ever dropped.
                match body.poll_ready(cx) {
                    Poll::Ready(Ok(())) => (),
                    Poll::Pending => {
                        // Not ready yet: put the sender back for next poll.
                        self.body_tx = Some(body);
                        return Poll::Pending;
                    }
                    Poll::Ready(Err(_canceled)) => {
                        // user doesn't care about the body
                        // so we should stop reading
                        trace!("body receiver dropped before eof, draining or closing");
                        self.conn.poll_drain_or_close_read(cx);
                        continue;
                    }
                }
                match self.conn.poll_read_body(cx) {
                    Poll::Ready(Some(Ok(frame))) => {
                        if frame.is_data() {
                            let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                            match body.try_send_data(chunk) {
                                Ok(()) => {
                                    self.body_tx = Some(body);
                                }
                                Err(_canceled) => {
                                    // Receiver went away mid-body; stop
                                    // reading the rest of it.
                                    if self.conn.can_read_body() {
                                        trace!("body receiver dropped before eof, closing");
                                        self.conn.close_read();
                                    }
                                }
                            }
                        } else if frame.is_trailers() {
                            let trailers =
                                frame.into_trailers().unwrap_or_else(|_| unreachable!());
                            match body.try_send_trailers(trailers) {
                                Ok(()) => {
                                    self.body_tx = Some(body);
                                }
                                Err(_canceled) => {
                                    if self.conn.can_read_body() {
                                        trace!("body receiver dropped before eof, closing");
                                        self.conn.close_read();
                                    }
                                }
                            }
                        } else {
                            // we should have dropped all unknown frames in poll_read_body
                            error!("unexpected frame");
                        }
                    }
                    Poll::Ready(None) => {
                        // just drop, the body will close automatically
                    }
                    Poll::Pending => {
                        self.body_tx = Some(body);
                        return Poll::Pending;
                    }
                    Poll::Ready(Some(Err(e))) => {
                        body.send_error(crate::Error::new_body(e));
                    }
                }
            } else {
                // just drop, the body will close automatically
            }
        } else {
            // No message and no body in progress: watch for EOF/keep-alive.
            return self.conn.poll_read_keep_alive(cx);
        }
    }
}
+
/// Reads and dispatches a single message head.
///
/// First confirms the dispatch side can receive a message; then parses
/// the head, wires up a body channel when the message has a body, and
/// attaches an `OnUpgrade` extension when an upgrade was requested.
fn poll_read_head(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
    // can dispatch receive, or does it still care about other incoming message?
    match ready!(self.dispatch.poll_ready(cx)) {
        Ok(()) => (),
        Err(()) => {
            trace!("dispatch no longer receiving messages");
            self.close();
            return Poll::Ready(Ok(()));
        }
    }

    // dispatch is ready for a message, try to read one
    match ready!(self.conn.poll_read_head(cx)) {
        Some(Ok((mut head, body_len, wants))) => {
            let body = match body_len {
                DecodedLength::ZERO => IncomingBody::empty(),
                other => {
                    // Non-empty body: keep the sender half so poll_read
                    // can forward decoded frames into it.
                    let (tx, rx) =
                        IncomingBody::new_channel(other, wants.contains(Wants::EXPECT));
                    self.body_tx = Some(tx);
                    rx
                }
            };
            if wants.contains(Wants::UPGRADE) {
                let upgrade = self.conn.on_upgrade();
                debug_assert!(!upgrade.is_none(), "empty upgrade");
                debug_assert!(
                    head.extensions.get::<OnUpgrade>().is_none(),
                    "OnUpgrade already set"
                );
                head.extensions.insert(upgrade);
            }
            self.dispatch.recv_msg(Ok((head, body)))?;
            Poll::Ready(Ok(()))
        }
        Some(Err(err)) => {
            debug!("read_head error: {}", err);
            self.dispatch.recv_msg(Err(err))?;
            // if here, the dispatcher gave the user the error
            // somewhere else. we still need to shutdown, but
            // not as a second error.
            self.close();
            Poll::Ready(Ok(()))
        }
        None => {
            // read eof, the write side will have been closed too unless
            // allow_read_close was set to true, in which case just do
            // nothing...
            debug_assert!(self.conn.is_read_closed());
            if self.conn.is_write_closed() {
                self.close();
            }
            Poll::Ready(Ok(()))
        }
    }
}
+
/// Drives the write side of the connection.
///
/// Loops: starts a new message by polling the dispatcher for a head,
/// flushes when the write buffer is full, and otherwise streams frames
/// from the user's body into the connection.
fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
    loop {
        if self.is_closing {
            return Poll::Ready(Ok(()));
        } else if self.body_rx.is_none()
            && self.conn.can_write_head()
            && self.dispatch.should_poll()
        {
            if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) {
                let (head, body) = msg.map_err(crate::Error::new_user_service)?;

                // Derive framing from the body: no body at all, an
                // exactly-known length, or unknown length.
                let body_type = if body.is_end_stream() {
                    self.body_rx.set(None);
                    None
                } else {
                    let btype = body
                        .size_hint()
                        .exact()
                        .map(BodyLength::Known)
                        .or(Some(BodyLength::Unknown));
                    self.body_rx.set(Some(body));
                    btype
                };
                self.conn.write_head(head, body_type);
            } else {
                // Dispatcher has no more messages; we're done writing.
                self.close();
                return Poll::Ready(Ok(()));
            }
        } else if !self.conn.can_buffer_body() {
            // Write buffer is full; flush before buffering more.
            ready!(self.poll_flush(cx))?;
        } else {
            // A new scope is needed :(
            if let (Some(mut body), clear_body) =
                OptGuard::new(self.body_rx.as_mut()).guard_mut()
            {
                debug_assert!(!*clear_body, "opt guard defaults to keeping body");
                if !self.conn.can_write_body() {
                    trace!(
                        "no more write body allowed, user body is_end_stream = {}",
                        body.is_end_stream(),
                    );
                    *clear_body = true;
                    continue;
                }

                let item = ready!(body.as_mut().poll_frame(cx));
                if let Some(item) = item {
                    let frame = item.map_err(|e| {
                        // Drop the body on user error via the guard.
                        *clear_body = true;
                        crate::Error::new_user_body(e)
                    })?;

                    if frame.is_data() {
                        let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                        let eos = body.is_end_stream();
                        if eos {
                            *clear_body = true;
                            if chunk.remaining() == 0 {
                                trace!("discarding empty chunk");
                                self.conn.end_body()?;
                            } else {
                                self.conn.write_body_and_end(chunk);
                            }
                        } else {
                            if chunk.remaining() == 0 {
                                trace!("discarding empty chunk");
                                continue;
                            }
                            self.conn.write_body(chunk);
                        }
                    } else if frame.is_trailers() {
                        *clear_body = true;
                        self.conn.write_trailers(
                            frame.into_trailers().unwrap_or_else(|_| unreachable!()),
                        );
                    } else {
                        trace!("discarding unknown frame");
                        continue;
                    }
                } else {
                    // Body stream finished; terminate the message body.
                    *clear_body = true;
                    self.conn.end_body()?;
                }
            } else {
                // If there's no body_rx, end the body
                if self.conn.can_write_body() {
                    self.conn.end_body()?;
                } else {
                    return Poll::Pending;
                }
            }
        }
    }
}
+
+ fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ self.conn.poll_flush(cx).map_err(|err| {
+ debug!("error writing: {}", err);
+ crate::Error::new_body_write(err)
+ })
+ }
+
/// Marks the dispatcher as closing and shuts down both the read and
/// write sides of the connection.
fn close(&mut self) {
    self.is_closing = true;
    self.conn.close_read();
    self.conn.close_write();
}
+
+ fn is_done(&self) -> bool {
+ if self.is_closing {
+ return true;
+ }
+
+ let read_done = self.conn.is_read_closed();
+
+ if !T::should_read_first() && read_done {
+ // a client that cannot read may was well be done.
+ true
+ } else {
+ let write_done = self.conn.is_write_closed()
+ || (!self.dispatch.should_poll() && self.body_rx.is_none());
+ read_done && write_done
+ }
+ }
+}
+
/// Running a `Dispatcher` as a future drives the whole connection to
/// completion, yielding how it ended (`Dispatched`).
impl<D, Bs, I, T> Future for Dispatcher<D, Bs, I, T>
where
    D: Dispatch<
        PollItem = MessageHead<T::Outgoing>,
        PollBody = Bs,
        RecvItem = MessageHead<T::Incoming>,
    > + Unpin,
    D::PollError: Into<Box<dyn StdError + Send + Sync>>,
    I: Read + Write + Unpin,
    T: Http1Transaction + Unpin,
    Bs: Body + 'static,
    Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Output = crate::Result<Dispatched>;

    // Delegates to poll_catch; the `true` flag's exact meaning is defined
    // at poll_catch (outside this view) — presumably "should shutdown".
    #[inline]
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.poll_catch(cx, true)
    }
}
+
// ===== impl OptGuard =====

/// A drop guard to allow a mutable borrow of an Option while being able to
/// set whether the `Option` should be cleared on drop.
struct OptGuard<'a, T>(Pin<&'a mut Option<T>>, bool);

impl<'a, T> OptGuard<'a, T> {
    /// Wraps the pinned `Option`; the clear-on-drop flag starts `false`.
    fn new(pin: Pin<&'a mut Option<T>>) -> Self {
        OptGuard(pin, false)
    }

    /// Returns the pinned inner value (if any) together with a mutable
    /// reference to the clear-on-drop flag.
    fn guard_mut(&mut self) -> (Option<Pin<&mut T>>, &mut bool) {
        (self.0.as_mut().as_pin_mut(), &mut self.1)
    }
}

impl<T> Drop for OptGuard<'_, T> {
    fn drop(&mut self) {
        // Only clear the Option if the flag was set while guarded.
        if self.1 {
            self.0.set(None);
        }
    }
}
+
// ===== impl Server =====

cfg_server! {
    impl<S, B> Server<S, B>
    where
        S: HttpService<B>,
    {
        /// Creates a server dispatcher around the given service, with no
        /// request in flight yet.
        pub(crate) fn new(service: S) -> Server<S, B> {
            Server {
                in_flight: Box::pin(None),
                service,
            }
        }

        /// Consumes the dispatcher, returning the wrapped service.
        pub(crate) fn into_service(self) -> S {
            self.service
        }
    }

    // Service is never pinned
    impl<S: HttpService<B>, B> Unpin for Server<S, B> {}

    impl<S, Bs> Dispatch for Server<S, IncomingBody>
    where
        S: HttpService<IncomingBody, ResBody = Bs>,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        Bs: Body,
    {
        type PollItem = MessageHead<http::StatusCode>;
        type PollBody = Bs;
        type PollError = S::Error;
        type RecvItem = RequestHead;

        /// Polls the in-flight service future for a response, converting
        /// it into a message head plus body.
        ///
        /// Panics if called while no request is in flight; callers are
        /// expected to gate on `should_poll`.
        fn poll_msg(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>> {
            let mut this = self.as_mut();
            let ret = if let Some(ref mut fut) = this.in_flight.as_mut().as_pin_mut() {
                let resp = ready!(fut.as_mut().poll(cx)?);
                let (parts, body) = resp.into_parts();
                let head = MessageHead {
                    version: parts.version,
                    subject: parts.status,
                    headers: parts.headers,
                    extensions: parts.extensions,
                };
                Poll::Ready(Some(Ok((head, body))))
            } else {
                unreachable!("poll_msg shouldn't be called if no inflight");
            };

            // Since in_flight finished, remove it
            this.in_flight.set(None);
            ret
        }

        /// Turns a received request head + body into an `http::Request`
        /// and starts the service call, storing its future as in-flight.
        fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()> {
            let (msg, body) = msg?;
            let mut req = Request::new(body);
            *req.method_mut() = msg.subject.0;
            *req.uri_mut() = msg.subject.1;
            *req.headers_mut() = msg.headers;
            *req.version_mut() = msg.version;
            *req.extensions_mut() = msg.extensions;
            let fut = self.service.call(req);
            self.in_flight.set(Some(fut));
            Ok(())
        }

        /// Ready to receive a new request only when no response future is
        /// still in flight.
        fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), ()>> {
            if self.in_flight.is_some() {
                Poll::Pending
            } else {
                Poll::Ready(Ok(()))
            }
        }

        /// `poll_msg` is only meaningful while a service call is in flight.
        fn should_poll(&self) -> bool {
            self.in_flight.is_some()
        }
    }
}
+
// ===== impl Client =====

cfg_client! {
    use std::convert::Infallible;

    impl<B> Client<B> {
        /// Creates a client dispatcher reading queued requests from `rx`.
        pub(crate) fn new(rx: ClientRx<B>) -> Client<B> {
            Client {
                callback: None,
                rx,
                rx_closed: false,
            }
        }
    }

    impl<B> Dispatch for Client<B>
    where
        B: Body,
    {
        type PollItem = RequestHead;
        type PollBody = B;
        type PollError = Infallible;
        type RecvItem = crate::proto::ResponseHead;

        /// Polls the request channel for the next request to send,
        /// keeping its response callback for `recv_msg`.
        ///
        /// Requests whose callback was already canceled yield `None`, as
        /// does the user dropping the sender handle.
        fn poll_msg(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Infallible>>> {
            let mut this = self.as_mut();
            debug_assert!(!this.rx_closed);
            match this.rx.poll_recv(cx) {
                Poll::Ready(Some((req, mut cb))) => {
                    // check that future hasn't been canceled already
                    match cb.poll_canceled(cx) {
                        Poll::Ready(()) => {
                            trace!("request canceled");
                            Poll::Ready(None)
                        }
                        Poll::Pending => {
                            let (parts, body) = req.into_parts();
                            let head = RequestHead {
                                version: parts.version,
                                subject: crate::proto::RequestLine(parts.method, parts.uri),
                                headers: parts.headers,
                                extensions: parts.extensions,
                            };
                            this.callback = Some(cb);
                            Poll::Ready(Some(Ok((head, body))))
                        }
                    }
                }
                Poll::Ready(None) => {
                    // user has dropped sender handle
                    trace!("client tx closed");
                    this.rx_closed = true;
                    Poll::Ready(None)
                }
                Poll::Pending => Poll::Pending,
            }
        }

        /// Delivers a received response (or connection error) to the
        /// pending request's callback.
        ///
        /// On error with no pending callback, tries to cancel one queued
        /// request instead; otherwise the error is bubbled up.
        fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()> {
            match msg {
                Ok((msg, body)) => {
                    if let Some(cb) = self.callback.take() {
                        let res = msg.into_response(body);
                        cb.send(Ok(res));
                        Ok(())
                    } else {
                        // Getting here is likely a bug! An error should have happened
                        // in Conn::require_empty_read() before ever parsing a
                        // full message!
                        Err(crate::Error::new_unexpected_message())
                    }
                }
                Err(err) => {
                    if let Some(cb) = self.callback.take() {
                        cb.send(Err(TrySendError {
                            error: err,
                            message: None,
                        }));
                        Ok(())
                    } else if !self.rx_closed {
                        self.rx.close();
                        if let Some((req, cb)) = self.rx.try_recv() {
                            trace!("canceling queued request with connection error: {}", err);
                            // in this case, the message was never even started, so it's safe to tell
                            // the user that the request was completely canceled
                            cb.send(Err(TrySendError {
                                error: crate::Error::new_canceled().with(err),
                                message: Some(req),
                            }));
                            Ok(())
                        } else {
                            Err(err)
                        }
                    } else {
                        Err(err)
                    }
                }
            }
        }

        /// Ready only while a request's callback is pending and its
        /// receiver is still alive; `Err` tells the dispatcher to stop.
        fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ()>> {
            match self.callback {
                Some(ref mut cb) => match cb.poll_canceled(cx) {
                    Poll::Ready(()) => {
                        trace!("callback receiver has dropped");
                        Poll::Ready(Err(()))
                    }
                    Poll::Pending => Poll::Ready(Ok(())),
                },
                None => Poll::Ready(Err(())),
            }
        }

        /// A new request may only be polled once the previous callback
        /// has been consumed.
        fn should_poll(&self) -> bool {
            self.callback.is_none()
        }
    }
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::io::Compat;
    use crate::proto::h1::ClientTransaction;
    use std::time::Duration;

    // A response that arrives before the request was ever written should
    // cancel the request and hand the message back to the caller.
    #[test]
    fn client_read_bytes_before_writing_request() {
        let _ = pretty_env_logger::try_init();

        tokio_test::task::spawn(()).enter(|cx, _| {
            let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle();

            // Block at 0 for now, but we will release this response before
            // the request is ready to write later...
            let (mut tx, rx) = crate::client::dispatch::channel();
            let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(Compat::new(io));
            let mut dispatcher = Dispatcher::new(Client::new(rx), conn);

            // First poll is needed to allow tx to send...
            assert!(Pin::new(&mut dispatcher).poll(cx).is_pending());

            // Unblock our IO, which has a response before we've sent request!
            //
            handle.read(b"HTTP/1.1 200 OK\r\n\r\n");

            let mut res_rx = tx
                .try_send(crate::Request::new(IncomingBody::empty()))
                .unwrap();

            tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx));
            let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx))
                .expect_err("callback should send error");

            match (err.error.is_canceled(), err.message.as_ref()) {
                (true, Some(_)) => (),
                _ => panic!("expected Canceled, got {:?}", err),
            }
        });
    }

    // While a request body is still flushing, the sender must not report
    // readiness for the next request.
    #[cfg(not(miri))]
    #[tokio::test]
    async fn client_flushing_is_not_ready_for_next_request() {
        let _ = pretty_env_logger::try_init();

        let (io, _handle) = tokio_test::io::Builder::new()
            .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n")
            .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
            .wait(std::time::Duration::from_secs(2))
            .build_with_handle();

        let (mut tx, rx) = crate::client::dispatch::channel();
        let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(Compat::new(io));
        conn.set_write_strategy_queue();

        let dispatcher = Dispatcher::new(Client::new(rx), conn);
        let _dispatcher = tokio::spawn(async move { dispatcher.await });

        let body = {
            let (mut tx, body) = IncomingBody::new_channel(DecodedLength::new(4), false);
            tx.try_send_data("reee".into()).unwrap();
            body
        };

        let req = crate::Request::builder().method("POST").body(body).unwrap();

        let res = tx.try_send(req).unwrap().await.expect("response");
        drop(res);

        assert!(!tx.is_ready());
    }

    // Empty data chunks from the user's body must be discarded instead of
    // reaching conn.write_body (which asserts on them).
    #[cfg(not(miri))]
    #[tokio::test]
    async fn body_empty_chunks_ignored() {
        let _ = pretty_env_logger::try_init();

        let io = tokio_test::io::Builder::new()
            // no reading or writing, just be blocked for the test...
            .wait(Duration::from_secs(5))
            .build();

        let (mut tx, rx) = crate::client::dispatch::channel();
        let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(Compat::new(io));
        let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));

        // First poll is needed to allow tx to send...
        assert!(dispatcher.poll().is_pending());

        let body = {
            let (mut tx, body) = IncomingBody::channel();
            tx.try_send_data("".into()).unwrap();
            body
        };

        let _res_rx = tx.try_send(crate::Request::new(body)).unwrap();

        // Ensure conn.write_body wasn't called with the empty chunk.
        // If it is, it will trigger an assertion.
        assert!(dispatcher.poll().is_pending());
    }
}
diff --git a/vendor/hyper/src/proto/h1/encode.rs b/vendor/hyper/src/proto/h1/encode.rs
new file mode 100644
index 00000000..2df0c396
--- /dev/null
+++ b/vendor/hyper/src/proto/h1/encode.rs
@@ -0,0 +1,660 @@
+use std::collections::HashMap;
+use std::fmt;
+use std::io::IoSlice;
+
+use bytes::buf::{Chain, Take};
+use bytes::{Buf, Bytes};
+use http::{
+ header::{
+ AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE,
+ CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING,
+ },
+ HeaderMap, HeaderName, HeaderValue,
+};
+
+use super::io::WriteBuf;
+use super::role::{write_headers, write_headers_title_case};
+
// Static byte slices used for fixed framing pieces (CRLFs, terminators).
type StaticBuf = &'static [u8];

/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Encoder {
    // Which body-framing strategy applies to this message.
    kind: Kind,
    // Set via `set_last`; when true, `encode_and_end` reports `false`
    // (NOTE(review): appears to mean "connection should not be reused" —
    // confirm at the call sites of `is_last`).
    is_last: bool,
}

/// A body buffer wrapped with whatever framing its encoder produced.
#[derive(Debug)]
pub(crate) struct EncodedBuf<B> {
    kind: BufKind<B>,
}

/// Error from [`Encoder::end`] when a fixed-length body ended early;
/// the `u64` is the number of bytes still expected.
#[derive(Debug)]
pub(crate) struct NotEof(u64);

#[derive(Debug, PartialEq, Clone)]
enum Kind {
    /// An Encoder for when Transfer-Encoding includes `chunked`.
    ///
    /// The optional list holds the declared `Trailer` header values,
    /// recorded by `into_chunked_with_trailing_fields`.
    Chunked(Option<Vec<HeaderValue>>),
    /// An Encoder for when Content-Length is set.
    ///
    /// Enforces that the body is not longer than the Content-Length header.
    Length(u64),
    /// An Encoder for when neither Content-Length nor Chunked encoding is set.
    ///
    /// This is mostly only used with HTTP/1.0 with a length. This kind requires
    /// the connection to be closed when the body is finished.
    #[cfg(feature = "server")]
    CloseDelimited,
}

#[derive(Debug)]
enum BufKind<B> {
    // The buffer as-is (Length within budget, or CloseDelimited).
    Exact(B),
    // The buffer truncated to the remaining Content-Length budget.
    Limited(Take<B>),
    // Hex size line + payload + trailing CRLF.
    Chunked(Chain<Chain<ChunkSize, B>, StaticBuf>),
    // The terminating `0\r\n\r\n`.
    ChunkedEnd(StaticBuf),
    // `0\r\n` + serialized trailer fields + final CRLF.
    Trailers(Chain<Chain<StaticBuf, Bytes>, StaticBuf>),
}
+
impl Encoder {
    // Internal constructor; `is_last` defaults to `false`.
    fn new(kind: Kind) -> Encoder {
        Encoder {
            kind,
            is_last: false,
        }
    }

    /// An encoder using chunked transfer coding, with no trailer fields
    /// declared yet.
    pub(crate) fn chunked() -> Encoder {
        Encoder::new(Kind::Chunked(None))
    }

    /// An encoder enforcing an exact Content-Length of `len` bytes.
    pub(crate) fn length(len: u64) -> Encoder {
        Encoder::new(Kind::Length(len))
    }

    /// An encoder for a body delimited by closing the connection.
    #[cfg(feature = "server")]
    pub(crate) fn close_delimited() -> Encoder {
        Encoder::new(Kind::CloseDelimited)
    }

    /// Records the trailer fields allowed to be emitted later by
    /// `encode_trailers`. Returns `self` unchanged for non-chunked kinds.
    pub(crate) fn into_chunked_with_trailing_fields(self, trailers: Vec<HeaderValue>) -> Encoder {
        match self.kind {
            Kind::Chunked(_) => Encoder {
                kind: Kind::Chunked(Some(trailers)),
                is_last: self.is_last,
            },
            _ => self,
        }
    }

    /// True only when a fixed-length body has exactly zero bytes left.
    pub(crate) fn is_eof(&self) -> bool {
        matches!(self.kind, Kind::Length(0))
    }

    #[cfg(feature = "server")]
    pub(crate) fn set_last(mut self, is_last: bool) -> Self {
        self.is_last = is_last;
        self
    }

    pub(crate) fn is_last(&self) -> bool {
        self.is_last
    }

    /// True when the body's end is signaled by closing the connection.
    pub(crate) fn is_close_delimited(&self) -> bool {
        match self.kind {
            #[cfg(feature = "server")]
            Kind::CloseDelimited => true,
            _ => false,
        }
    }

    pub(crate) fn is_chunked(&self) -> bool {
        matches!(self.kind, Kind::Chunked(_))
    }

    /// Produces the terminating buffer for the body, if the kind needs one.
    ///
    /// Returns `Err(NotEof)` when a Content-Length body still expects
    /// `n` more bytes.
    pub(crate) fn end<B>(&self) -> Result<Option<EncodedBuf<B>>, NotEof> {
        match self.kind {
            Kind::Length(0) => Ok(None),
            Kind::Chunked(_) => Ok(Some(EncodedBuf {
                // Final zero-sized chunk plus empty trailer section.
                kind: BufKind::ChunkedEnd(b"0\r\n\r\n"),
            })),
            #[cfg(feature = "server")]
            Kind::CloseDelimited => Ok(None),
            Kind::Length(n) => Err(NotEof(n)),
        }
    }

    /// Frames one body buffer according to the encoder's kind.
    ///
    /// For `Length`, output is truncated to the remaining budget and the
    /// budget is decremented. Callers must not pass an empty buffer
    /// (debug-asserted).
    pub(crate) fn encode<B>(&mut self, msg: B) -> EncodedBuf<B>
    where
        B: Buf,
    {
        let len = msg.remaining();
        debug_assert!(len > 0, "encode() called with empty buf");

        let kind = match self.kind {
            Kind::Chunked(_) => {
                trace!("encoding chunked {}B", len);
                let buf = ChunkSize::new(len)
                    .chain(msg)
                    .chain(b"\r\n" as &'static [u8]);
                BufKind::Chunked(buf)
            }
            Kind::Length(ref mut remaining) => {
                trace!("sized write, len = {}", len);
                if len as u64 > *remaining {
                    // Over budget: truncate and consume the whole budget.
                    let limit = *remaining as usize;
                    *remaining = 0;
                    BufKind::Limited(msg.take(limit))
                } else {
                    *remaining -= len as u64;
                    BufKind::Exact(msg)
                }
            }
            #[cfg(feature = "server")]
            Kind::CloseDelimited => {
                trace!("close delimited write {}B", len);
                BufKind::Exact(msg)
            }
        };
        EncodedBuf { kind }
    }

    /// Encodes the subset of `trailers` that was declared via the
    /// `Trailer` header and is permitted in a trailer section.
    ///
    /// Returns `None` when no trailer fields were declared, when the
    /// encoder is not chunked, or when nothing survives filtering.
    pub(crate) fn encode_trailers<B>(
        &self,
        trailers: HeaderMap,
        title_case_headers: bool,
    ) -> Option<EncodedBuf<B>> {
        trace!("encoding trailers");
        match &self.kind {
            Kind::Chunked(Some(allowed_trailer_fields)) => {
                let allowed_trailer_field_map = allowed_trailer_field_map(allowed_trailer_fields);

                // HeaderMap iteration yields `None` for repeated values of
                // the same field; carry the last seen name forward.
                let mut cur_name = None;
                let mut allowed_trailers = HeaderMap::new();

                for (opt_name, value) in trailers {
                    if let Some(n) = opt_name {
                        cur_name = Some(n);
                    }
                    let name = cur_name.as_ref().expect("current header name");

                    if allowed_trailer_field_map.contains_key(name.as_str()) {
                        if is_valid_trailer_field(name) {
                            allowed_trailers.insert(name, value);
                        } else {
                            debug!("trailer field is not valid: {}", &name);
                        }
                    } else {
                        debug!("trailer header name not found in trailer header: {}", &name);
                    }
                }

                let mut buf = Vec::new();
                if title_case_headers {
                    write_headers_title_case(&allowed_trailers, &mut buf);
                } else {
                    write_headers(&allowed_trailers, &mut buf);
                }

                if buf.is_empty() {
                    return None;
                }

                Some(EncodedBuf {
                    kind: BufKind::Trailers(b"0\r\n".chain(Bytes::from(buf)).chain(b"\r\n")),
                })
            }
            Kind::Chunked(None) => {
                debug!("attempted to encode trailers, but the trailer header is not set");
                None
            }
            _ => {
                debug!("attempted to encode trailers for non-chunked response");
                None
            }
        }
    }

    /// Frames `msg` as the *final* body buffer directly into `dst`.
    ///
    /// Returns `!is_last` when the body was completed as declared, and
    /// `false` when the body under-ran its Content-Length or is
    /// close-delimited (NOTE(review): presumably "may keep connection
    /// alive" — confirm at the call site).
    pub(super) fn encode_and_end<B>(&self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>) -> bool
    where
        B: Buf,
    {
        let len = msg.remaining();
        debug_assert!(len > 0, "encode() called with empty buf");

        match self.kind {
            Kind::Chunked(_) => {
                trace!("encoding chunked {}B", len);
                // One chunk followed immediately by the terminator.
                let buf = ChunkSize::new(len)
                    .chain(msg)
                    .chain(b"\r\n0\r\n\r\n" as &'static [u8]);
                dst.buffer(buf);
                !self.is_last
            }
            Kind::Length(remaining) => {
                use std::cmp::Ordering;

                trace!("sized write, len = {}", len);
                match (len as u64).cmp(&remaining) {
                    Ordering::Equal => {
                        dst.buffer(msg);
                        !self.is_last
                    }
                    Ordering::Greater => {
                        // Over budget: truncate to what was declared.
                        dst.buffer(msg.take(remaining as usize));
                        !self.is_last
                    }
                    Ordering::Less => {
                        // Under budget: body is short, can't be reused.
                        dst.buffer(msg);
                        false
                    }
                }
            }
            #[cfg(feature = "server")]
            Kind::CloseDelimited => {
                trace!("close delimited write {}B", len);
                dst.buffer(msg);
                false
            }
        }
    }
}
+
+fn is_valid_trailer_field(name: &HeaderName) -> bool {
+ !matches!(
+ *name,
+ AUTHORIZATION
+ | CACHE_CONTROL
+ | CONTENT_ENCODING
+ | CONTENT_LENGTH
+ | CONTENT_RANGE
+ | CONTENT_TYPE
+ | HOST
+ | MAX_FORWARDS
+ | SET_COOKIE
+ | TRAILER
+ | TRANSFER_ENCODING
+ | TE
+ )
+}
+
+fn allowed_trailer_field_map(allowed_trailer_fields: &Vec<HeaderValue>) -> HashMap<String, ()> {
+ let mut trailer_map = HashMap::new();
+
+ for header_value in allowed_trailer_fields {
+ if let Ok(header_str) = header_value.to_str() {
+ let items: Vec<&str> = header_str.split(',').map(|item| item.trim()).collect();
+
+ for item in items {
+ trailer_map.entry(item.to_string()).or_insert(());
+ }
+ }
+ }
+
+ trailer_map
+}
+
/// `EncodedBuf` is a `Buf` itself: every method simply delegates to
/// whichever framing variant is currently held.
impl<B> Buf for EncodedBuf<B>
where
    B: Buf,
{
    #[inline]
    fn remaining(&self) -> usize {
        match self.kind {
            BufKind::Exact(ref b) => b.remaining(),
            BufKind::Limited(ref b) => b.remaining(),
            BufKind::Chunked(ref b) => b.remaining(),
            BufKind::ChunkedEnd(ref b) => b.remaining(),
            BufKind::Trailers(ref b) => b.remaining(),
        }
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        match self.kind {
            BufKind::Exact(ref b) => b.chunk(),
            BufKind::Limited(ref b) => b.chunk(),
            BufKind::Chunked(ref b) => b.chunk(),
            BufKind::ChunkedEnd(ref b) => b.chunk(),
            BufKind::Trailers(ref b) => b.chunk(),
        }
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        match self.kind {
            BufKind::Exact(ref mut b) => b.advance(cnt),
            BufKind::Limited(ref mut b) => b.advance(cnt),
            BufKind::Chunked(ref mut b) => b.advance(cnt),
            BufKind::ChunkedEnd(ref mut b) => b.advance(cnt),
            BufKind::Trailers(ref mut b) => b.advance(cnt),
        }
    }

    // Forwarded so vectored writes see the underlying buffers instead of
    // the default single-chunk implementation.
    #[inline]
    fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
        match self.kind {
            BufKind::Exact(ref b) => b.chunks_vectored(dst),
            BufKind::Limited(ref b) => b.chunks_vectored(dst),
            BufKind::Chunked(ref b) => b.chunks_vectored(dst),
            BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst),
            BufKind::Trailers(ref b) => b.chunks_vectored(dst),
        }
    }
}
+
#[cfg(target_pointer_width = "32")]
const USIZE_BYTES: usize = 4;

#[cfg(target_pointer_width = "64")]
const USIZE_BYTES: usize = 8;

// each byte will become 2 hex
const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2;

/// An inline, fixed-size buffer holding one chunk-size line: the length
/// in uppercase hex followed by `\r\n` (hence the `+ 2`).
#[derive(Clone, Copy)]
struct ChunkSize {
    bytes: [u8; CHUNK_SIZE_MAX_BYTES + 2],
    // Read cursor into `bytes`.
    pos: u8,
    // Number of bytes actually written into `bytes`.
    len: u8,
}

impl ChunkSize {
    /// Formats `len` as a hex chunk-size line, e.g. `2A\r\n` for 42.
    fn new(len: usize) -> ChunkSize {
        use std::fmt::Write;
        let mut size = ChunkSize {
            bytes: [0; CHUNK_SIZE_MAX_BYTES + 2],
            pos: 0,
            len: 0,
        };
        write!(&mut size, "{:X}\r\n", len).expect("CHUNK_SIZE_MAX_BYTES should fit any usize");
        size
    }
}
+
/// `Buf` over the formatted bytes between `pos` and `len`.
impl Buf for ChunkSize {
    #[inline]
    fn remaining(&self) -> usize {
        (self.len - self.pos).into()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        &self.bytes[self.pos.into()..self.len.into()]
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(cnt <= self.remaining());
        self.pos += cnt as u8; // just asserted cnt fits in u8
    }
}
+
/// Debug shows only the initialized bytes plus the read position.
impl fmt::Debug for ChunkSize {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ChunkSize")
            .field("bytes", &&self.bytes[..self.len.into()])
            .field("pos", &self.pos)
            .finish()
    }
}

/// `fmt::Write` sink used by `ChunkSize::new` to format the hex size
/// line directly into the inline array.
impl fmt::Write for ChunkSize {
    fn write_str(&mut self, num: &str) -> fmt::Result {
        use std::io::Write;
        (&mut self.bytes[self.len.into()..])
            .write_all(num.as_bytes())
            .expect("&mut [u8].write() cannot error");
        self.len += num.len() as u8; // safe because bytes is never bigger than 256
        Ok(())
    }
}
+
// Conversions that lift already-framed buffers into `EncodedBuf`
// without adding any additional framing.

/// A plain buffer becomes an exact (unframed) `EncodedBuf`.
impl<B: Buf> From<B> for EncodedBuf<B> {
    fn from(buf: B) -> Self {
        EncodedBuf {
            kind: BufKind::Exact(buf),
        }
    }
}

/// A `Take` becomes a length-limited `EncodedBuf`.
impl<B: Buf> From<Take<B>> for EncodedBuf<B> {
    fn from(buf: Take<B>) -> Self {
        EncodedBuf {
            kind: BufKind::Limited(buf),
        }
    }
}

/// A pre-built size/payload/CRLF chain becomes a chunked `EncodedBuf`.
impl<B: Buf> From<Chain<Chain<ChunkSize, B>, StaticBuf>> for EncodedBuf<B> {
    fn from(buf: Chain<Chain<ChunkSize, B>, StaticBuf>) -> Self {
        EncodedBuf {
            kind: BufKind::Chunked(buf),
        }
    }
}
+
/// Human-readable form reports how many bytes were still expected.
impl fmt::Display for NotEof {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "early end, expected {} more bytes", self.0)
    }
}

impl std::error::Error for NotEof {}
+
#[cfg(test)]
mod tests {
    use bytes::BufMut;
    use http::{
        header::{
            AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE,
            CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING,
        },
        HeaderMap, HeaderName, HeaderValue,
    };

    use super::super::io::Cursor;
    use super::Encoder;

    // Chunked framing: hex size line + payload + CRLF per write, and a
    // final `0\r\n\r\n` terminator from end().
    #[test]
    fn chunked() {
        let mut encoder = Encoder::chunked();
        let mut dst = Vec::new();

        let msg1 = b"foo bar".as_ref();
        let buf1 = encoder.encode(msg1);
        dst.put(buf1);
        assert_eq!(dst, b"7\r\nfoo bar\r\n");

        let msg2 = b"baz quux herp".as_ref();
        let buf2 = encoder.encode(msg2);
        dst.put(buf2);

        assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n");

        let end = encoder.end::<Cursor<Vec<u8>>>().unwrap().unwrap();
        dst.put(end);

        assert_eq!(
            dst,
            b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n".as_ref()
        );
    }

    // Content-Length framing: writes pass through unframed, excess bytes
    // are truncated, and end() errors until the budget is consumed.
    #[test]
    fn length() {
        let max_len = 8;
        let mut encoder = Encoder::length(max_len as u64);
        let mut dst = Vec::new();

        let msg1 = b"foo bar".as_ref();
        let buf1 = encoder.encode(msg1);
        dst.put(buf1);

        assert_eq!(dst, b"foo bar");
        assert!(!encoder.is_eof());
        encoder.end::<()>().unwrap_err();

        let msg2 = b"baz".as_ref();
        let buf2 = encoder.encode(msg2);
        dst.put(buf2);

        assert_eq!(dst.len(), max_len);
        assert_eq!(dst, b"foo barb");
        assert!(encoder.is_eof());
        assert!(encoder.end::<()>().unwrap().is_none());
    }

    // Close-delimited framing: everything passes through, is_eof never
    // trips, and end() always succeeds with no terminator.
    #[cfg(feature = "server")]
    #[test]
    fn eof() {
        let mut encoder = Encoder::close_delimited();
        let mut dst = Vec::new();

        let msg1 = b"foo bar".as_ref();
        let buf1 = encoder.encode(msg1);
        dst.put(buf1);

        assert_eq!(dst, b"foo bar");
        assert!(!encoder.is_eof());
        encoder.end::<()>().unwrap();

        let msg2 = b"baz".as_ref();
        let buf2 = encoder.encode(msg2);
        dst.put(buf2);

        assert_eq!(dst, b"foo barbaz");
        assert!(!encoder.is_eof());
        encoder.end::<()>().unwrap();
    }

    // Only trailers declared via the Trailer header are encoded.
    #[test]
    fn chunked_with_valid_trailers() {
        let encoder = Encoder::chunked();
        let trailers = vec![HeaderValue::from_static("chunky-trailer")];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        let headers = HeaderMap::from_iter(vec![
            (
                HeaderName::from_static("chunky-trailer"),
                HeaderValue::from_static("header data"),
            ),
            (
                HeaderName::from_static("should-not-be-included"),
                HeaderValue::from_static("oops"),
            ),
        ]);

        let buf1 = encoder.encode_trailers::<&[u8]>(headers, false).unwrap();

        let mut dst = Vec::new();
        dst.put(buf1);
        assert_eq!(dst, b"0\r\nchunky-trailer: header data\r\n\r\n");
    }

    // Multiple declared trailer fields are all encoded.
    #[test]
    fn chunked_with_multiple_trailer_headers() {
        let encoder = Encoder::chunked();
        let trailers = vec![
            HeaderValue::from_static("chunky-trailer"),
            HeaderValue::from_static("chunky-trailer-2"),
        ];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        let headers = HeaderMap::from_iter(vec![
            (
                HeaderName::from_static("chunky-trailer"),
                HeaderValue::from_static("header data"),
            ),
            (
                HeaderName::from_static("chunky-trailer-2"),
                HeaderValue::from_static("more header data"),
            ),
        ]);

        let buf1 = encoder.encode_trailers::<&[u8]>(headers, false).unwrap();

        let mut dst = Vec::new();
        dst.put(buf1);
        assert_eq!(
            dst,
            b"0\r\nchunky-trailer: header data\r\nchunky-trailer-2: more header data\r\n\r\n"
        );
    }

    // Without (or with an empty) Trailer declaration, nothing is encoded.
    #[test]
    fn chunked_with_no_trailer_header() {
        let encoder = Encoder::chunked();

        let headers = HeaderMap::from_iter(vec![(
            HeaderName::from_static("chunky-trailer"),
            HeaderValue::from_static("header data"),
        )]);

        assert!(encoder
            .encode_trailers::<&[u8]>(headers.clone(), false)
            .is_none());

        let trailers = vec![];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        assert!(encoder.encode_trailers::<&[u8]>(headers, false).is_none());
    }

    // Forbidden trailer fields are filtered out even when declared.
    #[test]
    fn chunked_with_invalid_trailers() {
        let encoder = Encoder::chunked();

        let trailers = format!(
            "{},{},{},{},{},{},{},{},{},{},{},{}",
            AUTHORIZATION,
            CACHE_CONTROL,
            CONTENT_ENCODING,
            CONTENT_LENGTH,
            CONTENT_RANGE,
            CONTENT_TYPE,
            HOST,
            MAX_FORWARDS,
            SET_COOKIE,
            TRAILER,
            TRANSFER_ENCODING,
            TE,
        );
        let trailers = vec![HeaderValue::from_str(&trailers).unwrap()];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        let mut headers = HeaderMap::new();
        headers.insert(AUTHORIZATION, HeaderValue::from_static("header data"));
        headers.insert(CACHE_CONTROL, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_ENCODING, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_LENGTH, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_RANGE, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_TYPE, HeaderValue::from_static("header data"));
        headers.insert(HOST, HeaderValue::from_static("header data"));
        headers.insert(MAX_FORWARDS, HeaderValue::from_static("header data"));
        headers.insert(SET_COOKIE, HeaderValue::from_static("header data"));
        headers.insert(TRAILER, HeaderValue::from_static("header data"));
        headers.insert(TRANSFER_ENCODING, HeaderValue::from_static("header data"));
        headers.insert(TE, HeaderValue::from_static("header data"));

        assert!(encoder.encode_trailers::<&[u8]>(headers, true).is_none());
    }

    // title_case_headers = true capitalizes the trailer field names.
    #[test]
    fn chunked_with_title_case_headers() {
        let encoder = Encoder::chunked();
        let trailers = vec![HeaderValue::from_static("chunky-trailer")];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        let headers = HeaderMap::from_iter(vec![(
            HeaderName::from_static("chunky-trailer"),
            HeaderValue::from_static("header data"),
        )]);
        let buf1 = encoder.encode_trailers::<&[u8]>(headers, true).unwrap();

        let mut dst = Vec::new();
        dst.put(buf1);
        assert_eq!(dst, b"0\r\nChunky-Trailer: header data\r\n\r\n");
    }
}
diff --git a/vendor/hyper/src/proto/h1/io.rs b/vendor/hyper/src/proto/h1/io.rs
new file mode 100644
index 00000000..d5afba68
--- /dev/null
+++ b/vendor/hyper/src/proto/h1/io.rs
@@ -0,0 +1,967 @@
+use std::cmp;
+use std::fmt;
+use std::io::{self, IoSlice};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use crate::rt::{Read, ReadBuf, Write};
+use bytes::{Buf, BufMut, Bytes, BytesMut};
+use futures_util::ready;
+
+use super::{Http1Transaction, ParseContext, ParsedMessage};
+use crate::common::buf::BufList;
+
+/// The initial buffer size allocated before trying to read from IO (8 KiB).
+pub(crate) const INIT_BUFFER_SIZE: usize = 8192;
+
+/// The minimum value that can be set to max buffer size.
+pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE;
+
+/// The default maximum read buffer size. If the buffer gets this big and
+/// a message is still not complete, a `TooLarge` error is triggered.
+// Note: if this changes, update server::conn::Http::max_buf_size docs.
+// (8 KiB initial allocation plus one hundred 4 KiB growth steps.)
+pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;
+
+/// The maximum number of distinct `Buf`s to hold in a list before requiring
+/// a flush. Only affects when the buffer strategy is to queue buffers.
+///
+/// Note that a flush can happen before reaching the maximum. This simply
+/// forces a flush if the queue gets this big.
+const MAX_BUF_LIST_BUFFERS: usize = 16;
+
+/// A buffered wrapper around a transport `T`, pairing an adaptive read
+/// buffer with a strategy-driven write buffer of user `Buf`s (`B`).
+pub(crate) struct Buffered<T, B> {
+    // When true, flushing is skipped while more input is already buffered
+    // (see `poll_flush`); enabled via `set_flush_pipeline`.
+    flush_pipeline: bool,
+    // The underlying IO transport.
+    io: T,
+    // Byte count of a partially-parsed message head, carried between
+    // `parse` attempts.
+    partial_len: Option<usize>,
+    // True when the most recent read from `io` returned `Pending`.
+    read_blocked: bool,
+    // Incoming bytes not yet consumed by the parser/body decoder.
+    read_buf: BytesMut,
+    // Controls how much to reserve before each read (adaptive or exact).
+    read_buf_strategy: ReadStrategy,
+    // Outgoing headers plus queued/flattened body buffers.
+    write_buf: WriteBuf<B>,
+}
+
+impl<T, B> fmt::Debug for Buffered<T, B>
+where
+    B: Buf,
+{
+    /// Formats only the two buffers; the IO type is omitted since it has
+    /// no `Debug` bound here.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut out = fmt.debug_struct("Buffered");
+        out.field("read_buf", &self.read_buf);
+        out.field("write_buf", &self.write_buf);
+        out.finish()
+    }
+}
+
+impl<T, B> Buffered<T, B>
+where
+    T: Read + Write + Unpin,
+    B: Buf,
+{
+    /// Wraps `io`, picking the write strategy from whether the transport
+    /// reports support for vectored writes.
+    pub(crate) fn new(io: T) -> Buffered<T, B> {
+        let strategy = if io.is_write_vectored() {
+            WriteStrategy::Queue
+        } else {
+            WriteStrategy::Flatten
+        };
+        let write_buf = WriteBuf::new(strategy);
+        Buffered {
+            flush_pipeline: false,
+            io,
+            partial_len: None,
+            read_blocked: false,
+            read_buf: BytesMut::with_capacity(0),
+            read_buf_strategy: ReadStrategy::default(),
+            write_buf,
+        }
+    }
+
+    /// Enables pipeline flushing; forces the flatten write strategy so all
+    /// responses share a single buffer. Must be called while nothing is
+    /// buffered (see the debug_assert).
+    #[cfg(feature = "server")]
+    pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) {
+        debug_assert!(!self.write_buf.has_remaining());
+        self.flush_pipeline = enabled;
+        if enabled {
+            self.set_write_strategy_flatten();
+        }
+    }
+
+    /// Sets the maximum size of both the read and write buffers.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `max` is below `MINIMUM_MAX_BUFFER_SIZE`.
+    pub(crate) fn set_max_buf_size(&mut self, max: usize) {
+        assert!(
+            max >= MINIMUM_MAX_BUFFER_SIZE,
+            "The max_buf_size cannot be smaller than {}.",
+            MINIMUM_MAX_BUFFER_SIZE,
+        );
+        self.read_buf_strategy = ReadStrategy::with_max(max);
+        self.write_buf.max_buf_size = max;
+    }
+
+    /// Switches reads to always reserve exactly `sz` bytes.
+    #[cfg(feature = "client")]
+    pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {
+        self.read_buf_strategy = ReadStrategy::Exact(sz);
+    }
+
+    pub(crate) fn set_write_strategy_flatten(&mut self) {
+        // this should always be called only at construction time,
+        // so this assert is here to catch myself
+        debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
+        self.write_buf.set_strategy(WriteStrategy::Flatten);
+    }
+
+    pub(crate) fn set_write_strategy_queue(&mut self) {
+        // this should always be called only at construction time,
+        // so this assert is here to catch myself
+        debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
+        self.write_buf.set_strategy(WriteStrategy::Queue);
+    }
+
+    /// The bytes read from IO but not yet consumed.
+    pub(crate) fn read_buf(&self) -> &[u8] {
+        self.read_buf.as_ref()
+    }
+
+    #[cfg(test)]
+    #[cfg(feature = "nightly")]
+    pub(super) fn read_buf_mut(&mut self) -> &mut BytesMut {
+        &mut self.read_buf
+    }
+
+    /// Return the "allocated" available space, not the potential space
+    /// that could be allocated in the future.
+    fn read_buf_remaining_mut(&self) -> usize {
+        self.read_buf.capacity() - self.read_buf.len()
+    }
+
+    /// Return whether we can append to the headers buffer.
+    ///
+    /// Reasons we can't:
+    /// - The write buf is in queue mode, and some of the past body is still
+    ///   needing to be flushed.
+    pub(crate) fn can_headers_buf(&self) -> bool {
+        !self.write_buf.queue.has_remaining()
+    }
+
+    /// The flat vector that encoded message heads are written into.
+    pub(crate) fn headers_buf(&mut self) -> &mut Vec<u8> {
+        let buf = self.write_buf.headers_mut();
+        &mut buf.bytes
+    }
+
+    pub(super) fn write_buf(&mut self) -> &mut WriteBuf<B> {
+        &mut self.write_buf
+    }
+
+    /// Stages `buf` for the next flush, per the current write strategy.
+    pub(crate) fn buffer<BB: Buf + Into<B>>(&mut self, buf: BB) {
+        self.write_buf.buffer(buf)
+    }
+
+    pub(crate) fn can_buffer(&self) -> bool {
+        self.flush_pipeline || self.write_buf.can_buffer()
+    }
+
+    /// Skips any leading `\r` / `\n` bytes sitting in the read buffer.
+    pub(crate) fn consume_leading_lines(&mut self) {
+        if !self.read_buf.is_empty() {
+            let mut i = 0;
+            while i < self.read_buf.len() {
+                match self.read_buf[i] {
+                    b'\r' | b'\n' => i += 1,
+                    _ => break,
+                }
+            }
+            self.read_buf.advance(i);
+        }
+    }
+
+    /// Repeatedly reads from IO and attempts to parse a complete message
+    /// head, returning it once one is available.
+    ///
+    /// Errors with `TooLarge` if the buffer reaches the configured max
+    /// without a complete head, with an incomplete-message error if the
+    /// transport hits EOF first, or with an IO error from the read itself.
+    pub(super) fn parse<S>(
+        &mut self,
+        cx: &mut Context<'_>,
+        parse_ctx: ParseContext<'_>,
+    ) -> Poll<crate::Result<ParsedMessage<S::Incoming>>>
+    where
+        S: Http1Transaction,
+    {
+        loop {
+            match super::role::parse_headers::<S>(
+                &mut self.read_buf,
+                self.partial_len,
+                ParseContext {
+                    cached_headers: parse_ctx.cached_headers,
+                    req_method: parse_ctx.req_method,
+                    h1_parser_config: parse_ctx.h1_parser_config.clone(),
+                    h1_max_headers: parse_ctx.h1_max_headers,
+                    preserve_header_case: parse_ctx.preserve_header_case,
+                    #[cfg(feature = "ffi")]
+                    preserve_header_order: parse_ctx.preserve_header_order,
+                    h09_responses: parse_ctx.h09_responses,
+                    #[cfg(feature = "client")]
+                    on_informational: parse_ctx.on_informational,
+                },
+            )? {
+                Some(msg) => {
+                    debug!("parsed {} headers", msg.head.headers.len());
+                    self.partial_len = None;
+                    return Poll::Ready(Ok(msg));
+                }
+                None => {
+                    let max = self.read_buf_strategy.max();
+                    let curr_len = self.read_buf.len();
+                    if curr_len >= max {
+                        debug!("max_buf_size ({}) reached, closing", max);
+                        return Poll::Ready(Err(crate::Error::new_too_large()));
+                    }
+                    if curr_len > 0 {
+                        trace!("partial headers; {} bytes so far", curr_len);
+                        self.partial_len = Some(curr_len);
+                    } else {
+                        // a 1xx response gobbled some bytes without
+                        // producing a message, so there is no partial head
+                        self.partial_len = None;
+                    }
+                }
+            }
+            // Not enough buffered yet: pull more bytes from the IO. A read
+            // of zero bytes means EOF before a complete head.
+            if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? == 0 {
+                trace!("parse eof");
+                return Poll::Ready(Err(crate::Error::new_incomplete()));
+            }
+        }
+    }
+
+    /// Polls one read from the transport into `read_buf`, reserving space
+    /// per the read strategy and recording the result to adapt future
+    /// reservations. Sets `read_blocked` when the transport is pending.
+    pub(crate) fn poll_read_from_io(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+        self.read_blocked = false;
+        let next = self.read_buf_strategy.next();
+        if self.read_buf_remaining_mut() < next {
+            self.read_buf.reserve(next);
+        }
+
+        // SAFETY: ReadBuf and poll_read promise not to set any uninitialized
+        // bytes onto `dst`.
+        let dst = unsafe { self.read_buf.chunk_mut().as_uninit_slice_mut() };
+        let mut buf = ReadBuf::uninit(dst);
+        match Pin::new(&mut self.io).poll_read(cx, buf.unfilled()) {
+            Poll::Ready(Ok(_)) => {
+                let n = buf.filled().len();
+                trace!("received {} bytes", n);
+                unsafe {
+                    // Safety: we just read that many bytes into the
+                    // uninitialized part of the buffer, so this is okay.
+                    // @tokio pls give me back `poll_read_buf` thanks
+                    self.read_buf.advance_mut(n);
+                }
+                self.read_buf_strategy.record(n);
+                Poll::Ready(Ok(n))
+            }
+            Poll::Pending => {
+                self.read_blocked = true;
+                Poll::Pending
+            }
+            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+        }
+    }
+
+    /// Tears down the wrapper, returning the IO and any unconsumed bytes.
+    pub(crate) fn into_inner(self) -> (T, Bytes) {
+        (self.io, self.read_buf.freeze())
+    }
+
+    pub(crate) fn io_mut(&mut self) -> &mut T {
+        &mut self.io
+    }
+
+    /// Whether the most recent read returned `Pending`.
+    pub(crate) fn is_read_blocked(&self) -> bool {
+        self.read_blocked
+    }
+
+    pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        if self.flush_pipeline && !self.read_buf.is_empty() {
+            // Pipelining: more input is already buffered, so report the
+            // flush as complete without touching the transport, letting
+            // output continue to accumulate.
+            Poll::Ready(Ok(()))
+        } else if self.write_buf.remaining() == 0 {
+            Pin::new(&mut self.io).poll_flush(cx)
+        } else {
+            if let WriteStrategy::Flatten = self.write_buf.strategy {
+                return self.poll_flush_flattened(cx);
+            }
+
+            const MAX_WRITEV_BUFS: usize = 64;
+            loop {
+                let n = {
+                    let mut iovs = [IoSlice::new(&[]); MAX_WRITEV_BUFS];
+                    let len = self.write_buf.chunks_vectored(&mut iovs);
+                    ready!(Pin::new(&mut self.io).poll_write_vectored(cx, &iovs[..len]))?
+                };
+                // TODO(eliza): we have to do this manually because
+                // `poll_write_buf` doesn't exist in Tokio 0.3 yet...when
+                // `poll_write_buf` comes back, the manual advance will need to leave!
+                self.write_buf.advance(n);
+                debug!("flushed {} bytes", n);
+                if self.write_buf.remaining() == 0 {
+                    break;
+                } else if n == 0 {
+                    trace!(
+                        "write returned zero, but {} bytes remaining",
+                        self.write_buf.remaining()
+                    );
+                    return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
+                }
+            }
+            Pin::new(&mut self.io).poll_flush(cx)
+        }
+    }
+
+    /// Specialized version of `flush` when strategy is Flatten.
+    ///
+    /// Since all buffered bytes are flattened into the single headers buffer,
+    /// that skips some bookkeeping around using multiple buffers.
+    fn poll_flush_flattened(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        loop {
+            let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?;
+            debug!("flushed {} bytes", n);
+            self.write_buf.headers.advance(n);
+            if self.write_buf.headers.remaining() == 0 {
+                self.write_buf.headers.reset();
+                break;
+            } else if n == 0 {
+                trace!(
+                    "write returned zero, but {} bytes remaining",
+                    self.write_buf.remaining()
+                );
+                return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
+            }
+        }
+        Pin::new(&mut self.io).poll_flush(cx)
+    }
+
+    // Test-only convenience: await a full flush.
+    #[cfg(test)]
+    fn flush(&mut self) -> impl std::future::Future<Output = io::Result<()>> + '_ {
+        futures_util::future::poll_fn(move |cx| self.poll_flush(cx))
+    }
+}
+
+// `Buffered` is `Unpin` whenever the IO type is: the `B` values are plain
+// `Buf`s that are never pinned or projected.
+impl<T: Unpin, B> Unpin for Buffered<T, B> {}
+
+// TODO: This trait is old... at least rename to PollBytes or something...
+/// A poll-based source that yields up to `len` owned bytes per call.
+pub(crate) trait MemRead {
+    fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>>;
+}
+
+impl<T, B> MemRead for Buffered<T, B>
+where
+    T: Read + Write + Unpin,
+    B: Buf,
+{
+    /// Hands out up to `len` bytes from the read buffer, polling the
+    /// underlying IO first only when the buffer is currently empty.
+    fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
+        let available = if self.read_buf.is_empty() {
+            // Nothing buffered: pull from IO (after this, the buffer holds
+            // exactly the bytes just read).
+            ready!(self.poll_read_from_io(cx))?
+        } else {
+            self.read_buf.len()
+        };
+        let take = cmp::min(len, available);
+        Poll::Ready(Ok(self.read_buf.split_to(take).freeze()))
+    }
+}
+
+/// Decides how many bytes to reserve before each read from IO.
+#[derive(Clone, Copy, Debug)]
+enum ReadStrategy {
+    Adaptive {
+        decrease_now: bool,
+        next: usize,
+        max: usize,
+    },
+    #[cfg(feature = "client")]
+    Exact(usize),
+}
+
+impl ReadStrategy {
+    /// An adaptive strategy starting at `INIT_BUFFER_SIZE`, capped at `max`.
+    fn with_max(max: usize) -> ReadStrategy {
+        ReadStrategy::Adaptive {
+            decrease_now: false,
+            next: INIT_BUFFER_SIZE,
+            max,
+        }
+    }
+
+    /// How many bytes the next read should reserve.
+    fn next(&self) -> usize {
+        match *self {
+            Self::Adaptive { next, .. } => next,
+            #[cfg(feature = "client")]
+            Self::Exact(exact) => exact,
+        }
+    }
+
+    /// The hard cap on buffered bytes.
+    fn max(&self) -> usize {
+        match *self {
+            Self::Adaptive { max, .. } => max,
+            #[cfg(feature = "client")]
+            Self::Exact(exact) => exact,
+        }
+    }
+
+    /// Feeds back how many bytes the last read produced, growing or
+    /// shrinking `next` accordingly (shrinking requires two consecutive
+    /// small reads).
+    fn record(&mut self, bytes_read: usize) {
+        match *self {
+            Self::Adaptive {
+                ref mut decrease_now,
+                ref mut next,
+                max,
+                ..
+            } => {
+                if bytes_read >= *next {
+                    // The buffer was saturated: double the reservation
+                    // (capped at max) and cancel any pending shrink.
+                    *next = cmp::min(incr_power_of_two(*next), max);
+                    *decrease_now = false;
+                    return;
+                }
+                let lower_band = prev_power_of_two(*next);
+                if bytes_read >= lower_band {
+                    // A read within the current range should cancel
+                    // a potential decrease, since we just saw proof
+                    // that we still need this size.
+                    *decrease_now = false;
+                } else if *decrease_now {
+                    // Second consecutive under-sized read: shrink for real,
+                    // but never below the initial size.
+                    *next = cmp::max(lower_band, INIT_BUFFER_SIZE);
+                    *decrease_now = false;
+                } else {
+                    // Decreasing is a two "record" process: arm the shrink
+                    // and wait for confirmation on the next read.
+                    *decrease_now = true;
+                }
+            }
+            #[cfg(feature = "client")]
+            Self::Exact(_) => (),
+        }
+    }
+}
+
+/// Double `n`, clamping at `usize::MAX` instead of overflowing.
+fn incr_power_of_two(n: usize) -> usize {
+    n.checked_mul(2).unwrap_or(usize::MAX)
+}
+
+/// Half of the highest set bit of `n`, i.e. the power of two one band
+/// below `n`. Callers guarantee `n >= 4` (`next` starts at 8192 and never
+/// shrinks below `INIT_BUFFER_SIZE`), so the shift cannot underflow.
+fn prev_power_of_two(n: usize) -> usize {
+    debug_assert!(n >= 4);
+    // (highest power of two <= n) / 2
+    1usize << (usize::BITS - n.leading_zeros() - 2)
+}
+
+impl Default for ReadStrategy {
+    /// Adaptive, capped at `DEFAULT_MAX_BUFFER_SIZE`.
+    fn default() -> ReadStrategy {
+        Self::with_max(DEFAULT_MAX_BUFFER_SIZE)
+    }
+}
+
+/// A `Buf` over any `AsRef<[u8]>`, tracking how far it has been consumed.
+#[derive(Clone)]
+pub(crate) struct Cursor<T> {
+    bytes: T,
+    pos: usize,
+}
+
+impl<T: AsRef<[u8]>> Cursor<T> {
+    /// Wraps `bytes` with the read position at the start.
+    #[inline]
+    pub(crate) fn new(bytes: T) -> Cursor<T> {
+        Cursor { bytes, pos: 0 }
+    }
+}
+
+impl Cursor<Vec<u8>> {
+    /// If we've advanced the position a bit in this cursor, and wish to
+    /// extend the underlying vector, we may wish to unshift the "read"
+    /// bytes off, and move everything else over.
+    fn maybe_unshift(&mut self, additional: usize) {
+        let spare = self.bytes.capacity() - self.bytes.len();
+        if self.pos > 0 && spare < additional {
+            // Drop the already-consumed prefix so the unread tail moves to
+            // the front, making room without growing the allocation.
+            self.bytes.drain(..self.pos);
+            self.pos = 0;
+        }
+    }
+
+    /// Empties the vector and rewinds the position, keeping the allocation.
+    fn reset(&mut self) {
+        self.bytes.clear();
+        self.pos = 0;
+    }
+}
+
+impl<T: AsRef<[u8]>> fmt::Debug for Cursor<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("Cursor")
+            .field("pos", &self.pos)
+            .field("len", &self.bytes.as_ref().len())
+            .finish()
+    }
+}
+
+impl<T: AsRef<[u8]>> Buf for Cursor<T> {
+    #[inline]
+    fn remaining(&self) -> usize {
+        self.chunk().len()
+    }
+
+    #[inline]
+    fn chunk(&self) -> &[u8] {
+        let all = self.bytes.as_ref();
+        &all[self.pos..]
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        debug_assert!(self.pos + cnt <= self.bytes.as_ref().len());
+        self.pos += cnt;
+    }
+}
+
+// an internal buffer to collect writes before flushes
+pub(super) struct WriteBuf<B> {
+    /// Re-usable buffer that holds message headers
+    headers: Cursor<Vec<u8>>,
+    /// Byte limit used by `can_buffer` to apply backpressure.
+    max_buf_size: usize,
+    /// Deque of user buffers if strategy is Queue
+    queue: BufList<B>,
+    /// Whether body buffers are flattened into `headers` or queued for
+    /// vectored writes.
+    strategy: WriteStrategy,
+}
+
+impl<B: Buf> WriteBuf<B> {
+    /// An empty write buffer using `strategy`, with the default max size
+    /// and a pre-allocated headers vector.
+    fn new(strategy: WriteStrategy) -> WriteBuf<B> {
+        WriteBuf {
+            strategy,
+            queue: BufList::new(),
+            max_buf_size: DEFAULT_MAX_BUFFER_SIZE,
+            headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)),
+        }
+    }
+}
+
+impl<B> WriteBuf<B>
+where
+    B: Buf,
+{
+    fn set_strategy(&mut self, strategy: WriteStrategy) {
+        self.strategy = strategy;
+    }
+
+    /// Stages `buf`: copied into the flat headers vector under `Flatten`,
+    /// or pushed as-is onto the queue under `Queue`.
+    pub(super) fn buffer<BB: Buf + Into<B>>(&mut self, mut buf: BB) {
+        debug_assert!(buf.has_remaining());
+        match self.strategy {
+            WriteStrategy::Flatten => {
+                let head = self.headers_mut();
+
+                // Reclaim consumed front space if the copy wouldn't fit.
+                head.maybe_unshift(buf.remaining());
+                trace!(
+                    self.len = head.remaining(),
+                    buf.len = buf.remaining(),
+                    "buffer.flatten"
+                );
+                //perf: This is a little faster than <Vec as BufMut>>::put,
+                //but accomplishes the same result.
+                // Copy chunk-by-chunk; returns once the source is drained.
+                loop {
+                    let adv = {
+                        let slice = buf.chunk();
+                        if slice.is_empty() {
+                            return;
+                        }
+                        head.bytes.extend_from_slice(slice);
+                        slice.len()
+                    };
+                    buf.advance(adv);
+                }
+            }
+            WriteStrategy::Queue => {
+                trace!(
+                    self.len = self.remaining(),
+                    buf.len = buf.remaining(),
+                    "buffer.queue"
+                );
+                self.queue.push(buf.into());
+            }
+        }
+    }
+
+    /// Whether more data may be staged without exceeding the size limit
+    /// (and, in `Queue` mode, the buffer-count limit).
+    fn can_buffer(&self) -> bool {
+        match self.strategy {
+            WriteStrategy::Flatten => self.remaining() < self.max_buf_size,
+            WriteStrategy::Queue => {
+                self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size
+            }
+        }
+    }
+
+    /// Access the headers cursor; callers must ensure the queue is already
+    /// drained (enforced by the debug_assert).
+    fn headers_mut(&mut self) -> &mut Cursor<Vec<u8>> {
+        debug_assert!(!self.queue.has_remaining());
+        &mut self.headers
+    }
+}
+
+impl<B: Buf> fmt::Debug for WriteBuf<B> {
+    /// Shows the total remaining byte count and the active strategy.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut out = fmt.debug_struct("WriteBuf");
+        out.field("remaining", &self.remaining());
+        out.field("strategy", &self.strategy);
+        out.finish()
+    }
+}
+
+impl<B: Buf> Buf for WriteBuf<B> {
+    #[inline]
+    fn remaining(&self) -> usize {
+        self.headers.remaining() + self.queue.remaining()
+    }
+
+    /// Headers are always yielded before any queued body buffers.
+    #[inline]
+    fn chunk(&self) -> &[u8] {
+        if self.headers.has_remaining() {
+            self.headers.chunk()
+        } else {
+            self.queue.chunk()
+        }
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        let in_headers = self.headers.remaining();
+
+        if cnt < in_headers {
+            self.headers.advance(cnt);
+        } else if cnt == in_headers {
+            // Headers exactly consumed: reset so the allocation is reused.
+            self.headers.reset();
+        } else {
+            // Consume all headers, then whatever is left from the queue.
+            self.headers.reset();
+            self.queue.advance(cnt - in_headers);
+        }
+    }
+
+    #[inline]
+    fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
+        let filled = self.headers.chunks_vectored(dst);
+        filled + self.queue.chunks_vectored(&mut dst[filled..])
+    }
+}
+
+/// How staged writes are laid out before reaching the transport.
+#[derive(Debug)]
+enum WriteStrategy {
+    /// Copy every buffered body into the single flat headers vector.
+    Flatten,
+    /// Keep user buffers separate and flush them with vectored writes.
+    Queue,
+}
+
+// Unit tests for the buffered IO layer: adaptive read-strategy growth and
+// shrink behavior, write-buffer strategies, and parse-until-blocked.
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::common::io::Compat;
+    use std::time::Duration;
+
+    use tokio_test::io::Builder as Mock;
+
+    // #[cfg(feature = "nightly")]
+    // use test::Bencher;
+
+    /*
+    impl<T: Read> MemRead for AsyncIo<T> {
+        fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
+            let mut v = vec![0; len];
+            let n = try_nb!(self.read(v.as_mut_slice()));
+            Ok(Async::Ready(BytesMut::from(&v[..n]).freeze()))
+        }
+    }
+    */
+
+    // Kept (ignored) until vectored write support returns to the mock IO.
+    #[tokio::test]
+    #[ignore]
+    async fn iobuf_write_empty_slice() {
+        // TODO(eliza): can i have writev back pls T_T
+        // // First, let's just check that the Mock would normally return an
+        // // error on an unexpected write, even if the buffer is empty...
+        // let mut mock = Mock::new().build();
+        // futures_util::future::poll_fn(|cx| {
+        //     Pin::new(&mut mock).poll_write_buf(cx, &mut Cursor::new(&[]))
+        // })
+        // .await
+        // .expect_err("should be a broken pipe");
+
+        // // underlying io will return the logic error upon write,
+        // // so we are testing that the io_buf does not trigger a write
+        // // when there is nothing to flush
+        // let mock = Mock::new().build();
+        // let mut io_buf = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
+        // io_buf.flush().await.expect("should short-circuit flush");
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn parse_reads_until_blocked() {
+        use crate::proto::h1::ClientTransaction;
+
+        let _ = pretty_env_logger::try_init();
+        let mock = Mock::new()
+            // Split over multiple reads will read all of it
+            .read(b"HTTP/1.1 200 OK\r\n")
+            .read(b"Server: hyper\r\n")
+            // missing last line ending
+            .wait(Duration::from_secs(1))
+            .build();
+
+        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(Compat::new(mock));
+
+        // We expect a `parse` to be not ready, and so can't await it directly.
+        // Rather, this `poll_fn` will wrap the `Poll` result.
+        futures_util::future::poll_fn(|cx| {
+            let parse_ctx = ParseContext {
+                cached_headers: &mut None,
+                req_method: &mut None,
+                h1_parser_config: Default::default(),
+                h1_max_headers: None,
+                preserve_header_case: false,
+                #[cfg(feature = "ffi")]
+                preserve_header_order: false,
+                h09_responses: false,
+                #[cfg(feature = "client")]
+                on_informational: &mut None,
+            };
+            assert!(buffered
+                .parse::<ClientTransaction>(cx, parse_ctx)
+                .is_pending());
+            Poll::Ready(())
+        })
+        .await;
+
+        // Everything the mock produced is buffered, awaiting more input.
+        assert_eq!(
+            buffered.read_buf,
+            b"HTTP/1.1 200 OK\r\nServer: hyper\r\n"[..]
+        );
+    }
+
+    #[test]
+    fn read_strategy_adaptive_increments() {
+        let mut strategy = ReadStrategy::default();
+        assert_eq!(strategy.next(), 8192);
+
+        // Grows if record == next
+        strategy.record(8192);
+        assert_eq!(strategy.next(), 16384);
+
+        strategy.record(16384);
+        assert_eq!(strategy.next(), 32768);
+
+        // Enormous records still increment at same rate
+        strategy.record(usize::MAX);
+        assert_eq!(strategy.next(), 65536);
+
+        let max = strategy.max();
+        while strategy.next() < max {
+            strategy.record(max);
+        }
+
+        assert_eq!(strategy.next(), max, "never goes over max");
+        strategy.record(max + 1);
+        assert_eq!(strategy.next(), max, "never goes over max");
+    }
+
+    // Shrinking requires two consecutive under-sized reads; any in-range
+    // read in between cancels the pending shrink.
+    #[test]
+    fn read_strategy_adaptive_decrements() {
+        let mut strategy = ReadStrategy::default();
+        strategy.record(8192);
+        assert_eq!(strategy.next(), 16384);
+
+        strategy.record(1);
+        assert_eq!(
+            strategy.next(),
+            16384,
+            "first smaller record doesn't decrement yet"
+        );
+        strategy.record(8192);
+        assert_eq!(strategy.next(), 16384, "record was with range");
+
+        strategy.record(1);
+        assert_eq!(
+            strategy.next(),
+            16384,
+            "in-range record should make this the 'first' again"
+        );
+
+        strategy.record(1);
+        assert_eq!(strategy.next(), 8192, "second smaller record decrements");
+
+        strategy.record(1);
+        assert_eq!(strategy.next(), 8192, "first doesn't decrement");
+        strategy.record(1);
+        assert_eq!(strategy.next(), 8192, "doesn't decrement under minimum");
+    }
+
+    #[test]
+    fn read_strategy_adaptive_stays_the_same() {
+        let mut strategy = ReadStrategy::default();
+        strategy.record(8192);
+        assert_eq!(strategy.next(), 16384);
+
+        strategy.record(8193);
+        assert_eq!(
+            strategy.next(),
+            16384,
+            "first smaller record doesn't decrement yet"
+        );
+
+        strategy.record(8193);
+        assert_eq!(
+            strategy.next(),
+            16384,
+            "with current step does not decrement"
+        );
+    }
+
+    #[test]
+    fn read_strategy_adaptive_max_fuzz() {
+        fn fuzz(max: usize) {
+            let mut strategy = ReadStrategy::with_max(max);
+            while strategy.next() < max {
+                strategy.record(usize::MAX);
+            }
+            let mut next = strategy.next();
+            while next > 8192 {
+                strategy.record(1);
+                strategy.record(1);
+                next = strategy.next();
+                assert!(
+                    next.is_power_of_two(),
+                    "decrement should be powers of two: {} (max = {})",
+                    next,
+                    max,
+                );
+            }
+        }
+
+        let mut max = 8192;
+        while max < std::usize::MAX {
+            fuzz(max);
+            max = (max / 2).saturating_mul(3);
+        }
+        fuzz(usize::MAX);
+    }
+
+    #[test]
+    #[should_panic]
+    #[cfg(debug_assertions)] // needs to trigger a debug_assert
+    fn write_buf_requires_non_empty_bufs() {
+        let mock = Mock::new().build();
+        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(Compat::new(mock));
+
+        buffered.buffer(Cursor::new(Vec::new()));
+    }
+
+    /*
+    TODO: needs tokio_test::io to allow configure write_buf calls
+    #[test]
+    fn write_buf_queue() {
+        let _ = pretty_env_logger::try_init();
+
+        let mock = AsyncIo::new_buf(vec![], 1024);
+        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
+
+
+        buffered.headers_buf().extend(b"hello ");
+        buffered.buffer(Cursor::new(b"world, ".to_vec()));
+        buffered.buffer(Cursor::new(b"it's ".to_vec()));
+        buffered.buffer(Cursor::new(b"hyper!".to_vec()));
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);
+        buffered.flush().unwrap();
+
+        assert_eq!(buffered.io, b"hello world, it's hyper!");
+        assert_eq!(buffered.io.num_writes(), 1);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
+    }
+    */
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn write_buf_flatten() {
+        let _ = pretty_env_logger::try_init();
+
+        let mock = Mock::new().write(b"hello world, it's hyper!").build();
+
+        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(Compat::new(mock));
+        buffered.write_buf.set_strategy(WriteStrategy::Flatten);
+
+        buffered.headers_buf().extend(b"hello ");
+        buffered.buffer(Cursor::new(b"world, ".to_vec()));
+        buffered.buffer(Cursor::new(b"it's ".to_vec()));
+        buffered.buffer(Cursor::new(b"hyper!".to_vec()));
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
+
+        buffered.flush().await.expect("flush");
+    }
+
+    #[test]
+    fn write_buf_flatten_partially_flushed() {
+        let _ = pretty_env_logger::try_init();
+
+        let b = |s: &str| Cursor::new(s.as_bytes().to_vec());
+
+        let mut write_buf = WriteBuf::<Cursor<Vec<u8>>>::new(WriteStrategy::Flatten);
+
+        write_buf.buffer(b("hello "));
+        write_buf.buffer(b("world, "));
+
+        assert_eq!(write_buf.chunk(), b"hello world, ");
+
+        // advance most of the way, but not all
+        write_buf.advance(11);
+
+        assert_eq!(write_buf.chunk(), b", ");
+        assert_eq!(write_buf.headers.pos, 11);
+        assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE);
+
+        // there's still room in the headers buffer, so just push on the end
+        write_buf.buffer(b("it's hyper!"));
+
+        assert_eq!(write_buf.chunk(), b", it's hyper!");
+        assert_eq!(write_buf.headers.pos, 11);
+
+        let rem1 = write_buf.remaining();
+        let cap = write_buf.headers.bytes.capacity();
+
+        // but when this would go over capacity, don't copy the old bytes
+        write_buf.buffer(Cursor::new(vec![b'X'; cap]));
+        assert_eq!(write_buf.remaining(), cap + rem1);
+        assert_eq!(write_buf.headers.pos, 0);
+    }
+
+    #[cfg(not(miri))]
+    #[tokio::test]
+    async fn write_buf_queue_disable_auto() {
+        let _ = pretty_env_logger::try_init();
+
+        let mock = Mock::new()
+            .write(b"hello ")
+            .write(b"world, ")
+            .write(b"it's ")
+            .write(b"hyper!")
+            .build();
+
+        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(Compat::new(mock));
+        buffered.write_buf.set_strategy(WriteStrategy::Queue);
+
+        // we have 4 buffers, and vec IO disabled, but explicitly said
+        // don't try to auto detect (via setting strategy above)
+
+        buffered.headers_buf().extend(b"hello ");
+        buffered.buffer(Cursor::new(b"world, ".to_vec()));
+        buffered.buffer(Cursor::new(b"it's ".to_vec()));
+        buffered.buffer(Cursor::new(b"hyper!".to_vec()));
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);
+
+        buffered.flush().await.expect("flush");
+
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
+    }
+
+    // #[cfg(feature = "nightly")]
+    // #[bench]
+    // fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) {
+    //     let s = "Hello, World!";
+    //     b.bytes = s.len() as u64;
+
+    //     let mut write_buf = WriteBuf::<bytes::Bytes>::new();
+    //     write_buf.set_strategy(WriteStrategy::Flatten);
+    //     b.iter(|| {
+    //         let chunk = bytes::Bytes::from(s);
+    //         write_buf.buffer(chunk);
+    //         ::test::black_box(&write_buf);
+    //         write_buf.headers.bytes.clear();
+    //     })
+    // }
+}
diff --git a/vendor/hyper/src/proto/h1/mod.rs b/vendor/hyper/src/proto/h1/mod.rs
new file mode 100644
index 00000000..a8f36f5f
--- /dev/null
+++ b/vendor/hyper/src/proto/h1/mod.rs
@@ -0,0 +1,113 @@
+use bytes::BytesMut;
+use http::{HeaderMap, Method};
+use httparse::ParserConfig;
+
+use crate::body::DecodedLength;
+use crate::proto::{BodyLength, MessageHead};
+
+pub(crate) use self::conn::Conn;
+pub(crate) use self::decode::Decoder;
+pub(crate) use self::dispatch::Dispatcher;
+pub(crate) use self::encode::{EncodedBuf, Encoder};
+//TODO: move out of h1::io
+pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE;
+
+mod conn;
+mod decode;
+pub(crate) mod dispatch;
+mod encode;
+mod io;
+mod role;
+
+// Feature-gated aliases naming the `role` implementation used for each
+// side of a connection; each alias exists only when its feature is built.
+cfg_client! {
+    pub(crate) type ClientTransaction = role::Client;
+}
+
+cfg_server! {
+    pub(crate) type ServerTransaction = role::Server;
+}
+
+/// Role-specific (client vs. server) HTTP/1 parse and encode logic.
+pub(crate) trait Http1Transaction {
+    /// The message head type produced by `parse`.
+    type Incoming;
+    /// The message head type consumed by `encode`.
+    type Outgoing: Default;
+    /// Label used in tracing output.
+    #[cfg(feature = "tracing")]
+    const LOG: &'static str;
+    /// Attempts to parse a complete message head out of `bytes`.
+    fn parse(bytes: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult<Self::Incoming>;
+    /// Serializes a message head into `dst`, returning the body `Encoder`.
+    fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder>;
+
+    /// Produces an outgoing head to send in reaction to `err`, if this
+    /// role has one; `None` means nothing can be sent.
+    fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>>;
+
+    fn is_client() -> bool {
+        !Self::is_server()
+    }
+
+    // Note: `is_client` and `is_server` default to each other's negation,
+    // so every implementor must override at least one of them.
+    fn is_server() -> bool {
+        !Self::is_client()
+    }
+
+    /// Whether EOF mid-parse is an error; defaults to true for clients.
+    fn should_error_on_parse_eof() -> bool {
+        Self::is_client()
+    }
+
+    /// Whether this role reads before writing; defaults to true for servers.
+    fn should_read_first() -> bool {
+        Self::is_server()
+    }
+
+    /// Hook to refresh role-specific cached state (e.g. a cached date
+    /// header value); no-op by default.
+    fn update_date() {}
+}
+
+/// Result newtype for Http1Transaction::parse.
+/// `Ok(None)` means "need more bytes"; `Ok(Some(..))` is a complete head.
+pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, crate::error::Parse>;
+
+/// A fully parsed message head plus connection-level metadata.
+#[derive(Debug)]
+pub(crate) struct ParsedMessage<T> {
+    // The parsed request/response head.
+    head: MessageHead<T>,
+    // How the body's length will be determined.
+    decode: DecodedLength,
+    // Whether the message asked to continue before sending a body —
+    // NOTE(review): presumably `Expect: 100-continue`; confirm in role.rs.
+    expect_continue: bool,
+    // Whether the connection may be reused after this message.
+    keep_alive: bool,
+    // Whether the message is attempting a protocol upgrade.
+    wants_upgrade: bool,
+}
+
+/// Borrowed parser state threaded into `Http1Transaction::parse`.
+pub(crate) struct ParseContext<'a> {
+    // Cached `HeaderMap` allocation, reusable across messages.
+    cached_headers: &'a mut Option<HeaderMap>,
+    // Method of the most recent request — NOTE(review): presumably needed
+    // to interpret responses (e.g. HEAD); confirm in role.rs.
+    req_method: &'a mut Option<Method>,
+    // httparse configuration for this connection.
+    h1_parser_config: ParserConfig,
+    // Optional cap on the number of headers to parse.
+    h1_max_headers: Option<usize>,
+    // When true, record original header casing alongside parsed headers.
+    preserve_header_case: bool,
+    // When true (ffi builds), record original header ordering.
+    #[cfg(feature = "ffi")]
+    preserve_header_order: bool,
+    // When true, tolerate HTTP/0.9-style responses.
+    h09_responses: bool,
+    // Optional user callback invoked for informational (1xx) responses.
+    #[cfg(feature = "client")]
+    on_informational: &'a mut Option<crate::ext::OnInformational>,
+}
+
+/// Passed to Http1Transaction::encode
+pub(crate) struct Encode<'a, T> {
+    // The message head to serialize.
+    head: &'a mut MessageHead<T>,
+    // Declared body length, if any body will follow the head.
+    body: Option<BodyLength>,
+    // Whether the connection should advertise/allow keep-alive.
+    #[cfg(feature = "server")]
+    keep_alive: bool,
+    // Method of the request this message belongs to — NOTE(review):
+    // presumably used to pick encoding rules (e.g. HEAD); confirm in role.rs.
+    req_method: &'a mut Option<Method>,
+    // When true, write header names in Title-Case.
+    title_case_headers: bool,
+    // When true (server), emit a Date header.
+    #[cfg(feature = "server")]
+    date_header: bool,
+}
+
+/// Extra flags that a request "wants", like expect-continue or upgrades.
+#[derive(Clone, Copy, Debug)]
+struct Wants(u8);
+
+impl Wants {
+    /// No flags set.
+    const EMPTY: Wants = Wants(0b00);
+    /// The expect-continue flag.
+    const EXPECT: Wants = Wants(0b01);
+    /// The upgrade flag.
+    const UPGRADE: Wants = Wants(0b10);
+
+    /// Returns a copy of `self` with the flags of `other` merged in.
+    #[must_use]
+    fn add(self, other: Wants) -> Wants {
+        let merged = self.0 | other.0;
+        Wants(merged)
+    }
+
+    /// True when every flag set in `other` is also set in `self`.
+    fn contains(&self, other: Wants) -> bool {
+        self.0 & other.0 == other.0
+    }
+}
diff --git a/vendor/hyper/src/proto/h1/role.rs b/vendor/hyper/src/proto/h1/role.rs
new file mode 100644
index 00000000..1674e26b
--- /dev/null
+++ b/vendor/hyper/src/proto/h1/role.rs
@@ -0,0 +1,3098 @@
+use std::mem::MaybeUninit;
+
+#[cfg(feature = "client")]
+use std::fmt::{self, Write as _};
+
+use bytes::Bytes;
+use bytes::BytesMut;
+#[cfg(feature = "client")]
+use http::header::Entry;
+#[cfg(feature = "server")]
+use http::header::ValueIter;
+use http::header::{self, HeaderMap, HeaderName, HeaderValue};
+use http::{Method, StatusCode, Version};
+use smallvec::{smallvec, smallvec_inline, SmallVec};
+
+use crate::body::DecodedLength;
+#[cfg(feature = "server")]
+use crate::common::date;
+use crate::error::Parse;
+use crate::ext::HeaderCaseMap;
+#[cfg(feature = "ffi")]
+use crate::ext::OriginalHeaderOrder;
+use crate::headers;
+use crate::proto::h1::{
+ Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage,
+};
+#[cfg(feature = "client")]
+use crate::proto::RequestHead;
+use crate::proto::{BodyLength, MessageHead, RequestLine};
+
+pub(crate) const DEFAULT_MAX_HEADERS: usize = 100;
+const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
+#[cfg(feature = "server")]
+const MAX_URI_LEN: usize = (u16::MAX - 1) as usize;
+
+// Converts raw bytes into a `HeaderName`, panicking in debug builds (via
+// `maybe_panic!`) if httparse handed us bytes `http` rejects; in release
+// builds the enclosing function returns `Parse::Internal` instead.
+macro_rules! header_name {
+ ($bytes:expr) => {{
+ {
+ match HeaderName::from_bytes($bytes) {
+ Ok(name) => name,
+ Err(e) => maybe_panic!(e),
+ }
+ }
+ }};
+}
+
+// Wraps already-validated bytes as a `HeaderValue` without re-checking.
+// SAFETY relies on the caller: the bytes must come straight from a
+// successful httparse parse, which only accepts valid header value bytes.
+macro_rules! header_value {
+ ($bytes:expr) => {{
+ {
+ unsafe { HeaderValue::from_maybe_shared_unchecked($bytes) }
+ }
+ }};
+}
+
+// Internal-invariant failure: loud panic in debug builds so bugs are
+// caught during development, but in release builds log the error and
+// make the enclosing function return `Parse::Internal` instead.
+macro_rules! maybe_panic {
+ ($($arg:tt)*) => ({
+ let _err = ($($arg)*);
+ if cfg!(debug_assertions) {
+ panic!("{:?}", _err);
+ } else {
+ error!("Internal Hyper error, please report {:?}", _err);
+ return Err(Parse::Internal)
+ }
+ })
+}
+
+/// Entry point for parsing a message head out of `bytes`.
+///
+/// Returns `Ok(None)` when the buffer is empty or (when `prev_len` shows a
+/// prior partial read) when a quick scan says the head terminator has not
+/// arrived yet — avoiding a full re-parse on slow connections.
+pub(super) fn parse_headers<T>(
+ bytes: &mut BytesMut,
+ prev_len: Option<usize>,
+ ctx: ParseContext<'_>,
+) -> ParseResult<T::Incoming>
+where
+ T: Http1Transaction,
+{
+ // If the buffer is empty, don't bother entering the span, it's just noise.
+ if bytes.is_empty() {
+ return Ok(None);
+ }
+
+ let _entered = trace_span!("parse_headers");
+
+ if let Some(prev_len) = prev_len {
+ if !is_complete_fast(bytes, prev_len) {
+ return Ok(None);
+ }
+ }
+
+ T::parse(bytes, ctx)
+}
+
+/// Quickly scans for the blank line that terminates a message head
+/// (`\r\n\r\n`, or a bare `\n\n`) without running the full parser.
+///
+/// `prev_len` is how many bytes had already been scanned on a previous
+/// partial read; scanning restarts 3 bytes before that point so a
+/// terminator split across two reads is still detected.
+fn is_complete_fast(bytes: &[u8], prev_len: usize) -> bool {
+ // Back up just far enough to catch a terminator straddling the
+ // previously-scanned end.
+ let start = prev_len.saturating_sub(3);
+ let tail = &bytes[start..];
+
+ // The head ends at either a bare LF pair or a full CRLF pair.
+ tail.windows(2).any(|w| w == b"\n\n") || tail.windows(4).any(|w| w == b"\r\n\r\n")
+}
+
+/// Entry point for serializing a message head into `dst`, delegating to
+/// the role's `Http1Transaction::encode`. Returns the body `Encoder`
+/// that matches the length framing written into the headers.
+pub(super) fn encode_headers<T>(
+ enc: Encode<'_, T::Outgoing>,
+ dst: &mut Vec<u8>,
+) -> crate::Result<Encoder>
+where
+ T: Http1Transaction,
+{
+ let _entered = trace_span!("encode_headers");
+ T::encode(enc, dst)
+}
+
+// There are 2 main roles, Client and Server.
+// Both are uninhabited marker enums, used only for their
+// `Http1Transaction` impls (parse incoming / encode outgoing).
+
+#[cfg(feature = "client")]
+pub(crate) enum Client {}
+
+#[cfg(feature = "server")]
+pub(crate) enum Server {}
+
+#[cfg(feature = "server")]
+impl Http1Transaction for Server {
+ type Incoming = RequestLine;
+ type Outgoing = StatusCode;
+ #[cfg(feature = "tracing")]
+ const LOG: &'static str = "{role=server}";
+
+ /// Parses a request head out of `buf`, returning `Ok(None)` when more
+ /// bytes are needed. On success the parsed bytes are split off `buf`
+ /// and the URI/header values are zero-copy views into that split.
+ fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult<RequestLine> {
+ debug_assert!(!buf.is_empty(), "parse called with empty buf");
+
+ let mut keep_alive;
+ let is_http_11;
+ let subject;
+ let version;
+ let len;
+ let headers_len;
+ let method;
+ let path_range;
+
+ // Both headers_indices and headers are using uninitialized memory,
+ // but we *never* read any of it until after httparse has assigned
+ // values into it. By not zeroing out the stack memory, this saves
+ // a good ~5% on pipeline benchmarks.
+ let mut headers_indices: SmallVec<[MaybeUninit<HeaderIndices>; DEFAULT_MAX_HEADERS]> =
+ match ctx.h1_max_headers {
+ Some(cap) => smallvec![MaybeUninit::uninit(); cap],
+ None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS],
+ };
+ {
+ let mut headers: SmallVec<[MaybeUninit<httparse::Header<'_>>; DEFAULT_MAX_HEADERS]> =
+ match ctx.h1_max_headers {
+ Some(cap) => smallvec![MaybeUninit::uninit(); cap],
+ None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS],
+ };
+ trace!(bytes = buf.len(), "Request.parse");
+ let mut req = httparse::Request::new(&mut []);
+ let bytes = buf.as_ref();
+ match req.parse_with_uninit_headers(bytes, &mut headers) {
+ Ok(httparse::Status::Complete(parsed_len)) => {
+ trace!("Request.parse Complete({})", parsed_len);
+ len = parsed_len;
+ let uri = req.path.unwrap();
+ if uri.len() > MAX_URI_LEN {
+ return Err(Parse::UriTooLong);
+ }
+ method = Method::from_bytes(req.method.unwrap().as_bytes())?;
+ path_range = Server::record_path_range(bytes, uri);
+ version = if req.version.unwrap() == 1 {
+ keep_alive = true;
+ is_http_11 = true;
+ Version::HTTP_11
+ } else {
+ keep_alive = false;
+ is_http_11 = false;
+ Version::HTTP_10
+ };
+
+ record_header_indices(bytes, req.headers, &mut headers_indices)?;
+ headers_len = req.headers.len();
+ }
+ Ok(httparse::Status::Partial) => return Ok(None),
+ Err(err) => {
+ return Err(match err {
+ // if invalid Token, try to determine if for method or path
+ httparse::Error::Token => {
+ if req.method.is_none() {
+ Parse::Method
+ } else {
+ debug_assert!(req.path.is_none());
+ Parse::Uri
+ }
+ }
+ other => other.into(),
+ });
+ }
+ }
+ };
+
+ // Split the parsed head off the buffer; `slice` keeps those bytes
+ // alive for the zero-copy URI and header views below.
+ let slice = buf.split_to(len).freeze();
+ let uri = {
+ let uri_bytes = slice.slice_ref(&slice[path_range]);
+ // TODO(lucab): switch to `Uri::from_shared()` once public.
+ http::Uri::from_maybe_shared(uri_bytes)?
+ };
+ subject = RequestLine(method, uri);
+
+ // According to https://tools.ietf.org/html/rfc7230#section-3.3.3
+ // 1. (irrelevant to Request)
+ // 2. (irrelevant to Request)
+ // 3. Transfer-Encoding: chunked has a chunked body.
+ // 4. If multiple differing Content-Length headers or invalid, close connection.
+ // 5. Content-Length header has a sized body.
+ // 6. Length 0.
+ // 7. (irrelevant to Request)
+
+ let mut decoder = DecodedLength::ZERO;
+ let mut expect_continue = false;
+ let mut con_len = None;
+ let mut is_te = false;
+ let mut is_te_chunked = false;
+ let mut wants_upgrade = subject.0 == Method::CONNECT;
+
+ let mut header_case_map = if ctx.preserve_header_case {
+ Some(HeaderCaseMap::default())
+ } else {
+ None
+ };
+
+ #[cfg(feature = "ffi")]
+ let mut header_order = if ctx.preserve_header_order {
+ Some(OriginalHeaderOrder::default())
+ } else {
+ None
+ };
+
+ let mut headers = ctx.cached_headers.take().unwrap_or_default();
+
+ headers.reserve(headers_len);
+
+ for header in &headers_indices[..headers_len] {
+ // SAFETY: array is valid up to `headers_len`
+ let header = unsafe { header.assume_init_ref() };
+ let name = header_name!(&slice[header.name.0..header.name.1]);
+ let value = header_value!(slice.slice(header.value.0..header.value.1));
+
+ match name {
+ header::TRANSFER_ENCODING => {
+ // https://tools.ietf.org/html/rfc7230#section-3.3.3
+ // If Transfer-Encoding header is present, and 'chunked' is
+ // not the final encoding, and this is a Request, then it is
+ // malformed. A server should respond with 400 Bad Request.
+ if !is_http_11 {
+ debug!("HTTP/1.0 cannot have Transfer-Encoding header");
+ return Err(Parse::transfer_encoding_unexpected());
+ }
+ is_te = true;
+ if headers::is_chunked_(&value) {
+ is_te_chunked = true;
+ decoder = DecodedLength::CHUNKED;
+ } else {
+ is_te_chunked = false;
+ }
+ }
+ header::CONTENT_LENGTH => {
+ // Transfer-Encoding takes precedence over Content-Length.
+ if is_te {
+ continue;
+ }
+ let len = headers::content_length_parse(&value)
+ .ok_or_else(Parse::content_length_invalid)?;
+ if let Some(prev) = con_len {
+ if prev != len {
+ debug!(
+ "multiple Content-Length headers with different values: [{}, {}]",
+ prev, len,
+ );
+ return Err(Parse::content_length_invalid());
+ }
+ // we don't need to append this secondary length
+ continue;
+ }
+ decoder = DecodedLength::checked_new(len)?;
+ con_len = Some(len);
+ }
+ header::CONNECTION => {
+ // keep_alive was previously set to default for Version
+ if keep_alive {
+ // HTTP/1.1
+ keep_alive = !headers::connection_close(&value);
+ } else {
+ // HTTP/1.0
+ keep_alive = headers::connection_keep_alive(&value);
+ }
+ }
+ header::EXPECT => {
+ // According to https://datatracker.ietf.org/doc/html/rfc2616#section-14.20
+ // Comparison of expectation values is case-insensitive for unquoted tokens
+ // (including the 100-continue token)
+ expect_continue = value.as_bytes().eq_ignore_ascii_case(b"100-continue");
+ }
+ header::UPGRADE => {
+ // Upgrades are only allowed with HTTP/1.1
+ wants_upgrade = is_http_11;
+ }
+
+ _ => (),
+ }
+
+ if let Some(ref mut header_case_map) = header_case_map {
+ header_case_map.append(&name, slice.slice(header.name.0..header.name.1));
+ }
+
+ #[cfg(feature = "ffi")]
+ if let Some(ref mut header_order) = header_order {
+ header_order.append(&name);
+ }
+
+ headers.append(name, value);
+ }
+
+ if is_te && !is_te_chunked {
+ debug!("request with transfer-encoding header, but not chunked, bad request");
+ return Err(Parse::transfer_encoding_invalid());
+ }
+
+ let mut extensions = http::Extensions::default();
+
+ if let Some(header_case_map) = header_case_map {
+ extensions.insert(header_case_map);
+ }
+
+ #[cfg(feature = "ffi")]
+ if let Some(header_order) = header_order {
+ extensions.insert(header_order);
+ }
+
+ *ctx.req_method = Some(subject.0.clone());
+
+ Ok(Some(ParsedMessage {
+ head: MessageHead {
+ version,
+ subject,
+ headers,
+ extensions,
+ },
+ decode: decoder,
+ expect_continue,
+ keep_alive,
+ wants_upgrade,
+ }))
+ }
+
+ /// Encodes a response head into `dst` and returns the `Encoder` that
+ /// matches the framing (length/chunked/close-delimited) written into
+ /// the headers. On invalid user headers, `dst` is rewound so no
+ /// half-written response escapes.
+ fn encode(mut msg: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder> {
+ trace!(
+ "Server::encode status={:?}, body={:?}, req_method={:?}",
+ msg.head.subject,
+ msg.body,
+ msg.req_method
+ );
+
+ let mut wrote_len = false;
+
+ // hyper currently doesn't support returning 1xx status codes as a Response
+ // This is because Service only allows returning a single Response, and
+ // so if you try to reply with a e.g. 100 Continue, you have no way of
+ // replying with the latter status code response.
+ let (ret, is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS {
+ (Ok(()), true)
+ } else if msg.req_method == &Some(Method::CONNECT) && msg.head.subject.is_success() {
+ // Sending content-length or transfer-encoding header on 2xx response
+ // to CONNECT is forbidden in RFC 7231.
+ wrote_len = true;
+ (Ok(()), true)
+ } else if msg.head.subject.is_informational() {
+ warn!("response with 1xx status code not supported");
+ *msg.head = MessageHead::default();
+ msg.head.subject = StatusCode::INTERNAL_SERVER_ERROR;
+ msg.body = None;
+ (Err(crate::Error::new_user_unsupported_status_code()), true)
+ } else {
+ (Ok(()), !msg.keep_alive)
+ };
+
+ // In some error cases, we don't know about the invalid message until already
+ // pushing some bytes onto the `dst`. In those cases, we don't want to send
+ // the half-pushed message, so rewind to before.
+ let orig_len = dst.len();
+
+ let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE;
+ dst.reserve(init_cap);
+
+ let custom_reason_phrase = msg.head.extensions.get::<crate::ext::ReasonPhrase>();
+
+ // Fast path for the overwhelmingly common "HTTP/1.1 200 OK" line.
+ if msg.head.version == Version::HTTP_11
+ && msg.head.subject == StatusCode::OK
+ && custom_reason_phrase.is_none()
+ {
+ extend(dst, b"HTTP/1.1 200 OK\r\n");
+ } else {
+ match msg.head.version {
+ Version::HTTP_10 => extend(dst, b"HTTP/1.0 "),
+ Version::HTTP_11 => extend(dst, b"HTTP/1.1 "),
+ Version::HTTP_2 => {
+ debug!("response with HTTP2 version coerced to HTTP/1.1");
+ extend(dst, b"HTTP/1.1 ");
+ }
+ other => panic!("unexpected response version: {:?}", other),
+ }
+
+ extend(dst, msg.head.subject.as_str().as_bytes());
+ extend(dst, b" ");
+
+ if let Some(reason) = custom_reason_phrase {
+ extend(dst, reason.as_bytes());
+ } else {
+ // a reason MUST be written, as many parsers will expect it.
+ extend(
+ dst,
+ msg.head
+ .subject
+ .canonical_reason()
+ .unwrap_or("<none>")
+ .as_bytes(),
+ );
+ }
+
+ extend(dst, b"\r\n");
+ }
+
+ let orig_headers;
+ let extensions = std::mem::take(&mut msg.head.extensions);
+ let orig_headers = match extensions.get::<HeaderCaseMap>() {
+ None if msg.title_case_headers => {
+ orig_headers = HeaderCaseMap::default();
+ Some(&orig_headers)
+ }
+ orig_headers => orig_headers,
+ };
+ let encoder = if let Some(orig_headers) = orig_headers {
+ Self::encode_headers_with_original_case(
+ msg,
+ dst,
+ is_last,
+ orig_len,
+ wrote_len,
+ orig_headers,
+ )?
+ } else {
+ Self::encode_headers_with_lower_case(msg, dst, is_last, orig_len, wrote_len)?
+ };
+
+ ret.map(|()| encoder)
+ }
+
+ /// Maps a parse error to an automatic error response head (400/431/414),
+ /// or `None` when the error isn't one we answer on the wire.
+ fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
+ use crate::error::Kind;
+ let status = match *err.kind() {
+ Kind::Parse(Parse::Method)
+ | Kind::Parse(Parse::Header(_))
+ | Kind::Parse(Parse::Uri)
+ | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST,
+ Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE,
+ Kind::Parse(Parse::UriTooLong) => StatusCode::URI_TOO_LONG,
+ _ => return None,
+ };
+
+ debug!("sending automatic response ({}) for parse error", status);
+ let msg = MessageHead {
+ subject: status,
+ ..Default::default()
+ };
+ Some(msg)
+ }
+
+ fn is_server() -> bool {
+ true
+ }
+
+ // Refresh the cached Date header value (see `common::date`).
+ fn update_date() {
+ date::update();
+ }
+}
+
+#[cfg(feature = "server")]
+impl Server {
+ /// Whether a response to `method` with `status` may carry a body at all.
+ fn can_have_body(method: &Option<Method>, status: StatusCode) -> bool {
+ Server::can_chunked(method, status)
+ }
+
+ /// Whether chunked framing is permitted: false for HEAD responses,
+ /// successful CONNECT responses, 1xx, 204, and 304.
+ fn can_chunked(method: &Option<Method>, status: StatusCode) -> bool {
+ if method == &Some(Method::HEAD)
+ || method == &Some(Method::CONNECT) && status.is_success()
+ || status.is_informational()
+ {
+ false
+ } else {
+ !matches!(status, StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED)
+ }
+ }
+
+ /// Whether a Content-Length header is permitted on this response.
+ fn can_have_content_length(method: &Option<Method>, status: StatusCode) -> bool {
+ if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() {
+ false
+ } else {
+ !matches!(status, StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED)
+ }
+ }
+
+ /// Whether an implicit `content-length: 0` may be written when no body
+ /// was provided (HEAD responses are excluded: length describes what GET
+ /// would have returned).
+ fn can_have_implicit_zero_content_length(method: &Option<Method>, status: StatusCode) -> bool {
+ Server::can_have_content_length(method, status) && method != &Some(Method::HEAD)
+ }
+
+ /// Header serialization using the normalized lowercase header names.
+ fn encode_headers_with_lower_case(
+ msg: Encode<'_, StatusCode>,
+ dst: &mut Vec<u8>,
+ is_last: bool,
+ orig_len: usize,
+ wrote_len: bool,
+ ) -> crate::Result<Encoder> {
+ struct LowercaseWriter;
+
+ impl HeaderNameWriter for LowercaseWriter {
+ #[inline]
+ fn write_full_header_line(
+ &mut self,
+ dst: &mut Vec<u8>,
+ line: &str,
+ _: (HeaderName, &str),
+ ) {
+ extend(dst, line.as_bytes())
+ }
+
+ #[inline]
+ fn write_header_name_with_colon(
+ &mut self,
+ dst: &mut Vec<u8>,
+ name_with_colon: &str,
+ _: HeaderName,
+ ) {
+ extend(dst, name_with_colon.as_bytes())
+ }
+
+ #[inline]
+ fn write_header_name(&mut self, dst: &mut Vec<u8>, name: &HeaderName) {
+ extend(dst, name.as_str().as_bytes())
+ }
+ }
+
+ Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, LowercaseWriter)
+ }
+
+ /// Header serialization preserving the original (or title-cased) header
+ /// name casing. Cold: only used when the user opted in.
+ #[cold]
+ #[inline(never)]
+ fn encode_headers_with_original_case(
+ msg: Encode<'_, StatusCode>,
+ dst: &mut Vec<u8>,
+ is_last: bool,
+ orig_len: usize,
+ wrote_len: bool,
+ orig_headers: &HeaderCaseMap,
+ ) -> crate::Result<Encoder> {
+ struct OrigCaseWriter<'map> {
+ map: &'map HeaderCaseMap,
+ // Iterator over stored original casings for the header name
+ // currently being written; reset when the name changes.
+ current: Option<(HeaderName, ValueIter<'map, Bytes>)>,
+ title_case_headers: bool,
+ }
+
+ impl HeaderNameWriter for OrigCaseWriter<'_> {
+ #[inline]
+ fn write_full_header_line(
+ &mut self,
+ dst: &mut Vec<u8>,
+ _: &str,
+ (name, rest): (HeaderName, &str),
+ ) {
+ self.write_header_name(dst, &name);
+ extend(dst, rest.as_bytes());
+ }
+
+ #[inline]
+ fn write_header_name_with_colon(
+ &mut self,
+ dst: &mut Vec<u8>,
+ _: &str,
+ name: HeaderName,
+ ) {
+ self.write_header_name(dst, &name);
+ extend(dst, b": ");
+ }
+
+ #[inline]
+ fn write_header_name(&mut self, dst: &mut Vec<u8>, name: &HeaderName) {
+ let Self {
+ map,
+ ref mut current,
+ title_case_headers,
+ } = *self;
+ if current.as_ref().map_or(true, |(last, _)| last != name) {
+ *current = None;
+ }
+ let (_, values) =
+ current.get_or_insert_with(|| (name.clone(), map.get_all_internal(name)));
+
+ // Prefer the recorded original casing; fall back to
+ // title-case or lowercase per configuration.
+ if let Some(orig_name) = values.next() {
+ extend(dst, orig_name);
+ } else if title_case_headers {
+ title_case(dst, name.as_str().as_bytes());
+ } else {
+ extend(dst, name.as_str().as_bytes());
+ }
+ }
+ }
+
+ let header_name_writer = OrigCaseWriter {
+ map: orig_headers,
+ current: None,
+ title_case_headers: msg.title_case_headers,
+ };
+
+ Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, header_name_writer)
+ }
+
+ /// Core header-serialization loop shared by both casing strategies.
+ /// `orig_len` is the length of `dst` before the status line was
+ /// written, used to rewind on invalid user headers; `wrote_len` says a
+ /// length framing decision was already made (e.g. CONNECT 2xx).
+ #[inline]
+ fn encode_headers<W>(
+ msg: Encode<'_, StatusCode>,
+ dst: &mut Vec<u8>,
+ mut is_last: bool,
+ orig_len: usize,
+ mut wrote_len: bool,
+ mut header_name_writer: W,
+ ) -> crate::Result<Encoder>
+ where
+ W: HeaderNameWriter,
+ {
+ // In some error cases, we don't know about the invalid message until already
+ // pushing some bytes onto the `dst`. In those cases, we don't want to send
+ // the half-pushed message, so rewind to before.
+ let rewind = |dst: &mut Vec<u8>| {
+ dst.truncate(orig_len);
+ };
+
+ let mut encoder = Encoder::length(0);
+ let mut allowed_trailer_fields: Option<Vec<HeaderValue>> = None;
+ let mut wrote_date = false;
+ let mut cur_name = None;
+ let mut is_name_written = false;
+ let mut must_write_chunked = false;
+ let mut prev_con_len = None;
+
+ macro_rules! handle_is_name_written {
+ () => {{
+ if is_name_written {
+ // we need to clean up and write the newline
+ debug_assert_ne!(
+ &dst[dst.len() - 2..],
+ b"\r\n",
+ "previous header wrote newline but set is_name_written"
+ );
+
+ if must_write_chunked {
+ extend(dst, b", chunked\r\n");
+ } else {
+ extend(dst, b"\r\n");
+ }
+ }
+ }};
+ }
+
+ // `drain()` yields `None` for the name on repeated values of the
+ // same header, hence the `cur_name`/`is_name_written` tracking.
+ 'headers: for (opt_name, value) in msg.head.headers.drain() {
+ if let Some(n) = opt_name {
+ cur_name = Some(n);
+ handle_is_name_written!();
+ is_name_written = false;
+ }
+ let name = cur_name.as_ref().expect("current header name");
+ match *name {
+ header::CONTENT_LENGTH => {
+ if wrote_len && !is_name_written {
+ warn!("unexpected content-length found, canceling");
+ rewind(dst);
+ return Err(crate::Error::new_user_header());
+ }
+ match msg.body {
+ Some(BodyLength::Known(known_len)) => {
+ // The Body claims to know a length, and
+ // the headers are already set. For performance
+ // reasons, we are just going to trust that
+ // the values match.
+ //
+ // In debug builds, we'll assert they are the
+ // same to help developers find bugs.
+ #[cfg(debug_assertions)]
+ {
+ if let Some(len) = headers::content_length_parse(&value) {
+ if msg.req_method != &Some(Method::HEAD) || known_len != 0 {
+ assert!(
+ len == known_len,
+ "payload claims content-length of {}, custom content-length header claims {}",
+ known_len,
+ len,
+ );
+ }
+ }
+ }
+
+ if !is_name_written {
+ encoder = Encoder::length(known_len);
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "content-length: ",
+ header::CONTENT_LENGTH,
+ );
+ extend(dst, value.as_bytes());
+ wrote_len = true;
+ is_name_written = true;
+ }
+ continue 'headers;
+ }
+ Some(BodyLength::Unknown) => {
+ // The Body impl didn't know how long the
+ // body is, but a length header was included.
+ // We have to parse the value to return our
+ // Encoder...
+
+ if let Some(len) = headers::content_length_parse(&value) {
+ if let Some(prev) = prev_con_len {
+ if prev != len {
+ warn!(
+ "multiple Content-Length values found: [{}, {}]",
+ prev, len
+ );
+ rewind(dst);
+ return Err(crate::Error::new_user_header());
+ }
+ debug_assert!(is_name_written);
+ continue 'headers;
+ } else {
+ // we haven't written content-length yet!
+ encoder = Encoder::length(len);
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "content-length: ",
+ header::CONTENT_LENGTH,
+ );
+ extend(dst, value.as_bytes());
+ wrote_len = true;
+ is_name_written = true;
+ prev_con_len = Some(len);
+ continue 'headers;
+ }
+ } else {
+ warn!("illegal Content-Length value: {:?}", value);
+ rewind(dst);
+ return Err(crate::Error::new_user_header());
+ }
+ }
+ None => {
+ // We have no body to actually send,
+ // but the headers claim a content-length.
+ // There's only 2 ways this makes sense:
+ //
+ // - The header says the length is `0`.
+ // - This is a response to a `HEAD` request.
+ if msg.req_method == &Some(Method::HEAD) {
+ debug_assert_eq!(encoder, Encoder::length(0));
+ } else {
+ if value.as_bytes() != b"0" {
+ warn!(
+ "content-length value found, but empty body provided: {:?}",
+ value
+ );
+ }
+ continue 'headers;
+ }
+ }
+ }
+ wrote_len = true;
+ }
+ header::TRANSFER_ENCODING => {
+ if wrote_len && !is_name_written {
+ warn!("unexpected transfer-encoding found, canceling");
+ rewind(dst);
+ return Err(crate::Error::new_user_header());
+ }
+ // check that we actually can send a chunked body...
+ if msg.head.version == Version::HTTP_10
+ || !Server::can_chunked(msg.req_method, msg.head.subject)
+ {
+ continue;
+ }
+ wrote_len = true;
+ // Must check each value, because `chunked` needs to be the
+ // last encoding, or else we add it.
+ must_write_chunked = !headers::is_chunked_(&value);
+
+ if !is_name_written {
+ encoder = Encoder::chunked();
+ is_name_written = true;
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "transfer-encoding: ",
+ header::TRANSFER_ENCODING,
+ );
+ extend(dst, value.as_bytes());
+ } else {
+ extend(dst, b", ");
+ extend(dst, value.as_bytes());
+ }
+ continue 'headers;
+ }
+ header::CONNECTION => {
+ if !is_last && headers::connection_close(&value) {
+ is_last = true;
+ }
+ if !is_name_written {
+ is_name_written = true;
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "connection: ",
+ header::CONNECTION,
+ );
+ extend(dst, value.as_bytes());
+ } else {
+ extend(dst, b", ");
+ extend(dst, value.as_bytes());
+ }
+ continue 'headers;
+ }
+ header::DATE => {
+ wrote_date = true;
+ }
+ header::TRAILER => {
+ // check that we actually can send a chunked body...
+ if msg.head.version == Version::HTTP_10
+ || !Server::can_chunked(msg.req_method, msg.head.subject)
+ {
+ continue;
+ }
+
+ if !is_name_written {
+ is_name_written = true;
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "trailer: ",
+ header::TRAILER,
+ );
+ extend(dst, value.as_bytes());
+ } else {
+ extend(dst, b", ");
+ extend(dst, value.as_bytes());
+ }
+
+ match allowed_trailer_fields {
+ Some(ref mut allowed_trailer_fields) => {
+ allowed_trailer_fields.push(value);
+ }
+ None => {
+ allowed_trailer_fields = Some(vec![value]);
+ }
+ }
+
+ continue 'headers;
+ }
+ _ => (),
+ }
+ //TODO: this should perhaps instead combine them into
+ //single lines, as RFC7230 suggests is preferable.
+
+ // non-special write Name and Value
+ debug_assert!(
+ !is_name_written,
+ "{:?} set is_name_written and didn't continue loop",
+ name,
+ );
+ header_name_writer.write_header_name(dst, name);
+ extend(dst, b": ");
+ extend(dst, value.as_bytes());
+ extend(dst, b"\r\n");
+ }
+
+ handle_is_name_written!();
+
+ // No explicit framing header was written; pick one from the body.
+ if !wrote_len {
+ encoder = match msg.body {
+ Some(BodyLength::Unknown) => {
+ if msg.head.version == Version::HTTP_10
+ || !Server::can_chunked(msg.req_method, msg.head.subject)
+ {
+ Encoder::close_delimited()
+ } else {
+ header_name_writer.write_full_header_line(
+ dst,
+ "transfer-encoding: chunked\r\n",
+ (header::TRANSFER_ENCODING, ": chunked\r\n"),
+ );
+ Encoder::chunked()
+ }
+ }
+ None | Some(BodyLength::Known(0)) => {
+ if Server::can_have_implicit_zero_content_length(
+ msg.req_method,
+ msg.head.subject,
+ ) {
+ header_name_writer.write_full_header_line(
+ dst,
+ "content-length: 0\r\n",
+ (header::CONTENT_LENGTH, ": 0\r\n"),
+ )
+ }
+ Encoder::length(0)
+ }
+ Some(BodyLength::Known(len)) => {
+ if !Server::can_have_content_length(msg.req_method, msg.head.subject) {
+ Encoder::length(0)
+ } else {
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "content-length: ",
+ header::CONTENT_LENGTH,
+ );
+ extend(dst, ::itoa::Buffer::new().format(len).as_bytes());
+ extend(dst, b"\r\n");
+ Encoder::length(len)
+ }
+ }
+ };
+ }
+
+ if !Server::can_have_body(msg.req_method, msg.head.subject) {
+ trace!(
+ "server body forced to 0; method={:?}, status={:?}",
+ msg.req_method,
+ msg.head.subject
+ );
+ encoder = Encoder::length(0);
+ }
+
+ // cached date is much faster than formatting every request
+ // don't force the write if disabled
+ if !wrote_date && msg.date_header {
+ dst.reserve(date::DATE_VALUE_LENGTH + 8);
+ header_name_writer.write_header_name_with_colon(dst, "date: ", header::DATE);
+ date::extend(dst);
+ extend(dst, b"\r\n\r\n");
+ } else {
+ extend(dst, b"\r\n");
+ }
+
+ if encoder.is_chunked() {
+ if let Some(allowed_trailer_fields) = allowed_trailer_fields {
+ encoder = encoder.into_chunked_with_trailing_fields(allowed_trailer_fields);
+ }
+ }
+
+ Ok(encoder.set_last(is_last))
+ }
+
+ /// Helper for zero-copy parsing of request path URI.
+ ///
+ /// Computes the byte offsets of `req_path` within `bytes` via pointer
+ /// arithmetic; `req_path` must be a subslice of `bytes` (httparse hands
+ /// back exactly that), otherwise the offsets are meaningless.
+ #[inline]
+ fn record_path_range(bytes: &[u8], req_path: &str) -> std::ops::Range<usize> {
+ let bytes_ptr = bytes.as_ptr() as usize;
+ let start = req_path.as_ptr() as usize - bytes_ptr;
+ let end = start + req_path.len();
+ std::ops::Range { start, end }
+ }
+}
+
+/// Strategy for writing header names during response encoding, letting
+/// `Server::encode_headers` stay generic over lowercase vs original-case
+/// output without dynamic dispatch.
+#[cfg(feature = "server")]
+trait HeaderNameWriter {
+ /// Writes a complete pre-formatted `name: value\r\n` line (the pair is
+ /// the fallback for writers that must re-case the name).
+ fn write_full_header_line(
+ &mut self,
+ dst: &mut Vec<u8>,
+ line: &str,
+ name_value_pair: (HeaderName, &str),
+ );
+ /// Writes `name: ` (with trailing colon-space), leaving the value to
+ /// the caller.
+ fn write_header_name_with_colon(
+ &mut self,
+ dst: &mut Vec<u8>,
+ name_with_colon: &str,
+ name: HeaderName,
+ );
+ /// Writes just the header name.
+ fn write_header_name(&mut self, dst: &mut Vec<u8>, name: &HeaderName);
+}
+
+#[cfg(feature = "client")]
+impl Http1Transaction for Client {
+ type Incoming = StatusCode;
+ type Outgoing = RequestLine;
+ #[cfg(feature = "tracing")]
+ const LOG: &'static str = "{role=client}";
+
+ fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult<StatusCode> {
+ debug_assert!(!buf.is_empty(), "parse called with empty buf");
+
+ // Loop to skip information status code headers (100 Continue, etc).
+ loop {
+ let mut headers_indices: SmallVec<[MaybeUninit<HeaderIndices>; DEFAULT_MAX_HEADERS]> =
+ match ctx.h1_max_headers {
+ Some(cap) => smallvec![MaybeUninit::uninit(); cap],
+ None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS],
+ };
+ let (len, status, reason, version, headers_len) = {
+ let mut headers: SmallVec<
+ [MaybeUninit<httparse::Header<'_>>; DEFAULT_MAX_HEADERS],
+ > = match ctx.h1_max_headers {
+ Some(cap) => smallvec![MaybeUninit::uninit(); cap],
+ None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS],
+ };
+ trace!(bytes = buf.len(), "Response.parse");
+ let mut res = httparse::Response::new(&mut []);
+ let bytes = buf.as_ref();
+ match ctx.h1_parser_config.parse_response_with_uninit_headers(
+ &mut res,
+ bytes,
+ &mut headers,
+ ) {
+ Ok(httparse::Status::Complete(len)) => {
+ trace!("Response.parse Complete({})", len);
+ let status = StatusCode::from_u16(res.code.unwrap())?;
+
+ let reason = {
+ let reason = res.reason.unwrap();
+ // Only save the reason phrase if it isn't the canonical reason
+ if Some(reason) != status.canonical_reason() {
+ Some(Bytes::copy_from_slice(reason.as_bytes()))
+ } else {
+ None
+ }
+ };
+
+ let version = if res.version.unwrap() == 1 {
+ Version::HTTP_11
+ } else {
+ Version::HTTP_10
+ };
+ record_header_indices(bytes, res.headers, &mut headers_indices)?;
+ let headers_len = res.headers.len();
+ (len, status, reason, version, headers_len)
+ }
+ Ok(httparse::Status::Partial) => return Ok(None),
+ Err(httparse::Error::Version) if ctx.h09_responses => {
+ trace!("Response.parse accepted HTTP/0.9 response");
+
+ (0, StatusCode::OK, None, Version::HTTP_09, 0)
+ }
+ Err(e) => return Err(e.into()),
+ }
+ };
+
+ let mut slice = buf.split_to(len);
+
+ if ctx
+ .h1_parser_config
+ .obsolete_multiline_headers_in_responses_are_allowed()
+ {
+ for header in &mut headers_indices[..headers_len] {
+ // SAFETY: array is valid up to `headers_len`
+ let header = unsafe { header.assume_init_mut() };
+ Client::obs_fold_line(&mut slice, header);
+ }
+ }
+
+ let slice = slice.freeze();
+
+ let mut headers = ctx.cached_headers.take().unwrap_or_default();
+
+ let mut keep_alive = version == Version::HTTP_11;
+
+ let mut header_case_map = if ctx.preserve_header_case {
+ Some(HeaderCaseMap::default())
+ } else {
+ None
+ };
+
+ #[cfg(feature = "ffi")]
+ let mut header_order = if ctx.preserve_header_order {
+ Some(OriginalHeaderOrder::default())
+ } else {
+ None
+ };
+
+ headers.reserve(headers_len);
+ for header in &headers_indices[..headers_len] {
+ // SAFETY: array is valid up to `headers_len`
+ let header = unsafe { header.assume_init_ref() };
+ let name = header_name!(&slice[header.name.0..header.name.1]);
+ let value = header_value!(slice.slice(header.value.0..header.value.1));
+
+ if let header::CONNECTION = name {
+ // keep_alive was previously set to default for Version
+ if keep_alive {
+ // HTTP/1.1
+ keep_alive = !headers::connection_close(&value);
+ } else {
+ // HTTP/1.0
+ keep_alive = headers::connection_keep_alive(&value);
+ }
+ }
+
+ if let Some(ref mut header_case_map) = header_case_map {
+ header_case_map.append(&name, slice.slice(header.name.0..header.name.1));
+ }
+
+ #[cfg(feature = "ffi")]
+ if let Some(ref mut header_order) = header_order {
+ header_order.append(&name);
+ }
+
+ headers.append(name, value);
+ }
+
+ let mut extensions = http::Extensions::default();
+
+ if let Some(header_case_map) = header_case_map {
+ extensions.insert(header_case_map);
+ }
+
+ #[cfg(feature = "ffi")]
+ if let Some(header_order) = header_order {
+ extensions.insert(header_order);
+ }
+
+ if let Some(reason) = reason {
+ // Safety: httparse ensures that only valid reason phrase bytes are present in this
+ // field.
+ let reason = crate::ext::ReasonPhrase::from_bytes_unchecked(reason);
+ extensions.insert(reason);
+ }
+
+ let head = MessageHead {
+ version,
+ subject: status,
+ headers,
+ extensions,
+ };
+ if let Some((decode, is_upgrade)) = Client::decoder(&head, ctx.req_method)? {
+ return Ok(Some(ParsedMessage {
+ head,
+ decode,
+ expect_continue: false,
+ // a client upgrade means the connection can't be used
+ // again, as it is definitely upgrading.
+ keep_alive: keep_alive && !is_upgrade,
+ wants_upgrade: is_upgrade,
+ }));
+ }
+
+ if head.subject.is_informational() {
+ if let Some(callback) = ctx.on_informational {
+ callback.call(head.into_response(()));
+ }
+ }
+
+ // Parsing a 1xx response could have consumed the buffer, check if
+ // it is empty now...
+ if buf.is_empty() {
+ return Ok(None);
+ }
+ }
+ }
+
+    /// Encodes a client request head into `dst` and returns the body
+    /// `Encoder` to use when writing the request body.
+    ///
+    /// Side effects visible here: the request method is recorded into
+    /// `msg.req_method` (the response parser needs it for HEAD/CONNECT
+    /// rules), framing headers are normalized via `Client::set_length`,
+    /// and `msg.head.headers` is cleared after serialization.
+    fn encode(msg: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder> {
+        trace!(
+            "Client::encode method={:?}, body={:?}",
+            msg.head.subject.0,
+            msg.body
+        );
+
+        // Remember the method; Client::decoder consults it when parsing
+        // the matching response.
+        *msg.req_method = Some(msg.head.subject.0.clone());
+
+        let body = Client::set_length(msg.head, msg.body);
+
+        // Rough capacity guess: ~30 bytes for the request line plus an
+        // average size per header, to avoid repeated reallocation.
+        let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE;
+        dst.reserve(init_cap);
+
+        extend(dst, msg.head.subject.0.as_str().as_bytes());
+        extend(dst, b" ");
+        //TODO: add API to http::Uri to encode without std::fmt
+        let _ = write!(FastWrite(dst), "{} ", msg.head.subject.1);
+
+        match msg.head.version {
+            Version::HTTP_10 => extend(dst, b"HTTP/1.0"),
+            Version::HTTP_11 => extend(dst, b"HTTP/1.1"),
+            Version::HTTP_2 => {
+                // This HTTP/1 encoder can still be handed an HTTP/2 request;
+                // downgrade the version on the wire.
+                debug!("request with HTTP2 version coerced to HTTP/1.1");
+                extend(dst, b"HTTP/1.1");
+            }
+            other => panic!("unexpected request version: {:?}", other),
+        }
+        extend(dst, b"\r\n");
+
+        // Header serialization honors originally-received casing (if it was
+        // captured) first, then the title-case option, then plain lowercase.
+        if let Some(orig_headers) = msg.head.extensions.get::<HeaderCaseMap>() {
+            write_headers_original_case(
+                &msg.head.headers,
+                orig_headers,
+                dst,
+                msg.title_case_headers,
+            );
+        } else if msg.title_case_headers {
+            write_headers_title_case(&msg.head.headers, dst);
+        } else {
+            write_headers(&msg.head.headers, dst);
+        }
+
+        extend(dst, b"\r\n");
+        msg.head.headers.clear(); //TODO: remove when switching to drain()
+
+        Ok(body)
+    }
+
+    /// A client cannot send an error response back to the peer, so this
+    /// never produces a message head.
+    fn on_error(_err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
+        // we can't tell the server about any errors it creates
+        None
+    }
+
+    /// Identifies this `Http1Transaction` implementation as the client role.
+    fn is_client() -> bool {
+        true
+    }
+}
+
+#[cfg(feature = "client")]
+impl Client {
+    /// Returns Some(length, wants_upgrade) if successful.
+    ///
+    /// Returns None if this message head should be skipped (like a 100 status).
+    ///
+    /// `method` is the method of the request this response answers; it
+    /// drives the HEAD/CONNECT special cases below.
+    fn decoder(
+        inc: &MessageHead<StatusCode>,
+        method: &mut Option<Method>,
+    ) -> Result<Option<(DecodedLength, bool)>, Parse> {
+        // According to https://tools.ietf.org/html/rfc7230#section-3.3.3
+        // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body.
+        // 2. Status 2xx to a CONNECT cannot have a body.
+        // 3. Transfer-Encoding: chunked has a chunked body.
+        // 4. If multiple differing Content-Length headers or invalid, close connection.
+        // 5. Content-Length header has a sized body.
+        // 6. (irrelevant to Response)
+        // 7. Read till EOF.
+
+        match inc.subject.as_u16() {
+            101 => {
+                // 101 Switching Protocols: no body, and the connection is
+                // handed off as an upgrade.
+                return Ok(Some((DecodedLength::ZERO, true)));
+            }
+            100 | 102..=199 => {
+                trace!("ignoring informational response: {}", inc.subject.as_u16());
+                return Ok(None);
+            }
+            204 | 304 => return Ok(Some((DecodedLength::ZERO, false))),
+            _ => (),
+        }
+        match *method {
+            Some(Method::HEAD) => {
+                return Ok(Some((DecodedLength::ZERO, false)));
+            }
+            Some(Method::CONNECT) => {
+                // A 2xx to a CONNECT means the tunnel is established; the
+                // connection becomes an upgrade and carries no body.
+                if let 200..=299 = inc.subject.as_u16() {
+                    return Ok(Some((DecodedLength::ZERO, true)));
+                }
+            }
+            Some(_) => {}
+            None => {
+                trace!("Client::decoder is missing the Method");
+            }
+        }
+
+        if inc.headers.contains_key(header::TRANSFER_ENCODING) {
+            // https://tools.ietf.org/html/rfc7230#section-3.3.3
+            // If Transfer-Encoding header is present, and 'chunked' is
+            // not the final encoding, and this is a Request, then it is
+            // malformed. A server should respond with 400 Bad Request.
+            if inc.version == Version::HTTP_10 {
+                debug!("HTTP/1.0 cannot have Transfer-Encoding header");
+                Err(Parse::transfer_encoding_unexpected())
+            } else if headers::transfer_encoding_is_chunked(&inc.headers) {
+                Ok(Some((DecodedLength::CHUNKED, false)))
+            } else {
+                // A response with a non-chunked Transfer-Encoding is not
+                // illegal; its body simply runs until the connection closes.
+                trace!("not chunked, read till eof");
+                Ok(Some((DecodedLength::CLOSE_DELIMITED, false)))
+            }
+        } else if let Some(len) = headers::content_length_parse_all(&inc.headers) {
+            Ok(Some((DecodedLength::checked_new(len)?, false)))
+        } else if inc.headers.contains_key(header::CONTENT_LENGTH) {
+            // The header exists but did not parse as a single valid value.
+            debug!("illegal Content-Length header");
+            Err(Parse::content_length_invalid())
+        } else {
+            trace!("neither Transfer-Encoding nor Content-Length");
+            Ok(Some((DecodedLength::CLOSE_DELIMITED, false)))
+        }
+    }
+    /// Reconciles user-set framing headers (Content-Length /
+    /// Transfer-Encoding) with what the body reports about itself, and
+    /// returns the `Encoder` to use when writing the request body.
+    ///
+    /// Headers the user set explicitly win over the body's own knowledge;
+    /// illegal combinations are repaired where possible.
+    fn set_length(head: &mut RequestHead, body: Option<BodyLength>) -> Encoder {
+        let body = if let Some(body) = body {
+            body
+        } else {
+            // No body at all: strip any Transfer-Encoding and frame the
+            // message as zero-length.
+            head.headers.remove(header::TRANSFER_ENCODING);
+            return Encoder::length(0);
+        };
+
+        // HTTP/1.0 doesn't know about chunked
+        let can_chunked = head.version == Version::HTTP_11;
+        let headers = &mut head.headers;
+
+        // If the user already set specific headers, we should respect them, regardless
+        // of what the Body knows about itself. They set them for a reason.
+
+        // Because of the borrow checker, we can't check for an existing
+        // Content-Length header while holding an `Entry` for the Transfer-Encoding
+        // header, so unfortunately, we must do the check here, first.
+
+        let existing_con_len = headers::content_length_parse_all(headers);
+        let mut should_remove_con_len = false;
+
+        if !can_chunked {
+            // Chunked isn't legal, so if it is set, we need to remove it.
+            if headers.remove(header::TRANSFER_ENCODING).is_some() {
+                trace!("removing illegal transfer-encoding header");
+            }
+
+            return if let Some(len) = existing_con_len {
+                Encoder::length(len)
+            } else if let BodyLength::Known(len) = body {
+                set_content_length(headers, len)
+            } else {
+                // HTTP/1.0 client requests without a content-length
+                // cannot have any body at all.
+                Encoder::length(0)
+            };
+        }
+
+        // If the user set a transfer-encoding, respect that. Let's just
+        // make sure `chunked` is the final encoding.
+        let encoder = match headers.entry(header::TRANSFER_ENCODING) {
+            Entry::Occupied(te) => {
+                should_remove_con_len = true;
+                if headers::is_chunked(te.iter()) {
+                    Some(Encoder::chunked())
+                } else {
+                    warn!("user provided transfer-encoding does not end in 'chunked'");
+
+                    // There's a Transfer-Encoding, but it doesn't end in 'chunked'!
+                    // An example that could trigger this:
+                    //
+                    //     Transfer-Encoding: gzip
+                    //
+                    // This can be bad, depending on if this is a request or a
+                    // response.
+                    //
+                    // - A request is illegal if there is a `Transfer-Encoding`
+                    //   but it doesn't end in `chunked`.
+                    // - A response that has `Transfer-Encoding` but doesn't
+                    //   end in `chunked` isn't illegal, it just forces this
+                    //   to be close-delimited.
+                    //
+                    // We can try to repair this, by adding `chunked` ourselves.
+
+                    headers::add_chunked(te);
+                    Some(Encoder::chunked())
+                }
+            }
+            Entry::Vacant(te) => {
+                if let Some(len) = existing_con_len {
+                    Some(Encoder::length(len))
+                } else if let BodyLength::Unknown = body {
+                    // GET, HEAD, and CONNECT almost never have bodies.
+                    //
+                    // So instead of sending a "chunked" body with a 0-chunk,
+                    // assume no body here. If you *must* send a body,
+                    // set the headers explicitly.
+                    match head.subject.0 {
+                        Method::GET | Method::HEAD | Method::CONNECT => Some(Encoder::length(0)),
+                        _ => {
+                            te.insert(HeaderValue::from_static("chunked"));
+                            Some(Encoder::chunked())
+                        }
+                    }
+                } else {
+                    None
+                }
+            }
+        };
+
+        // A chunked body may carry trailers; remember which trailer fields
+        // the user declared via the `Trailer` header.
+        let encoder = encoder.map(|enc| {
+            if enc.is_chunked() {
+                let allowed_trailer_fields: Vec<HeaderValue> =
+                    headers.get_all(header::TRAILER).iter().cloned().collect();
+
+                if !allowed_trailer_fields.is_empty() {
+                    return enc.into_chunked_with_trailing_fields(allowed_trailer_fields);
+                }
+            }
+
+            enc
+        });
+
+        // This is because we need a second mutable borrow to remove
+        // content-length header.
+        if let Some(encoder) = encoder {
+            if should_remove_con_len && existing_con_len.is_some() {
+                headers.remove(header::CONTENT_LENGTH);
+            }
+            return encoder;
+        }
+
+        // User didn't set transfer-encoding, AND we know body length,
+        // so we can just set the Content-Length automatically.
+
+        let len = if let BodyLength::Known(len) = body {
+            len
+        } else {
+            unreachable!("BodyLength::Unknown would set chunked");
+        };
+
+        set_content_length(headers, len)
+    }
+
+    /// Un-folds an obs-folded (multi-line) header value in place inside
+    /// `all`, joining continuation lines with single spaces and shrinking
+    /// `idx.value` to cover only the rewritten text.
+    fn obs_fold_line(all: &mut [u8], idx: &mut HeaderIndices) {
+        // If the value has obs-folded text, then in-place shift the bytes out
+        // of here.
+        //
+        // https://httpwg.org/specs/rfc9112.html#line.folding
+        //
+        // > A user agent that receives an obs-fold MUST replace each received
+        // > obs-fold with one or more SP octets prior to interpreting the
+        // > field value.
+        //
+        // This means strings like "\r\n\t foo" must replace the "\r\n\t " with
+        // a single space.
+
+        let buf = &mut all[idx.value.0..idx.value.1];
+
+        // look for a newline, otherwise bail out
+        let first_nl = match buf.iter().position(|b| *b == b'\n') {
+            Some(i) => i,
+            None => return,
+        };
+
+        // not on standard slices because whatever, sigh
+        fn trim_start(mut s: &[u8]) -> &[u8] {
+            while let [first, rest @ ..] = s {
+                if first.is_ascii_whitespace() {
+                    s = rest;
+                } else {
+                    break;
+                }
+            }
+            s
+        }
+
+        fn trim_end(mut s: &[u8]) -> &[u8] {
+            while let [rest @ .., last] = s {
+                if last.is_ascii_whitespace() {
+                    s = rest;
+                } else {
+                    break;
+                }
+            }
+            s
+        }
+
+        fn trim(s: &[u8]) -> &[u8] {
+            trim_start(trim_end(s))
+        }
+
+        // TODO(perf): we could do the moves in-place, but this is so uncommon
+        // that it shouldn't matter.
+        let mut unfolded = trim_end(&buf[..first_nl]).to_vec();
+        for line in buf[first_nl + 1..].split(|b| *b == b'\n') {
+            unfolded.push(b' ');
+            unfolded.extend_from_slice(trim(line));
+        }
+        // The unfolded text is never longer than the original, so it fits
+        // back into the front of `buf`; stale bytes past the new end are
+        // excluded by updating the index below.
+        buf[..unfolded.len()].copy_from_slice(&unfolded);
+        idx.value.1 = idx.value.0 + unfolded.len();
+    }
+}
+
+/// Inserts (or repairs) the Content-Length header with `len` and returns a
+/// fixed-length body `Encoder`.
+#[cfg(feature = "client")]
+fn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder {
+    // At this point, there should not be a valid Content-Length
+    // header. However, since we'll be indexing in anyways, we can
+    // warn the user if there was an existing illegal header.
+    //
+    // Or at least, we can in theory. It's actually a little bit slower,
+    // so perhaps only do that while the user is developing/testing.
+
+    if cfg!(debug_assertions) {
+        match headers.entry(header::CONTENT_LENGTH) {
+            Entry::Occupied(mut cl) => {
+                // Internal sanity check, we should have already determined
+                // that the header was illegal before calling this function.
+                debug_assert!(headers::content_length_parse_all_values(cl.iter()).is_none());
+                // Uh oh, the user set `Content-Length` headers, but set bad ones.
+                // This would be an illegal message anyways, so let's try to repair
+                // with our known good length.
+                error!("user provided content-length header was invalid");
+
+                cl.insert(HeaderValue::from(len));
+                Encoder::length(len)
+            }
+            Entry::Vacant(cl) => {
+                cl.insert(HeaderValue::from(len));
+                Encoder::length(len)
+            }
+        }
+    } else {
+        // Release builds skip the sanity check and just overwrite.
+        headers.insert(header::CONTENT_LENGTH, HeaderValue::from(len));
+        Encoder::length(len)
+    }
+}
+
+/// Byte ranges of one parsed header within the original input buffer.
+#[derive(Clone, Copy)]
+struct HeaderIndices {
+    /// `(start, end)` byte offsets of the header name.
+    name: (usize, usize),
+    /// `(start, end)` byte offsets of the header value.
+    value: (usize, usize),
+}
+
+/// Converts borrowed `httparse` headers into owned byte-offset indices
+/// relative to `bytes`, writing one `HeaderIndices` per header.
+///
+/// Returns `Parse::TooLarge` if any header name is 64kb or larger.
+fn record_header_indices(
+    bytes: &[u8],
+    headers: &[httparse::Header<'_>],
+    indices: &mut [MaybeUninit<HeaderIndices>],
+) -> Result<(), crate::error::Parse> {
+    // The parsed name/value slices borrow from `bytes`, so their offsets
+    // can be recovered by pointer arithmetic against the buffer start.
+    let bytes_ptr = bytes.as_ptr() as usize;
+
+    for (header, indices) in headers.iter().zip(indices.iter_mut()) {
+        if header.name.len() >= (1 << 16) {
+            debug!("header name larger than 64kb: {:?}", header.name);
+            return Err(crate::error::Parse::TooLarge);
+        }
+        let name_start = header.name.as_ptr() as usize - bytes_ptr;
+        let name_end = name_start + header.name.len();
+        let value_start = header.value.as_ptr() as usize - bytes_ptr;
+        let value_end = value_start + header.value.len();
+
+        indices.write(HeaderIndices {
+            name: (name_start, name_end),
+            value: (value_start, value_end),
+        });
+    }
+
+    Ok(())
+}
+
// Write a header name into `dst` in Title-Case form. The name is assumed
// to be ASCII: the first byte, and any byte that follows a `-`, is
// upper-cased; everything else is copied through unchanged.
fn title_case(dst: &mut Vec<u8>, name: &[u8]) {
    dst.reserve(name.len());

    // Pretend the byte before the name was a dash so the very first
    // character is upper-cased as well.
    let mut last = b'-';
    for &byte in name {
        let out = if last == b'-' {
            byte.to_ascii_uppercase()
        } else {
            byte
        };
        dst.push(out);
        last = out;
    }
}
+
+pub(crate) fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec<u8>) {
+ for (name, value) in headers {
+ title_case(dst, name.as_str().as_bytes());
+ extend(dst, b": ");
+ extend(dst, value.as_bytes());
+ extend(dst, b"\r\n");
+ }
+}
+
+pub(crate) fn write_headers(headers: &HeaderMap, dst: &mut Vec<u8>) {
+ for (name, value) in headers {
+ extend(dst, name.as_str().as_bytes());
+ extend(dst, b": ");
+ extend(dst, value.as_bytes());
+ extend(dst, b"\r\n");
+ }
+}
+
+/// Serializes headers using the originally-received name casing from
+/// `orig_case` where available, falling back to title-case or lowercase
+/// for names without a recorded original spelling.
+#[cold]
+#[cfg(feature = "client")]
+fn write_headers_original_case(
+    headers: &HeaderMap,
+    orig_case: &HeaderCaseMap,
+    dst: &mut Vec<u8>,
+    title_case_headers: bool,
+) {
+    // For each header name/value pair, there may be a value in the casemap
+    // that corresponds to the HeaderValue. So, we iterator all the keys,
+    // and for each one, try to pair the originally cased name with the value.
+    //
+    // TODO: consider adding http::HeaderMap::entries() iterator
+    for name in headers.keys() {
+        let mut names = orig_case.get_all(name);
+
+        for value in headers.get_all(name) {
+            // Pair values with recorded original names positionally; once
+            // the originals run out, fall back to the configured style.
+            if let Some(orig_name) = names.next() {
+                extend(dst, orig_name.as_ref());
+            } else if title_case_headers {
+                title_case(dst, name.as_str().as_bytes());
+            } else {
+                extend(dst, name.as_str().as_bytes());
+            }
+
+            // Wanted for curl test cases that send `X-Custom-Header:\r\n`
+            if value.is_empty() {
+                extend(dst, b":\r\n");
+            } else {
+                extend(dst, b": ");
+                extend(dst, value.as_bytes());
+                extend(dst, b"\r\n");
+            }
+        }
+    }
+}
+
+/// Adapter that lets `write!` append formatted text straight into a byte
+/// buffer without an intermediate `String` allocation.
+#[cfg(feature = "client")]
+struct FastWrite<'a>(&'a mut Vec<u8>);
+
+#[cfg(feature = "client")]
+impl fmt::Write for FastWrite<'_> {
+    #[inline]
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        // A plain byte-copy into the Vec; this cannot fail.
+        extend(self.0, s.as_bytes());
+        Ok(())
+    }
+
+    #[inline]
+    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
+        fmt::write(self, args)
+    }
+}
+
+/// Appends `data` to `dst`; a tiny named helper that keeps serialization
+/// call sites terse.
+#[inline]
+fn extend(dst: &mut Vec<u8>, data: &[u8]) {
+    dst.extend_from_slice(data);
+}
+
+#[cfg(test)]
+mod tests {
+ use bytes::BytesMut;
+
+ use super::*;
+
+ #[cfg(feature = "server")]
+ #[test]
+ fn test_parse_request() {
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from("GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n");
+ let mut method = None;
+ let msg = Server::parse(
+ &mut raw,
+ ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut method,
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ },
+ )
+ .unwrap()
+ .unwrap();
+ assert_eq!(raw.len(), 0);
+ assert_eq!(msg.head.subject.0, crate::Method::GET);
+ assert_eq!(msg.head.subject.1, "/echo");
+ assert_eq!(msg.head.version, crate::Version::HTTP_11);
+ assert_eq!(msg.head.headers.len(), 1);
+ assert_eq!(msg.head.headers["Host"], "hyper.rs");
+ assert_eq!(method, Some(crate::Method::GET));
+ }
+
+ #[test]
+ fn test_parse_response() {
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n");
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(crate::Method::GET),
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ };
+ let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
+ assert_eq!(raw.len(), 0);
+ assert_eq!(msg.head.subject, crate::StatusCode::OK);
+ assert_eq!(msg.head.version, crate::Version::HTTP_11);
+ assert_eq!(msg.head.headers.len(), 1);
+ assert_eq!(msg.head.headers["Content-Length"], "0");
+ }
+
+ #[cfg(feature = "server")]
+ #[test]
+ fn test_parse_request_errors() {
+ let mut raw = BytesMut::from("GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n");
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut None,
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ };
+ Server::parse(&mut raw, ctx).unwrap_err();
+ }
+
+ const H09_RESPONSE: &str = "Baguettes are super delicious, don't you agree?";
+
+ #[test]
+ fn test_parse_response_h09_allowed() {
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from(H09_RESPONSE);
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(crate::Method::GET),
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: true,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ };
+ let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
+ assert_eq!(raw, H09_RESPONSE);
+ assert_eq!(msg.head.subject, crate::StatusCode::OK);
+ assert_eq!(msg.head.version, crate::Version::HTTP_09);
+ assert_eq!(msg.head.headers.len(), 0);
+ }
+
+ #[test]
+ fn test_parse_response_h09_rejected() {
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from(H09_RESPONSE);
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(crate::Method::GET),
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ };
+ Client::parse(&mut raw, ctx).unwrap_err();
+ assert_eq!(raw, H09_RESPONSE);
+ }
+
+ const RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON: &str =
+ "HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials : true\r\n\r\n";
+
+ #[test]
+ fn test_parse_allow_response_with_spaces_before_colons() {
+ use httparse::ParserConfig;
+
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON);
+ let mut h1_parser_config = ParserConfig::default();
+ h1_parser_config.allow_spaces_after_header_name_in_responses(true);
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(crate::Method::GET),
+ h1_parser_config,
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ };
+ let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
+ assert_eq!(raw.len(), 0);
+ assert_eq!(msg.head.subject, crate::StatusCode::OK);
+ assert_eq!(msg.head.version, crate::Version::HTTP_11);
+ assert_eq!(msg.head.headers.len(), 1);
+ assert_eq!(msg.head.headers["Access-Control-Allow-Credentials"], "true");
+ }
+
+ #[test]
+ fn test_parse_reject_response_with_spaces_before_colons() {
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON);
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(crate::Method::GET),
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ };
+ Client::parse(&mut raw, ctx).unwrap_err();
+ }
+
+ #[cfg(feature = "server")]
+ #[test]
+ fn test_parse_preserve_header_case_in_request() {
+ let mut raw =
+ BytesMut::from("GET / HTTP/1.1\r\nHost: hyper.rs\r\nX-BREAD: baguette\r\n\r\n");
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut None,
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: true,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ };
+ let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap();
+ let orig_headers = parsed_message
+ .head
+ .extensions
+ .get::<HeaderCaseMap>()
+ .unwrap();
+ assert_eq!(
+ orig_headers
+ .get_all_internal(&HeaderName::from_static("host"))
+ .collect::<Vec<_>>(),
+ vec![&Bytes::from("Host")]
+ );
+ assert_eq!(
+ orig_headers
+ .get_all_internal(&HeaderName::from_static("x-bread"))
+ .collect::<Vec<_>>(),
+ vec![&Bytes::from("X-BREAD")]
+ );
+ }
+
+ #[cfg(feature = "server")]
+ #[test]
+ fn test_decoder_request() {
+ fn parse(s: &str) -> ParsedMessage<RequestLine> {
+ let mut bytes = BytesMut::from(s);
+ Server::parse(
+ &mut bytes,
+ ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut None,
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ },
+ )
+ .expect("parse ok")
+ .expect("parse complete")
+ }
+
+ fn parse_err(s: &str, comment: &str) -> crate::error::Parse {
+ let mut bytes = BytesMut::from(s);
+ Server::parse(
+ &mut bytes,
+ ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut None,
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ },
+ )
+ .expect_err(comment)
+ }
+
+ // no length or transfer-encoding means 0-length body
+ assert_eq!(
+ parse(
+ "\
+ GET / HTTP/1.1\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::ZERO
+ );
+
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::ZERO
+ );
+
+ // transfer-encoding: chunked
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: gzip, chunked\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: gzip\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ // content-length
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ content-length: 10\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::new(10)
+ );
+
+ // transfer-encoding and content-length = chunked
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ content-length: 10\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: chunked\r\n\
+ content-length: 10\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: gzip\r\n\
+ content-length: 10\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ // multiple content-lengths of same value are fine
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.1\r\n\
+ content-length: 10\r\n\
+ content-length: 10\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::new(10)
+ );
+
+ // multiple content-lengths with different values is an error
+ parse_err(
+ "\
+ POST / HTTP/1.1\r\n\
+ content-length: 10\r\n\
+ content-length: 11\r\n\
+ \r\n\
+ ",
+ "multiple content-lengths",
+ );
+
+ // content-length with prefix is not allowed
+ parse_err(
+ "\
+ POST / HTTP/1.1\r\n\
+ content-length: +10\r\n\
+ \r\n\
+ ",
+ "prefixed content-length",
+ );
+
+ // transfer-encoding that isn't chunked is an error
+ parse_err(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: gzip\r\n\
+ \r\n\
+ ",
+ "transfer-encoding but not chunked",
+ );
+
+ parse_err(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: chunked, gzip\r\n\
+ \r\n\
+ ",
+ "transfer-encoding doesn't end in chunked",
+ );
+
+ parse_err(
+ "\
+ POST / HTTP/1.1\r\n\
+ transfer-encoding: chunked\r\n\
+ transfer-encoding: afterlol\r\n\
+ \r\n\
+ ",
+ "transfer-encoding multiple lines doesn't end in chunked",
+ );
+
+ // http/1.0
+
+ assert_eq!(
+ parse(
+ "\
+ POST / HTTP/1.0\r\n\
+ content-length: 10\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::new(10)
+ );
+
+ // 1.0 doesn't understand chunked, so its an error
+ parse_err(
+ "\
+ POST / HTTP/1.0\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ ",
+ "1.0 chunked",
+ );
+ }
+
+ #[test]
+ fn test_decoder_response() {
+ fn parse(s: &str) -> ParsedMessage<StatusCode> {
+ parse_with_method(s, Method::GET)
+ }
+
+ fn parse_ignores(s: &str) {
+ let mut bytes = BytesMut::from(s);
+ assert!(Client::parse(
+ &mut bytes,
+ ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(Method::GET),
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ }
+ )
+ .expect("parse ok")
+ .is_none())
+ }
+
+ fn parse_with_method(s: &str, m: Method) -> ParsedMessage<StatusCode> {
+ let mut bytes = BytesMut::from(s);
+ Client::parse(
+ &mut bytes,
+ ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(m),
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ },
+ )
+ .expect("parse ok")
+ .expect("parse complete")
+ }
+
+ fn parse_err(s: &str) -> crate::error::Parse {
+ let mut bytes = BytesMut::from(s);
+ Client::parse(
+ &mut bytes,
+ ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(Method::GET),
+ h1_parser_config: Default::default(),
+ h1_max_headers: None,
+ preserve_header_case: false,
+ #[cfg(feature = "ffi")]
+ preserve_header_order: false,
+ h09_responses: false,
+ #[cfg(feature = "client")]
+ on_informational: &mut None,
+ },
+ )
+ .expect_err("parse should err")
+ }
+
+ // no content-length or transfer-encoding means close-delimited
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CLOSE_DELIMITED
+ );
+
+ // 204 and 304 never have a body
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 204 No Content\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::ZERO
+ );
+
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 304 Not Modified\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::ZERO
+ );
+
+ // content-length
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: 8\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::new(8)
+ );
+
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: 8\r\n\
+ content-length: 8\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::new(8)
+ );
+
+ parse_err(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: 8\r\n\
+ content-length: 9\r\n\
+ \r\n\
+ ",
+ );
+
+ parse_err(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: +8\r\n\
+ \r\n\
+ ",
+ );
+
+ // transfer-encoding: chunked
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ // transfer-encoding not-chunked is close-delimited
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ transfer-encoding: yolo\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CLOSE_DELIMITED
+ );
+
+ // transfer-encoding and content-length = chunked
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: 10\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CHUNKED
+ );
+
+ // HEAD can have content-length, but not body
+ assert_eq!(
+ parse_with_method(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: 8\r\n\
+ \r\n\
+ ",
+ Method::HEAD
+ )
+ .decode,
+ DecodedLength::ZERO
+ );
+
+ // CONNECT with 200 never has body
+ {
+ let msg = parse_with_method(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ \r\n\
+ ",
+ Method::CONNECT,
+ );
+ assert_eq!(msg.decode, DecodedLength::ZERO);
+ assert!(!msg.keep_alive, "should be upgrade");
+ assert!(msg.wants_upgrade, "should be upgrade");
+ }
+
+ // CONNECT receiving non 200 can have a body
+ assert_eq!(
+ parse_with_method(
+ "\
+ HTTP/1.1 400 Bad Request\r\n\
+ \r\n\
+ ",
+ Method::CONNECT
+ )
+ .decode,
+ DecodedLength::CLOSE_DELIMITED
+ );
+
+ // 1xx status codes
+ parse_ignores(
+ "\
+ HTTP/1.1 100 Continue\r\n\
+ \r\n\
+ ",
+ );
+
+ parse_ignores(
+ "\
+ HTTP/1.1 103 Early Hints\r\n\
+ \r\n\
+ ",
+ );
+
+ // 101 upgrade not supported yet
+ {
+ let msg = parse(
+ "\
+ HTTP/1.1 101 Switching Protocols\r\n\
+ \r\n\
+ ",
+ );
+ assert_eq!(msg.decode, DecodedLength::ZERO);
+ assert!(!msg.keep_alive, "should be last");
+ assert!(msg.wants_upgrade, "should be upgrade");
+ }
+
+ // http/1.0
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.0 200 OK\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CLOSE_DELIMITED
+ );
+
+ // 1.0 doesn't understand chunked
+ parse_err(
+ "\
+ HTTP/1.0 200 OK\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ ",
+ );
+
+ // keep-alive
+ assert!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: 0\r\n\
+ \r\n\
+ "
+ )
+ .keep_alive,
+ "HTTP/1.1 keep-alive is default"
+ );
+
+ assert!(
+ !parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ content-length: 0\r\n\
+ connection: foo, close, bar\r\n\
+ \r\n\
+ "
+ )
+ .keep_alive,
+ "connection close is always close"
+ );
+
+ assert!(
+ !parse(
+ "\
+ HTTP/1.0 200 OK\r\n\
+ content-length: 0\r\n\
+ \r\n\
+ "
+ )
+ .keep_alive,
+ "HTTP/1.0 close is default"
+ );
+
+ assert!(
+ parse(
+ "\
+ HTTP/1.0 200 OK\r\n\
+ content-length: 0\r\n\
+ connection: foo, keep-alive, bar\r\n\
+ \r\n\
+ "
+ )
+ .keep_alive,
+ "connection keep-alive is always keep-alive"
+ );
+ }
+
+ #[cfg(feature = "client")]
+ #[test]
+ fn test_client_obs_fold_line() {
+ fn unfold(src: &str) -> String {
+ let mut buf = src.as_bytes().to_vec();
+ let mut idx = HeaderIndices {
+ name: (0, 0),
+ value: (0, buf.len()),
+ };
+ Client::obs_fold_line(&mut buf, &mut idx);
+ String::from_utf8(buf[idx.value.0..idx.value.1].to_vec()).unwrap()
+ }
+
+ assert_eq!(unfold("a normal line"), "a normal line",);
+
+ assert_eq!(unfold("obs\r\n fold\r\n\t line"), "obs fold line",);
+ }
+
+ #[test]
+ fn test_client_request_encode_title_case() {
+ use crate::proto::BodyLength;
+ use http::header::HeaderValue;
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+ head.headers.insert("*-*", HeaderValue::from_static("o_o"));
+
+ let mut vec = Vec::new();
+ Client::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ #[cfg(feature = "server")]
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: true,
+ #[cfg(feature = "server")]
+ date_header: true,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ assert_eq!(vec, b"GET / HTTP/1.1\r\nContent-Length: 10\r\nContent-Type: application/json\r\n*-*: o_o\r\n\r\n".to_vec());
+ }
+
+ #[test]
+ fn test_client_request_encode_orig_case() {
+ use crate::proto::BodyLength;
+ use http::header::{HeaderValue, CONTENT_LENGTH};
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+
+ let mut orig_headers = HeaderCaseMap::default();
+ orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+ head.extensions.insert(orig_headers);
+
+ let mut vec = Vec::new();
+ Client::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ #[cfg(feature = "server")]
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: false,
+ #[cfg(feature = "server")]
+ date_header: true,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ assert_eq!(
+ &*vec,
+ b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\n\r\n"
+ .as_ref(),
+ );
+ }
+ #[test]
+ fn test_client_request_encode_orig_and_title_case() {
+ use crate::proto::BodyLength;
+ use http::header::{HeaderValue, CONTENT_LENGTH};
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+
+ let mut orig_headers = HeaderCaseMap::default();
+ orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+ head.extensions.insert(orig_headers);
+
+ let mut vec = Vec::new();
+ Client::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ #[cfg(feature = "server")]
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: true,
+ #[cfg(feature = "server")]
+ date_header: true,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ assert_eq!(
+ &*vec,
+ b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n"
+ .as_ref(),
+ );
+ }
+
+ #[cfg(feature = "server")]
+ #[test]
+ fn test_server_encode_connect_method() {
+ let mut head = MessageHead::default();
+
+ let mut vec = Vec::new();
+ let encoder = Server::encode(
+ Encode {
+ head: &mut head,
+ body: None,
+ keep_alive: true,
+ req_method: &mut Some(Method::CONNECT),
+ title_case_headers: false,
+ date_header: true,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ assert!(encoder.is_last());
+ }
+
+    /// Server response encoding with `title_case_headers: true` and no
+    /// original-case map: every header name is title-cased, including the
+    /// double-hyphen edge case `weird--header` -> `Weird--Header`.
+    #[cfg(feature = "server")]
+    #[test]
+    fn test_server_response_encode_title_case() {
+        use crate::proto::BodyLength;
+        use http::header::HeaderValue;
+
+        let mut head = MessageHead::default();
+        head.headers
+            .insert("content-length", HeaderValue::from_static("10"));
+        head.headers
+            .insert("content-type", HeaderValue::from_static("application/json"));
+        head.headers
+            .insert("weird--header", HeaderValue::from_static(""));
+
+        let mut vec = Vec::new();
+        Server::encode(
+            Encode {
+                head: &mut head,
+                body: Some(BodyLength::Known(10)),
+                keep_alive: true,
+                req_method: &mut None,
+                title_case_headers: true,
+                date_header: true,
+            },
+            &mut vec,
+        )
+        .unwrap();
+
+        let expected_response =
+            b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\nWeird--Header: \r\n";
+
+        // Only compare the prefix: the remainder contains a dynamic Date header.
+        assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
+    }
+
+    /// Server response encoding with an original-case map but title-casing
+    /// disabled: the recorded casing (CONTENT-LENGTH) is used for its entry
+    /// while the rest stay lowercase (content-type, date).
+    #[cfg(feature = "server")]
+    #[test]
+    fn test_server_response_encode_orig_case() {
+        use crate::proto::BodyLength;
+        use http::header::{HeaderValue, CONTENT_LENGTH};
+
+        let mut head = MessageHead::default();
+        head.headers
+            .insert("content-length", HeaderValue::from_static("10"));
+        head.headers
+            .insert("content-type", HeaderValue::from_static("application/json"));
+
+        let mut orig_headers = HeaderCaseMap::default();
+        orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+        head.extensions.insert(orig_headers);
+
+        let mut vec = Vec::new();
+        Server::encode(
+            Encode {
+                head: &mut head,
+                body: Some(BodyLength::Known(10)),
+                keep_alive: true,
+                req_method: &mut None,
+                title_case_headers: false,
+                date_header: true,
+            },
+            &mut vec,
+        )
+        .unwrap();
+
+        let expected_response =
+            b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\ndate: ";
+
+        // Prefix comparison: the actual date value is dynamic.
+        assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
+    }
+
+    /// Server response encoding with BOTH an original-case map and
+    /// title-casing: recorded casing wins for its entry, title-casing
+    /// applies elsewhere (Content-Type, Date).
+    #[cfg(feature = "server")]
+    #[test]
+    fn test_server_response_encode_orig_and_title_case() {
+        use crate::proto::BodyLength;
+        use http::header::{HeaderValue, CONTENT_LENGTH};
+
+        let mut head = MessageHead::default();
+        head.headers
+            .insert("content-length", HeaderValue::from_static("10"));
+        head.headers
+            .insert("content-type", HeaderValue::from_static("application/json"));
+
+        let mut orig_headers = HeaderCaseMap::default();
+        orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+        head.extensions.insert(orig_headers);
+
+        let mut vec = Vec::new();
+        Server::encode(
+            Encode {
+                head: &mut head,
+                body: Some(BodyLength::Known(10)),
+                keep_alive: true,
+                req_method: &mut None,
+                title_case_headers: true,
+                date_header: true,
+            },
+            &mut vec,
+        )
+        .unwrap();
+
+        // this will also test that the date does exist
+        let expected_response =
+            b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\nDate: ";
+
+        assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
+    }
+
+    /// With `date_header: false` the server emits no Date header at all,
+    /// so the full response bytes can be compared exactly.
+    #[cfg(feature = "server")]
+    #[test]
+    fn test_disabled_date_header() {
+        use crate::proto::BodyLength;
+        use http::header::{HeaderValue, CONTENT_LENGTH};
+
+        let mut head = MessageHead::default();
+        head.headers
+            .insert("content-length", HeaderValue::from_static("10"));
+        head.headers
+            .insert("content-type", HeaderValue::from_static("application/json"));
+
+        let mut orig_headers = HeaderCaseMap::default();
+        orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+        head.extensions.insert(orig_headers);
+
+        let mut vec = Vec::new();
+        Server::encode(
+            Encode {
+                head: &mut head,
+                body: Some(BodyLength::Known(10)),
+                keep_alive: true,
+                req_method: &mut None,
+                title_case_headers: true,
+                date_header: false,
+            },
+            &mut vec,
+        )
+        .unwrap();
+
+        let expected_response =
+            b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n";
+
+        assert_eq!(&vec, &expected_response);
+    }
+
+    /// Horizontal tabs inside a header field value are legal and must be
+    /// preserved verbatim by the response parser.
+    #[test]
+    fn parse_header_htabs() {
+        let mut bytes = BytesMut::from("HTTP/1.1 200 OK\r\nserver: hello\tworld\r\n\r\n");
+        let parsed = Client::parse(
+            &mut bytes,
+            ParseContext {
+                cached_headers: &mut None,
+                req_method: &mut Some(Method::GET),
+                h1_parser_config: Default::default(),
+                h1_max_headers: None,
+                preserve_header_case: false,
+                #[cfg(feature = "ffi")]
+                preserve_header_order: false,
+                h09_responses: false,
+                #[cfg(feature = "client")]
+                on_informational: &mut None,
+            },
+        )
+        .expect("parse ok")
+        .expect("parse complete");
+
+        assert_eq!(parsed.head.headers["server"], "hello\tworld");
+    }
+
+    /// Exercises the `h1_max_headers` limit on both the server-side request
+    /// parser and the client-side response parser: at most `max_headers`
+    /// (default 100 when `None`) header lines are accepted; one more is an
+    /// error.
+    #[cfg(feature = "server")]
+    #[test]
+    fn parse_too_large_headers() {
+        // Builds "GET / HTTP/1.1" followed by `num` synthetic headers.
+        fn gen_req_with_headers(num: usize) -> String {
+            let mut req = String::from("GET / HTTP/1.1\r\n");
+            for i in 0..num {
+                req.push_str(&format!("key{i}: val{i}\r\n"));
+            }
+            req.push_str("\r\n");
+            req
+        }
+        // Same, but as a "200 OK" response for the client parser.
+        fn gen_resp_with_headers(num: usize) -> String {
+            let mut req = String::from("HTTP/1.1 200 OK\r\n");
+            for i in 0..num {
+                req.push_str(&format!("key{i}: val{i}\r\n"));
+            }
+            req.push_str("\r\n");
+            req
+        }
+        // Runs one parse with `gen_size` headers against both roles and
+        // asserts success or failure according to `should_success`.
+        fn parse(max_headers: Option<usize>, gen_size: usize, should_success: bool) {
+            {
+                // server side
+                let mut bytes = BytesMut::from(gen_req_with_headers(gen_size).as_str());
+                let result = Server::parse(
+                    &mut bytes,
+                    ParseContext {
+                        cached_headers: &mut None,
+                        req_method: &mut None,
+                        h1_parser_config: Default::default(),
+                        h1_max_headers: max_headers,
+                        preserve_header_case: false,
+                        #[cfg(feature = "ffi")]
+                        preserve_header_order: false,
+                        h09_responses: false,
+                        #[cfg(feature = "client")]
+                        on_informational: &mut None,
+                    },
+                );
+                if should_success {
+                    result.expect("parse ok").expect("parse complete");
+                } else {
+                    result.expect_err("parse should err");
+                }
+            }
+            {
+                // client side
+                let mut bytes = BytesMut::from(gen_resp_with_headers(gen_size).as_str());
+                let result = Client::parse(
+                    &mut bytes,
+                    ParseContext {
+                        cached_headers: &mut None,
+                        req_method: &mut None,
+                        h1_parser_config: Default::default(),
+                        h1_max_headers: max_headers,
+                        preserve_header_case: false,
+                        #[cfg(feature = "ffi")]
+                        preserve_header_order: false,
+                        h09_responses: false,
+                        #[cfg(feature = "client")]
+                        on_informational: &mut None,
+                    },
+                );
+                if should_success {
+                    result.expect("parse ok").expect("parse complete");
+                } else {
+                    result.expect_err("parse should err");
+                }
+            }
+        }
+
+        // check generator
+        assert_eq!(
+            gen_req_with_headers(0),
+            String::from("GET / HTTP/1.1\r\n\r\n")
+        );
+        assert_eq!(
+            gen_req_with_headers(1),
+            String::from("GET / HTTP/1.1\r\nkey0: val0\r\n\r\n")
+        );
+        assert_eq!(
+            gen_req_with_headers(2),
+            String::from("GET / HTTP/1.1\r\nkey0: val0\r\nkey1: val1\r\n\r\n")
+        );
+        assert_eq!(
+            gen_req_with_headers(3),
+            String::from("GET / HTTP/1.1\r\nkey0: val0\r\nkey1: val1\r\nkey2: val2\r\n\r\n")
+        );
+
+        // default max_headers is 100, so
+        //
+        // - less than or equal to 100, accepted
+        //
+        parse(None, 0, true);
+        parse(None, 1, true);
+        parse(None, 50, true);
+        parse(None, 99, true);
+        parse(None, 100, true);
+        //
+        // - more than 100, rejected
+        //
+        parse(None, 101, false);
+        parse(None, 102, false);
+        parse(None, 200, false);
+
+        // max_headers is 0, parser will reject any headers
+        //
+        // - without header, accepted
+        //
+        parse(Some(0), 0, true);
+        //
+        // - with header(s), rejected
+        //
+        parse(Some(0), 1, false);
+        parse(Some(0), 100, false);
+
+        // max_headers is 200
+        //
+        // - less than or equal to 200, accepted
+        //
+        parse(Some(200), 0, true);
+        parse(Some(200), 1, true);
+        parse(Some(200), 100, true);
+        parse(Some(200), 200, true);
+        //
+        // - more than 200, rejected
+        //
+        parse(Some(200), 201, false);
+        parse(Some(200), 210, false);
+    }
+
+    /// `is_complete_fast` detects a finished head (terminated by a blank
+    /// line, CRLF or bare LF) regardless of the starting offset `n`; inputs
+    /// missing the final terminator must never be reported complete.
+    #[test]
+    fn test_is_complete_fast() {
+        let s = b"GET / HTTP/1.1\r\na: b\r\n\r\n";
+        for n in 0..s.len() {
+            assert!(is_complete_fast(s, n), "{:?}; {}", s, n);
+        }
+        let s = b"GET / HTTP/1.1\na: b\n\n";
+        for n in 0..s.len() {
+            assert!(is_complete_fast(s, n));
+        }
+
+        // Not
+        let s = b"GET / HTTP/1.1\r\na: b\r\n\r";
+        for n in 0..s.len() {
+            assert!(!is_complete_fast(s, n));
+        }
+        let s = b"GET / HTTP/1.1\na: b\n";
+        for n in 0..s.len() {
+            assert!(!is_complete_fast(s, n));
+        }
+    }
+
+    /// An original-case header with an empty value must serialize as
+    /// `Name:\r\n` — no space between the colon and the CRLF.
+    #[test]
+    fn test_write_headers_orig_case_empty_value() {
+        let mut headers = HeaderMap::new();
+        let name = http::header::HeaderName::from_static("x-empty");
+        headers.insert(&name, "".parse().expect("parse empty"));
+        let mut orig_cases = HeaderCaseMap::default();
+        orig_cases.insert(name, Bytes::from_static(b"X-EmptY"));
+
+        let mut dst = Vec::new();
+        super::write_headers_original_case(&headers, &orig_cases, &mut dst, false);
+
+        assert_eq!(
+            dst, b"X-EmptY:\r\n",
+            "there should be no space between the colon and CRLF"
+        );
+    }
+
+    /// When one header name has multiple values, each value is written with
+    /// its own recorded casing, in insertion order.
+    #[test]
+    fn test_write_headers_orig_case_multiple_entries() {
+        let mut headers = HeaderMap::new();
+        let name = http::header::HeaderName::from_static("x-empty");
+        headers.insert(&name, "a".parse().unwrap());
+        headers.append(&name, "b".parse().unwrap());
+
+        // Two distinct casings recorded for the same header name.
+        let mut orig_cases = HeaderCaseMap::default();
+        orig_cases.insert(name.clone(), Bytes::from_static(b"X-Empty"));
+        orig_cases.append(name, Bytes::from_static(b"X-EMPTY"));
+
+        let mut dst = Vec::new();
+        super::write_headers_original_case(&headers, &orig_cases, &mut dst, false);
+
+        assert_eq!(dst, b"X-Empty: a\r\nX-EMPTY: b\r\n");
+    }
+
+    // Nightly-only micro-benchmarks (require `cargo bench` with the
+    // unstable `test` crate, gated by the "nightly" feature).
+    #[cfg(feature = "nightly")]
+    use test::Bencher;
+
+    /// Benchmarks parsing of a large, many-header request. The buffer is
+    /// re-armed each iteration via `restart` so the same bytes are reparsed.
+    #[cfg(feature = "nightly")]
+    #[bench]
+    fn bench_parse_incoming(b: &mut Bencher) {
+        let mut raw = BytesMut::from(
+            &b"GET /super_long_uri/and_whatever?what_should_we_talk_about/\
+            I_wonder/Hard_to_write_in_an_uri_after_all/you_have_to_make\
+            _up_the_punctuation_yourself/how_fun_is_that?test=foo&test1=\
+            foo1&test2=foo2&test3=foo3&test4=foo4 HTTP/1.1\r\nHost: \
+            hyper.rs\r\nAccept: a lot of things\r\nAccept-Charset: \
+            utf8\r\nAccept-Encoding: *\r\nAccess-Control-Allow-\
+            Credentials: None\r\nAccess-Control-Allow-Origin: None\r\n\
+            Access-Control-Allow-Methods: None\r\nAccess-Control-Allow-\
+            Headers: None\r\nContent-Encoding: utf8\r\nContent-Security-\
+            Policy: None\r\nContent-Type: text/html\r\nOrigin: hyper\
+            \r\nSec-Websocket-Extensions: It looks super important!\r\n\
+            Sec-Websocket-Origin: hyper\r\nSec-Websocket-Version: 4.3\r\
+            \nStrict-Transport-Security: None\r\nUser-Agent: hyper\r\n\
+            X-Content-Duration: None\r\nX-Content-Security-Policy: None\
+            \r\nX-DNSPrefetch-Control: None\r\nX-Frame-Options: \
+            Something important obviously\r\nX-Requested-With: Nothing\
+            \r\n\r\n"[..],
+        );
+        let len = raw.len();
+        let mut headers = Some(HeaderMap::new());
+
+        b.bytes = len as u64;
+        b.iter(|| {
+            let mut msg = Server::parse(
+                &mut raw,
+                ParseContext {
+                    cached_headers: &mut headers,
+                    req_method: &mut None,
+                    h1_parser_config: Default::default(),
+                    h1_max_headers: None,
+                    preserve_header_case: false,
+                    #[cfg(feature = "ffi")]
+                    preserve_header_order: false,
+                    h09_responses: false,
+                    #[cfg(feature = "client")]
+                    on_informational: &mut None,
+                },
+            )
+            .unwrap()
+            .unwrap();
+            ::test::black_box(&msg);
+
+            // Remove all references pointing into BytesMut.
+            msg.head.headers.clear();
+            headers = Some(msg.head.headers);
+            std::mem::take(&mut msg.head.subject);
+
+            restart(&mut raw, len);
+        });
+
+        // Restores the consumed BytesMut to its full original length.
+        // SAFETY relies on the parse never shrinking capacity below `len`;
+        // the bytes themselves are untouched by parsing.
+        fn restart(b: &mut BytesMut, len: usize) {
+            b.reserve(1);
+            unsafe {
+                b.set_len(len);
+            }
+        }
+    }
+
+    /// Benchmarks parsing of a minimal single-header request.
+    #[cfg(feature = "nightly")]
+    #[bench]
+    fn bench_parse_short(b: &mut Bencher) {
+        let s = &b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"[..];
+        let mut raw = BytesMut::from(s);
+        let len = raw.len();
+        let mut headers = Some(HeaderMap::new());
+
+        b.bytes = len as u64;
+        b.iter(|| {
+            let mut msg = Server::parse(
+                &mut raw,
+                ParseContext {
+                    cached_headers: &mut headers,
+                    req_method: &mut None,
+                    h1_parser_config: Default::default(),
+                    h1_max_headers: None,
+                    preserve_header_case: false,
+                    #[cfg(feature = "ffi")]
+                    preserve_header_order: false,
+                    h09_responses: false,
+                    #[cfg(feature = "client")]
+                    on_informational: &mut None,
+                },
+            )
+            .unwrap()
+            .unwrap();
+            ::test::black_box(&msg);
+            msg.head.headers.clear();
+            headers = Some(msg.head.headers);
+            restart(&mut raw, len);
+        });
+
+        // Same re-arming trick as in bench_parse_incoming.
+        fn restart(b: &mut BytesMut, len: usize) {
+            b.reserve(1);
+            unsafe {
+                b.set_len(len);
+            }
+        }
+    }
+
+    /// Benchmarks response encoding with a preset two-header map; `len` is
+    /// the expected byte length of the encoded head (asserted each iter).
+    #[cfg(feature = "nightly")]
+    #[bench]
+    fn bench_server_encode_headers_preset(b: &mut Bencher) {
+        use crate::proto::BodyLength;
+        use http::header::HeaderValue;
+
+        let len = 108;
+        b.bytes = len as u64;
+
+        let mut head = MessageHead::default();
+        let mut headers = HeaderMap::new();
+        headers.insert("content-length", HeaderValue::from_static("10"));
+        headers.insert("content-type", HeaderValue::from_static("application/json"));
+
+        b.iter(|| {
+            let mut vec = Vec::new();
+            head.headers = headers.clone();
+            Server::encode(
+                Encode {
+                    head: &mut head,
+                    body: Some(BodyLength::Known(10)),
+                    keep_alive: true,
+                    req_method: &mut Some(Method::GET),
+                    title_case_headers: false,
+                    date_header: true,
+                },
+                &mut vec,
+            )
+            .unwrap();
+            assert_eq!(vec.len(), len);
+            ::test::black_box(vec);
+        })
+    }
+
+    /// Benchmarks response encoding when the head carries no headers at all
+    /// (only the auto-generated ones are written).
+    #[cfg(feature = "nightly")]
+    #[bench]
+    fn bench_server_encode_no_headers(b: &mut Bencher) {
+        use crate::proto::BodyLength;
+
+        let len = 76;
+        b.bytes = len as u64;
+
+        let mut head = MessageHead::default();
+        let mut vec = Vec::with_capacity(128);
+
+        b.iter(|| {
+            Server::encode(
+                Encode {
+                    head: &mut head,
+                    body: Some(BodyLength::Known(10)),
+                    keep_alive: true,
+                    req_method: &mut Some(Method::GET),
+                    title_case_headers: false,
+                    date_header: true,
+                },
+                &mut vec,
+            )
+            .unwrap();
+            assert_eq!(vec.len(), len);
+            ::test::black_box(&vec);
+
+            vec.clear();
+        })
+    }
+}
diff --git a/vendor/hyper/src/proto/h2/client.rs b/vendor/hyper/src/proto/h2/client.rs
new file mode 100644
index 00000000..5e9641e4
--- /dev/null
+++ b/vendor/hyper/src/proto/h2/client.rs
@@ -0,0 +1,749 @@
+use std::{
+ convert::Infallible,
+ future::Future,
+ marker::PhantomData,
+ pin::Pin,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use crate::rt::{Read, Write};
+use bytes::Bytes;
+use futures_channel::mpsc::{Receiver, Sender};
+use futures_channel::{mpsc, oneshot};
+use futures_util::future::{Either, FusedFuture, FutureExt as _};
+use futures_util::ready;
+use futures_util::stream::{StreamExt as _, StreamFuture};
+use h2::client::{Builder, Connection, SendRequest};
+use h2::SendStream;
+use http::{Method, StatusCode};
+use pin_project_lite::pin_project;
+
+use super::ping::{Ponger, Recorder};
+use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::client::dispatch::{Callback, SendWhen, TrySendError};
+use crate::common::io::Compat;
+use crate::common::time::Time;
+use crate::ext::Protocol;
+use crate::headers;
+use crate::proto::h2::UpgradedSendStream;
+use crate::proto::Dispatched;
+use crate::rt::bounds::Http2ClientConnExec;
+use crate::upgrade::Upgraded;
+use crate::{Request, Response};
+use h2::client::ResponseFuture;
+
+/// Receiving half of the dispatch channel: yields queued requests paired
+/// with the callback used to deliver their responses.
+type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<IncomingBody>>;
+
+/// An mpsc channel is used to help notify the `Connection` task when *all*
+/// other handles to it have been dropped, so that it can shutdown.
+type ConnDropRef = mpsc::Sender<Infallible>;
+
+/// A oneshot channel watches the `Connection` task, and when it completes,
+/// the "dispatch" task will be notified and can shutdown sooner.
+type ConnEof = oneshot::Receiver<Infallible>;
+
+// Our defaults are chosen for the "majority" case, which usually are not
+// resource constrained, and so the spec default of 64kb can be too limiting
+// for performance.
+const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb
+const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
+const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb
+const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb
+const DEFAULT_MAX_HEADER_LIST_SIZE: u32 = 1024 * 16; // 16kb
+
+// The maximum number of concurrent streams that the client is allowed to open
+// before it receives the initial SETTINGS frame from the server.
+// This default value is derived from what the HTTP/2 spec recommends as the
+// minimum value that endpoints advertise to their peers. It means that using
+// this value will minimize the chance of the failure where the local endpoint
+// attempts to open too many streams and gets rejected by the remote peer with
+// the `REFUSED_STREAM` error.
+const DEFAULT_INITIAL_MAX_SEND_STREAMS: usize = 100;
+
+/// HTTP/2 client connection settings, mapped onto the `h2` builder in
+/// `new_builder` and the ping/keep-alive driver in `new_ping_config`.
+#[derive(Clone, Debug)]
+pub(crate) struct Config {
+    // When true, the BDP ping machinery tunes window sizes at runtime.
+    pub(crate) adaptive_window: bool,
+    pub(crate) initial_conn_window_size: u32,
+    pub(crate) initial_stream_window_size: u32,
+    pub(crate) initial_max_send_streams: usize,
+    // `None` leaves the h2 default frame size in place.
+    pub(crate) max_frame_size: Option<u32>,
+    pub(crate) max_header_list_size: u32,
+    // `None` disables keep-alive pings entirely.
+    pub(crate) keep_alive_interval: Option<Duration>,
+    pub(crate) keep_alive_timeout: Duration,
+    pub(crate) keep_alive_while_idle: bool,
+    pub(crate) max_concurrent_reset_streams: Option<usize>,
+    pub(crate) max_send_buffer_size: usize,
+    pub(crate) max_pending_accept_reset_streams: Option<usize>,
+    pub(crate) header_table_size: Option<u32>,
+    pub(crate) max_concurrent_streams: Option<u32>,
+}
+
+impl Default for Config {
+    /// Defaults favor throughput over the conservative spec minimums; see
+    /// the `DEFAULT_*` constants above for the rationale on each value.
+    fn default() -> Config {
+        Config {
+            adaptive_window: false,
+            initial_conn_window_size: DEFAULT_CONN_WINDOW,
+            initial_stream_window_size: DEFAULT_STREAM_WINDOW,
+            initial_max_send_streams: DEFAULT_INITIAL_MAX_SEND_STREAMS,
+            max_frame_size: Some(DEFAULT_MAX_FRAME_SIZE),
+            max_header_list_size: DEFAULT_MAX_HEADER_LIST_SIZE,
+            keep_alive_interval: None,
+            keep_alive_timeout: Duration::from_secs(20),
+            keep_alive_while_idle: false,
+            max_concurrent_reset_streams: None,
+            max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,
+            max_pending_accept_reset_streams: None,
+            header_table_size: None,
+            max_concurrent_streams: None,
+        }
+    }
+}
+
+/// Translates a [`Config`] into an `h2::client::Builder`. Server push is
+/// always disabled; optional settings are applied only when present.
+fn new_builder(config: &Config) -> Builder {
+    let mut builder = Builder::default();
+    builder
+        .initial_max_send_streams(config.initial_max_send_streams)
+        .initial_window_size(config.initial_stream_window_size)
+        .initial_connection_window_size(config.initial_conn_window_size)
+        .max_header_list_size(config.max_header_list_size)
+        .max_send_buffer_size(config.max_send_buffer_size)
+        .enable_push(false);
+    if let Some(max) = config.max_frame_size {
+        builder.max_frame_size(max);
+    }
+    if let Some(max) = config.max_concurrent_reset_streams {
+        builder.max_concurrent_reset_streams(max);
+    }
+    if let Some(max) = config.max_pending_accept_reset_streams {
+        builder.max_pending_accept_reset_streams(max);
+    }
+    if let Some(size) = config.header_table_size {
+        builder.header_table_size(size);
+    }
+    if let Some(max) = config.max_concurrent_streams {
+        builder.max_concurrent_streams(max);
+    }
+    builder
+}
+
+/// Derives the ping/keep-alive driver configuration: BDP-based window
+/// probing only when `adaptive_window` is set, plus keep-alive settings.
+fn new_ping_config(config: &Config) -> ping::Config {
+    ping::Config {
+        bdp_initial_window: if config.adaptive_window {
+            Some(config.initial_stream_window_size)
+        } else {
+            None
+        },
+        keep_alive_interval: config.keep_alive_interval,
+        keep_alive_timeout: config.keep_alive_timeout,
+        keep_alive_while_idle: config.keep_alive_while_idle,
+    }
+}
+
+/// Performs the HTTP/2 client handshake over `io`, spawns the connection
+/// driver onto `exec`, and returns the [`ClientTask`] that dispatches
+/// requests arriving on `req_rx` to the new connection.
+pub(crate) async fn handshake<T, B, E>(
+    io: T,
+    req_rx: ClientRx<B>,
+    config: &Config,
+    mut exec: E,
+    timer: Time,
+) -> crate::Result<ClientTask<B, E, T>>
+where
+    T: Read + Write + Unpin,
+    B: Body + 'static,
+    B::Data: Send + 'static,
+    E: Http2ClientConnExec<B, T> + Unpin,
+    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+{
+    let (h2_tx, mut conn) = new_builder(config)
+        .handshake::<_, SendBuf<B::Data>>(Compat::new(io))
+        .await
+        .map_err(crate::Error::new_h2)?;
+
+    // An mpsc channel is used entirely to detect when the
+    // 'Client' has been dropped. This is to get around a bug
+    // in h2 where dropping all SendRequests won't notify a
+    // parked Connection.
+    let (conn_drop_ref, rx) = mpsc::channel(1);
+    let (cancel_tx, conn_eof) = oneshot::channel();
+
+    let conn_drop_rx = rx.into_future();
+
+    let ping_config = new_ping_config(config);
+
+    // Wrap the connection in the ping/BDP driver only if keep-alive or
+    // adaptive windowing is configured; otherwise drive the bare conn.
+    let (conn, ping) = if ping_config.is_enabled() {
+        let pp = conn.ping_pong().expect("conn.ping_pong");
+        let (recorder, ponger) = ping::channel(pp, ping_config, timer);
+
+        let conn: Conn<_, B> = Conn::new(ponger, conn);
+        (Either::Left(conn), recorder)
+    } else {
+        (Either::Right(conn), ping::disabled())
+    };
+    let conn: ConnMapErr<T, B> = ConnMapErr {
+        conn,
+        is_terminated: false,
+    };
+
+    exec.execute_h2_future(H2ClientFuture::Task {
+        task: ConnTask::new(conn, conn_drop_rx, cancel_tx),
+    });
+
+    Ok(ClientTask {
+        ping,
+        conn_drop_ref,
+        conn_eof,
+        executor: exec,
+        h2_tx,
+        req_rx,
+        fut_ctx: None,
+        marker: PhantomData,
+    })
+}
+
+pin_project! {
+    /// Drives the h2 `Connection` together with its ping `Ponger`, so that
+    /// BDP window updates and keep-alive timeouts are serviced alongside
+    /// connection I/O.
+    struct Conn<T, B>
+    where
+        B: Body,
+    {
+        #[pin]
+        ponger: Ponger,
+        #[pin]
+        conn: Connection<Compat<T>, SendBuf<<B as Body>::Data>>,
+    }
+}
+
+impl<T, B> Conn<T, B>
+where
+    B: Body,
+    T: Read + Write + Unpin,
+{
+    fn new(ponger: Ponger, conn: Connection<Compat<T>, SendBuf<<B as Body>::Data>>) -> Self {
+        Conn { ponger, conn }
+    }
+}
+
+impl<T, B> Future for Conn<T, B>
+where
+    B: Body,
+    T: Read + Write + Unpin,
+{
+    type Output = Result<(), h2::Error>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut this = self.project();
+        // Service the ponger first: it may adjust window sizes or signal a
+        // keep-alive timeout (which resolves the connection as finished).
+        match this.ponger.poll(cx) {
+            Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {
+                this.conn.set_target_window_size(wnd);
+                this.conn.set_initial_window_size(wnd)?;
+            }
+            Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
+                debug!("connection keep-alive timed out");
+                return Poll::Ready(Ok(()));
+            }
+            Poll::Pending => {}
+        }
+
+        Pin::new(&mut this.conn).poll(cx)
+    }
+}
+
+pin_project! {
+    /// Adapts the connection future's `h2::Error` into `()` (logging it),
+    /// and latches completion so it is safe to poll after finishing
+    /// (returns `Pending` instead of panicking).
+    struct ConnMapErr<T, B>
+    where
+        B: Body,
+        T: Read,
+        T: Write,
+        T: Unpin,
+    {
+        #[pin]
+        conn: Either<Conn<T, B>, Connection<Compat<T>, SendBuf<<B as Body>::Data>>>,
+        #[pin]
+        is_terminated: bool,
+    }
+}
+
+impl<T, B> Future for ConnMapErr<T, B>
+where
+    B: Body,
+    T: Read + Write + Unpin,
+{
+    type Output = Result<(), ()>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut this = self.project();
+
+        // Once finished, stay Pending forever; FusedFuture::is_terminated
+        // tells callers not to poll again.
+        if *this.is_terminated {
+            return Poll::Pending;
+        }
+        let polled = this.conn.poll(cx);
+        if polled.is_ready() {
+            *this.is_terminated = true;
+        }
+        polled.map_err(|_e| {
+            debug!(error = %_e, "connection error");
+        })
+    }
+}
+
+impl<T, B> FusedFuture for ConnMapErr<T, B>
+where
+    B: Body,
+    T: Read + Write + Unpin,
+{
+    fn is_terminated(&self) -> bool {
+        self.is_terminated
+    }
+}
+
+pin_project! {
+    /// Background task that drives the connection while also watching
+    /// `drop_rx` (all client handles dropped) and holding `cancel_tx`,
+    /// whose drop notifies the dispatch task that the connection is gone.
+    pub struct ConnTask<T, B>
+    where
+        B: Body,
+        T: Read,
+        T: Write,
+        T: Unpin,
+    {
+        #[pin]
+        drop_rx: StreamFuture<Receiver<Infallible>>,
+        #[pin]
+        cancel_tx: Option<oneshot::Sender<Infallible>>,
+        #[pin]
+        conn: ConnMapErr<T, B>,
+    }
+}
+
+impl<T, B> ConnTask<T, B>
+where
+    B: Body,
+    T: Read + Write + Unpin,
+{
+    fn new(
+        conn: ConnMapErr<T, B>,
+        drop_rx: StreamFuture<Receiver<Infallible>>,
+        cancel_tx: oneshot::Sender<Infallible>,
+    ) -> Self {
+        Self {
+            drop_rx,
+            cancel_tx: Some(cancel_tx),
+            conn,
+        }
+    }
+}
+
+impl<T, B> Future for ConnTask<T, B>
+where
+    B: Body,
+    T: Read + Write + Unpin,
+{
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut this = self.project();
+
+        if !this.conn.is_terminated() && this.conn.poll_unpin(cx).is_ready() {
+            // ok or err, the `conn` has finished.
+            return Poll::Ready(());
+        }
+
+        if !this.drop_rx.is_terminated() && this.drop_rx.poll_unpin(cx).is_ready() {
+            // mpsc has been dropped, hopefully polling
+            // the connection some more should start shutdown
+            // and then close.
+            trace!("send_request dropped, starting conn shutdown");
+            drop(this.cancel_tx.take().expect("ConnTask Future polled twice"));
+        }
+
+        Poll::Pending
+    }
+}
+
+pin_project! {
+    /// The single future type handed to the user-provided executor: either
+    /// a body pipe (`Pipe`), a response-delivery future (`Send`), or the
+    /// connection driver itself (`Task`). Using one enum avoids requiring
+    /// the executor to be generic over multiple future types.
+    #[project = H2ClientFutureProject]
+    pub enum H2ClientFuture<B, T>
+    where
+        B: http_body::Body,
+        B: 'static,
+        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+        T: Read,
+        T: Write,
+        T: Unpin,
+    {
+        Pipe {
+            #[pin]
+            pipe: PipeMap<B>,
+        },
+        Send {
+            #[pin]
+            send_when: SendWhen<B>,
+        },
+        Task {
+            #[pin]
+            task: ConnTask<T, B>,
+        },
+    }
+}
+
+impl<B, T> Future for H2ClientFuture<B, T>
+where
+    B: http_body::Body + 'static,
+    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+    T: Read + Write + Unpin,
+{
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
+        let this = self.project();
+
+        // Plain delegation to whichever variant this future wraps.
+        match this {
+            H2ClientFutureProject::Pipe { pipe } => pipe.poll(cx),
+            H2ClientFutureProject::Send { send_when } => send_when.poll(cx),
+            H2ClientFutureProject::Task { task } => task.poll(cx),
+        }
+    }
+}
+
+/// Everything needed to finish dispatching one request whose stream was
+/// pending open: saved by `ClientTask::poll` and resumed on the next poll.
+struct FutCtx<B>
+where
+    B: Body,
+{
+    // True for CONNECT requests, which keep the send stream for upgrades.
+    is_connect: bool,
+    // True when the request body is already at end-of-stream.
+    eos: bool,
+    fut: ResponseFuture,
+    body_tx: SendStream<SendBuf<B::Data>>,
+    body: B,
+    cb: Callback<Request<B>, Response<IncomingBody>>,
+}
+
+impl<B: Body> Unpin for FutCtx<B> {}
+
+/// The dispatch task for one HTTP/2 client connection: pulls requests from
+/// `req_rx`, sends them over `h2_tx`, and spawns per-request futures on
+/// `executor`.
+pub(crate) struct ClientTask<B, E, T>
+where
+    B: Body,
+    E: Unpin,
+{
+    ping: ping::Recorder,
+    // Keeps the connection's drop-detection channel alive.
+    conn_drop_ref: ConnDropRef,
+    // Resolves (with Err) when the connection task has closed.
+    conn_eof: ConnEof,
+    executor: E,
+    h2_tx: SendRequest<SendBuf<B::Data>>,
+    req_rx: ClientRx<B>,
+    // Saved state for a request whose stream was pending open.
+    fut_ctx: Option<FutCtx<B>>,
+    marker: PhantomData<T>,
+}
+
+impl<B, E, T> ClientTask<B, E, T>
+where
+    B: Body + 'static,
+    E: Http2ClientConnExec<B, T> + Unpin,
+    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+    T: Read + Write + Unpin,
+{
+    /// Whether the server advertised RFC 8441 extended CONNECT support.
+    pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
+        self.h2_tx.is_extended_connect_protocol_enabled()
+    }
+}
+
+pin_project! {
+    /// Wraps a request-body pipe so that, when the body finishes sending,
+    /// the held `conn_drop_ref` and ping recorder are released — letting
+    /// the connection learn the stream is no longer active.
+    pub struct PipeMap<S>
+    where
+        S: Body,
+    {
+        #[pin]
+        pipe: PipeToSendStream<S>,
+        #[pin]
+        conn_drop_ref: Option<Sender<Infallible>>,
+        #[pin]
+        ping: Option<Recorder>,
+    }
+}
+
+impl<B> Future for PipeMap<B>
+where
+    B: http_body::Body,
+    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+{
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
+        let mut this = self.project();
+
+        match this.pipe.poll_unpin(cx) {
+            Poll::Ready(result) => {
+                // Body errors are logged, not propagated: the response side
+                // surfaces any stream failure to the caller.
+                if let Err(_e) = result {
+                    debug!("client request body error: {}", _e);
+                }
+                drop(this.conn_drop_ref.take().expect("Future polled twice"));
+                drop(this.ping.take().expect("Future polled twice"));
+                return Poll::Ready(());
+            }
+            Poll::Pending => (),
+        };
+        Poll::Pending
+    }
+}
+
+impl<B, E, T> ClientTask<B, E, T>
+where
+    B: Body + 'static + Unpin,
+    B::Data: Send,
+    E: Http2ClientConnExec<B, T> + Unpin,
+    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+    T: Read + Write + Unpin,
+{
+    /// Finishes dispatching one request: spawns the body pipe (unless the
+    /// body is already at EOS or this is CONNECT) and spawns the future
+    /// that awaits the response and invokes the caller's callback.
+    fn poll_pipe(&mut self, f: FutCtx<B>, cx: &mut Context<'_>) {
+        let ping = self.ping.clone();
+
+        // For CONNECT, the send stream is kept and handed to the upgrade
+        // machinery instead of being consumed by a body pipe.
+        let send_stream = if !f.is_connect {
+            if !f.eos {
+                let mut pipe = PipeToSendStream::new(f.body, f.body_tx);
+
+                // eagerly see if the body pipe is ready and
+                // can thus skip allocating in the executor
+                match Pin::new(&mut pipe).poll(cx) {
+                    Poll::Ready(_) => (),
+                    Poll::Pending => {
+                        let conn_drop_ref = self.conn_drop_ref.clone();
+                        // keep the ping recorder's knowledge of an
+                        // "open stream" alive while this body is
+                        // still sending...
+                        let ping = ping.clone();
+
+                        let pipe = PipeMap {
+                            pipe,
+                            conn_drop_ref: Some(conn_drop_ref),
+                            ping: Some(ping),
+                        };
+                        // Clear send task
+                        self.executor
+                            .execute_h2_future(H2ClientFuture::Pipe { pipe });
+                    }
+                }
+            }
+
+            None
+        } else {
+            Some(f.body_tx)
+        };
+
+        self.executor.execute_h2_future(H2ClientFuture::Send {
+            send_when: SendWhen {
+                when: ResponseFutMap {
+                    fut: f.fut,
+                    ping: Some(ping),
+                    send_stream: Some(send_stream),
+                },
+                call_back: Some(f.cb),
+            },
+        });
+    }
+}
+
+pin_project! {
+    /// Awaits the h2 response and converts it into hyper's `Response`,
+    /// handling the CONNECT-upgrade path when a send stream was retained.
+    pub(crate) struct ResponseFutMap<B>
+    where
+        B: Body,
+        B: 'static,
+    {
+        #[pin]
+        fut: ResponseFuture,
+        #[pin]
+        ping: Option<Recorder>,
+        // Outer Option: taken on completion. Inner Option: Some only for
+        // CONNECT requests (see `poll_pipe`).
+        #[pin]
+        send_stream: Option<Option<SendStream<SendBuf<<B as Body>::Data>>>>,
+    }
+}
+
+impl<B> Future for ResponseFutMap<B>
+where
+    B: Body + 'static,
+{
+    type Output = Result<Response<crate::body::Incoming>, (crate::Error, Option<Request<B>>)>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut this = self.project();
+
+        let result = ready!(this.fut.poll(cx));
+
+        let ping = this.ping.take().expect("Future polled twice");
+        let send_stream = this.send_stream.take().expect("Future polled twice");
+
+        match result {
+            Ok(res) => {
+                // record that we got the response headers
+                ping.record_non_data();
+
+                let content_length = headers::content_length_parse_all(res.headers());
+                // CONNECT accepted (2xx-OK with a retained send stream):
+                // convert the stream pair into an `Upgraded` I/O object.
+                if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) {
+                    if content_length.map_or(false, |len| len != 0) {
+                        warn!("h2 connect response with non-zero body not supported");
+
+                        send_stream.send_reset(h2::Reason::INTERNAL_ERROR);
+                        return Poll::Ready(Err((
+                            crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
+                            None::<Request<B>>,
+                        )));
+                    }
+                    let (parts, recv_stream) = res.into_parts();
+                    let mut res = Response::from_parts(parts, IncomingBody::empty());
+
+                    let (pending, on_upgrade) = crate::upgrade::pending();
+                    let io = H2Upgraded {
+                        ping,
+                        send_stream: unsafe { UpgradedSendStream::new(send_stream) },
+                        recv_stream,
+                        buf: Bytes::new(),
+                    };
+                    let upgraded = Upgraded::new(io, Bytes::new());
+
+                    pending.fulfill(upgraded);
+                    res.extensions_mut().insert(on_upgrade);
+
+                    Poll::Ready(Ok(res))
+                } else {
+                    // Normal response: wrap the h2 RecvStream as the body.
+                    let res = res.map(|stream| {
+                        let ping = ping.for_stream(&stream);
+                        IncomingBody::h2(stream, content_length.into(), ping)
+                    });
+                    Poll::Ready(Ok(res))
+                }
+            }
+            Err(err) => {
+                // Prefer reporting a keep-alive timeout over the raw error.
+                ping.ensure_not_timed_out().map_err(|e| (e, None))?;
+
+                debug!("client response error: {}", err);
+                Poll::Ready(Err((crate::Error::new_h2(err), None::<Request<B>>)))
+            }
+        }
+    }
+}
+
+impl<B, E, T> Future for ClientTask<B, E, T>
+where
+    B: Body + 'static + Unpin,
+    B::Data: Send,
+    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+    E: Http2ClientConnExec<B, T> + Unpin,
+    T: Read + Write + Unpin,
+{
+    type Output = crate::Result<Dispatched>;
+
+    /// Main dispatch loop: wait for stream capacity, pull the next queued
+    /// request, send it on the h2 connection, and hand off body/response
+    /// futures to the executor. Resolves when the client handles are
+    /// dropped or the connection closes.
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        loop {
+            // Gate on stream capacity before accepting another request.
+            match ready!(self.h2_tx.poll_ready(cx)) {
+                Ok(()) => (),
+                Err(err) => {
+                    self.ping.ensure_not_timed_out()?;
+                    return if err.reason() == Some(::h2::Reason::NO_ERROR) {
+                        trace!("connection gracefully shutdown");
+                        Poll::Ready(Ok(Dispatched::Shutdown))
+                    } else {
+                        Poll::Ready(Err(crate::Error::new_h2(err)))
+                    };
+                }
+            };
+
+            // If we were waiting on pending open
+            // continue where we left off.
+            if let Some(f) = self.fut_ctx.take() {
+                self.poll_pipe(f, cx);
+                continue;
+            }
+
+            match self.req_rx.poll_recv(cx) {
+                Poll::Ready(Some((req, cb))) => {
+                    // check that future hasn't been canceled already
+                    if cb.is_canceled() {
+                        trace!("request callback is canceled");
+                        continue;
+                    }
+                    let (head, body) = req.into_parts();
+                    let mut req = ::http::Request::from_parts(head, ());
+                    super::strip_connection_headers(req.headers_mut(), true);
+                    if let Some(len) = body.size_hint().exact() {
+                        if len != 0 || headers::method_has_defined_payload_semantics(req.method()) {
+                            headers::set_content_length_if_missing(req.headers_mut(), len);
+                        }
+                    }
+
+                    let is_connect = req.method() == Method::CONNECT;
+                    let eos = body.is_end_stream();
+
+                    // CONNECT requests carry no body; reject early.
+                    if is_connect
+                        && headers::content_length_parse_all(req.headers())
+                            .map_or(false, |len| len != 0)
+                    {
+                        warn!("h2 connect request with non-zero body not supported");
+                        cb.send(Err(TrySendError {
+                            error: crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
+                            message: None,
+                        }));
+                        continue;
+                    }
+
+                    // Translate hyper's Protocol extension into h2's.
+                    if let Some(protocol) = req.extensions_mut().remove::<Protocol>() {
+                        req.extensions_mut().insert(protocol.into_inner());
+                    }
+
+                    let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) {
+                        Ok(ok) => ok,
+                        Err(err) => {
+                            debug!("client send request error: {}", err);
+                            cb.send(Err(TrySendError {
+                                error: crate::Error::new_h2(err),
+                                message: None,
+                            }));
+                            continue;
+                        }
+                    };
+
+                    let f = FutCtx {
+                        is_connect,
+                        eos,
+                        fut,
+                        body_tx,
+                        body,
+                        cb,
+                    };
+
+                    // Check poll_ready() again.
+                    // If the call to send_request() resulted in the new stream being pending open
+                    // we have to wait for the open to complete before accepting new requests.
+                    match self.h2_tx.poll_ready(cx) {
+                        Poll::Pending => {
+                            // Save Context
+                            self.fut_ctx = Some(f);
+                            return Poll::Pending;
+                        }
+                        Poll::Ready(Ok(())) => (),
+                        Poll::Ready(Err(err)) => {
+                            f.cb.send(Err(TrySendError {
+                                error: crate::Error::new_h2(err),
+                                message: None,
+                            }));
+                            continue;
+                        }
+                    }
+                    self.poll_pipe(f, cx);
+                    continue;
+                }
+
+                Poll::Ready(None) => {
+                    trace!("client::dispatch::Sender dropped");
+                    return Poll::Ready(Ok(Dispatched::Shutdown));
+                }
+
+                // No queued request: park until the connection task ends.
+                Poll::Pending => match ready!(Pin::new(&mut self.conn_eof).poll(cx)) {
+                    // As of Rust 1.82, this pattern is no longer needed, and emits a warning.
+                    // But we cannot remove it as long as MSRV is less than that.
+                    #[allow(unused)]
+                    Ok(never) => match never {},
+                    Err(_conn_is_eof) => {
+                        trace!("connection task is closed, closing dispatch task");
+                        return Poll::Ready(Ok(Dispatched::Shutdown));
+                    }
+                },
+            }
+        }
+    }
+}
diff --git a/vendor/hyper/src/proto/h2/mod.rs b/vendor/hyper/src/proto/h2/mod.rs
new file mode 100644
index 00000000..adb6de87
--- /dev/null
+++ b/vendor/hyper/src/proto/h2/mod.rs
@@ -0,0 +1,446 @@
+use std::error::Error as StdError;
+use std::future::Future;
+use std::io::{Cursor, IoSlice};
+use std::mem;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use bytes::{Buf, Bytes};
+use futures_util::ready;
+use h2::{Reason, RecvStream, SendStream};
+use http::header::{HeaderName, CONNECTION, TE, TRANSFER_ENCODING, UPGRADE};
+use http::HeaderMap;
+use pin_project_lite::pin_project;
+
+use crate::body::Body;
+use crate::proto::h2::ping::Recorder;
+use crate::rt::{Read, ReadBufCursor, Write};
+
+pub(crate) mod ping;
+
+cfg_client! {
+ pub(crate) mod client;
+ pub(crate) use self::client::ClientTask;
+}
+
+cfg_server! {
+ pub(crate) mod server;
+ pub(crate) use self::server::Server;
+}
+
/// Default initial stream window size defined in HTTP2 spec.
pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535;

// List of connection headers from RFC 9110 Section 7.6.1
//
// TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're
// tested separately.
//
// These are stripped from messages by `strip_connection_headers` below, since
// connection-level semantics are carried by h2 frames instead.
static CONNECTION_HEADERS: [HeaderName; 4] = [
    HeaderName::from_static("keep-alive"),
    HeaderName::from_static("proxy-connection"),
    TRANSFER_ENCODING,
    UPGRADE,
];
+
+fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
+ for header in &CONNECTION_HEADERS {
+ if headers.remove(header).is_some() {
+ warn!("Connection header illegal in HTTP/2: {}", header.as_str());
+ }
+ }
+
+ if is_request {
+ if headers
+ .get(TE)
+ .map_or(false, |te_header| te_header != "trailers")
+ {
+ warn!("TE headers not set to \"trailers\" are illegal in HTTP/2 requests");
+ headers.remove(TE);
+ }
+ } else if headers.remove(TE).is_some() {
+ warn!("TE headers illegal in HTTP/2 responses");
+ }
+
+ if let Some(header) = headers.remove(CONNECTION) {
+ warn!(
+ "Connection header illegal in HTTP/2: {}",
+ CONNECTION.as_str()
+ );
+ let header_contents = header.to_str().unwrap();
+
+ // A `Connection` header may have a comma-separated list of names of other headers that
+ // are meant for only this specific connection.
+ //
+ // Iterate these names and remove them as headers. Connection-specific headers are
+ // forbidden in HTTP2, as that information has been moved into frame types of the h2
+ // protocol.
+ for name in header_contents.split(',') {
+ let name = name.trim();
+ headers.remove(name);
+ }
+ }
+}
+
+// body adapters used by both Client and Server
+
pin_project! {
    /// Future that pipes a `Body`'s frames into an h2 `SendStream`.
    ///
    /// Body adapter used by both the client (request bodies) and the server
    /// (response bodies).
    pub(crate) struct PipeToSendStream<S>
    where
        S: Body,
    {
        // h2 send half for DATA/trailer frames of this stream.
        body_tx: SendStream<SendBuf<S::Data>>,
        // NOTE(review): only ever set to false in `new` and never read in
        // this module — appears vestigial; confirm before removing.
        data_done: bool,
        #[pin]
        stream: S,
    }
}
+
+impl<S> PipeToSendStream<S>
+where
+ S: Body,
+{
+ fn new(stream: S, tx: SendStream<SendBuf<S::Data>>) -> PipeToSendStream<S> {
+ PipeToSendStream {
+ body_tx: tx,
+ data_done: false,
+ stream,
+ }
+ }
+}
+
impl<S> Future for PipeToSendStream<S>
where
    S: Body,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Output = crate::Result<()>;

    /// Drives the body: reserves h2 capacity, watches for RST_STREAM, and
    /// forwards DATA/trailer frames until the body or the stream ends.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut me = self.project();
        loop {
            // we don't have the next chunk of data yet, so just reserve 1 byte to make
            // sure there's some capacity available. h2 will handle the capacity management
            // for the actual body chunk.
            me.body_tx.reserve_capacity(1);

            if me.body_tx.capacity() == 0 {
                loop {
                    match ready!(me.body_tx.poll_capacity(cx)) {
                        // A zero-byte capacity grant is not actionable; keep waiting.
                        Some(Ok(0)) => {}
                        Some(Ok(_)) => break,
                        Some(Err(e)) => return Poll::Ready(Err(crate::Error::new_body_write(e))),
                        None => {
                            // None means the stream is no longer in a
                            // streaming state, we either finished it
                            // somehow, or the remote reset us.
                            return Poll::Ready(Err(crate::Error::new_body_write(
                                "send stream capacity unexpectedly closed",
                            )));
                        }
                    }
                }
            } else if let Poll::Ready(reason) = me
                .body_tx
                .poll_reset(cx)
                .map_err(crate::Error::new_body_write)?
            {
                debug!("stream received RST_STREAM: {:?}", reason);
                return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason))));
            }

            match ready!(me.stream.as_mut().poll_frame(cx)) {
                Some(Ok(frame)) => {
                    if frame.is_data() {
                        // is_data() was just checked, so into_data() cannot fail.
                        let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                        let is_eos = me.stream.is_end_stream();
                        trace!(
                            "send body chunk: {} bytes, eos={}",
                            chunk.remaining(),
                            is_eos,
                        );

                        let buf = SendBuf::Buf(chunk);
                        me.body_tx
                            .send_data(buf, is_eos)
                            .map_err(crate::Error::new_body_write)?;

                        if is_eos {
                            return Poll::Ready(Ok(()));
                        }
                    } else if frame.is_trailers() {
                        // no more DATA, so give any capacity back
                        me.body_tx.reserve_capacity(0);
                        me.body_tx
                            .send_trailers(frame.into_trailers().unwrap_or_else(|_| unreachable!()))
                            .map_err(crate::Error::new_body_write)?;
                        return Poll::Ready(Ok(()));
                    } else {
                        trace!("discarding unknown frame");
                        // loop again
                    }
                }
                Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),
                None => {
                    // no more frames means we're done here
                    // but at this point, we haven't sent an EOS DATA, or
                    // any trailers, so send an empty EOS DATA.
                    return Poll::Ready(me.body_tx.send_eos_frame());
                }
            }
        }
    }
}
+
/// Convenience helpers on h2 `SendStream` shared by the body pipe above.
trait SendStreamExt {
    /// Logs a user body error, resets the stream with the error's h2
    /// reason, and returns the wrapped error.
    fn on_user_err<E>(&mut self, err: E) -> crate::Error
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>;
    /// Sends an empty DATA frame with END_STREAM set.
    fn send_eos_frame(&mut self) -> crate::Result<()>;
}
+
impl<B: Buf> SendStreamExt for SendStream<SendBuf<B>> {
    /// Wraps a user body error, logs it, and resets the h2 stream with the
    /// reason derived from the error.
    fn on_user_err<E>(&mut self, err: E) -> crate::Error
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        let err = crate::Error::new_user_body(err);
        debug!("send body user stream error: {}", err);
        self.send_reset(err.h2_reason());
        err
    }

    /// Finishes the stream with an empty end-of-stream DATA frame.
    fn send_eos_frame(&mut self) -> crate::Result<()> {
        trace!("send body eos");
        self.send_data(SendBuf::None, true)
            .map_err(crate::Error::new_body_write)
    }
}
+
/// Buffer type handed to h2 send streams.
///
/// `Buf` carries normal body chunks, `Cursor` carries bytes copied from an
/// upgraded (CONNECT) byte stream, and `None` is a zero-length buffer used
/// for empty end-of-stream DATA frames.
#[repr(usize)]
enum SendBuf<B> {
    Buf(B),
    Cursor(Cursor<Box<[u8]>>),
    None,
}
+
+impl<B: Buf> Buf for SendBuf<B> {
+ #[inline]
+ fn remaining(&self) -> usize {
+ match *self {
+ Self::Buf(ref b) => b.remaining(),
+ Self::Cursor(ref c) => Buf::remaining(c),
+ Self::None => 0,
+ }
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ match *self {
+ Self::Buf(ref b) => b.chunk(),
+ Self::Cursor(ref c) => c.chunk(),
+ Self::None => &[],
+ }
+ }
+
+ #[inline]
+ fn advance(&mut self, cnt: usize) {
+ match *self {
+ Self::Buf(ref mut b) => b.advance(cnt),
+ Self::Cursor(ref mut c) => c.advance(cnt),
+ Self::None => {}
+ }
+ }
+
+ fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
+ match *self {
+ Self::Buf(ref b) => b.chunks_vectored(dst),
+ Self::Cursor(ref c) => c.chunks_vectored(dst),
+ Self::None => 0,
+ }
+ }
+}
+
/// Bidirectional byte stream over an upgraded (e.g. CONNECT) HTTP/2 stream,
/// adapting h2's frame-based send/recv halves to hyper's `Read`/`Write`.
struct H2Upgraded<B>
where
    B: Buf,
{
    // Reports received bytes so BDP/keep-alive ping accounting stays accurate.
    ping: Recorder,
    send_stream: UpgradedSendStream<B>,
    recv_stream: RecvStream,
    // Leftover bytes of the last received DATA frame not yet copied out.
    buf: Bytes,
}
+
impl<B> Read for H2Upgraded<B>
where
    B: Buf,
{
    /// Copies buffered DATA bytes into `read_buf`, pulling the next frame
    /// from the h2 stream when the local buffer is empty.
    ///
    /// End of stream and NO_ERROR/CANCEL resets surface as a clean EOF
    /// (`Ok` with nothing written); STREAM_CLOSED maps to `BrokenPipe`.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut read_buf: ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        if self.buf.is_empty() {
            self.buf = loop {
                match ready!(self.recv_stream.poll_data(cx)) {
                    None => return Poll::Ready(Ok(())),
                    // Skip empty interim frames; an empty final frame means EOF.
                    Some(Ok(buf)) if buf.is_empty() && !self.recv_stream.is_end_stream() => {
                        continue
                    }
                    Some(Ok(buf)) => {
                        self.ping.record_data(buf.len());
                        break buf;
                    }
                    Some(Err(e)) => {
                        return Poll::Ready(match e.reason() {
                            Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()),
                            Some(Reason::STREAM_CLOSED) => {
                                Err(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e))
                            }
                            _ => Err(h2_to_io_error(e)),
                        })
                    }
                }
            };
        }
        let cnt = std::cmp::min(self.buf.len(), read_buf.remaining());
        read_buf.put_slice(&self.buf[..cnt]);
        self.buf.advance(cnt);
        // Return the consumed bytes to h2's flow-control window; an error here
        // means the stream is already gone, so it is deliberately ignored.
        let _ = self.recv_stream.flow_control().release_capacity(cnt);
        Poll::Ready(Ok(()))
    }
}
+
impl<B> Write for H2Upgraded<B>
where
    B: Buf,
{
    /// Writes by reserving h2 capacity and copying as much of `buf` as
    /// granted into a DATA frame.
    ///
    /// Capacity/write failures defer to `poll_reset` for the real cause;
    /// NO_ERROR/CANCEL/STREAM_CLOSED resets become `BrokenPipe`.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        if buf.is_empty() {
            return Poll::Ready(Ok(0));
        }
        self.send_stream.reserve_capacity(buf.len());

        // We ignore all errors returned by `poll_capacity` and `write`, as we
        // will get the correct from `poll_reset` anyway.
        let cnt = match ready!(self.send_stream.poll_capacity(cx)) {
            None => Some(0),
            Some(Ok(cnt)) => self
                .send_stream
                .write(&buf[..cnt], false)
                .ok()
                .map(|()| cnt),
            Some(Err(_)) => None,
        };

        if let Some(cnt) = cnt {
            return Poll::Ready(Ok(cnt));
        }

        Poll::Ready(Err(h2_to_io_error(
            match ready!(self.send_stream.poll_reset(cx)) {
                Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
                    return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()))
                }
                Ok(reason) => reason.into(),
                Err(e) => e,
            },
        )))
    }

    /// Nothing buffered locally; DATA frames are handed to h2 as written.
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        Poll::Ready(Ok(()))
    }

    /// Shuts down the write side by sending an empty END_STREAM frame,
    /// consulting `poll_reset` for the failure cause when that fails.
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        if self.send_stream.write(&[], true).is_ok() {
            return Poll::Ready(Ok(()));
        }

        Poll::Ready(Err(h2_to_io_error(
            match ready!(self.send_stream.poll_reset(cx)) {
                Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())),
                Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
                    return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()))
                }
                Ok(reason) => reason.into(),
                Err(e) => e,
            },
        )))
    }
}
+
+fn h2_to_io_error(e: h2::Error) -> std::io::Error {
+ if e.is_io() {
+ e.into_io().unwrap()
+ } else {
+ std::io::Error::new(std::io::ErrorKind::Other, e)
+ }
+}
+
/// `SendStream` wrapper used after a CONNECT upgrade, replacing the body
/// buffer type with the uninhabitable `Neutered<B>` so only raw byte writes
/// (`SendBuf::Cursor`/`SendBuf::None`) can be sent.
struct UpgradedSendStream<B>(SendStream<SendBuf<Neutered<B>>>);

impl<B> UpgradedSendStream<B>
where
    B: Buf,
{
    /// Reinterprets the stream's buffer parameter as `Neutered<B>`.
    ///
    /// # Safety
    ///
    /// Relies on `SendBuf<B>` and `SendBuf<Neutered<B>>` having identical
    /// layout; the assertion checks the size precondition. `Neutered<B>` is
    /// uninhabited, so a `SendBuf::Buf` can never be constructed through
    /// this wrapper — only `Cursor`/`None` values are sent (see `write`).
    unsafe fn new(inner: SendStream<SendBuf<B>>) -> Self {
        assert_eq!(mem::size_of::<B>(), mem::size_of::<Neutered<B>>());
        Self(mem::transmute(inner))
    }

    fn reserve_capacity(&mut self, cnt: usize) {
        // SAFETY: delegation only; see `as_inner_unchecked`.
        unsafe { self.as_inner_unchecked().reserve_capacity(cnt) }
    }

    fn poll_capacity(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<usize, h2::Error>>> {
        // SAFETY: delegation only; see `as_inner_unchecked`.
        unsafe { self.as_inner_unchecked().poll_capacity(cx) }
    }

    fn poll_reset(&mut self, cx: &mut Context<'_>) -> Poll<Result<h2::Reason, h2::Error>> {
        // SAFETY: delegation only; see `as_inner_unchecked`.
        unsafe { self.as_inner_unchecked().poll_reset(cx) }
    }

    /// Copies `buf` into an owned cursor and sends it as a DATA frame.
    fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), std::io::Error> {
        let send_buf = SendBuf::Cursor(Cursor::new(buf.into()));
        // SAFETY: only non-`Buf` variants are sent, so the erased body
        // buffer type is never actually used.
        unsafe {
            self.as_inner_unchecked()
                .send_data(send_buf, end_of_stream)
                .map_err(h2_to_io_error)
        }
    }

    /// Views the stream with its original buffer parameter restored.
    ///
    /// # Safety
    ///
    /// Pointer cast relies on the same layout equivalence asserted in
    /// `new`.
    unsafe fn as_inner_unchecked(&mut self) -> &mut SendStream<SendBuf<B>> {
        &mut *(&mut self.0 as *mut _ as *mut _)
    }
}
+
/// Uninhabitable stand-in for a body buffer type `B`.
///
/// Keeps `B` for size/layout (checked in `UpgradedSendStream::new`) while
/// the empty `Impossible` field guarantees no value can ever exist.
#[repr(transparent)]
struct Neutered<B> {
    _inner: B,
    impossible: Impossible,
}

/// Empty enum: has no values, making any type containing it uninhabited.
enum Impossible {}

// SAFETY: `Neutered` is uninhabited (its `impossible` field has no values),
// so no value can ever actually be sent across threads.
unsafe impl<B> Send for Neutered<B> {}

impl<B> Buf for Neutered<B> {
    // All methods are trivially "implemented" by matching on the
    // uninhabited field — they can never be called.
    fn remaining(&self) -> usize {
        match self.impossible {}
    }

    fn chunk(&self) -> &[u8] {
        match self.impossible {}
    }

    fn advance(&mut self, _cnt: usize) {
        match self.impossible {}
    }
}
diff --git a/vendor/hyper/src/proto/h2/ping.rs b/vendor/hyper/src/proto/h2/ping.rs
new file mode 100644
index 00000000..749cf1b7
--- /dev/null
+++ b/vendor/hyper/src/proto/h2/ping.rs
@@ -0,0 +1,509 @@
+/// HTTP2 Ping usage
+///
+/// hyper uses HTTP2 pings for two purposes:
+///
+/// 1. Adaptive flow control using BDP
+/// 2. Connection keep-alive
+///
+/// Both cases are optional.
+///
+/// # BDP Algorithm
+///
+/// 1. When receiving a DATA frame, if a BDP ping isn't outstanding:
+/// 1a. Record current time.
+/// 1b. Send a BDP ping.
+/// 2. Increment the number of received bytes.
+/// 3. When the BDP ping ack is received:
+/// 3a. Record duration from sent time.
+/// 3b. Merge RTT with a running average.
+/// 3c. Calculate bdp as bytes/rtt.
+/// 3d. If bdp is over 2/3 max, set new max to bdp and update windows.
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::{self, Poll};
+use std::time::{Duration, Instant};
+
+use h2::{Ping, PingPong};
+
+use crate::common::time::Time;
+use crate::rt::Sleep;
+
+type WindowSize = u32;
+
/// Creates a no-op `Recorder` for when both BDP and keep-alive are off;
/// all of its methods become cheap no-ops (`shared` is `None`).
pub(super) fn disabled() -> Recorder {
    Recorder { shared: None }
}
+
+pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Recorder, Ponger) {
+ debug_assert!(
+ config.is_enabled(),
+ "ping channel requires bdp or keep-alive config",
+ );
+
+ let bdp = config.bdp_initial_window.map(|wnd| Bdp {
+ bdp: wnd,
+ max_bandwidth: 0.0,
+ rtt: 0.0,
+ ping_delay: Duration::from_millis(100),
+ stable_count: 0,
+ });
+
+ let (bytes, next_bdp_at) = if bdp.is_some() {
+ (Some(0), Some(Instant::now()))
+ } else {
+ (None, None)
+ };
+
+ let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive {
+ interval,
+ timeout: config.keep_alive_timeout,
+ while_idle: config.keep_alive_while_idle,
+ sleep: __timer.sleep(interval),
+ state: KeepAliveState::Init,
+ timer: __timer,
+ });
+
+ let last_read_at = keep_alive.as_ref().map(|_| Instant::now());
+
+ let shared = Arc::new(Mutex::new(Shared {
+ bytes,
+ last_read_at,
+ is_keep_alive_timed_out: false,
+ ping_pong,
+ ping_sent_at: None,
+ next_bdp_at,
+ }));
+
+ (
+ Recorder {
+ shared: Some(shared.clone()),
+ },
+ Ponger {
+ bdp,
+ keep_alive,
+ shared,
+ },
+ )
+}
+
/// Ping behavior for one connection: BDP sampling and/or keep-alive.
#[derive(Clone)]
pub(super) struct Config {
    /// Initial window size seeding BDP estimation; `None` disables BDP.
    pub(super) bdp_initial_window: Option<WindowSize>,
    /// If no frames are received in this amount of time, a PING frame is sent.
    pub(super) keep_alive_interval: Option<Duration>,
    /// After sending a keepalive PING, the connection will be closed if
    /// a pong is not received in this amount of time.
    pub(super) keep_alive_timeout: Duration,
    /// If true, sends pings even when there are no active streams.
    pub(super) keep_alive_while_idle: bool,
}

/// Cloneable handle given to body streams to report received frames.
///
/// A `None` shared state means recording is disabled; all methods no-op.
#[derive(Clone)]
pub(crate) struct Recorder {
    shared: Option<Arc<Mutex<Shared>>>,
}

/// Connection-side half: polled to receive pongs and produce window
/// updates or keep-alive timeouts.
pub(super) struct Ponger {
    bdp: Option<Bdp>,
    keep_alive: Option<KeepAlive>,
    shared: Arc<Mutex<Shared>>,
}

/// State shared between all `Recorder` clones and the `Ponger`.
struct Shared {
    ping_pong: PingPong,
    /// When the currently outstanding PING was sent; `None` when none is.
    ping_sent_at: Option<Instant>,

    // bdp
    /// If `Some`, bdp is enabled, and this tracks how many bytes have been
    /// read during the current sample.
    bytes: Option<usize>,
    /// We delay a variable amount of time between BDP pings. This allows us
    /// to send less pings as the bandwidth stabilizes.
    next_bdp_at: Option<Instant>,

    // keep-alive
    /// If `Some`, keep-alive is enabled, and the Instant is how long ago
    /// the connection read the last frame.
    last_read_at: Option<Instant>,

    is_keep_alive_timed_out: bool,
}

/// Bandwidth-delay-product estimator state.
struct Bdp {
    /// Current BDP in bytes
    bdp: u32,
    /// Largest bandwidth we've seen so far.
    max_bandwidth: f64,
    /// Round trip time in seconds
    rtt: f64,
    /// Delay the next ping by this amount.
    ///
    /// This will change depending on how stable the current bandwidth is.
    ping_delay: Duration,
    /// The count of ping round trips where BDP has stayed the same.
    stable_count: u32,
}

/// Keep-alive timer state machine.
struct KeepAlive {
    /// If no frames are received in this amount of time, a PING frame is sent.
    interval: Duration,
    /// After sending a keepalive PING, the connection will be closed if
    /// a pong is not received in this amount of time.
    timeout: Duration,
    /// If true, sends pings even when there are no active streams.
    while_idle: bool,
    state: KeepAliveState,
    sleep: Pin<Box<dyn Sleep>>,
    timer: Time,
}

/// Phase of the keep-alive timer.
enum KeepAliveState {
    Init,
    /// Armed to fire at the contained deadline.
    Scheduled(Instant),
    PingSent,
}

/// Events produced by polling a `Ponger`.
pub(super) enum Ponged {
    /// BDP grew; set connection/stream windows to this size.
    SizeUpdate(WindowSize),
    /// No pong arrived within the keep-alive timeout.
    KeepAliveTimedOut,
}

/// Error signaling that the keep-alive pong deadline elapsed.
#[derive(Debug)]
pub(super) struct KeepAliveTimedOut;
+
+// ===== impl Config =====
+
+impl Config {
+ pub(super) fn is_enabled(&self) -> bool {
+ self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some()
+ }
+}
+
+// ===== impl Recorder =====
+
impl Recorder {
    /// Notes `len` received DATA bytes: refreshes keep-alive's last-read
    /// time and, when a BDP sample is due, accumulates bytes and sends a
    /// BDP ping if none is outstanding. No-op when disabled.
    pub(crate) fn record_data(&self, len: usize) {
        let shared = if let Some(ref shared) = self.shared {
            shared
        } else {
            return;
        };

        let mut locked = shared.lock().unwrap();

        locked.update_last_read_at();

        // are we ready to send another bdp ping?
        // if not, we don't need to record bytes either

        if let Some(ref next_bdp_at) = locked.next_bdp_at {
            if Instant::now() < *next_bdp_at {
                return;
            } else {
                // Sample window opens now; cleared until the next pong.
                locked.next_bdp_at = None;
            }
        }

        if let Some(ref mut bytes) = locked.bytes {
            *bytes += len;
        } else {
            // no need to send bdp ping if bdp is disabled
            return;
        }

        if !locked.is_ping_sent() {
            locked.send_ping();
        }
    }

    /// Notes receipt of a non-DATA frame: refreshes keep-alive's last-read
    /// time only. No-op when disabled.
    pub(crate) fn record_non_data(&self) {
        let shared = if let Some(ref shared) = self.shared {
            shared
        } else {
            return;
        };

        let mut locked = shared.lock().unwrap();

        locked.update_last_read_at();
    }

    /// If the incoming stream is already closed, convert self into
    /// a disabled reporter.
    #[cfg(feature = "client")]
    pub(super) fn for_stream(self, stream: &h2::RecvStream) -> Self {
        if stream.is_end_stream() {
            disabled()
        } else {
            self
        }
    }

    /// Errors if keep-alive has already declared this connection dead.
    pub(super) fn ensure_not_timed_out(&self) -> crate::Result<()> {
        if let Some(ref shared) = self.shared {
            let locked = shared.lock().unwrap();
            if locked.is_keep_alive_timed_out {
                return Err(KeepAliveTimedOut.crate_error());
            }
        }

        // else
        Ok(())
    }
}
+
+// ===== impl Ponger =====
+
impl Ponger {
    /// Polls for a pong: drives keep-alive scheduling, and on a pong folds
    /// the BDP sample into the estimate, yielding a window update or a
    /// keep-alive timeout when one occurs.
    pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll<Ponged> {
        let now = Instant::now();
        let mut locked = self.shared.lock().unwrap();
        let is_idle = self.is_idle();

        if let Some(ref mut ka) = self.keep_alive {
            ka.maybe_schedule(is_idle, &locked);
            ka.maybe_ping(cx, is_idle, &mut locked);
        }

        if !locked.is_ping_sent() {
            // XXX: this doesn't register a waker...?
            return Poll::Pending;
        }

        match locked.ping_pong.poll_pong(cx) {
            Poll::Ready(Ok(_pong)) => {
                let start = locked
                    .ping_sent_at
                    .expect("pong received implies ping_sent_at");
                locked.ping_sent_at = None;
                let rtt = now - start;
                trace!("recv pong");

                // A pong counts as a read for keep-alive purposes.
                if let Some(ref mut ka) = self.keep_alive {
                    locked.update_last_read_at();
                    ka.maybe_schedule(is_idle, &locked);
                    ka.maybe_ping(cx, is_idle, &mut locked);
                }

                if let Some(ref mut bdp) = self.bdp {
                    let bytes = locked.bytes.expect("bdp enabled implies bytes");
                    locked.bytes = Some(0); // reset
                    trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt);

                    let update = bdp.calculate(bytes, rtt);
                    locked.next_bdp_at = Some(now + bdp.ping_delay);
                    if let Some(update) = update {
                        return Poll::Ready(Ponged::SizeUpdate(update));
                    }
                }
            }
            Poll::Ready(Err(_e)) => {
                debug!("pong error: {}", _e);
            }
            Poll::Pending => {
                // No pong yet — check whether the keep-alive deadline passed.
                if let Some(ref mut ka) = self.keep_alive {
                    if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) {
                        self.keep_alive = None;
                        locked.is_keep_alive_timed_out = true;
                        return Poll::Ready(Ponged::KeepAliveTimedOut);
                    }
                }
            }
        }

        // XXX: this doesn't register a waker...?
        Poll::Pending
    }

    /// Heuristic idleness check: when only the `Ponger` itself and the
    /// connection's own `Recorder` hold the `Arc` (count <= 2), no
    /// per-stream `Recorder` clones are alive.
    fn is_idle(&self) -> bool {
        Arc::strong_count(&self.shared) <= 2
    }
}
+
+// ===== impl Shared =====
+
+impl Shared {
+ fn send_ping(&mut self) {
+ match self.ping_pong.send_ping(Ping::opaque()) {
+ Ok(()) => {
+ self.ping_sent_at = Some(Instant::now());
+ trace!("sent ping");
+ }
+ Err(_err) => {
+ debug!("error sending ping: {}", _err);
+ }
+ }
+ }
+
+ fn is_ping_sent(&self) -> bool {
+ self.ping_sent_at.is_some()
+ }
+
+ fn update_last_read_at(&mut self) {
+ if self.last_read_at.is_some() {
+ self.last_read_at = Some(Instant::now());
+ }
+ }
+
+ fn last_read_at(&self) -> Instant {
+ self.last_read_at.expect("keep_alive expects last_read_at")
+ }
+}
+
+// ===== impl Bdp =====
+
/// Any higher than this likely will be hitting the TCP flow control.
const BDP_LIMIT: usize = 1024 * 1024 * 16;

impl Bdp {
    /// Folds one ping sample (`bytes` received over `rtt`) into the
    /// estimate, returning a new window size when the BDP grew.
    fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option<WindowSize> {
        // No need to do any math if we're at the limit.
        if self.bdp as usize == BDP_LIMIT {
            self.stabilize_delay();
            return None;
        }

        // average the rtt
        let rtt = seconds(rtt);
        if self.rtt == 0.0 {
            // First sample means rtt is first rtt.
            self.rtt = rtt;
        } else {
            // Weigh this rtt as 1/8 for a moving average.
            self.rtt += (rtt - self.rtt) * 0.125;
        }

        // calculate the current bandwidth
        let bw = (bytes as f64) / (self.rtt * 1.5);
        trace!("current bandwidth = {:.1}B/s", bw);

        if bw < self.max_bandwidth {
            // not a faster bandwidth, so don't update
            self.stabilize_delay();
            return None;
        } else {
            self.max_bandwidth = bw;
        }

        // if the current `bytes` sample is at least 2/3 the previous
        // bdp, increase to double the current sample.
        if bytes >= self.bdp as usize * 2 / 3 {
            self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize;
            trace!("BDP increased to {}", self.bdp);

            // Growth resets the stability counter and pings twice as often.
            self.stable_count = 0;
            self.ping_delay /= 2;
            Some(self.bdp)
        } else {
            self.stabilize_delay();
            None
        }
    }

    /// Backs off the ping rate (x4, stopping near 10s) after two
    /// consecutive samples without BDP growth.
    fn stabilize_delay(&mut self) {
        if self.ping_delay < Duration::from_secs(10) {
            self.stable_count += 1;

            if self.stable_count >= 2 {
                self.ping_delay *= 4;
                self.stable_count = 0;
            }
        }
    }
}
+
/// Converts a `Duration` to fractional seconds.
///
/// Uses the standard library's `Duration::as_secs_f64`, which computes the
/// same whole-seconds-plus-nanos value the previous hand-rolled arithmetic
/// did.
fn seconds(dur: Duration) -> f64 {
    dur.as_secs_f64()
}
+
+// ===== impl KeepAlive =====
+
impl KeepAlive {
    /// Arms the keep-alive timer if it isn't already scheduled (and, from
    /// `Init`, only when allowed by the idle policy).
    fn maybe_schedule(&mut self, is_idle: bool, shared: &Shared) {
        match self.state {
            KeepAliveState::Init => {
                if !self.while_idle && is_idle {
                    return;
                }

                self.schedule(shared);
            }
            KeepAliveState::PingSent => {
                // Re-arm only once the outstanding ping has been answered.
                if shared.is_ping_sent() {
                    return;
                }
                self.schedule(shared);
            }
            KeepAliveState::Scheduled(..) => (),
        }
    }

    /// Schedules the next keep-alive deadline at `last_read + interval`.
    fn schedule(&mut self, shared: &Shared) {
        let interval = shared.last_read_at() + self.interval;
        self.state = KeepAliveState::Scheduled(interval);
        self.timer.reset(&mut self.sleep, interval);
    }

    /// If the scheduled deadline elapsed with no intervening read, sends a
    /// keep-alive PING and re-arms the timer with the pong timeout.
    fn maybe_ping(&mut self, cx: &mut task::Context<'_>, is_idle: bool, shared: &mut Shared) {
        match self.state {
            KeepAliveState::Scheduled(at) => {
                if Pin::new(&mut self.sleep).poll(cx).is_pending() {
                    return;
                }
                // check if we've received a frame while we were scheduled
                if shared.last_read_at() + self.interval > at {
                    self.state = KeepAliveState::Init;
                    cx.waker().wake_by_ref(); // schedule us again
                    return;
                }
                if !self.while_idle && is_idle {
                    trace!("keep-alive no need to ping when idle and while_idle=false");
                    return;
                }
                trace!("keep-alive interval ({:?}) reached", self.interval);
                shared.send_ping();
                self.state = KeepAliveState::PingSent;
                let timeout = Instant::now() + self.timeout;
                self.timer.reset(&mut self.sleep, timeout);
            }
            KeepAliveState::Init | KeepAliveState::PingSent => (),
        }
    }

    /// After a keep-alive PING, reports a timeout once the pong deadline
    /// elapses without an answer.
    fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> {
        match self.state {
            KeepAliveState::PingSent => {
                if Pin::new(&mut self.sleep).poll(cx).is_pending() {
                    return Ok(());
                }
                trace!("keep-alive timeout ({:?}) reached", self.timeout);
                Err(KeepAliveTimedOut)
            }
            KeepAliveState::Init | KeepAliveState::Scheduled(..) => Ok(()),
        }
    }
}
+
+// ===== impl KeepAliveTimedOut =====
+
impl KeepAliveTimedOut {
    /// Wraps the timeout in hyper's HTTP/2 error kind.
    pub(super) fn crate_error(self) -> crate::Error {
        crate::Error::new(crate::error::Kind::Http2).with(self)
    }
}

impl fmt::Display for KeepAliveTimedOut {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("keep-alive timed out")
    }
}

impl std::error::Error for KeepAliveTimedOut {
    /// The underlying cause is reported as hyper's generic `TimedOut`.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&crate::error::TimedOut)
    }
}
diff --git a/vendor/hyper/src/proto/h2/server.rs b/vendor/hyper/src/proto/h2/server.rs
new file mode 100644
index 00000000..a8a20dd6
--- /dev/null
+++ b/vendor/hyper/src/proto/h2/server.rs
@@ -0,0 +1,545 @@
+use std::error::Error as StdError;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use bytes::Bytes;
+use futures_util::ready;
+use h2::server::{Connection, Handshake, SendResponse};
+use h2::{Reason, RecvStream};
+use http::{Method, Request};
+use pin_project_lite::pin_project;
+
+use super::{ping, PipeToSendStream, SendBuf};
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::common::date;
+use crate::common::io::Compat;
+use crate::common::time::Time;
+use crate::ext::Protocol;
+use crate::headers;
+use crate::proto::h2::ping::Recorder;
+use crate::proto::h2::{H2Upgraded, UpgradedSendStream};
+use crate::proto::Dispatched;
+use crate::rt::bounds::Http2ServerConnExec;
+use crate::rt::{Read, Write};
+use crate::service::HttpService;
+
+use crate::upgrade::{OnUpgrade, Pending, Upgraded};
+use crate::Response;
+
+// Our defaults are chosen for the "majority" case, which usually are not
+// resource constrained, and so the spec default of 64kb can be too limiting
+// for performance.
+//
+// At the same time, a server more often has multiple clients connected, and
+// so is more likely to use more resources than a client would.
const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb
const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb
const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 1024 * 16; // 16kb
// Default cap on locally-reset streams tracked per connection; see
// `Config::max_local_error_reset_streams`.
const DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS: usize = 1024;
+
/// HTTP/2 server connection settings, mapped onto the h2 builder in
/// `Server::new`.
#[derive(Clone, Debug)]
pub(crate) struct Config {
    // Enables BDP-based adaptive flow-control windows when true.
    pub(crate) adaptive_window: bool,
    pub(crate) initial_conn_window_size: u32,
    pub(crate) initial_stream_window_size: u32,
    pub(crate) max_frame_size: u32,
    // Forwarded to h2's extended-CONNECT protocol setting when true.
    pub(crate) enable_connect_protocol: bool,
    pub(crate) max_concurrent_streams: Option<u32>,
    pub(crate) max_pending_accept_reset_streams: Option<usize>,
    pub(crate) max_local_error_reset_streams: Option<usize>,
    // Keep-alive ping cadence; `None` disables keep-alive.
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) keep_alive_timeout: Duration,
    pub(crate) max_send_buffer_size: usize,
    pub(crate) max_header_list_size: u32,
    // NOTE(review): presumably controls auto-adding a `Date` response
    // header; set here, consumed by `H2Stream` — confirm there.
    pub(crate) date_header: bool,
}
+
+impl Default for Config {
+ fn default() -> Config {
+ Config {
+ adaptive_window: false,
+ initial_conn_window_size: DEFAULT_CONN_WINDOW,
+ initial_stream_window_size: DEFAULT_STREAM_WINDOW,
+ max_frame_size: DEFAULT_MAX_FRAME_SIZE,
+ enable_connect_protocol: false,
+ max_concurrent_streams: Some(200),
+ max_pending_accept_reset_streams: None,
+ max_local_error_reset_streams: Some(DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS),
+ keep_alive_interval: None,
+ keep_alive_timeout: Duration::from_secs(20),
+ max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,
+ max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE,
+ date_header: true,
+ }
+ }
+}
+
pin_project! {
    /// Connection future driving an HTTP/2 server: handshakes, then accepts
    /// streams and dispatches each to `service` via `exec`.
    pub(crate) struct Server<T, S, B, E>
    where
        S: HttpService<IncomingBody>,
        B: Body,
    {
        exec: E,
        timer: Time,
        service: S,
        state: State<T, B>,
        // Whether served streams auto-add a `Date` response header.
        date_header: bool,
        // Set when graceful_shutdown() arrives before the handshake
        // finishes; applied once serving starts.
        close_pending: bool
    }
}
+
/// Connection phase: h2 handshake in progress, or actively serving streams.
enum State<T, B>
where
    B: Body,
{
    Handshaking {
        // Saved so ping/keep-alive can be wired up once the handshake
        // yields a connection.
        ping_config: ping::Config,
        hs: Handshake<Compat<T>, SendBuf<B::Data>>,
    },
    Serving(Serving<T, B>),
}

/// State of an established HTTP/2 server connection.
struct Serving<T, B>
where
    B: Body,
{
    // Present when BDP and/or keep-alive pings are enabled.
    ping: Option<(ping::Recorder, ping::Ponger)>,
    conn: Connection<Compat<T>, SendBuf<B::Data>>,
    // Error to return once the connection finishes draining.
    closing: Option<crate::Error>,
    date_header: bool,
}
+
impl<T, S, B, E> Server<T, S, B, E>
where
    T: Read + Write + Unpin,
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    B: Body + 'static,
    E: Http2ServerConnExec<S::Future, B>,
{
    /// Starts an HTTP/2 server connection over `io`, configuring the h2
    /// builder from `config` and beginning the handshake immediately.
    pub(crate) fn new(
        io: T,
        service: S,
        config: &Config,
        exec: E,
        timer: Time,
    ) -> Server<T, S, B, E> {
        let mut builder = h2::server::Builder::default();
        builder
            .initial_window_size(config.initial_stream_window_size)
            .initial_connection_window_size(config.initial_conn_window_size)
            .max_frame_size(config.max_frame_size)
            .max_header_list_size(config.max_header_list_size)
            .max_local_error_reset_streams(config.max_local_error_reset_streams)
            .max_send_buffer_size(config.max_send_buffer_size);
        if let Some(max) = config.max_concurrent_streams {
            builder.max_concurrent_streams(max);
        }
        if let Some(max) = config.max_pending_accept_reset_streams {
            builder.max_pending_accept_reset_streams(max);
        }
        if config.enable_connect_protocol {
            builder.enable_connect_protocol();
        }
        let handshake = builder.handshake(Compat::new(io));

        // With adaptive windows, BDP sampling starts from the configured
        // stream window size.
        let bdp = if config.adaptive_window {
            Some(config.initial_stream_window_size)
        } else {
            None
        };

        let ping_config = ping::Config {
            bdp_initial_window: bdp,
            keep_alive_interval: config.keep_alive_interval,
            keep_alive_timeout: config.keep_alive_timeout,
            // If keep-alive is enabled for servers, always enabled while
            // idle, so it can more aggressively close dead connections.
            keep_alive_while_idle: true,
        };

        Server {
            exec,
            timer,
            state: State::Handshaking {
                ping_config,
                hs: handshake,
            },
            service,
            date_header: config.date_header,
            close_pending: false,
        }
    }

    /// Requests a graceful shutdown: stop accepting new streams and let
    /// in-flight ones finish. If still handshaking, the shutdown is
    /// deferred (via `close_pending`) until serving begins.
    pub(crate) fn graceful_shutdown(&mut self) {
        trace!("graceful_shutdown");
        match self.state {
            State::Handshaking { .. } => {
                self.close_pending = true;
            }
            State::Serving(ref mut srv) => {
                if srv.closing.is_none() {
                    srv.conn.graceful_shutdown();
                }
            }
        }
    }
}
+
impl<T, S, B, E> Future for Server<T, S, B, E>
where
    T: Read + Write + Unpin,
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    B: Body + 'static,
    E: Http2ServerConnExec<S::Future, B>,
{
    type Output = crate::Result<Dispatched>;

    /// Advances the connection: completes the handshake (wiring up pings
    /// when configured), then serves streams until the connection closes.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = &mut *self;
        loop {
            let next = match me.state {
                State::Handshaking {
                    ref mut hs,
                    ref ping_config,
                } => {
                    let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?;
                    let ping = if ping_config.is_enabled() {
                        let pp = conn.ping_pong().expect("conn.ping_pong");
                        Some(ping::channel(pp, ping_config.clone(), me.timer.clone()))
                    } else {
                        None
                    };
                    State::Serving(Serving {
                        ping,
                        conn,
                        closing: None,
                        date_header: me.date_header,
                    })
                }
                State::Serving(ref mut srv) => {
                    // graceful_shutdown was called before handshaking finished,
                    if me.close_pending && srv.closing.is_none() {
                        srv.conn.graceful_shutdown();
                    }
                    ready!(srv.poll_server(cx, &mut me.service, &mut me.exec))?;
                    return Poll::Ready(Ok(Dispatched::Shutdown));
                }
            };
            me.state = next;
        }
    }
}
+
impl<T, B> Serving<T, B>
where
    T: Read + Write + Unpin,
    B: Body + 'static,
{
    /// Accept loop: polls pings, accepts incoming streams, and spawns an
    /// `H2Stream` per request on `exec` until the peer finishes or an error
    /// occurs. When `closing` is already set, drains the connection and
    /// returns the stored error instead.
    fn poll_server<S, E>(
        &mut self,
        cx: &mut Context<'_>,
        service: &mut S,
        exec: &mut E,
    ) -> Poll<crate::Result<()>>
    where
        S: HttpService<IncomingBody, ResBody = B>,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        E: Http2ServerConnExec<S::Future, B>,
    {
        if self.closing.is_none() {
            loop {
                self.poll_ping(cx);

                match ready!(self.conn.poll_accept(cx)) {
                    Some(Ok((req, mut respond))) => {
                        trace!("incoming request");
                        let content_length = headers::content_length_parse_all(req.headers());
                        let ping = self
                            .ping
                            .as_ref()
                            .map(|ping| ping.0.clone())
                            .unwrap_or_else(ping::disabled);

                        // Record the headers received
                        ping.record_non_data();

                        let is_connect = req.method() == Method::CONNECT;
                        let (mut parts, stream) = req.into_parts();
                        let (mut req, connect_parts) = if !is_connect {
                            (
                                Request::from_parts(
                                    parts,
                                    IncomingBody::h2(stream, content_length.into(), ping),
                                ),
                                None,
                            )
                        } else {
                            // CONNECT requests must not carry a body; reject
                            // with a stream reset rather than a response.
                            if content_length.map_or(false, |len| len != 0) {
                                warn!("h2 connect request with non-zero body not supported");
                                respond.send_reset(h2::Reason::INTERNAL_ERROR);
                                return Poll::Ready(Ok(()));
                            }
                            let (pending, upgrade) = crate::upgrade::pending();
                            debug_assert!(parts.extensions.get::<OnUpgrade>().is_none());
                            parts.extensions.insert(upgrade);
                            (
                                Request::from_parts(parts, IncomingBody::empty()),
                                Some(ConnectParts {
                                    pending,
                                    ping,
                                    recv_stream: stream,
                                }),
                            )
                        };

                        // Re-expose h2's extended-CONNECT protocol extension
                        // as hyper's own `Protocol` type.
                        if let Some(protocol) = req.extensions_mut().remove::<h2::ext::Protocol>() {
                            req.extensions_mut().insert(Protocol::from_inner(protocol));
                        }

                        let fut = H2Stream::new(
                            service.call(req),
                            connect_parts,
                            respond,
                            self.date_header,
                        );

                        exec.execute_h2stream(fut);
                    }
                    Some(Err(e)) => {
                        return Poll::Ready(Err(crate::Error::new_h2(e)));
                    }
                    None => {
                        // no more incoming streams...
                        if let Some((ref ping, _)) = self.ping {
                            ping.ensure_not_timed_out()?;
                        }

                        trace!("incoming connection complete");
                        return Poll::Ready(Ok(()));
                    }
                }
            }
        }

        // Reached only when polled with `closing` already set: finish
        // draining the connection, then surface the stored error.
        debug_assert!(
            self.closing.is_some(),
            "poll_server broke loop without closing"
        );

        ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?;

        Poll::Ready(Err(self.closing.take().expect("polled after error")))
    }

    /// Drives BDP/keep-alive: applies window updates from BDP samples and
    /// abruptly shuts the connection down on keep-alive timeout.
    fn poll_ping(&mut self, cx: &mut Context<'_>) {
        if let Some((_, ref mut estimator)) = self.ping {
            match estimator.poll(cx) {
                Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {
                    self.conn.set_target_window_size(wnd);
                    let _ = self.conn.set_initial_window_size(wnd);
                }
                Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
                    debug!("keep-alive timed out, closing connection");
                    self.conn.abrupt_shutdown(h2::Reason::NO_ERROR);
                }
                Poll::Pending => {}
            }
        }
    }
}
+
pin_project! {
    /// Future driving a single HTTP/2 request/response exchange: first polls
    /// the service future, then (if the response has a body) pipes that body
    /// into the h2 send stream.
    #[allow(missing_debug_implementations)]
    pub struct H2Stream<F, B>
    where
        B: Body,
    {
        // Handle used to send the response head or reset the stream.
        reply: SendResponse<SendBuf<B::Data>>,
        #[pin]
        state: H2StreamState<F, B>,
        // Whether to auto-insert a `Date` header on responses.
        date_header: bool,
    }
}
+
pin_project! {
    #[project = H2StreamStateProj]
    /// Two-phase state machine for `H2Stream`.
    enum H2StreamState<F, B>
    where
        B: Body,
    {
        // Waiting for the user's service future to produce a response.
        Service {
            #[pin]
            fut: F,
            // Present only for CONNECT requests; fulfilled on a 2xx response.
            connect_parts: Option<ConnectParts>,
        },
        // Streaming the response body into the h2 send stream.
        Body {
            #[pin]
            pipe: PipeToSendStream<B>,
        },
    }
}
+
/// Pieces needed to finish an HTTP/2 CONNECT upgrade once the service
/// returns a successful response.
struct ConnectParts {
    // Fulfilled with the upgraded I/O after the response head is sent.
    pending: Pending,
    // Ping recorder shared with the connection for keep-alive accounting.
    ping: Recorder,
    // The request's receive stream, handed over to the upgraded I/O.
    recv_stream: RecvStream,
}
+
+impl<F, B> H2Stream<F, B>
+where
+ B: Body,
+{
+ fn new(
+ fut: F,
+ connect_parts: Option<ConnectParts>,
+ respond: SendResponse<SendBuf<B::Data>>,
+ date_header: bool,
+ ) -> H2Stream<F, B> {
+ H2Stream {
+ reply: respond,
+ state: H2StreamState::Service { fut, connect_parts },
+ date_header,
+ }
+ }
+}
+
// Sends the response head ($res, with $eos marking end-of-stream); on
// failure, resets the stream and returns the h2 error out of the enclosing
// poll fn. Expands to the `SendStream` on success.
macro_rules! reply {
    ($me:expr, $res:expr, $eos:expr) => {{
        match $me.reply.send_response($res, $eos) {
            Ok(tx) => tx,
            Err(e) => {
                debug!("send response error: {}", e);
                $me.reply.send_reset(Reason::INTERNAL_ERROR);
                return Poll::Ready(Err(crate::Error::new_h2(e)));
            }
        }
    }};
}
+
impl<F, B, E> H2Stream<F, B>
where
    F: Future<Output = Result<Response<B>, E>>,
    B: Body,
    B::Data: 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
    E: Into<Box<dyn StdError + Send + Sync>>,
{
    /// Inner poll loop: drives the service future to a response, sends the
    /// response head, then either finishes (empty body / CONNECT upgrade)
    /// or transitions to piping the body into the send stream.
    fn poll2(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
        let mut me = self.project();
        loop {
            let next = match me.state.as_mut().project() {
                H2StreamStateProj::Service {
                    fut: h,
                    connect_parts,
                } => {
                    let res = match h.poll(cx) {
                        Poll::Ready(Ok(r)) => r,
                        Poll::Pending => {
                            // Response is not yet ready, so we want to check if the client has sent a
                            // RST_STREAM frame which would cancel the current request.
                            if let Poll::Ready(reason) =
                                me.reply.poll_reset(cx).map_err(crate::Error::new_h2)?
                            {
                                debug!("stream received RST_STREAM: {:?}", reason);
                                return Poll::Ready(Err(crate::Error::new_h2(reason.into())));
                            }
                            return Poll::Pending;
                        }
                        Poll::Ready(Err(e)) => {
                            let err = crate::Error::new_user_service(e);
                            warn!("http2 service errored: {}", err);
                            me.reply.send_reset(err.h2_reason());
                            return Poll::Ready(Err(err));
                        }
                    };

                    let (head, body) = res.into_parts();
                    let mut res = ::http::Response::from_parts(head, ());
                    super::strip_connection_headers(res.headers_mut(), false);

                    // set Date header if it isn't already set if instructed
                    if *me.date_header {
                        res.headers_mut()
                            .entry(::http::header::DATE)
                            .or_insert_with(date::update_and_header_value);
                    }

                    if let Some(connect_parts) = connect_parts.take() {
                        if res.status().is_success() {
                            // A successful CONNECT response carries no body; the
                            // stream becomes an opaque tunnel instead.
                            if headers::content_length_parse_all(res.headers())
                                .map_or(false, |len| len != 0)
                            {
                                warn!("h2 successful response to CONNECT request with body not supported");
                                me.reply.send_reset(h2::Reason::INTERNAL_ERROR);
                                return Poll::Ready(Err(crate::Error::new_user_header()));
                            }
                            if res
                                .headers_mut()
                                .remove(::http::header::CONTENT_LENGTH)
                                .is_some()
                            {
                                warn!("successful response to CONNECT request disallows content-length header");
                            }
                            let send_stream = reply!(me, res, false);
                            connect_parts.pending.fulfill(Upgraded::new(
                                H2Upgraded {
                                    ping: connect_parts.ping,
                                    recv_stream: connect_parts.recv_stream,
                                    // NOTE(review): the safety contract lives on
                                    // `UpgradedSendStream::new` — confirm it there.
                                    send_stream: unsafe { UpgradedSendStream::new(send_stream) },
                                    buf: Bytes::new(),
                                },
                                Bytes::new(),
                            ));
                            return Poll::Ready(Ok(()));
                        }
                    }

                    if !body.is_end_stream() {
                        // automatically set Content-Length from body...
                        if let Some(len) = body.size_hint().exact() {
                            headers::set_content_length_if_missing(res.headers_mut(), len);
                        }

                        let body_tx = reply!(me, res, false);
                        H2StreamState::Body {
                            pipe: PipeToSendStream::new(body, body_tx),
                        }
                    } else {
                        // Empty body: send the head with end-of-stream set and finish.
                        reply!(me, res, true);
                        return Poll::Ready(Ok(()));
                    }
                }
                H2StreamStateProj::Body { pipe } => {
                    return pipe.poll(cx);
                }
            };
            me.state.set(next);
        }
    }
}
+
impl<F, B, E> Future for H2Stream<F, B>
where
    F: Future<Output = Result<Response<B>, E>>,
    B: Body,
    B::Data: 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
    E: Into<Box<dyn StdError + Send + Sync>>,
{
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Stream-level errors are logged and swallowed here so a single
        // failed stream does not panic or poison the spawned executor task.
        self.poll2(cx).map(|res| {
            if let Err(_e) = res {
                debug!("stream error: {}", _e);
            }
        })
    }
}
diff --git a/vendor/hyper/src/proto/mod.rs b/vendor/hyper/src/proto/mod.rs
new file mode 100644
index 00000000..fcdf2b97
--- /dev/null
+++ b/vendor/hyper/src/proto/mod.rs
@@ -0,0 +1,73 @@
+//! Pieces pertaining to the HTTP message protocol.
+
+cfg_feature! {
+ #![feature = "http1"]
+
+ pub(crate) mod h1;
+
+ pub(crate) use self::h1::Conn;
+
+ #[cfg(feature = "client")]
+ pub(crate) use self::h1::dispatch;
+ #[cfg(feature = "server")]
+ pub(crate) use self::h1::ServerTransaction;
+}
+
+#[cfg(feature = "http2")]
+pub(crate) mod h2;
+
/// An Incoming Message head. Includes request/status line, and headers.
#[cfg(feature = "http1")]
#[derive(Debug, Default)]
pub(crate) struct MessageHead<S> {
    /// HTTP version of the message.
    pub(crate) version: http::Version,
    /// Subject (request line or status line) of Incoming message.
    pub(crate) subject: S,
    /// Headers of the Incoming message.
    pub(crate) headers: http::HeaderMap,
    /// Extensions, kept private and transferred wholesale when converting
    /// into an `http` message (see `into_response`).
    extensions: http::Extensions,
}
+
/// An incoming request message.
#[cfg(feature = "http1")]
pub(crate) type RequestHead = MessageHead<RequestLine>;

/// The method + URI pair that makes up an HTTP/1 request line.
#[derive(Debug, Default, PartialEq)]
#[cfg(feature = "http1")]
pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri);

/// An incoming response message.
#[cfg(all(feature = "http1", feature = "client"))]
pub(crate) type ResponseHead = MessageHead<http::StatusCode>;

/// How the length of a message body is determined.
#[derive(Debug)]
#[cfg(feature = "http1")]
pub(crate) enum BodyLength {
    /// Content-Length
    Known(u64),
    /// Transfer-Encoding: chunked (if h1)
    Unknown,
}
+
/// Status of when a Dispatcher future completes.
///
/// (The h2 server dispatcher only ever produces `Shutdown`; the `Upgrade`
/// variant exists for the http1 path.)
pub(crate) enum Dispatched {
    /// Dispatcher completely shutdown connection.
    Shutdown,
    /// Dispatcher has pending upgrade, and so did not shutdown.
    #[cfg(feature = "http1")]
    Upgrade(crate::upgrade::Pending),
}
+
#[cfg(all(feature = "client", feature = "http1"))]
impl MessageHead<http::StatusCode> {
    /// Converts this parsed response head into an `http::Response`,
    /// attaching `body` and transferring status, headers, version, and
    /// extensions.
    fn into_response<B>(self, body: B) -> http::Response<B> {
        // Destructure first so each piece moves exactly once.
        let MessageHead {
            version,
            subject,
            headers,
            extensions,
        } = self;
        let mut response = http::Response::new(body);
        *response.version_mut() = version;
        *response.status_mut() = subject;
        *response.headers_mut() = headers;
        *response.extensions_mut() = extensions;
        response
    }
}
diff --git a/vendor/hyper/src/rt/bounds.rs b/vendor/hyper/src/rt/bounds.rs
new file mode 100644
index 00000000..aa3075e0
--- /dev/null
+++ b/vendor/hyper/src/rt/bounds.rs
@@ -0,0 +1,109 @@
+//! Trait aliases
+//!
+//! Traits in this module ease setting bounds and are usually automatically
+//! implemented by implementing another trait.
+
+#[cfg(all(feature = "server", feature = "http2"))]
+pub use self::h2::Http2ServerConnExec;
+
+#[cfg(all(feature = "client", feature = "http2"))]
+pub use self::h2_client::Http2ClientConnExec;
+
#[cfg(all(feature = "client", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "client", feature = "http2"))))]
mod h2_client {
    use std::{error::Error, future::Future};

    use crate::rt::{Read, Write};
    use crate::{proto::h2::client::H2ClientFuture, rt::Executor};

    /// An executor to spawn http2 futures for the client.
    ///
    /// This trait is implemented for any type that implements [`Executor`]
    /// trait for any future.
    ///
    /// This trait is sealed and cannot be implemented for types outside this crate.
    ///
    /// [`Executor`]: crate::rt::Executor
    pub trait Http2ClientConnExec<B, T>: sealed_client::Sealed<(B, T)>
    where
        B: http_body::Body,
        B::Error: Into<Box<dyn Error + Send + Sync>>,
        T: Read + Write + Unpin,
    {
        #[doc(hidden)]
        fn execute_h2_future(&mut self, future: H2ClientFuture<B, T>);
    }

    // Blanket impl: any `Executor` that can spawn the client connection
    // future automatically acts as an `Http2ClientConnExec`.
    impl<E, B, T> Http2ClientConnExec<B, T> for E
    where
        E: Executor<H2ClientFuture<B, T>>,
        B: http_body::Body + 'static,
        B::Error: Into<Box<dyn Error + Send + Sync>>,
        H2ClientFuture<B, T>: Future<Output = ()>,
        T: Read + Write + Unpin,
    {
        fn execute_h2_future(&mut self, future: H2ClientFuture<B, T>) {
            self.execute(future)
        }
    }

    // The seal mirrors the blanket impl above, making external impls impossible.
    impl<E, B, T> sealed_client::Sealed<(B, T)> for E
    where
        E: Executor<H2ClientFuture<B, T>>,
        B: http_body::Body + 'static,
        B::Error: Into<Box<dyn Error + Send + Sync>>,
        H2ClientFuture<B, T>: Future<Output = ()>,
        T: Read + Write + Unpin,
    {
    }

    mod sealed_client {
        pub trait Sealed<X> {}
    }
}
+
#[cfg(all(feature = "server", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "server", feature = "http2"))))]
mod h2 {
    use crate::{proto::h2::server::H2Stream, rt::Executor};
    use http_body::Body;
    use std::future::Future;

    /// An executor to spawn http2 connections.
    ///
    /// This trait is implemented for any type that implements [`Executor`]
    /// trait for any future.
    ///
    /// This trait is sealed and cannot be implemented for types outside this crate.
    ///
    /// [`Executor`]: crate::rt::Executor
    pub trait Http2ServerConnExec<F, B: Body>: sealed::Sealed<(F, B)> + Clone {
        #[doc(hidden)]
        fn execute_h2stream(&mut self, fut: H2Stream<F, B>);
    }

    // Blanket impl: any cloneable `Executor` that can spawn `H2Stream`
    // futures works as a server connection executor.
    #[doc(hidden)]
    impl<E, F, B> Http2ServerConnExec<F, B> for E
    where
        E: Executor<H2Stream<F, B>> + Clone,
        H2Stream<F, B>: Future<Output = ()>,
        B: Body,
    {
        fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
            self.execute(fut)
        }
    }

    // The seal mirrors the blanket impl above, making external impls impossible.
    impl<E, F, B> sealed::Sealed<(F, B)> for E
    where
        E: Executor<H2Stream<F, B>> + Clone,
        H2Stream<F, B>: Future<Output = ()>,
        B: Body,
    {
    }

    mod sealed {
        pub trait Sealed<T> {}
    }
}
diff --git a/vendor/hyper/src/rt/io.rs b/vendor/hyper/src/rt/io.rs
new file mode 100644
index 00000000..ed4af092
--- /dev/null
+++ b/vendor/hyper/src/rt/io.rs
@@ -0,0 +1,405 @@
+use std::fmt;
+use std::mem::MaybeUninit;
+use std::ops::DerefMut;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+// New IO traits? What?! Why, are you bonkers?
+//
+// I mean, yes, probably. But, here's the goals:
+//
+// 1. Supports poll-based IO operations.
+// 2. Opt-in vectored IO.
+// 3. Can use an optional buffer pool.
+// 4. Able to add completion-based (uring) IO eventually.
+//
+// Frankly, the last point is the entire reason we're doing this. We want to
+// have forwards-compatibility with an eventually stable io-uring runtime. We
+// don't need that to work right away. But it must be possible to add in here
+// without breaking hyper 1.0.
+//
+// While in here, we can also make small tweaks to poll_read or poll_write that
+// would allow even the "slow" path to be faster, such as handling the case where
+// someone didn't remember to forward along an `is_completion` call.
+
/// Reads bytes from a source.
///
/// This trait is similar to `std::io::Read`, but supports asynchronous reads.
///
/// See [`ReadBufCursor`] for how read bytes are reported back to the caller.
pub trait Read {
    /// Attempts to read bytes into the `buf`.
    ///
    /// On success, returns `Poll::Ready(Ok(()))` and places data in the
    /// unfilled portion of `buf`. If no data was read (`buf.remaining()` is
    /// unchanged), it implies that EOF has been reached.
    ///
    /// If no data is available for reading, the method returns `Poll::Pending`
    /// and arranges for the current task (via `cx.waker()`) to receive a
    /// notification when the object becomes readable or is closed.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>>;
}
+
/// Write bytes asynchronously.
///
/// This trait is similar to `std::io::Write`, but for asynchronous writes.
pub trait Write {
    /// Attempt to write bytes from `buf` into the destination.
    ///
    /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. If
    /// successful, it must be guaranteed that `n <= buf.len()`. A return value
    /// of `0` means that the underlying object is no longer able to accept
    /// bytes, or that the provided buffer is empty.
    ///
    /// If the object is not ready for writing, the method returns
    /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to
    /// receive a notification when the object becomes writable or is closed.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>>;

    /// Attempts to flush the object.
    ///
    /// On success, returns `Poll::Ready(Ok(()))`.
    ///
    /// If flushing cannot immediately complete, this method returns
    /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to
    /// receive a notification when the object can make progress.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>>;

    /// Attempts to shut down this writer.
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>>;

    /// Returns whether this writer has an efficient `poll_write_vectored`
    /// implementation.
    ///
    /// The default implementation returns `false`.
    fn is_write_vectored(&self) -> bool {
        false
    }

    /// Like `poll_write`, except that it writes from a slice of buffers.
    ///
    /// The default implementation simply writes the first non-empty buffer.
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        let buf = bufs
            .iter()
            .find(|b| !b.is_empty())
            .map_or(&[][..], |b| &**b);
        self.poll_write(cx, buf)
    }
}
+
/// A wrapper around a byte buffer that is incrementally filled and initialized.
///
/// This type is a sort of "double cursor". It tracks three regions in the
/// buffer: a region at the beginning of the buffer that has been logically
/// filled with data, a region that has been initialized at some point but not
/// yet logically filled, and a region at the end that may be uninitialized.
/// The filled region is guaranteed to be a subset of the initialized region.
///
/// In summary, the contents of the buffer can be visualized as:
///
/// ```not_rust
/// [             capacity              ]
/// [ filled |         unfilled         ]
/// [    initialized    | uninitialized ]
/// ```
///
/// It is undefined behavior to de-initialize any bytes from the uninitialized
/// region, since it is merely unknown whether this region is uninitialized or
/// not, and if part of it turns out to be initialized, it must stay initialized.
pub struct ReadBuf<'a> {
    // Backing storage; may be partially uninitialized.
    raw: &'a mut [MaybeUninit<u8>],
    // Number of bytes logically filled (always <= init).
    filled: usize,
    // Number of bytes known to be initialized (always <= raw.len()).
    init: usize,
}
+
/// The cursor part of a [`ReadBuf`].
///
/// This is created by calling `ReadBuf::unfilled()`.
#[derive(Debug)]
pub struct ReadBufCursor<'a> {
    // Borrow of the owning buffer; writes land in `buf.raw[buf.filled..]`.
    buf: &'a mut ReadBuf<'a>,
}
+
impl<'data> ReadBuf<'data> {
    /// Create a new `ReadBuf` with a slice of initialized bytes.
    #[inline]
    pub fn new(raw: &'data mut [u8]) -> Self {
        let len = raw.len();
        Self {
            // SAFETY: We never de-init the bytes ourselves.
            raw: unsafe { &mut *(raw as *mut [u8] as *mut [MaybeUninit<u8>]) },
            filled: 0,
            init: len,
        }
    }

    /// Create a new `ReadBuf` with a slice of uninitialized bytes.
    #[inline]
    pub fn uninit(raw: &'data mut [MaybeUninit<u8>]) -> Self {
        Self {
            raw,
            filled: 0,
            init: 0,
        }
    }

    /// Get a slice of the buffer that has been filled in with bytes.
    #[inline]
    pub fn filled(&self) -> &[u8] {
        // SAFETY: We only slice the filled part of the buffer, which is always valid
        unsafe { &*(&self.raw[0..self.filled] as *const [MaybeUninit<u8>] as *const [u8]) }
    }

    /// Get a cursor to the unfilled portion of the buffer.
    #[inline]
    pub fn unfilled<'cursor>(&'cursor mut self) -> ReadBufCursor<'cursor> {
        ReadBufCursor {
            // SAFETY: self.buf is never re-assigned, so its safe to narrow
            // the lifetime.
            buf: unsafe {
                std::mem::transmute::<&'cursor mut ReadBuf<'data>, &'cursor mut ReadBuf<'cursor>>(
                    self,
                )
            },
        }
    }

    /// Asserts that at least the first `n` bytes are initialized.
    ///
    /// # Safety
    ///
    /// The caller must ensure the first `n` bytes of the buffer really have
    /// been initialized.
    #[inline]
    #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
    pub(crate) unsafe fn set_init(&mut self, n: usize) {
        self.init = self.init.max(n);
    }

    /// Advances the filled region to at least `n` bytes.
    ///
    /// # Safety
    ///
    /// NOTE(review): callers appear responsible for keeping `n` within the
    /// initialized region — confirm at call sites.
    #[inline]
    #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
    pub(crate) unsafe fn set_filled(&mut self, n: usize) {
        self.filled = self.filled.max(n);
    }

    /// Number of filled bytes.
    #[inline]
    #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
    pub(crate) fn len(&self) -> usize {
        self.filled
    }

    /// Number of initialized bytes.
    #[inline]
    #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))]
    pub(crate) fn init_len(&self) -> usize {
        self.init
    }

    /// Bytes of capacity not yet filled.
    #[inline]
    fn remaining(&self) -> usize {
        self.capacity() - self.filled
    }

    /// Total length of the backing slice.
    #[inline]
    fn capacity(&self) -> usize {
        self.raw.len()
    }
}
+
// Manual Debug impl: reports only the cursors and capacity, never the
// (possibly uninitialized) byte contents.
impl fmt::Debug for ReadBuf<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ReadBuf")
            .field("filled", &self.filled)
            .field("init", &self.init)
            .field("capacity", &self.capacity())
            .finish()
    }
}
+
impl ReadBufCursor<'_> {
    /// Access the unfilled part of the buffer.
    ///
    /// # Safety
    ///
    /// The caller must not uninitialize any bytes that may have been
    /// initialized before.
    #[inline]
    pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        &mut self.buf.raw[self.buf.filled..]
    }

    /// Advance the `filled` cursor by `n` bytes.
    ///
    /// # Safety
    ///
    /// The caller must take care that `n` more bytes have been initialized.
    #[inline]
    pub unsafe fn advance(&mut self, n: usize) {
        self.buf.filled = self.buf.filled.checked_add(n).expect("overflow");
        // Filled bytes are by definition initialized, so keep `init` in sync.
        self.buf.init = self.buf.filled.max(self.buf.init);
    }

    /// Returns the number of bytes that can be written from the current
    /// position until the end of the buffer is reached.
    ///
    /// This value is equal to the length of the slice returned by `as_mut()`.
    #[inline]
    pub fn remaining(&self) -> usize {
        self.buf.remaining()
    }

    /// Transfer bytes into `self` from `src` and advance the cursor
    /// by the number of bytes written.
    ///
    /// # Panics
    ///
    /// `self` must have enough remaining capacity to contain all of `src`.
    #[inline]
    pub fn put_slice(&mut self, src: &[u8]) {
        assert!(
            self.buf.remaining() >= src.len(),
            "src.len() must fit in remaining()"
        );

        let amt = src.len();
        // Cannot overflow, asserted above
        let end = self.buf.filled + amt;

        // Safety: the length is asserted above
        unsafe {
            self.buf.raw[self.buf.filled..end]
                .as_mut_ptr()
                .cast::<u8>()
                .copy_from_nonoverlapping(src.as_ptr(), amt);
        }

        if self.buf.init < end {
            self.buf.init = end;
        }
        self.buf.filled = end;
    }
}
+
// Generates a `poll_read` that forwards through one level of deref; used by
// the `Box<T>` and `&mut T` impls below.
macro_rules! deref_async_read {
    () => {
        fn poll_read(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: ReadBufCursor<'_>,
        ) -> Poll<std::io::Result<()>> {
            // `T: Unpin`, so re-pinning the target via `Pin::new` is sound.
            Pin::new(&mut **self).poll_read(cx, buf)
        }
    };
}
+
// Delegating impls so boxed and mutably-borrowed readers can be used
// wherever `Read` is expected.
impl<T: ?Sized + Read + Unpin> Read for Box<T> {
    deref_async_read!();
}

impl<T: ?Sized + Read + Unpin> Read for &mut T {
    deref_async_read!();
}
+
// `Pin<P>` is transparently readable when its pointee is.
impl<P> Read for Pin<P>
where
    P: DerefMut,
    P::Target: Read,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: ReadBufCursor<'_>,
    ) -> Poll<std::io::Result<()>> {
        pin_as_deref_mut(self).poll_read(cx, buf)
    }
}
+
// Generates the full `Write` surface forwarding through one level of deref
// (including the vectored-write fast path); used by the `Box<T>` and
// `&mut T` impls below.
macro_rules! deref_async_write {
    () => {
        fn poll_write(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<std::io::Result<usize>> {
            Pin::new(&mut **self).poll_write(cx, buf)
        }

        fn poll_write_vectored(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            bufs: &[std::io::IoSlice<'_>],
        ) -> Poll<std::io::Result<usize>> {
            Pin::new(&mut **self).poll_write_vectored(cx, bufs)
        }

        fn is_write_vectored(&self) -> bool {
            (**self).is_write_vectored()
        }

        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
            Pin::new(&mut **self).poll_flush(cx)
        }

        fn poll_shutdown(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<std::io::Result<()>> {
            Pin::new(&mut **self).poll_shutdown(cx)
        }
    };
}
+
// Delegating impls so boxed and mutably-borrowed writers can be used
// wherever `Write` is expected.
impl<T: ?Sized + Write + Unpin> Write for Box<T> {
    deref_async_write!();
}

impl<T: ?Sized + Write + Unpin> Write for &mut T {
    deref_async_write!();
}
+
// `Pin<P>` is transparently writable when its pointee is.
impl<P> Write for Pin<P>
where
    P: DerefMut,
    P::Target: Write,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        pin_as_deref_mut(self).poll_write(cx, buf)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<std::io::Result<usize>> {
        pin_as_deref_mut(self).poll_write_vectored(cx, bufs)
    }

    fn is_write_vectored(&self) -> bool {
        (**self).is_write_vectored()
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        pin_as_deref_mut(self).poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        pin_as_deref_mut(self).poll_shutdown(cx)
    }
}
+
/// Polyfill for Pin::as_deref_mut()
/// (Unwraps the outer pin, then delegates to `Pin::as_mut` on the inner one.)
/// TODO: use Pin::as_deref_mut() instead once stabilized
fn pin_as_deref_mut<P: DerefMut>(pin: Pin<&mut Pin<P>>) -> Pin<&mut P::Target> {
    // SAFETY: we go directly from Pin<&mut Pin<P>> to Pin<&mut P::Target>, without moving or
    // giving out the &mut Pin<P> in the process. See Pin::as_deref_mut() for more detail.
    unsafe { pin.get_unchecked_mut() }.as_mut()
}
diff --git a/vendor/hyper/src/rt/mod.rs b/vendor/hyper/src/rt/mod.rs
new file mode 100644
index 00000000..de67c3fc
--- /dev/null
+++ b/vendor/hyper/src/rt/mod.rs
@@ -0,0 +1,42 @@
+//! Runtime components
+//!
+//! The traits and types within this module are used to allow plugging in
+//! runtime types. These include:
+//!
+//! - Executors
+//! - Timers
+//! - IO transports
+
+pub mod bounds;
+mod io;
+mod timer;
+
+pub use self::io::{Read, ReadBuf, ReadBufCursor, Write};
+pub use self::timer::{Sleep, Timer};
+
/// An executor of futures.
///
/// This trait allows Hyper to abstract over async runtimes. Implement this trait for your own type.
///
/// # Example
///
/// ```
/// # use hyper::rt::Executor;
/// # use std::future::Future;
/// #[derive(Clone)]
/// struct TokioExecutor;
///
/// impl<F> Executor<F> for TokioExecutor
/// where
///     F: Future + Send + 'static,
///     F::Output: Send + 'static,
/// {
///     fn execute(&self, future: F) {
///         tokio::spawn(future);
///     }
/// }
/// ```
pub trait Executor<Fut> {
    /// Place the future into the executor to be run.
    ///
    /// Implementations typically spawn the future onto a runtime so it runs
    /// to completion in the background (see the example above).
    fn execute(&self, fut: Fut);
}
diff --git a/vendor/hyper/src/rt/timer.rs b/vendor/hyper/src/rt/timer.rs
new file mode 100644
index 00000000..c6a6f1db
--- /dev/null
+++ b/vendor/hyper/src/rt/timer.rs
@@ -0,0 +1,127 @@
+//! Provides a timer trait with timer-like functions
+//!
+//! Example using tokio timer:
+//! ```rust
+//! use std::{
+//! future::Future,
+//! pin::Pin,
+//! task::{Context, Poll},
+//! time::{Duration, Instant},
+//! };
+//!
+//! use pin_project_lite::pin_project;
+//! use hyper::rt::{Timer, Sleep};
+//!
+//! #[derive(Clone, Debug)]
+//! pub struct TokioTimer;
+//!
+//! impl Timer for TokioTimer {
+//! fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
+//! Box::pin(TokioSleep {
+//! inner: tokio::time::sleep(duration),
+//! })
+//! }
+//!
+//! fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
+//! Box::pin(TokioSleep {
+//! inner: tokio::time::sleep_until(deadline.into()),
+//! })
+//! }
+//!
+//! fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+//! if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
+//! sleep.reset(new_deadline.into())
+//! }
+//! }
+//! }
+//!
+//! pin_project! {
+//! pub(crate) struct TokioSleep {
+//! #[pin]
+//! pub(crate) inner: tokio::time::Sleep,
+//! }
+//! }
+//!
+//! impl Future for TokioSleep {
+//! type Output = ();
+//!
+//! fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+//! self.project().inner.poll(cx)
+//! }
+//! }
+//!
+//! impl Sleep for TokioSleep {}
+//!
+//! impl TokioSleep {
+//! pub fn reset(self: Pin<&mut Self>, deadline: Instant) {
+//! self.project().inner.as_mut().reset(deadline.into());
+//! }
+//! }
+//! ```
+
+use std::{
+ any::TypeId,
+ future::Future,
+ pin::Pin,
+ time::{Duration, Instant},
+};
+
/// A timer which provides timer-like functions.
pub trait Timer {
    /// Return a future that resolves in `duration` time.
    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>>;

    /// Return a future that resolves at `deadline`.
    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>>;

    /// Reset a future to resolve at `new_deadline` instead.
    ///
    /// The default implementation replaces the sleep with a brand new one;
    /// implementors can override this to reset in place (see the
    /// module-level example, which downcasts via `downcast_mut_pin`).
    fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
        *sleep = self.sleep_until(new_deadline);
    }
}
+
/// A future returned by a `Timer`.
pub trait Sleep: Send + Sync + Future<Output = ()> {
    #[doc(hidden)]
    /// This method is private and cannot be implemented by downstream crates
    /// (the `private::Sealed` token argument is unnameable outside this crate);
    /// it exists to support the `dyn Sleep` downcasting below.
    fn __type_id(&self, _: private::Sealed) -> TypeId
    where
        Self: 'static,
    {
        TypeId::of::<Self>()
    }
}
+
impl dyn Sleep {
    //! This is a re-implementation of downcast methods from std::any::Any

    /// Check whether the type is the same as `T`
    pub fn is<T>(&self) -> bool
    where
        T: Sleep + 'static,
    {
        self.__type_id(private::Sealed {}) == TypeId::of::<T>()
    }

    /// Downcast a pinned &mut Sleep object to its original type
    pub fn downcast_mut_pin<T>(self: Pin<&mut Self>) -> Option<Pin<&mut T>>
    where
        T: Sleep + 'static,
    {
        if self.is::<T>() {
            // SAFETY: `is::<T>()` above proved the erased type is `T`, and the
            // value is never moved — we go Pin -> &mut -> Pin directly.
            unsafe {
                let inner = Pin::into_inner_unchecked(self);
                Some(Pin::new_unchecked(
                    &mut *(&mut *inner as *mut dyn Sleep as *mut T),
                ))
            }
        } else {
            None
        }
    }
}
+
// Token type proving a call originated inside this crate; prevents
// downstream code from calling or re-implementing `Sleep::__type_id`.
mod private {
    #![allow(missing_debug_implementations)]
    pub struct Sealed {}
}
diff --git a/vendor/hyper/src/server/conn/http1.rs b/vendor/hyper/src/server/conn/http1.rs
new file mode 100644
index 00000000..af703018
--- /dev/null
+++ b/vendor/hyper/src/server/conn/http1.rs
@@ -0,0 +1,544 @@
+//! HTTP/1 Server Connections
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use crate::rt::{Read, Write};
+use crate::upgrade::Upgraded;
+use bytes::Bytes;
+use futures_util::ready;
+
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::proto;
+use crate::service::HttpService;
+use crate::{
+ common::time::{Dur, Time},
+ rt::Timer,
+};
+
/// Shorthand for the h1 protocol dispatcher driving a server connection:
/// `T` is the IO transport, `B` the response body type, and `S` the service.
type Http1Dispatcher<T, B, S> = proto::h1::Dispatcher<
    proto::h1::dispatch::Server<S, IncomingBody>,
    B,
    T,
    proto::ServerTransaction,
>;
+
pin_project_lite::pin_project! {
    /// A [`Future`](core::future::Future) representing an HTTP/1 connection, bound to a
    /// [`Service`](crate::service::Service), returned from
    /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection).
    ///
    /// To drive HTTP on this connection this future **must be polled**, typically with
    /// `.await`. If it isn't polled, no progress will be made on this connection.
    #[must_use = "futures do nothing unless polled"]
    pub struct Connection<T, S>
    where
        S: HttpService<IncomingBody>,
    {
        // The h1 state machine that reads requests from the IO `T` and
        // writes the responses produced by the service `S`.
        conn: Http1Dispatcher<T, S::ResBody, S>,
    }
}
+
/// A configuration builder for HTTP/1 server connections.
///
/// **Note**: The default values of options are *not considered stable*. They
/// are subject to change at any time.
///
/// # Example
///
/// ```
/// # use std::time::Duration;
/// # use hyper::server::conn::http1::Builder;
/// # fn main() {
/// let mut http = Builder::new();
/// // Set options one at a time
/// http.half_close(false);
///
/// // Or, chain multiple options
/// http.keep_alive(false).title_case_headers(true).max_buf_size(8192);
///
/// # }
/// ```
///
/// Use [`Builder::serve_connection`](struct.Builder.html#method.serve_connection)
/// to bind the built connection to a service.
#[derive(Clone, Debug)]
pub struct Builder {
    // Low-level httparse options (e.g. whether invalid headers are ignored).
    h1_parser_config: httparse::ParserConfig,
    // Timer used for `header_read_timeout`; `Time::Empty` until `timer()` is called.
    timer: Time,
    h1_half_close: bool,
    h1_keep_alive: bool,
    h1_title_case_headers: bool,
    h1_preserve_header_case: bool,
    // `None` keeps hyper's stack-allocated default header buffer.
    h1_max_headers: Option<usize>,
    // Distinguishes the built-in default from a user-configured timeout.
    h1_header_read_timeout: Dur,
    // `None` = auto-detect the write strategy; `Some(bool)` forces it.
    h1_writev: Option<bool>,
    max_buf_size: Option<usize>,
    pipeline_flush: bool,
    date_header: bool,
}
+
/// Deconstructed parts of a `Connection`.
///
/// This allows taking apart a `Connection` at a later time, in order to
/// reclaim the IO object, and additional related pieces.
///
/// Returned by [`Connection::into_parts`].
#[derive(Debug)]
#[non_exhaustive]
pub struct Parts<T, S> {
    /// The original IO object used in the handshake.
    pub io: T,
    /// A buffer of bytes that have been read but not processed as HTTP.
    ///
    /// If the client sent additional bytes after its last request, and
    /// this connection "ended" with an upgrade, the read buffer will contain
    /// those bytes.
    ///
    /// You will want to check for any existing bytes if you plan to continue
    /// communicating on the IO object.
    pub read_buf: Bytes,
    /// The `Service` used to serve this connection.
    pub service: S,
}
+
+// ===== impl Connection =====
+
+impl<I, S> fmt::Debug for Connection<I, S>
+where
+ S: HttpService<IncomingBody>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
impl<I, B, S> Connection<I, S>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: Read + Write + Unpin,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    /// Start a graceful shutdown process for this connection.
    ///
    /// This `Connection` should continue to be polled until shutdown
    /// can finish.
    ///
    /// # Note
    ///
    /// This should only be called while the `Connection` future is still
    /// pending. If called after `Connection::poll` has resolved, this does
    /// nothing.
    pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
        // For HTTP/1, "graceful" means: finish the in-flight exchange and
        // then close instead of keeping the connection alive.
        self.conn.disable_keep_alive();
    }

    /// Return the inner IO object, and additional information.
    ///
    /// If the IO object has been "rewound" the io will not contain those bytes rewound.
    /// This should only be called after `poll_without_shutdown` signals
    /// that the connection is "done". Otherwise, it may not have finished
    /// flushing all necessary HTTP bytes.
    ///
    /// # Panics
    /// This method will panic if this connection is using an h2 protocol.
    pub fn into_parts(self) -> Parts<I, S> {
        let (io, read_buf, dispatch) = self.conn.into_inner();
        Parts {
            io,
            read_buf,
            service: dispatch.into_service(),
        }
    }

    /// Poll the connection for completion, but without calling `shutdown`
    /// on the underlying IO.
    ///
    /// This is useful to allow running a connection while doing an HTTP
    /// upgrade. Once the upgrade is completed, the connection would be "done",
    /// but it is not desired to actually shutdown the IO object. Instead you
    /// would take it back using `into_parts`.
    pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>>
    where
        S: Unpin,
        S::Future: Unpin,
    {
        self.conn.poll_without_shutdown(cx)
    }

    /// Prevent shutdown of the underlying IO object at the end of service the request,
    /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
    ///
    /// # Error
    ///
    /// This errors if the underlying connection protocol is not HTTP/1.
    pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<I, S>>> {
        // `zelf` holds `self` until the dispatcher reports readiness, at
        // which point it is consumed by `into_parts`. Polling the returned
        // future again after completion would panic on the `unwrap`s, which
        // is the standard contract for completed futures.
        let mut zelf = Some(self);
        futures_util::future::poll_fn(move |cx| {
            ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?;
            Poll::Ready(Ok(zelf.take().unwrap().into_parts()))
        })
    }

    /// Enable this connection to support higher-level HTTP upgrades.
    ///
    /// See [the `upgrade` module](crate::upgrade) for more.
    pub fn with_upgrades(self) -> UpgradeableConnection<I, S>
    where
        I: Send,
    {
        UpgradeableConnection { inner: Some(self) }
    }
}
+
impl<I, B, S> Future for Connection<I, S>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: Read + Write + Unpin,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Output = crate::Result<()>;

    // Drives the h1 dispatcher to completion. Resolves `Ok(())` on a clean
    // shutdown; see `UpgradeableConnection` for the variant that fulfills
    // upgrades instead of rejecting them.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match ready!(Pin::new(&mut self.conn).poll(cx)) {
            Ok(done) => {
                match done {
                    proto::Dispatched::Shutdown => {}
                    proto::Dispatched::Upgrade(pending) => {
                        // With no `Send` bound on `I`, we can't try to do
                        // upgrades here. In case a user was trying to use
                        // `Body::on_upgrade` with this API, send a special
                        // error letting them know about that.
                        pending.manual();
                    }
                };
                Poll::Ready(Ok(()))
            }
            Err(e) => Poll::Ready(Err(e)),
        }
    }
}
+
+// ===== impl Builder =====
+
impl Builder {
    /// Create a new connection builder.
    pub fn new() -> Self {
        Self {
            h1_parser_config: Default::default(),
            timer: Time::Empty,
            h1_half_close: false,
            h1_keep_alive: true,
            h1_title_case_headers: false,
            h1_preserve_header_case: false,
            h1_max_headers: None,
            h1_header_read_timeout: Dur::Default(Some(Duration::from_secs(30))),
            h1_writev: None,
            max_buf_size: None,
            pipeline_flush: false,
            date_header: true,
        }
    }

    /// Set whether HTTP/1 connections should support half-closures.
    ///
    /// Clients can choose to shutdown their write-side while waiting
    /// for the server to respond. Setting this to `true` will
    /// prevent closing the connection immediately if `read`
    /// detects an EOF in the middle of a request.
    ///
    /// Default is `false`.
    pub fn half_close(&mut self, val: bool) -> &mut Self {
        self.h1_half_close = val;
        self
    }

    /// Enables or disables HTTP/1 keep-alive.
    ///
    /// Default is true.
    pub fn keep_alive(&mut self, val: bool) -> &mut Self {
        self.h1_keep_alive = val;
        self
    }

    /// Set whether HTTP/1 connections will write header names as title case at
    /// the socket level.
    ///
    /// Default is false.
    pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self {
        self.h1_title_case_headers = enabled;
        self
    }

    /// Set whether HTTP/1 connections will silently ignore malformed header lines.
    ///
    /// If this is enabled and a header line does not start with a valid header
    /// name, or does not include a colon at all, the line will be silently ignored
    /// and no error will be reported.
    ///
    /// Default is false.
    pub fn ignore_invalid_headers(&mut self, enabled: bool) -> &mut Builder {
        self.h1_parser_config
            .ignore_invalid_headers_in_requests(enabled);
        self
    }

    /// Set whether to support preserving original header cases.
    ///
    /// Currently, this will record the original cases received, and store them
    /// in a private extension on the `Request`. It will also look for and use
    /// such an extension in any provided `Response`.
    ///
    /// Since the relevant extension is still private, there is no way to
    /// interact with the original cases. The only effect this can have now is
    /// to forward the cases in a proxy-like fashion.
    ///
    /// Default is false.
    pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self {
        self.h1_preserve_header_case = enabled;
        self
    }

    /// Set the maximum number of headers.
    ///
    /// When a request is received, the parser will reserve a buffer to store headers for optimal
    /// performance.
    ///
    /// If server receives more headers than the buffer size, it responds to the client with
    /// "431 Request Header Fields Too Large".
    ///
    /// Note that headers is allocated on the stack by default, which has higher performance. After
    /// setting this value, headers will be allocated in heap memory, that is, heap memory
    /// allocation will occur for each request, and there will be a performance drop of about 5%.
    ///
    /// Default is 100.
    pub fn max_headers(&mut self, val: usize) -> &mut Self {
        self.h1_max_headers = Some(val);
        self
    }

    /// Set a timeout for reading client request headers. If a client does not
    /// transmit the entire header within this time, the connection is closed.
    ///
    /// Requires a [`Timer`] set by [`Builder::timer`] to take effect. Panics if `header_read_timeout` is configured
    /// without a [`Timer`].
    ///
    /// Pass `None` to disable.
    ///
    /// Default is 30 seconds.
    pub fn header_read_timeout(&mut self, read_timeout: impl Into<Option<Duration>>) -> &mut Self {
        // `Dur::Configured` (vs. `Dur::Default`) marks that the user set this
        // explicitly, which is what makes a missing timer a panic later.
        self.h1_header_read_timeout = Dur::Configured(read_timeout.into());
        self
    }

    /// Set whether HTTP/1 connections should try to use vectored writes,
    /// or always flatten into a single buffer.
    ///
    /// Note that setting this to false may mean more copies of body data,
    /// but may also improve performance when an IO transport doesn't
    /// support vectored writes well, such as most TLS implementations.
    ///
    /// Setting this to true will force hyper to use queued strategy
    /// which may eliminate unnecessary cloning on some TLS backends
    ///
    /// Default is `auto`. In this mode hyper will try to guess which
    /// mode to use
    pub fn writev(&mut self, val: bool) -> &mut Self {
        self.h1_writev = Some(val);
        self
    }

    /// Set the maximum buffer size for the connection.
    ///
    /// Default is ~400kb.
    ///
    /// # Panics
    ///
    /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
    pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
        assert!(
            max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
            "the max_buf_size cannot be smaller than the minimum that h1 specifies."
        );
        self.max_buf_size = Some(max);
        self
    }

    /// Set whether the `date` header should be included in HTTP responses.
    ///
    /// Note that including the `date` header is recommended by RFC 7231.
    ///
    /// Default is true.
    pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self {
        self.date_header = enabled;
        self
    }

    /// Aggregates flushes to better support pipelined responses.
    ///
    /// Experimental, may have bugs.
    ///
    /// Default is false.
    pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self {
        self.pipeline_flush = enabled;
        self
    }

    /// Set the timer used in background tasks.
    pub fn timer<M>(&mut self, timer: M) -> &mut Self
    where
        M: Timer + Send + Sync + 'static,
    {
        self.timer = Time::Timer(Arc::new(timer));
        self
    }

    /// Bind a connection together with a [`Service`](crate::service::Service).
    ///
    /// This returns a Future that must be polled in order for HTTP to be
    /// driven on the connection.
    ///
    /// # Panics
    ///
    /// If a timeout option has been configured, but a `timer` has not been
    /// provided, calling `serve_connection` will panic.
    ///
    /// # Example
    ///
    /// ```
    /// # use hyper::{body::Incoming, Request, Response};
    /// # use hyper::service::Service;
    /// # use hyper::server::conn::http1::Builder;
    /// # use hyper::rt::{Read, Write};
    /// # async fn run<I, S>(some_io: I, some_service: S)
    /// # where
    /// # I: Read + Write + Unpin + Send + 'static,
    /// # S: Service<hyper::Request<Incoming>, Response=hyper::Response<Incoming>> + Send + 'static,
    /// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    /// # S::Future: Send,
    /// # {
    /// let http = Builder::new();
    /// let conn = http.serve_connection(some_io, some_service);
    ///
    /// if let Err(e) = conn.await {
    ///     eprintln!("server connection error: {}", e);
    /// }
    /// # }
    /// # fn main() {}
    /// ```
    pub fn serve_connection<I, S>(&self, io: I, service: S) -> Connection<I, S>
    where
        S: HttpService<IncomingBody>,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        S::ResBody: 'static,
        <S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
        I: Read + Write + Unpin,
    {
        // Transfer every builder option onto the protocol state machine.
        let mut conn = proto::Conn::new(io);
        conn.set_h1_parser_config(self.h1_parser_config.clone());
        conn.set_timer(self.timer.clone());
        if !self.h1_keep_alive {
            conn.disable_keep_alive();
        }
        if self.h1_half_close {
            conn.set_allow_half_close();
        }
        if self.h1_title_case_headers {
            conn.set_title_case_headers();
        }
        if self.h1_preserve_header_case {
            conn.set_preserve_header_case();
        }
        if let Some(max_headers) = self.h1_max_headers {
            conn.set_http1_max_headers(max_headers);
        }
        // `Time::check` panics if a timeout was explicitly configured but no
        // timer was provided (the documented panic above).
        if let Some(dur) = self
            .timer
            .check(self.h1_header_read_timeout, "header_read_timeout")
        {
            conn.set_http1_header_read_timeout(dur);
        };
        if let Some(writev) = self.h1_writev {
            if writev {
                conn.set_write_strategy_queue();
            } else {
                conn.set_write_strategy_flatten();
            }
        }
        conn.set_flush_pipeline(self.pipeline_flush);
        if let Some(max) = self.max_buf_size {
            conn.set_max_buf_size(max);
        }
        if !self.date_header {
            conn.disable_date_header();
        }
        let sd = proto::h1::dispatch::Server::new(service);
        let proto = proto::h1::Dispatcher::new(sd, conn);
        Connection { conn: proto }
    }
}
+
/// A future binding a connection with a Service with Upgrade support.
#[must_use = "futures do nothing unless polled"]
#[allow(missing_debug_implementations)]
pub struct UpgradeableConnection<T, S>
where
    S: HttpService<IncomingBody>,
{
    // `Some` while the connection is being driven; taken (set to `None`)
    // once `poll` fulfills an upgrade and hands the IO to `Upgraded`.
    pub(super) inner: Option<Connection<T, S>>,
}
+
impl<I, B, S> UpgradeableConnection<I, S>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: Read + Write + Unpin,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    /// Start a graceful shutdown process for this connection.
    ///
    /// This `Connection` should continue to be polled until shutdown
    /// can finish.
    pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
        // Connection (`inner`) is `None` if it was upgraded (and `poll` is `Ready`).
        // In that case, we don't need to call `graceful_shutdown`.
        if let Some(conn) = self.inner.as_mut() {
            Pin::new(conn).graceful_shutdown()
        }
    }
}
+
impl<I, B, S> Future for UpgradeableConnection<I, S>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: Read + Write + Unpin + Send + 'static,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Output = crate::Result<()>;

    // Unlike `Connection::poll` (which rejects upgrades via
    // `pending.manual()`), the `I: Send + 'static` bound here lets a pending
    // upgrade be fulfilled with the reclaimed IO object.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if let Some(conn) = self.inner.as_mut() {
            match ready!(Pin::new(&mut conn.conn).poll(cx)) {
                Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),
                Ok(proto::Dispatched::Upgrade(pending)) => {
                    // Take the connection apart and hand its IO (plus any
                    // already-buffered bytes) to the upgrade future.
                    let (io, buf, _) = self.inner.take().unwrap().conn.into_inner();
                    pending.fulfill(Upgraded::new(io, buf));
                    Poll::Ready(Ok(()))
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        } else {
            // inner is `None`, meaning the connection was upgraded, thus it's `Poll::Ready(Ok(()))`
            Poll::Ready(Ok(()))
        }
    }
}
diff --git a/vendor/hyper/src/server/conn/http2.rs b/vendor/hyper/src/server/conn/http2.rs
new file mode 100644
index 00000000..e0d61c13
--- /dev/null
+++ b/vendor/hyper/src/server/conn/http2.rs
@@ -0,0 +1,312 @@
+//! HTTP/2 Server Connections
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use crate::rt::{Read, Write};
+use futures_util::ready;
+use pin_project_lite::pin_project;
+
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::proto;
+use crate::rt::bounds::Http2ServerConnExec;
+use crate::service::HttpService;
+use crate::{common::time::Time, rt::Timer};
+
pin_project! {
    /// A [`Future`](core::future::Future) representing an HTTP/2 connection, bound to a
    /// [`Service`](crate::service::Service), returned from
    /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection).
    ///
    /// To drive HTTP on this connection this future **must be polled**, typically with
    /// `.await`. If it isn't polled, no progress will be made on this connection.
    #[must_use = "futures do nothing unless polled"]
    pub struct Connection<T, S, E>
    where
        S: HttpService<IncomingBody>,
    {
        // The h2 protocol server multiplexing streams over the IO `T`,
        // spawning response futures via the executor `E`.
        conn: proto::h2::Server<T, S, S::ResBody, E>,
    }
}
+
/// A configuration builder for HTTP/2 server connections.
///
/// **Note**: The default values of options are *not considered stable*. They
/// are subject to change at any time.
#[derive(Clone, Debug)]
pub struct Builder<E> {
    // Executor handed to `proto::h2::Server` in `serve_connection`.
    exec: E,
    // Timer for keep-alive pings; `Time::Empty` until `timer()` is called.
    timer: Time,
    // Accumulated h2 protocol settings applied at `serve_connection` time.
    h2_builder: proto::h2::server::Config,
}
+
+// ===== impl Connection =====
+
+impl<I, S, E> fmt::Debug for Connection<I, S, E>
+where
+ S: HttpService<IncomingBody>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
impl<I, B, S, E> Connection<I, S, E>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: Read + Write + Unpin,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
    E: Http2ServerConnExec<S::Future, B>,
{
    /// Start a graceful shutdown process for this connection.
    ///
    /// This `Connection` should continue to be polled until shutdown
    /// can finish.
    ///
    /// # Note
    ///
    /// This should only be called while the `Connection` future is still
    /// pending. If called after `Connection::poll` has resolved, this does
    /// nothing.
    pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
        // Delegates to the h2 protocol server's graceful shutdown.
        self.conn.graceful_shutdown();
    }
}
+
impl<I, B, S, E> Future for Connection<I, S, E>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: Read + Write + Unpin,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
    E: Http2ServerConnExec<S::Future, B>,
{
    type Output = crate::Result<()>;

    // Drives the h2 protocol server until the connection closes;
    // HTTP/2 has no h1-style upgrade path, so completion is always shutdown.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match ready!(Pin::new(&mut self.conn).poll(cx)) {
            Ok(_done) => {
                //TODO: the proto::h2::Server no longer needs to return
                //the Dispatched enum
                Poll::Ready(Ok(()))
            }
            Err(e) => Poll::Ready(Err(e)),
        }
    }
}
+
+// ===== impl Builder =====
+
impl<E> Builder<E> {
    /// Create a new connection builder.
    ///
    /// This starts with the default options, and an executor which is a type
    /// that implements [`Http2ServerConnExec`] trait.
    ///
    /// [`Http2ServerConnExec`]: crate::rt::bounds::Http2ServerConnExec
    pub fn new(exec: E) -> Self {
        Self {
            exec,
            timer: Time::Empty,
            h2_builder: Default::default(),
        }
    }

    /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent.
    ///
    /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2).
    /// As of v0.4.0, it is 20.
    ///
    /// See <https://github.com/hyperium/hyper/issues/2877> for more information.
    pub fn max_pending_accept_reset_streams(&mut self, max: impl Into<Option<usize>>) -> &mut Self {
        self.h2_builder.max_pending_accept_reset_streams = max.into();
        self
    }

    /// Configures the maximum number of local reset streams allowed before a GOAWAY will be sent.
    ///
    /// If not set, hyper will use a default, currently of 1024.
    ///
    /// If `None` is supplied, hyper will not apply any limit.
    /// This is not advised, as it can potentially expose servers to DOS vulnerabilities.
    ///
    /// See <https://rustsec.org/advisories/RUSTSEC-2024-0003.html> for more information.
    #[cfg(feature = "http2")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
    pub fn max_local_error_reset_streams(&mut self, max: impl Into<Option<usize>>) -> &mut Self {
        self.h2_builder.max_local_error_reset_streams = max.into();
        self
    }

    /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
    /// stream-level flow control.
    ///
    /// Passing `None` will do nothing.
    ///
    /// If not set, hyper will use a default.
    ///
    /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE
    pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
        if let Some(sz) = sz.into() {
            // An explicit window size turns adaptive (BDP-estimated) flow
            // control back off; see `adaptive_window` below.
            self.h2_builder.adaptive_window = false;
            self.h2_builder.initial_stream_window_size = sz;
        }
        self
    }

    /// Sets the max connection-level flow control for HTTP2.
    ///
    /// Passing `None` will do nothing.
    ///
    /// If not set, hyper will use a default.
    pub fn initial_connection_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
        if let Some(sz) = sz.into() {
            self.h2_builder.adaptive_window = false;
            self.h2_builder.initial_conn_window_size = sz;
        }
        self
    }

    /// Sets whether to use an adaptive flow control.
    ///
    /// Enabling this will override the limits set in
    /// `initial_stream_window_size` and
    /// `initial_connection_window_size`.
    pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
        use proto::h2::SPEC_WINDOW_SIZE;

        self.h2_builder.adaptive_window = enabled;
        if enabled {
            // Adaptive mode starts from the RFC 9113 spec-default window and
            // grows it as needed.
            self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
            self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
        }
        self
    }

    /// Sets the maximum frame size to use for HTTP2.
    ///
    /// Passing `None` will do nothing.
    ///
    /// If not set, hyper will use a default.
    pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
        if let Some(sz) = sz.into() {
            self.h2_builder.max_frame_size = sz;
        }
        self
    }

    /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
    /// connections.
    ///
    /// Default is 200, but not part of the stability of hyper. It could change
    /// in a future release. You are encouraged to set your own limit.
    ///
    /// Passing `None` will remove any limit.
    ///
    /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_MAX_CONCURRENT_STREAMS
    pub fn max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
        self.h2_builder.max_concurrent_streams = max.into();
        self
    }

    /// Sets an interval at which HTTP2 Ping frames should be sent to keep a
    /// connection alive.
    ///
    /// Pass `None` to disable HTTP2 keep-alive.
    ///
    /// Default is currently disabled.
    pub fn keep_alive_interval(&mut self, interval: impl Into<Option<Duration>>) -> &mut Self {
        self.h2_builder.keep_alive_interval = interval.into();
        self
    }

    /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
    ///
    /// If the ping is not acknowledged within the timeout, the connection will
    /// be closed. Does nothing if `keep_alive_interval` is disabled.
    ///
    /// Default is 20 seconds.
    pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
        self.h2_builder.keep_alive_timeout = timeout;
        self
    }

    /// Set the maximum write buffer size for each HTTP/2 stream.
    ///
    /// Default is currently ~400KB, but may change.
    ///
    /// # Panics
    ///
    /// The value must be no larger than `u32::MAX`.
    pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
        assert!(max <= u32::MAX as usize);
        self.h2_builder.max_send_buffer_size = max;
        self
    }

    /// Enables the [extended CONNECT protocol].
    ///
    /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
    pub fn enable_connect_protocol(&mut self) -> &mut Self {
        self.h2_builder.enable_connect_protocol = true;
        self
    }

    /// Sets the max size of received header frames.
    ///
    /// Default is currently 16KB, but can change.
    pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
        self.h2_builder.max_header_list_size = max;
        self
    }

    /// Set the timer used in background tasks.
    pub fn timer<M>(&mut self, timer: M) -> &mut Self
    where
        M: Timer + Send + Sync + 'static,
    {
        self.timer = Time::Timer(Arc::new(timer));
        self
    }

    /// Set whether the `date` header should be included in HTTP responses.
    ///
    /// Note that including the `date` header is recommended by RFC 7231.
    ///
    /// Default is true.
    pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self {
        self.h2_builder.date_header = enabled;
        self
    }

    /// Bind a connection together with a [`Service`](crate::service::Service).
    ///
    /// This returns a Future that must be polled in order for HTTP to be
    /// driven on the connection.
    pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E>
    where
        S: HttpService<IncomingBody, ResBody = Bd>,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        Bd: Body + 'static,
        Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
        I: Read + Write + Unpin,
        E: Http2ServerConnExec<S::Future, Bd>,
    {
        let proto = proto::h2::Server::new(
            io,
            service,
            &self.h2_builder,
            self.exec.clone(),
            self.timer.clone(),
        );
        Connection { conn: proto }
    }
}
diff --git a/vendor/hyper/src/server/conn/mod.rs b/vendor/hyper/src/server/conn/mod.rs
new file mode 100644
index 00000000..54b309e8
--- /dev/null
+++ b/vendor/hyper/src/server/conn/mod.rs
@@ -0,0 +1,20 @@
+//! Server connection API.
+//!
+//! The types in this module are to provide a lower-level API based around a
+//! single connection. Accepting a connection and binding it with a service
+//! are not handled at this level. This module provides the building blocks to
+//! customize those things externally.
+//!
+//! This module is split by HTTP version, providing a connection builder for
+//! each. They work similarly, but they each have specific options.
+//!
+//! If your server needs to support both versions, an auto-connection builder is
+//! provided in the [`hyper-util`](https://github.com/hyperium/hyper-util/tree/master)
+//! crate. This builder wraps the HTTP/1 and HTTP/2 connection builders from this
+//! module, allowing you to set configuration for both. The builder will then check
+//! the version of the incoming connection and serve it accordingly.
+
+#[cfg(feature = "http1")]
+pub mod http1;
+#[cfg(feature = "http2")]
+pub mod http2;
diff --git a/vendor/hyper/src/server/mod.rs b/vendor/hyper/src/server/mod.rs
new file mode 100644
index 00000000..980553e5
--- /dev/null
+++ b/vendor/hyper/src/server/mod.rs
@@ -0,0 +1,9 @@
+//! HTTP Server
+//!
+//! A "server" is usually created by listening on a port for new connections,
+//! parsing HTTP requests, and handing them off to a `Service`.
+//!
+//! How exactly you choose to listen for connections is not something hyper
+//! concerns itself with. After you have a connection, you can handle HTTP over
+//! it with the types in the [`conn`] module.
+pub mod conn;
diff --git a/vendor/hyper/src/service/http.rs b/vendor/hyper/src/service/http.rs
new file mode 100644
index 00000000..dd174316
--- /dev/null
+++ b/vendor/hyper/src/service/http.rs
@@ -0,0 +1,52 @@
+use std::error::Error as StdError;
+use std::future::Future;
+
+use crate::body::Body;
+use crate::service::service::Service;
+use crate::{Request, Response};
+
/// An asynchronous function from `Request` to `Response`.
// Sealed via the `sealed::Sealed` supertrait: downstream crates obtain this
// trait only through the blanket impl for `Service` below, never by
// implementing it directly.
pub trait HttpService<ReqBody>: sealed::Sealed<ReqBody> {
    /// The `Body` body of the `http::Response`.
    type ResBody: Body;

    /// The error type that can occur within this `Service`.
    ///
    /// Note: Returning an `Error` to a hyper server will cause the connection
    /// to be abruptly aborted. In most cases, it is better to return a `Response`
    /// with a 4xx or 5xx status code.
    type Error: Into<Box<dyn StdError + Send + Sync>>;

    /// The `Future` returned by this `Service`.
    type Future: Future<Output = Result<Response<Self::ResBody>, Self::Error>>;

    #[doc(hidden)]
    fn call(&mut self, req: Request<ReqBody>) -> Self::Future;
}
+
// Blanket impl: anything implementing `Service<Request<B1>>` with an HTTP
// response is usable wherever hyper asks for an `HttpService`.
impl<T, B1, B2> HttpService<B1> for T
where
    T: Service<Request<B1>, Response = Response<B2>>,
    B2: Body,
    T::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type ResBody = B2;

    type Error = T::Error;
    type Future = T::Future;

    fn call(&mut self, req: Request<B1>) -> Self::Future {
        // Fully qualified to delegate to `Service::call` rather than
        // re-entering `HttpService::call`.
        Service::call(self, req)
    }
}
+
// The seal mirrors the blanket `HttpService` impl exactly, so every type
// that satisfies the blanket impl's bounds is (and only it is) sealed in.
impl<T, B1, B2> sealed::Sealed<B1> for T
where
    T: Service<Request<B1>, Response = Response<B2>>,
    B2: Body,
{
}
+
mod sealed {
    // Unnameable outside this module, preventing external `HttpService` impls.
    pub trait Sealed<T> {}
}
diff --git a/vendor/hyper/src/service/mod.rs b/vendor/hyper/src/service/mod.rs
new file mode 100644
index 00000000..28ffaddb
--- /dev/null
+++ b/vendor/hyper/src/service/mod.rs
@@ -0,0 +1,30 @@
+//! Asynchronous Services
+//!
+//! A [`Service`] is a trait representing an asynchronous
+//! function of a request to a response. It's similar to
+//! `async fn(Request) -> Result<Response, Error>`.
+//!
+//! The argument and return value isn't strictly required to be for HTTP.
+//! Therefore, hyper uses several "trait aliases" to reduce clutter around
+//! bounds. These are:
+//!
+//! - `HttpService`: This is blanketly implemented for all types that
+//! implement `Service<http::Request<B1>, Response = http::Response<B2>>`.
+//!
+//! # HttpService
+//!
+//! In hyper, especially in the server setting, a `Service` is usually bound
+//! to a single connection. It defines how to respond to **all** requests that
+//! connection will receive.
+//!
+//! The helper [`service_fn`] should be sufficient for most cases, but
+//! if you need to implement `Service` for a type manually, you can follow the example
+//! in `service_struct_impl.rs`.
+
+mod http;
+mod service;
+mod util;
+
+pub use self::http::HttpService;
+pub use self::service::Service;
+pub use self::util::service_fn;
diff --git a/vendor/hyper/src/service/service.rs b/vendor/hyper/src/service/service.rs
new file mode 100644
index 00000000..42c18e72
--- /dev/null
+++ b/vendor/hyper/src/service/service.rs
@@ -0,0 +1,100 @@
+use std::future::Future;
+
/// An asynchronous function from a `Request` to a `Response`.
///
/// The `Service` trait is a simplified interface making it easy to write
/// network applications in a modular and reusable way, decoupled from the
/// underlying protocol.
///
/// # Functional
///
/// A `Service` is a function of a `Request`. It immediately returns a
/// `Future` representing the eventual completion of processing the
/// request. The actual request processing may happen at any time in the
/// future, on any thread or executor. The processing may depend on calling
/// other services. At some point in the future, the processing will complete,
/// and the `Future` will resolve to a response or error.
///
/// At a high level, the `Service::call` function represents an RPC request. The
/// `Service` value can be a server or a client.
pub trait Service<Request> {
    /// Responses given by the service.
    type Response;

    /// Errors produced by the service.
    ///
    /// Note: Returning an `Error` to a hyper server, the behavior depends on the
    /// protocol. In most cases, hyper will cause the connection to be abruptly aborted.
    /// It will abort the request however the protocol allows, either with some sort of RST_STREAM,
    /// or killing the connection if that doesn't exist.
    type Error;

    /// The future response value, which must resolve to
    /// `Result<Self::Response, Self::Error>`.
    type Future: Future<Output = Result<Self::Response, Self::Error>>;

    /// Process the request and return the response asynchronously.
    /// `call` takes `&self` instead of `&mut self` because:
    /// - It prepares the way for async fn,
    ///   since then the future only borrows `&self`, and thus a Service can concurrently handle
    ///   multiple outstanding requests at once.
    /// - It's clearer that Services can likely be cloned
    /// - To share state across clones, you generally need `Arc<Mutex<_>>`,
    ///   which means you're not really using the `&mut self` and could do with a `&self`.
    ///
    /// The discussion on this is here: <https://github.com/hyperium/hyper/issues/3040>
    fn call(&self, req: Request) -> Self::Future;
}
+
+impl<Request, S: Service<Request> + ?Sized> Service<Request> for &'_ S {
+ type Response = S::Response;
+ type Error = S::Error;
+ type Future = S::Future;
+
+ #[inline]
+ fn call(&self, req: Request) -> Self::Future {
+ (**self).call(req)
+ }
+}
+
+impl<Request, S: Service<Request> + ?Sized> Service<Request> for &'_ mut S {
+ type Response = S::Response;
+ type Error = S::Error;
+ type Future = S::Future;
+
+ #[inline]
+ fn call(&self, req: Request) -> Self::Future {
+ (**self).call(req)
+ }
+}
+
+impl<Request, S: Service<Request> + ?Sized> Service<Request> for Box<S> {
+ type Response = S::Response;
+ type Error = S::Error;
+ type Future = S::Future;
+
+ #[inline]
+ fn call(&self, req: Request) -> Self::Future {
+ (**self).call(req)
+ }
+}
+
+impl<Request, S: Service<Request> + ?Sized> Service<Request> for std::rc::Rc<S> {
+ type Response = S::Response;
+ type Error = S::Error;
+ type Future = S::Future;
+
+ #[inline]
+ fn call(&self, req: Request) -> Self::Future {
+ (**self).call(req)
+ }
+}
+
+impl<Request, S: Service<Request> + ?Sized> Service<Request> for std::sync::Arc<S> {
+ type Response = S::Response;
+ type Error = S::Error;
+ type Future = S::Future;
+
+ #[inline]
+ fn call(&self, req: Request) -> Self::Future {
+ (**self).call(req)
+ }
+}
diff --git a/vendor/hyper/src/service/util.rs b/vendor/hyper/src/service/util.rs
new file mode 100644
index 00000000..3e017a78
--- /dev/null
+++ b/vendor/hyper/src/service/util.rs
@@ -0,0 +1,82 @@
+use std::error::Error as StdError;
+use std::fmt;
+use std::future::Future;
+use std::marker::PhantomData;
+
+use crate::body::Body;
+use crate::service::service::Service;
+use crate::{Request, Response};
+
+/// Create a `Service` from a function.
+///
+/// # Example
+///
+/// ```
+/// use bytes::Bytes;
+/// use hyper::{body, Request, Response, Version};
+/// use http_body_util::Full;
+/// use hyper::service::service_fn;
+///
+/// let service = service_fn(|req: Request<body::Incoming>| async move {
+/// if req.version() == Version::HTTP_11 {
+/// Ok(Response::new(Full::<Bytes>::from("Hello World")))
+/// } else {
+/// // Note: it's usually better to return a Response
+/// // with an appropriate StatusCode instead of an Err.
+/// Err("not HTTP/1.1, abort connection")
+/// }
+/// });
+/// ```
+pub fn service_fn<F, R, S>(f: F) -> ServiceFn<F, R>
+where
+ F: Fn(Request<R>) -> S,
+ S: Future,
+{
+ ServiceFn {
+ f,
+ _req: PhantomData,
+ }
+}
+
/// Service returned by [`service_fn`]
pub struct ServiceFn<F, R> {
    // The wrapped closure; called once per request.
    f: F,
    // `PhantomData<fn(R)>` records the request-body type parameter without
    // owning an `R`; a `fn` pointer is always `Send + Sync`, so the marker
    // never restricts the auto traits of `ServiceFn`.
    _req: PhantomData<fn(R)>,
}
+
+impl<F, ReqBody, Ret, ResBody, E> Service<Request<ReqBody>> for ServiceFn<F, ReqBody>
+where
+ F: Fn(Request<ReqBody>) -> Ret,
+ ReqBody: Body,
+ Ret: Future<Output = Result<Response<ResBody>, E>>,
+ E: Into<Box<dyn StdError + Send + Sync>>,
+ ResBody: Body,
+{
+ type Response = crate::Response<ResBody>;
+ type Error = E;
+ type Future = Ret;
+
+ fn call(&self, req: Request<ReqBody>) -> Self::Future {
+ (self.f)(req)
+ }
+}
+
+impl<F, R> fmt::Debug for ServiceFn<F, R> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("impl Service").finish()
+ }
+}
+
+impl<F, R> Clone for ServiceFn<F, R>
+where
+ F: Clone,
+{
+ fn clone(&self) -> Self {
+ ServiceFn {
+ f: self.f.clone(),
+ _req: PhantomData,
+ }
+ }
+}
+
+impl<F, R> Copy for ServiceFn<F, R> where F: Copy {}
diff --git a/vendor/hyper/src/trace.rs b/vendor/hyper/src/trace.rs
new file mode 100644
index 00000000..88f9a243
--- /dev/null
+++ b/vendor/hyper/src/trace.rs
@@ -0,0 +1,128 @@
+// For completeness, wrappers around all of tracing's public logging and span macros are provided,
+// even if they are not used at the present time.
+#![allow(unused_macros)]
+
// The `tracing` feature is unstable, so it is double-gated: enabling the
// Cargo feature without also passing `--cfg hyper_unstable_tracing` is a
// hard compile error, preventing accidental reliance on it.
#[cfg(all(not(hyper_unstable_tracing), feature = "tracing"))]
compile_error!(
    "\
    The `tracing` feature is unstable, and requires the \
    `RUSTFLAGS='--cfg hyper_unstable_tracing'` environment variable to be set.\
"
);
+
// Event macro wrappers: each expands to the corresponding `tracing` macro when
// the (unstable) `tracing` feature is enabled, and to nothing otherwise.
macro_rules! debug {
    ($($arg:tt)+) => {
        #[cfg(feature = "tracing")]
        {
            tracing::debug!($($arg)+);
        }
    }
}

// Span macro wrappers: with `tracing` enabled they evaluate to the entered
// span guard; with it disabled the outer block expands to `()`.
// NOTE(review): the matchers below accept `$($arg:tt)*` (zero or more tokens)
// but the transcribers repeat `$($arg)+` (one or more), so a zero-token
// invocation would fail at expansion time. Harmless in practice — the
// `tracing` macros need at least a message/name — but inconsistent with
// `debug!` above; confirm against upstream before changing.
macro_rules! debug_span {
    ($($arg:tt)*) => {
        {
            #[cfg(feature = "tracing")]
            {
                let _span = tracing::debug_span!($($arg)+);
                _span.entered()
            }
        }
    }
}

macro_rules! error {
    ($($arg:tt)*) => {
        #[cfg(feature = "tracing")]
        {
            tracing::error!($($arg)+);
        }
    }
}

macro_rules! error_span {
    ($($arg:tt)*) => {
        {
            #[cfg(feature = "tracing")]
            {
                let _span = tracing::error_span!($($arg)+);
                _span.entered()
            }
        }
    }
}

macro_rules! info {
    ($($arg:tt)*) => {
        #[cfg(feature = "tracing")]
        {
            tracing::info!($($arg)+);
        }
    }
}

macro_rules! info_span {
    ($($arg:tt)*) => {
        {
            #[cfg(feature = "tracing")]
            {
                let _span = tracing::info_span!($($arg)+);
                _span.entered()
            }
        }
    }
}

macro_rules! trace {
    ($($arg:tt)*) => {
        #[cfg(feature = "tracing")]
        {
            tracing::trace!($($arg)+);
        }
    }
}

macro_rules! trace_span {
    ($($arg:tt)*) => {
        {
            #[cfg(feature = "tracing")]
            {
                let _span = tracing::trace_span!($($arg)+);
                _span.entered()
            }
        }
    }
}

macro_rules! span {
    ($($arg:tt)*) => {
        {
            #[cfg(feature = "tracing")]
            {
                let _span = tracing::span!($($arg)+);
                _span.entered()
            }
        }
    }
}

macro_rules! warn {
    ($($arg:tt)*) => {
        #[cfg(feature = "tracing")]
        {
            tracing::warn!($($arg)+);
        }
    }
}

macro_rules! warn_span {
    ($($arg:tt)*) => {
        {
            #[cfg(feature = "tracing")]
            {
                let _span = tracing::warn_span!($($arg)+);
                _span.entered()
            }
        }
    }
}
diff --git a/vendor/hyper/src/upgrade.rs b/vendor/hyper/src/upgrade.rs
new file mode 100644
index 00000000..9d23a290
--- /dev/null
+++ b/vendor/hyper/src/upgrade.rs
@@ -0,0 +1,407 @@
+//! HTTP Upgrades
+//!
+//! This module deals with managing [HTTP Upgrades][mdn] in hyper. Since
+//! several concepts in HTTP allow for first talking HTTP, and then converting
+//! to a different protocol, this module conflates them into a single API.
+//! Those include:
+//!
+//! - HTTP/1.1 Upgrades
+//! - HTTP `CONNECT`
+//!
+//! You are responsible for any other pre-requisites to establish an upgrade,
+//! such as sending the appropriate headers, methods, and status codes. You can
+//! then use [`on`][] to grab a `Future` which will resolve to the upgraded
+//! connection object, or an error if the upgrade fails.
+//!
+//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism
+//!
+//! # Client
+//!
+//! Sending an HTTP upgrade from the [`client`](super::client) involves setting
+//! either the appropriate method, if wanting to `CONNECT`, or headers such as
+//! `Upgrade` and `Connection`, on the `http::Request`. Once receiving the
+//! `http::Response` back, you must check for the specific information that the
+//! upgrade is agreed upon by the server (such as a `101` status code), and then
+//! get the `Future` from the `Response`.
+//!
+//! # Server
+//!
+//! Receiving upgrade requests in a server requires you to check the relevant
+//! headers in a `Request`, and if an upgrade should be done, you then send the
+//! corresponding headers in a response. To then wait for hyper to finish the
+//! upgrade, you call `on()` with the `Request`, and then can spawn a task
+//! awaiting it.
+//!
+//! # Example
+//!
+//! See [this example][example] showing how upgrades work with both
+//! Clients and Servers.
+//!
+//! [example]: https://github.com/hyperium/hyper/blob/master/examples/upgrades.rs
+
+use std::any::TypeId;
+use std::error::Error as StdError;
+use std::fmt;
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::{Context, Poll};
+
+use crate::rt::{Read, ReadBufCursor, Write};
+use bytes::Bytes;
+use tokio::sync::oneshot;
+
+use crate::common::io::Rewind;
+
/// An upgraded HTTP connection.
///
/// This type holds a trait object internally of the original IO that
/// was used to speak HTTP before the upgrade. It can be used directly
/// as a [`Read`] or [`Write`] for convenience.
///
/// Alternatively, if the exact type is known, this can be deconstructed
/// into its parts.
pub struct Upgraded {
    // The type-erased original IO, wrapped in `Rewind` so that any bytes the
    // HTTP state machine had already read (see `Parts::read_buf`) are served
    // back to the caller before new reads from the underlying IO.
    io: Rewind<Box<dyn Io + Send>>,
}
+
/// A future for a possible HTTP upgrade.
///
/// If no upgrade was available, or it doesn't succeed, yields an `Error`.
// `Clone` is possible because the oneshot receiver is shared behind
// `Arc<Mutex<_>>`; each poll locks it briefly (see the `Future` impl).
#[derive(Clone)]
pub struct OnUpgrade {
    // `None` means the message carried no pending upgrade (`OnUpgrade::none`),
    // in which case polling resolves to a "no upgrade" error.
    rx: Option<Arc<Mutex<oneshot::Receiver<crate::Result<Upgraded>>>>>,
}
+
/// The deconstructed parts of an [`Upgraded`] type.
///
/// Includes the original IO type, and a read buffer of bytes that the
/// HTTP state machine may have already read before completing an upgrade.
// `#[non_exhaustive]` allows adding fields later without a breaking change.
#[derive(Debug)]
#[non_exhaustive]
pub struct Parts<T> {
    /// The original IO object used before the upgrade.
    pub io: T,
    /// A buffer of bytes that have been read but not processed as HTTP.
    ///
    /// For instance, if the `Connection` is used for an HTTP upgrade request,
    /// it is possible the server sent back the first bytes of the new protocol
    /// along with the response upgrade.
    ///
    /// You will want to check for any existing bytes if you plan to continue
    /// communicating on the IO object.
    pub read_buf: Bytes,
}
+
+/// Gets a pending HTTP upgrade from this message.
+///
+/// This can be called on the following types:
+///
+/// - `http::Request<B>`
+/// - `http::Response<B>`
+/// - `&mut http::Request<B>`
+/// - `&mut http::Response<B>`
+pub fn on<T: sealed::CanUpgrade>(msg: T) -> OnUpgrade {
+ msg.on_upgrade()
+}
+
#[cfg(all(
    any(feature = "client", feature = "server"),
    any(feature = "http1", feature = "http2"),
))]
// Sender half of a pending upgrade, held by the connection; it either
// fulfills the paired `OnUpgrade` or drops, which cancels it.
pub(super) struct Pending {
    tx: oneshot::Sender<crate::Result<Upgraded>>,
}
+
#[cfg(all(
    any(feature = "client", feature = "server"),
    any(feature = "http1", feature = "http2"),
))]
/// Creates a linked pair: the connection keeps `Pending`, the user awaits
/// the `OnUpgrade`.
pub(super) fn pending() -> (Pending, OnUpgrade) {
    let (tx, rx) = oneshot::channel();
    let pending = Pending { tx };
    let on_upgrade = OnUpgrade {
        rx: Some(Arc::new(Mutex::new(rx))),
    };
    (pending, on_upgrade)
}
+
+// ===== impl Upgraded =====
+
impl Upgraded {
    #[cfg(all(
        any(feature = "client", feature = "server"),
        any(feature = "http1", feature = "http2")
    ))]
    // Wraps the connection IO after a successful upgrade. `read_buf` holds
    // bytes the HTTP parser already pulled off the socket; `Rewind` replays
    // them before reading from `io` again.
    pub(super) fn new<T>(io: T, read_buf: Bytes) -> Self
    where
        T: Read + Write + Unpin + Send + 'static,
    {
        Upgraded {
            io: Rewind::new_buffered(Box::new(io), read_buf),
        }
    }

    /// Tries to downcast the internal trait object to the type passed.
    ///
    /// On success, returns the downcasted parts. On error, returns the
    /// `Upgraded` back.
    pub fn downcast<T: Read + Write + Unpin + 'static>(self) -> Result<Parts<T>, Self> {
        let (io, buf) = self.io.into_inner();
        match io.__hyper_downcast() {
            Ok(t) => Ok(Parts {
                io: *t,
                read_buf: buf,
            }),
            // Downcast failed: rebuild the `Upgraded` exactly as it was,
            // re-attaching the rewind buffer, so the caller gets it back intact.
            Err(io) => Err(Upgraded {
                io: Rewind::new_buffered(io, buf),
            }),
        }
    }
}
+
+impl Read for Upgraded {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: ReadBufCursor<'_>,
+ ) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.io).poll_read(cx, buf)
+ }
+}
+
+impl Write for Upgraded {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.io).poll_write(cx, buf)
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[io::IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.io).poll_write_vectored(cx, bufs)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.io).poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.io).poll_shutdown(cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ self.io.is_write_vectored()
+ }
+}
+
+impl fmt::Debug for Upgraded {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Upgraded").finish()
+ }
+}
+
+// ===== impl OnUpgrade =====
+
impl OnUpgrade {
    /// An `OnUpgrade` with no pending upgrade; polling it always resolves
    /// to a "no upgrade" error (see the `Future` impl).
    pub(super) fn none() -> Self {
        OnUpgrade { rx: None }
    }

    #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))]
    /// Whether this is the `none()` placeholder (`true`) or carries a real
    /// pending upgrade receiver (`false`).
    pub(super) fn is_none(&self) -> bool {
        self.rx.is_none()
    }
}
+
impl Future for OnUpgrade {
    type Output = Result<Upgraded, crate::Error>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.rx {
            // The receiver is shared between clones behind `Arc<Mutex<_>>`;
            // the lock is held only for the duration of this poll.
            Some(ref rx) => Pin::new(&mut *rx.lock().unwrap())
                .poll(cx)
                .map(|res| match res {
                    Ok(Ok(upgraded)) => Ok(upgraded),
                    Ok(Err(err)) => Err(err),
                    // Sender dropped without sending: the connection was never
                    // driven to complete the upgrade.
                    Err(_oneshot_canceled) => {
                        Err(crate::Error::new_canceled().with(UpgradeExpected))
                    }
                }),
            // Constructed via `OnUpgrade::none()`: no upgrade was pending.
            None => Poll::Ready(Err(crate::Error::new_user_no_upgrade())),
        }
    }
}
+
+impl fmt::Debug for OnUpgrade {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OnUpgrade").finish()
+ }
+}
+
+// ===== impl Pending =====
+
#[cfg(all(
    any(feature = "client", feature = "server"),
    any(feature = "http1", feature = "http2")
))]
impl Pending {
    /// Completes the upgrade, delivering `upgraded` to whoever awaits the
    /// matching `OnUpgrade`. A dropped receiver is silently ignored.
    pub(super) fn fulfill(self, upgraded: Upgraded) {
        trace!("pending upgrade fulfill");
        let _ = self.tx.send(Ok(upgraded));
    }

    #[cfg(feature = "http1")]
    /// Don't fulfill the pending Upgrade, but instead signal that
    /// upgrades are handled manually.
    pub(super) fn manual(self) {
        // NOTE(review): this inner cfg looks redundant — the enclosing impl
        // and method gates already imply it — but it is harmless.
        #[cfg(any(feature = "http1", feature = "http2"))]
        trace!("pending upgrade handled manually");
        let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade()));
    }
}
+
+// ===== impl UpgradeExpected =====
+
/// Error cause returned when an upgrade was expected but canceled
/// for whatever reason.
///
/// This likely means the actual `Conn` future wasn't polled and upgraded.
#[derive(Debug)]
struct UpgradeExpected;

impl fmt::Display for UpgradeExpected {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "upgrade expected but not completed")
    }
}

impl StdError for UpgradeExpected {}
+
+// ===== impl Io =====
+
// Object-safe alias for `Read + Write` that can also report its concrete
// `TypeId`, which enables the manual downcast on `dyn Io + Send`.
pub(super) trait Io: Read + Write + Unpin + 'static {
    fn __hyper_type_id(&self) -> TypeId {
        // In this default body, `Self` is the concrete implementor's type
        // (the blanket impl below never overrides it).
        TypeId::of::<Self>()
    }
}

impl<T: Read + Write + Unpin + 'static> Io for T {}
+
impl dyn Io + Send {
    // True if the erased concrete type is exactly `T`.
    fn __hyper_is<T: Io>(&self) -> bool {
        let t = TypeId::of::<T>();
        self.__hyper_type_id() == t
    }

    // Hand-rolled equivalent of `Box<dyn Any>::downcast` for this trait.
    fn __hyper_downcast<T: Io>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
        if self.__hyper_is::<T>() {
            // Taken from `std::error::Error::downcast()`.
            // SAFETY: `__hyper_is` just verified (via `TypeId`) that the
            // erased type is exactly `T`, so reinterpreting the data pointer
            // as `*mut T` is sound; ownership moves from the old Box into
            // the new one with no double free.
            unsafe {
                let raw: *mut dyn Io = Box::into_raw(self);
                Ok(Box::from_raw(raw as *mut T))
            }
        } else {
            Err(self)
        }
    }
}
+
+mod sealed {
+ use super::OnUpgrade;
+
+ pub trait CanUpgrade {
+ fn on_upgrade(self) -> OnUpgrade;
+ }
+
+ impl<B> CanUpgrade for http::Request<B> {
+ fn on_upgrade(mut self) -> OnUpgrade {
+ self.extensions_mut()
+ .remove::<OnUpgrade>()
+ .unwrap_or_else(OnUpgrade::none)
+ }
+ }
+
+ impl<B> CanUpgrade for &'_ mut http::Request<B> {
+ fn on_upgrade(self) -> OnUpgrade {
+ self.extensions_mut()
+ .remove::<OnUpgrade>()
+ .unwrap_or_else(OnUpgrade::none)
+ }
+ }
+
+ impl<B> CanUpgrade for http::Response<B> {
+ fn on_upgrade(mut self) -> OnUpgrade {
+ self.extensions_mut()
+ .remove::<OnUpgrade>()
+ .unwrap_or_else(OnUpgrade::none)
+ }
+ }
+
+ impl<B> CanUpgrade for &'_ mut http::Response<B> {
+ fn on_upgrade(self) -> OnUpgrade {
+ self.extensions_mut()
+ .remove::<OnUpgrade>()
+ .unwrap_or_else(OnUpgrade::none)
+ }
+ }
+}
+
#[cfg(all(
    any(feature = "client", feature = "server"),
    any(feature = "http1", feature = "http2"),
))]
#[cfg(test)]
mod tests {
    use super::*;

    // Downcasting to the wrong concrete type must hand the `Upgraded` back
    // (Err); downcasting to the actual type must succeed.
    #[test]
    fn upgraded_downcast() {
        let upgraded = Upgraded::new(Mock, Bytes::new());

        let upgraded = upgraded
            .downcast::<crate::common::io::Compat<std::io::Cursor<Vec<u8>>>>()
            .unwrap_err();

        upgraded.downcast::<Mock>().unwrap();
    }

    // TODO: replace with tokio_test::io when it can test write_buf
    // Minimal IO stub: only `poll_write` is expected to be reachable in the
    // test above; every other method aborts loudly if hit.
    struct Mock;

    impl Read for Mock {
        fn poll_read(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            _buf: ReadBufCursor<'_>,
        ) -> Poll<io::Result<()>> {
            unreachable!("Mock::poll_read")
        }
    }

    impl Write for Mock {
        fn poll_write(
            self: Pin<&mut Self>,
            _: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            // panic!("poll_write shouldn't be called");
            Poll::Ready(Ok(buf.len()))
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            unreachable!("Mock::poll_flush")
        }

        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            unreachable!("Mock::poll_shutdown")
        }
    }
}