Diffstat (limited to 'vendor/hyper/src/client/conn')
-rw-r--r--  vendor/hyper/src/client/conn/http1.rs  611
-rw-r--r--  vendor/hyper/src/client/conn/http2.rs  718
-rw-r--r--  vendor/hyper/src/client/conn/mod.rs     22
3 files changed, 1351 insertions, 0 deletions
diff --git a/vendor/hyper/src/client/conn/http1.rs b/vendor/hyper/src/client/conn/http1.rs
new file mode 100644
index 00000000..ecfe6eb8
--- /dev/null
+++ b/vendor/hyper/src/client/conn/http1.rs
@@ -0,0 +1,611 @@
+//! HTTP/1 client connections
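+//!
+//! ## Example
+//!
+//! A minimal sketch of driving an HTTP/1 connection by hand. The `io` value is
+//! assumed to be some transport that already implements `hyper::rt::Read` and
+//! `hyper::rt::Write` (for example a TCP stream wrapped by an adapter such as
+//! `hyper_util::rt::TokioIo`); obtaining it, and the Tokio runtime used for
+//! spawning, are outside the scope of this module.
+//!
+//! ```ignore
+//! use bytes::Bytes;
+//! use http::{header, Request, StatusCode};
+//! use http_body_util::Empty;
+//! use hyper::client::conn::http1;
+//!
+//! async fn fetch<T>(io: T) -> Result<(), Box<dyn std::error::Error>>
+//! where
+//!     T: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
+//! {
+//!     // Perform the HTTP/1 handshake over the provided transport.
+//!     let (mut sender, conn) = http1::handshake::<_, Empty<Bytes>>(io).await?;
+//!
+//!     // The connection future must be polled for any progress to happen,
+//!     // so hand it off to an executor.
+//!     tokio::task::spawn(async move {
+//!         if let Err(err) = conn.await {
+//!             eprintln!("connection error: {err}");
+//!         }
+//!     });
+//!
+//!     // Requests sent through `SendRequest` must carry a `Host` header.
+//!     let req = Request::builder()
+//!         .uri("/")
+//!         .header(header::HOST, "example.com")
+//!         .body(Empty::<Bytes>::new())?;
+//!
+//!     let res = sender.send_request(req).await?;
+//!     assert_eq!(res.status(), StatusCode::OK);
+//!     Ok(())
+//! }
+//! ```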
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use crate::rt::{Read, Write};
+use bytes::Bytes;
+use futures_util::ready;
+use http::{Request, Response};
+use httparse::ParserConfig;
+
+use super::super::dispatch::{self, TrySendError};
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::proto;
+
+type Dispatcher<T, B> =
+ proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;
+
+/// The sender side of an established connection.
+pub struct SendRequest<B> {
+ dispatch: dispatch::Sender<Request<B>, Response<IncomingBody>>,
+}
+
+/// Deconstructed parts of a `Connection`.
+///
+/// This allows taking apart a `Connection` at a later time, in order to
+/// reclaim the IO object, and additional related pieces.
+#[derive(Debug)]
+#[non_exhaustive]
+pub struct Parts<T> {
+ /// The original IO object used in the handshake.
+ pub io: T,
+ /// A buffer of bytes that have been read but not processed as HTTP.
+ ///
+ /// For instance, if the `Connection` is used for an HTTP upgrade request,
+ /// it is possible the server sent back the first bytes of the new protocol
+ /// along with the response upgrade.
+ ///
+ /// You will want to check for any existing bytes if you plan to continue
+ /// communicating on the IO object.
+ pub read_buf: Bytes,
+}
+
+/// A future that processes all HTTP state for the IO object.
+///
+/// In most cases, this should just be spawned into an executor, so that it
+/// can process incoming and outgoing messages, notice hangups, and the like.
+///
+/// Instances of this type are typically created via the [`handshake`] function.
+#[must_use = "futures do nothing unless polled"]
+pub struct Connection<T, B>
+where
+ T: Read + Write,
+ B: Body + 'static,
+{
+ inner: Dispatcher<T, B>,
+}
+
+impl<T, B> Connection<T, B>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Return the inner IO object, and additional information.
+ ///
+ /// Only works for HTTP/1 connections. HTTP/2 connections will panic.
+ pub fn into_parts(self) -> Parts<T> {
+ let (io, read_buf, _) = self.inner.into_inner();
+ Parts { io, read_buf }
+ }
+
+ /// Poll the connection for completion, but without calling `shutdown`
+ /// on the underlying IO.
+ ///
+ /// This is useful to allow running a connection while doing an HTTP
+ /// upgrade. Once the upgrade is completed, the connection would be "done",
+ /// but it is not desired to actually shut down the IO object. Instead you
+ /// would take it back using `into_parts`.
+ ///
+ /// Use [`poll_fn`](https://doc.rust-lang.org/std/future/fn.poll_fn.html)
+ /// and [`ready!`](https://doc.rust-lang.org/std/task/macro.ready.html)
+ /// to work with this function; or use the `without_shutdown` wrapper.
+ pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ self.inner.poll_without_shutdown(cx)
+ }
+
+ /// Prevent shutdown of the underlying IO object at the end of servicing the
+ /// request; instead, reclaim it with `into_parts`. This is a convenience
+ /// wrapper over `poll_without_shutdown`.
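+ ///
+ /// # Example
+ ///
+ /// A small sketch of the flow this enables (error handling elided); the
+ /// request/response exchange that makes the connection "done" is assumed to
+ /// have happened through the paired `SendRequest`:
+ ///
+ /// ```ignore
+ /// // Drive the connection to completion without shutting down the IO.
+ /// let parts = conn.without_shutdown().await?;
+ /// // `parts.io` is the original transport; `parts.read_buf` holds any bytes
+ /// // that were read but not yet parsed as HTTP.
+ /// let (io, read_buf) = (parts.io, parts.read_buf);
+ /// ```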
+ pub async fn without_shutdown(self) -> crate::Result<Parts<T>> {
+ let mut conn = Some(self);
+ futures_util::future::poll_fn(move |cx| -> Poll<crate::Result<Parts<T>>> {
+ ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
+ Poll::Ready(Ok(conn.take().unwrap().into_parts()))
+ })
+ .await
+ }
+}
+
+/// A builder to configure an HTTP connection.
+///
+/// After setting options, the builder is used to create a handshake future.
+///
+/// **Note**: The default values of options are *not considered stable*. They
+/// are subject to change at any time.
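+///
+/// # Example
+///
+/// A brief sketch of configuring and performing a handshake; `io` is assumed
+/// to be a transport implementing `hyper::rt::Read` and `hyper::rt::Write`:
+///
+/// ```ignore
+/// use bytes::Bytes;
+/// use http_body_util::Empty;
+/// use hyper::client::conn::http1::Builder;
+///
+/// let (sender, conn) = Builder::new()
+///     .title_case_headers(true)
+///     .max_buf_size(64 * 1024)
+///     .handshake::<_, Empty<Bytes>>(io)
+///     .await?;
+/// ```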
+#[derive(Clone, Debug)]
+pub struct Builder {
+ h09_responses: bool,
+ h1_parser_config: ParserConfig,
+ h1_writev: Option<bool>,
+ h1_title_case_headers: bool,
+ h1_preserve_header_case: bool,
+ h1_max_headers: Option<usize>,
+ #[cfg(feature = "ffi")]
+ h1_preserve_header_order: bool,
+ h1_read_buf_exact_size: Option<usize>,
+ h1_max_buf_size: Option<usize>,
+}
+
+/// Returns a handshake future over some IO.
+///
+/// This is a shortcut for `Builder::new().handshake(io)`.
+/// See [`client::conn`](crate::client::conn) for more.
+pub async fn handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ Builder::new().handshake(io).await
+}
+
+// ===== impl SendRequest
+
+impl<B> SendRequest<B> {
+ /// Polls to determine whether this sender can be used yet for a request.
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ self.dispatch.poll_ready(cx)
+ }
+
+ /// Waits until the dispatcher is ready
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
+ /// Checks if the connection is currently ready to send a request.
+ ///
+ /// # Note
+ ///
+ /// This is mostly a hint. Due to inherent latency of networks, it is
+ /// possible that even after checking this is ready, sending a request
+ /// may still fail because the connection was closed in the meantime.
+ pub fn is_ready(&self) -> bool {
+ self.dispatch.is_ready()
+ }
+
+ /// Checks if the connection side has been closed.
+ pub fn is_closed(&self) -> bool {
+ self.dispatch.is_closed()
+ }
+}
+
+impl<B> SendRequest<B>
+where
+ B: Body + 'static,
+{
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that, if successful, yields the `Response`.
+ ///
+ /// `req` must have a `Host` header.
+ ///
+ /// # Uri
+ ///
+ /// The `Uri` of the request is serialized as-is.
+ ///
+ /// - Usually you want origin-form (`/path?query`).
+ /// - For sending to an HTTP proxy, you want to send in absolute-form
+ /// (`https://hyper.rs/guides`).
+ ///
+ /// This is, however, not enforced or validated, and it is up to the user
+ /// of this method to ensure the `Uri` is correct for their intended purpose.
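+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming `sender` was produced by a successful
+ /// [`handshake`] and the paired [`Connection`] is being polled on another
+ /// task:
+ ///
+ /// ```ignore
+ /// use bytes::Bytes;
+ /// use http::{header, Request};
+ /// use http_body_util::Empty;
+ ///
+ /// // Origin-form URI plus an explicit `Host` header.
+ /// let req = Request::builder()
+ ///     .uri("/guides")
+ ///     .header(header::HOST, "hyper.rs")
+ ///     .body(Empty::<Bytes>::new())?;
+ ///
+ /// let res = sender.send_request(req).await?;
+ /// println!("status: {}", res.status());
+ /// ```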
+ pub fn send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
+ let sent = self.dispatch.send(req);
+
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(resp)) => Ok(resp),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_canceled) => panic!("dispatch dropped without returning error"),
+ },
+ Err(_req) => {
+ debug!("connection was not ready");
+ Err(crate::Error::new_canceled().with("connection was not ready"))
+ }
+ }
+ }
+ }
+
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that, if successful, yields the `Response`.
+ ///
+ /// # Error
+ ///
+ /// If there was an error before trying to serialize the request to the
+ /// connection, the message will be returned as part of this error.
+ pub fn try_send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = Result<Response<IncomingBody>, TrySendError<Request<B>>>> {
+ let sent = self.dispatch.try_send(req);
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(res)) => Ok(res),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_) => panic!("dispatch dropped without returning error"),
+ },
+ Err(req) => {
+ debug!("connection was not ready");
+ let error = crate::Error::new_canceled().with("connection was not ready");
+ Err(TrySendError {
+ error,
+ message: Some(req),
+ })
+ }
+ }
+ }
+ }
+}
+
+impl<B> fmt::Debug for SendRequest<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SendRequest").finish()
+ }
+}
+
+// ===== impl Connection
+
+impl<T, B> Connection<T, B>
+where
+ T: Read + Write + Unpin + Send,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Enable this connection to support higher-level HTTP upgrades.
+ ///
+ /// See [the `upgrade` module](crate::upgrade) for more.
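+ ///
+ /// # Example
+ ///
+ /// A rough sketch, assuming the upgrade (for example a `101 Switching
+ /// Protocols` exchange) is negotiated through the paired `SendRequest`:
+ ///
+ /// ```ignore
+ /// let conn = conn.with_upgrades();
+ /// tokio::task::spawn(async move {
+ ///     if let Err(err) = conn.await {
+ ///         eprintln!("connection error: {err}");
+ ///     }
+ /// });
+ ///
+ /// // Once a response accepting the upgrade has been received:
+ /// let upgraded = hyper::upgrade::on(response).await?;
+ /// ```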
+ pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<T, B> {
+ upgrades::UpgradeableConnection { inner: Some(self) }
+ }
+}
+
+impl<T, B> fmt::Debug for Connection<T, B>
+where
+ T: Read + Write + fmt::Debug,
+ B: Body + 'static,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
+impl<T, B> Future for Connection<T, B>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.inner).poll(cx))? {
+ proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
+ proto::Dispatched::Upgrade(pending) => {
+ // With no `Send` bound on `I`, we can't try to do
+ // upgrades here. In case a user was trying to use
+ // `upgrade` with this API, send a special
+ // error letting them know about that.
+ pending.manual();
+ Poll::Ready(Ok(()))
+ }
+ }
+ }
+}
+
+// ===== impl Builder
+
+impl Builder {
+ /// Creates a new connection builder.
+ #[inline]
+ pub fn new() -> Builder {
+ Builder {
+ h09_responses: false,
+ h1_writev: None,
+ h1_read_buf_exact_size: None,
+ h1_parser_config: Default::default(),
+ h1_title_case_headers: false,
+ h1_preserve_header_case: false,
+ h1_max_headers: None,
+ #[cfg(feature = "ffi")]
+ h1_preserve_header_order: false,
+ h1_max_buf_size: None,
+ }
+ }
+
+ /// Set whether HTTP/0.9 responses should be tolerated.
+ ///
+ /// Default is false.
+ pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h09_responses = enabled;
+ self
+ }
+
+ /// Set whether HTTP/1 connections will accept spaces between header names
+ /// and the colon that follows them in responses.
+ ///
+ /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
+ /// to say about it:
+ ///
+ /// > No whitespace is allowed between the header field-name and colon. In
+ /// > the past, differences in the handling of such whitespace have led to
+ /// > security vulnerabilities in request routing and response handling. A
+ /// > server MUST reject any received request message that contains
+ /// > whitespace between a header field-name and colon with a response code
+ /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
+ /// > response message before forwarding the message downstream.
+ ///
+ /// Default is false.
+ ///
+ /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ pub fn allow_spaces_after_header_name_in_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_parser_config
+ .allow_spaces_after_header_name_in_responses(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections will accept obsolete line folding for
+ /// header values.
+ ///
+ /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
+ /// parsing.
+ ///
+ /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
+ /// to say about it:
+ ///
+ /// > A server that receives an obs-fold in a request message that is not
+ /// > within a message/http container MUST either reject the message by
+ /// > sending a 400 (Bad Request), preferably with a representation
+ /// > explaining that obsolete line folding is unacceptable, or replace
+ /// > each received obs-fold with one or more SP octets prior to
+ /// > interpreting the field value or forwarding the message downstream.
+ ///
+ /// > A proxy or gateway that receives an obs-fold in a response message
+ /// > that is not within a message/http container MUST either discard the
+ /// > message and replace it with a 502 (Bad Gateway) response, preferably
+ /// > with a representation explaining that unacceptable line folding was
+ /// > received, or replace each received obs-fold with one or more SP
+ /// > octets prior to interpreting the field value or forwarding the
+ /// > message downstream.
+ ///
+ /// > A user agent that receives an obs-fold in a response message that is
+ /// > not within a message/http container MUST replace each received
+ /// > obs-fold with one or more SP octets prior to interpreting the field
+ /// > value.
+ ///
+ /// Default is false.
+ ///
+ /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ pub fn allow_obsolete_multiline_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_parser_config
+ .allow_obsolete_multiline_headers_in_responses(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections will silently ignore malformed header lines.
+ ///
+ /// If this is enabled and a header line does not start with a valid header
+ /// name, or does not include a colon at all, the line will be silently ignored
+ /// and no error will be reported.
+ ///
+ /// Default is false.
+ pub fn ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_parser_config
+ .ignore_invalid_headers_in_responses(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections should try to use vectored writes,
+ /// or always flatten into a single buffer.
+ ///
+ /// Note that setting this to false may mean more copies of body data,
+ /// but may also improve performance when an IO transport doesn't
+ /// support vectored writes well, such as most TLS implementations.
+ ///
+ /// Setting this to true will force hyper to use the queued write strategy,
+ /// which may eliminate unnecessary cloning on some TLS backends.
+ ///
+ /// Default is `auto`, in which case hyper will try to guess which strategy
+ /// to use.
+ pub fn writev(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_writev = Some(enabled);
+ self
+ }
+
+ /// Set whether HTTP/1 connections will write header names as title case at
+ /// the socket level.
+ ///
+ /// Default is false.
+ pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_title_case_headers = enabled;
+ self
+ }
+
+ /// Set whether to support preserving original header cases.
+ ///
+ /// Currently, this will record the original cases received, and store them
+ /// in a private extension on the `Response`. It will also look for and use
+ /// such an extension in any provided `Request`.
+ ///
+ /// Since the relevant extension is still private, there is no way to
+ /// interact with the original cases. The only effect this can have now is
+ /// to forward the cases in a proxy-like fashion.
+ ///
+ /// Default is false.
+ pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_preserve_header_case = enabled;
+ self
+ }
+
+ /// Set the maximum number of headers.
+ ///
+ /// When a response is received, the parser will reserve a buffer to store headers for optimal
+ /// performance.
+ ///
+ /// If the client receives more headers than this buffer can hold, the error
+ /// "message header too large" is returned.
+ ///
+ /// Note that headers are allocated on the stack by default, which is faster. After
+ /// setting this value, headers will be allocated in heap memory, that is, a heap
+ /// allocation will occur for each response, costing roughly 5% in performance.
+ ///
+ /// Default is 100.
+ pub fn max_headers(&mut self, val: usize) -> &mut Self {
+ self.h1_max_headers = Some(val);
+ self
+ }
+
+ /// Set whether to support preserving original header order.
+ ///
+ /// Currently, this will record the order in which headers are received, and store this
+ /// ordering in a private extension on the `Response`. It will also look for and use
+ /// such an extension in any provided `Request`.
+ ///
+ /// Default is false.
+ #[cfg(feature = "ffi")]
+ pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_preserve_header_order = enabled;
+ self
+ }
+
+ /// Sets the exact size of the read buffer to *always* use.
+ ///
+ /// Note that setting this option unsets the `max_buf_size` option.
+ ///
+ /// Default is an adaptive read buffer.
+ pub fn read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
+ self.h1_read_buf_exact_size = sz;
+ self.h1_max_buf_size = None;
+ self
+ }
+
+ /// Set the maximum buffer size for the connection.
+ ///
+ /// Default is ~400kb.
+ ///
+ /// Note that setting this option unsets the `read_buf_exact_size` option.
+ ///
+ /// # Panics
+ ///
+ /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
+ pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
+ assert!(
+ max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
+ "the max_buf_size cannot be smaller than the minimum that h1 specifies."
+ );
+
+ self.h1_max_buf_size = Some(max);
+ self.h1_read_buf_exact_size = None;
+ self
+ }
+
+ /// Constructs a connection with the configured options and IO.
+ /// See [`client::conn`](crate::client::conn) for more.
+ ///
+ /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
+ /// do nothing.
+ pub fn handshake<T, B>(
+ &self,
+ io: T,
+ ) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
+ where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ let opts = self.clone();
+
+ async move {
+ trace!("client handshake HTTP/1");
+
+ let (tx, rx) = dispatch::channel();
+ let mut conn = proto::Conn::new(io);
+ conn.set_h1_parser_config(opts.h1_parser_config);
+ if let Some(writev) = opts.h1_writev {
+ if writev {
+ conn.set_write_strategy_queue();
+ } else {
+ conn.set_write_strategy_flatten();
+ }
+ }
+ if opts.h1_title_case_headers {
+ conn.set_title_case_headers();
+ }
+ if opts.h1_preserve_header_case {
+ conn.set_preserve_header_case();
+ }
+ if let Some(max_headers) = opts.h1_max_headers {
+ conn.set_http1_max_headers(max_headers);
+ }
+ #[cfg(feature = "ffi")]
+ if opts.h1_preserve_header_order {
+ conn.set_preserve_header_order();
+ }
+
+ if opts.h09_responses {
+ conn.set_h09_responses();
+ }
+
+ if let Some(sz) = opts.h1_read_buf_exact_size {
+ conn.set_read_buf_exact_size(sz);
+ }
+ if let Some(max) = opts.h1_max_buf_size {
+ conn.set_max_buf_size(max);
+ }
+ let cd = proto::h1::dispatch::Client::new(rx);
+ let proto = proto::h1::Dispatcher::new(cd, conn);
+
+ Ok((SendRequest { dispatch: tx }, Connection { inner: proto }))
+ }
+ }
+}
+
+mod upgrades {
+ use crate::upgrade::Upgraded;
+
+ use super::*;
+
+ // A future binding a connection with a Service with Upgrade support.
+ //
+ // This type is unnameable outside the crate.
+ #[must_use = "futures do nothing unless polled"]
+ #[allow(missing_debug_implementations)]
+ pub struct UpgradeableConnection<T, B>
+ where
+ T: Read + Write + Unpin + Send + 'static,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ pub(super) inner: Option<Connection<T, B>>,
+ }
+
+ impl<I, B> Future for UpgradeableConnection<I, B>
+ where
+ I: Read + Write + Unpin + Send + 'static,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.inner.as_mut().unwrap().inner).poll(cx)) {
+ Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),
+ Ok(proto::Dispatched::Upgrade(pending)) => {
+ let Parts { io, read_buf } = self.inner.take().unwrap().into_parts();
+ pending.fulfill(Upgraded::new(io, read_buf));
+ Poll::Ready(Ok(()))
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+ }
+}
diff --git a/vendor/hyper/src/client/conn/http2.rs b/vendor/hyper/src/client/conn/http2.rs
new file mode 100644
index 00000000..3db28957
--- /dev/null
+++ b/vendor/hyper/src/client/conn/http2.rs
@@ -0,0 +1,718 @@
+//! HTTP/2 client connections
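+//!
+//! ## Example
+//!
+//! A minimal sketch of opening an HTTP/2 connection. Both `exec` and `io` are
+//! placeholders here: `exec` stands for an executor satisfying the
+//! [`Http2ClientConnExec`](crate::rt::bounds::Http2ClientConnExec) bound
+//! (hyper-util's Tokio executor is one such), and `io` for a transport
+//! implementing `hyper::rt::Read` and `hyper::rt::Write`.
+//!
+//! ```ignore
+//! use bytes::Bytes;
+//! use http::{header, Request};
+//! use http_body_util::Empty;
+//! use hyper::client::conn::http2;
+//!
+//! let (mut sender, conn) = http2::handshake::<_, _, Empty<Bytes>>(exec, io).await?;
+//!
+//! // Drive the connection on its own task.
+//! tokio::task::spawn(async move {
+//!     if let Err(err) = conn.await {
+//!         eprintln!("connection error: {err}");
+//!     }
+//! });
+//!
+//! // Unlike HTTP/1, `SendRequest` is `Clone`, so it can be shared across
+//! // tasks to multiplex many requests over this one connection.
+//! let req = Request::builder()
+//!     .uri("https://hyper.rs/guides")
+//!     .header(header::HOST, "hyper.rs")
+//!     .body(Empty::<Bytes>::new())?;
+//! let res = sender.send_request(req).await?;
+//! println!("status: {}", res.status());
+//! ```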
+
+use std::error::Error;
+use std::fmt;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use crate::rt::{Read, Write};
+use futures_util::ready;
+use http::{Request, Response};
+
+use super::super::dispatch::{self, TrySendError};
+use crate::body::{Body, Incoming as IncomingBody};
+use crate::common::time::Time;
+use crate::proto;
+use crate::rt::bounds::Http2ClientConnExec;
+use crate::rt::Timer;
+
+/// The sender side of an established connection.
+pub struct SendRequest<B> {
+ dispatch: dispatch::UnboundedSender<Request<B>, Response<IncomingBody>>,
+}
+
+impl<B> Clone for SendRequest<B> {
+ fn clone(&self) -> SendRequest<B> {
+ SendRequest {
+ dispatch: self.dispatch.clone(),
+ }
+ }
+}
+
+/// A future that processes all HTTP state for the IO object.
+///
+/// In most cases, this should just be spawned into an executor, so that it
+/// can process incoming and outgoing messages, notice hangups, and the like.
+///
+/// Instances of this type are typically created via the [`handshake`] function.
+#[must_use = "futures do nothing unless polled"]
+pub struct Connection<T, B, E>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ E: Http2ClientConnExec<B, T> + Unpin,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+{
+ inner: (PhantomData<T>, proto::h2::ClientTask<B, E, T>),
+}
+
+/// A builder to configure an HTTP connection.
+///
+/// After setting options, the builder is used to create a handshake future.
+///
+/// **Note**: The default values of options are *not considered stable*. They
+/// are subject to change at any time.
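+///
+/// # Example
+///
+/// A brief sketch of configuring the builder; `exec` and `io` are assumed to
+/// satisfy the bounds documented on [`Builder::handshake`]:
+///
+/// ```ignore
+/// use bytes::Bytes;
+/// use http_body_util::Empty;
+/// use hyper::client::conn::http2::Builder;
+///
+/// let (sender, conn) = Builder::new(exec)
+///     .adaptive_window(true)
+///     .max_header_list_size(16 * 1024)
+///     .handshake::<_, Empty<Bytes>>(io)
+///     .await?;
+/// ```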
+#[derive(Clone, Debug)]
+pub struct Builder<Ex> {
+ pub(super) exec: Ex,
+ pub(super) timer: Time,
+ h2_builder: proto::h2::client::Config,
+}
+
+/// Returns a handshake future over some IO.
+///
+/// This is a shortcut for `Builder::new(exec).handshake(io)`.
+/// See [`client::conn`](crate::client::conn) for more.
+pub async fn handshake<E, T, B>(
+ exec: E,
+ io: T,
+) -> crate::Result<(SendRequest<B>, Connection<T, B, E>)>
+where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ E: Http2ClientConnExec<B, T> + Unpin + Clone,
+{
+ Builder::new(exec).handshake(io).await
+}
+
+// ===== impl SendRequest
+
+impl<B> SendRequest<B> {
+ /// Polls to determine whether this sender can be used yet for a request.
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
+ if self.is_closed() {
+ Poll::Ready(Err(crate::Error::new_closed()))
+ } else {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ /// Waits until the dispatcher is ready
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
+ /// Checks if the connection is currently ready to send a request.
+ ///
+ /// # Note
+ ///
+ /// This is mostly a hint. Due to inherent latency of networks, it is
+ /// possible that even after checking this is ready, sending a request
+ /// may still fail because the connection was closed in the meantime.
+ pub fn is_ready(&self) -> bool {
+ self.dispatch.is_ready()
+ }
+
+ /// Checks if the connection side has been closed.
+ pub fn is_closed(&self) -> bool {
+ self.dispatch.is_closed()
+ }
+}
+
+impl<B> SendRequest<B>
+where
+ B: Body + 'static,
+{
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that, if successful, yields the `Response`.
+ ///
+ /// `req` must have a `Host` header.
+ ///
+ /// Absolute-form `Uri`s are not required. If received, they will be serialized
+ /// as-is.
+ pub fn send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
+ let sent = self.dispatch.send(req);
+
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(resp)) => Ok(resp),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_canceled) => panic!("dispatch dropped without returning error"),
+ },
+ Err(_req) => {
+ debug!("connection was not ready");
+
+ Err(crate::Error::new_canceled().with("connection was not ready"))
+ }
+ }
+ }
+ }
+
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that, if successful, yields the `Response`.
+ ///
+ /// # Error
+ ///
+ /// If there was an error before trying to serialize the request to the
+ /// connection, the message will be returned as part of this error.
+ pub fn try_send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = Result<Response<IncomingBody>, TrySendError<Request<B>>>> {
+ let sent = self.dispatch.try_send(req);
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(res)) => Ok(res),
+ Ok(Err(err)) => Err(err),
+ // this is a definite bug if it happens, but it shouldn't happen!
+ Err(_) => panic!("dispatch dropped without returning error"),
+ },
+ Err(req) => {
+ debug!("connection was not ready");
+ let error = crate::Error::new_canceled().with("connection was not ready");
+ Err(TrySendError {
+ error,
+ message: Some(req),
+ })
+ }
+ }
+ }
+ }
+}
+
+impl<B> fmt::Debug for SendRequest<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SendRequest").finish()
+ }
+}
+
+// ===== impl Connection
+
+impl<T, B, E> Connection<T, B, E>
+where
+ T: Read + Write + Unpin + 'static,
+ B: Body + Unpin + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ E: Http2ClientConnExec<B, T> + Unpin,
+{
+ /// Returns whether the [extended CONNECT protocol][1] is enabled or not.
+ ///
+ /// This setting is configured by the server peer by sending the
+ /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
+ /// This method returns the currently acknowledged value received from the
+ /// remote.
+ ///
+ /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+ /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
+ pub fn is_extended_connect_protocol_enabled(&self) -> bool {
+ self.inner.1.is_extended_connect_protocol_enabled()
+ }
+}
+
+impl<T, B, E> fmt::Debug for Connection<T, B, E>
+where
+ T: Read + Write + fmt::Debug + 'static + Unpin,
+ B: Body + 'static,
+ E: Http2ClientConnExec<B, T> + Unpin,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
+impl<T, B, E> Future for Connection<T, B, E>
+where
+ T: Read + Write + Unpin + 'static,
+ B: Body + 'static + Unpin,
+ B::Data: Send,
+ E: Unpin,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ E: Http2ClientConnExec<B, T> + Unpin,
+{
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.inner.1).poll(cx))? {
+ proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
+ #[cfg(feature = "http1")]
+ proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"),
+ }
+ }
+}
+
+// ===== impl Builder
+
+impl<Ex> Builder<Ex>
+where
+ Ex: Clone,
+{
+ /// Creates a new connection builder.
+ #[inline]
+ pub fn new(exec: Ex) -> Builder<Ex> {
+ Builder {
+ exec,
+ timer: Time::Empty,
+ h2_builder: Default::default(),
+ }
+ }
+
+ /// Provide a timer to execute background HTTP2 tasks.
+ pub fn timer<M>(&mut self, timer: M) -> &mut Builder<Ex>
+ where
+ M: Timer + Send + Sync + 'static,
+ {
+ self.timer = Time::Timer(Arc::new(timer));
+ self
+ }
+
+ /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
+ /// stream-level flow control.
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ ///
+ /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE
+ pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ if let Some(sz) = sz.into() {
+ self.h2_builder.adaptive_window = false;
+ self.h2_builder.initial_stream_window_size = sz;
+ }
+ self
+ }
+
+ /// Sets the max connection-level flow control for HTTP2
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ pub fn initial_connection_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ if let Some(sz) = sz.into() {
+ self.h2_builder.adaptive_window = false;
+ self.h2_builder.initial_conn_window_size = sz;
+ }
+ self
+ }
+
+ /// Sets the initial maximum of locally initiated (send) streams.
+ ///
+ /// This value will be overwritten by the value included in the initial
+ /// SETTINGS frame received from the peer as part of a [connection preface].
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ ///
+ /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface
+ pub fn initial_max_send_streams(&mut self, initial: impl Into<Option<usize>>) -> &mut Self {
+ if let Some(initial) = initial.into() {
+ self.h2_builder.initial_max_send_streams = initial;
+ }
+ self
+ }
+
+ /// Sets whether to use an adaptive flow control.
+ ///
+ /// Enabling this will override the limits set in
+ /// `initial_stream_window_size` and
+ /// `initial_connection_window_size`.
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
+ use proto::h2::SPEC_WINDOW_SIZE;
+
+ self.h2_builder.adaptive_window = enabled;
+ if enabled {
+ self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
+ self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
+ }
+ self
+ }
+
+ /// Sets the maximum frame size to use for HTTP2.
+ ///
+ /// Default is currently 16KB, but can change.
+ pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ self.h2_builder.max_frame_size = sz.into();
+ self
+ }
+
+ /// Sets the max size of received header frames.
+ ///
+ /// Default is currently 16KB, but can change.
+ pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
+ self.h2_builder.max_header_list_size = max;
+ self
+ }
+
+ /// Sets the header table size.
+ ///
+ /// This setting informs the peer of the maximum size of the header compression
+ /// table used to encode header blocks, in octets. The encoder may select any value
+ /// equal to or less than the header table size specified by the sender.
+ ///
+ /// The default value of crate `h2` is 4,096.
+ pub fn header_table_size(&mut self, size: impl Into<Option<u32>>) -> &mut Self {
+ self.h2_builder.header_table_size = size.into();
+ self
+ }
+
+ /// Sets the maximum number of concurrent streams.
+ ///
+ /// The maximum concurrent streams setting only controls the maximum number
+ /// of streams that can be initiated by the remote peer. In other words,
+ /// when this setting is set to 100, this does not limit the number of
+ /// concurrent streams that can be created by the caller.
+ ///
+ /// It is recommended that this value be no smaller than 100, so as to not
+ /// unnecessarily limit parallelism. However, any value is legal, including
+ /// 0. If `max` is set to 0, then the remote will not be permitted to
+ /// initiate streams.
+ ///
+ /// Note that streams in the reserved state, i.e., push promises that have
+ /// been reserved but the stream has not started, do not count against this
+ /// setting.
+ ///
+ /// Also note that if the remote *does* exceed the value set here, it is not
+ /// a protocol level error. Instead, the `h2` library will immediately reset
+ /// the stream.
+ ///
+ /// See [Section 5.1.2] in the HTTP/2 spec for more details.
+ ///
+ /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
+ pub fn max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
+ self.h2_builder.max_concurrent_streams = max.into();
+ self
+ }
+
+ /// Sets the interval at which HTTP2 Ping frames should be sent to keep a
+ /// connection alive.
+ ///
+ /// Pass `None` to disable HTTP2 keep-alive.
+ ///
+ /// Default is currently disabled.
+ pub fn keep_alive_interval(&mut self, interval: impl Into<Option<Duration>>) -> &mut Self {
+ self.h2_builder.keep_alive_interval = interval.into();
+ self
+ }
+
+ /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
+ ///
+ /// If the ping is not acknowledged within the timeout, the connection will
+ /// be closed. Does nothing if `keep_alive_interval` is disabled.
+ ///
+ /// Default is 20 seconds.
+ pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
+ self.h2_builder.keep_alive_timeout = timeout;
+ self
+ }
+
+ /// Sets whether HTTP2 keep-alive should apply while the connection is idle.
+ ///
+ /// If disabled, keep-alive pings are only sent while there are open
+ /// request/response streams. If enabled, pings are also sent when no
+ /// streams are active. Does nothing if `keep_alive_interval` is
+ /// disabled.
+ ///
+ /// Default is `false`.
+ pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
+ self.h2_builder.keep_alive_while_idle = enabled;
+ self
+ }
+
+ /// Sets the maximum number of HTTP2 concurrent locally reset streams.
+ ///
+ /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more
+ /// details.
+ ///
+ /// The default value is determined by the `h2` crate.
+ ///
+ /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
+ pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
+ self.h2_builder.max_concurrent_reset_streams = Some(max);
+ self
+ }
+
+ /// Set the maximum write buffer size for each HTTP/2 stream.
+ ///
+ /// Default is currently 1MB, but may change.
+ ///
+ /// # Panics
+ ///
+ /// The value must be no larger than `u32::MAX`.
+ pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
+ assert!(max <= u32::MAX as usize);
+ self.h2_builder.max_send_buffer_size = max;
+ self
+ }
+
+ /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent.
+ ///
+ /// This will default to the value chosen by the [`h2` crate](https://crates.io/crates/h2).
+ /// As of `h2` v0.4.0, that default is 20.
+ ///
+ /// See <https://github.com/hyperium/hyper/issues/2877> for more information.
+ pub fn max_pending_accept_reset_streams(&mut self, max: impl Into<Option<usize>>) -> &mut Self {
+ self.h2_builder.max_pending_accept_reset_streams = max.into();
+ self
+ }
+
+ /// Constructs a connection with the configured options and IO.
+ /// See [`client::conn`](crate::client::conn) for more.
+ ///
+ /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
+ /// do nothing.
+ pub fn handshake<T, B>(
+ &self,
+ io: T,
+ ) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B, Ex>)>>
+ where
+ T: Read + Write + Unpin,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
+ Ex: Http2ClientConnExec<B, T> + Unpin,
+ {
+ let opts = self.clone();
+
+ async move {
+ trace!("client handshake HTTP/2");
+
+ let (tx, rx) = dispatch::channel();
+ let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec, opts.timer)
+ .await?;
+ Ok((
+ SendRequest {
+ dispatch: tx.unbound(),
+ },
+ Connection {
+ inner: (PhantomData, h2),
+ },
+ ))
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_sync_executor_of_non_send_futures() {
+ #[derive(Clone)]
+ struct LocalTokioExecutor;
+
+ impl<F> crate::rt::Executor<F> for LocalTokioExecutor
+ where
+ F: std::future::Future + 'static, // not requiring `Send`
+ {
+ fn execute(&self, fut: F) {
+ // This will spawn into the currently running `LocalSet`.
+ tokio::task::spawn_local(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) {
+ let (_sender, conn) = crate::client::conn::http2::handshake::<
+ _,
+ _,
+ http_body_util::Empty<bytes::Bytes>,
+ >(LocalTokioExecutor, io)
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn not_send_not_sync_executor_of_not_send_futures() {
+ #[derive(Clone)]
+ struct LocalTokioExecutor {
+ _x: std::marker::PhantomData<std::rc::Rc<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for LocalTokioExecutor
+ where
+ F: std::future::Future + 'static, // not requiring `Send`
+ {
+ fn execute(&self, fut: F) {
+ // This will spawn into the currently running `LocalSet`.
+ tokio::task::spawn_local(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ LocalTokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_not_sync_executor_of_not_send_futures() {
+ #[derive(Clone)]
+ struct LocalTokioExecutor {
+ _x: std::marker::PhantomData<std::cell::Cell<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for LocalTokioExecutor
+ where
+ F: std::future::Future + 'static, // not requiring `Send`
+ {
+ fn execute(&self, fut: F) {
+ // This will spawn into the currently running `LocalSet`.
+ tokio::task::spawn_local(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ LocalTokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_sync_executor_of_send_futures() {
+ #[derive(Clone)]
+ struct TokioExecutor;
+
+ impl<F> crate::rt::Executor<F> for TokioExecutor
+ where
+ F: std::future::Future + 'static + Send,
+ F::Output: Send + 'static,
+ {
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) {
+ let (_sender, conn) = crate::client::conn::http2::handshake::<
+ _,
+ _,
+ http_body_util::Empty<bytes::Bytes>,
+ >(TokioExecutor, io)
+ .await
+ .unwrap();
+
+ tokio::task::spawn(async move {
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn not_send_not_sync_executor_of_send_futures() {
+ #[derive(Clone)]
+ struct TokioExecutor {
+ // !Send, !Sync
+ _x: std::marker::PhantomData<std::rc::Rc<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for TokioExecutor
+ where
+ F: std::future::Future + 'static + Send,
+ F::Output: Send + 'static,
+ {
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ TokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ // can't use `spawn` here because the executor is `!Send`
+ conn.await.unwrap();
+ });
+ }
+ }
+
+ #[tokio::test]
+ #[ignore] // only compilation is checked
+ async fn send_not_sync_executor_of_send_futures() {
+ #[derive(Clone)]
+ struct TokioExecutor {
+ // !Sync
+ _x: std::marker::PhantomData<std::cell::Cell<()>>,
+ }
+
+ impl<F> crate::rt::Executor<F> for TokioExecutor
+ where
+ F: std::future::Future + 'static + Send,
+ F::Output: Send + 'static,
+ {
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+ }
+
+ #[allow(unused)]
+ async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) {
+ let (_sender, conn) =
+ crate::client::conn::http2::handshake::<_, _, http_body_util::Empty<bytes::Bytes>>(
+ TokioExecutor {
+ _x: Default::default(),
+ },
+ io,
+ )
+ .await
+ .unwrap();
+
+ tokio::task::spawn_local(async move {
+ // can't use `spawn` here because the executor is `!Send`
+ conn.await.unwrap();
+ });
+ }
+ }
+}
diff --git a/vendor/hyper/src/client/conn/mod.rs b/vendor/hyper/src/client/conn/mod.rs
new file mode 100644
index 00000000..f982ae6d
--- /dev/null
+++ b/vendor/hyper/src/client/conn/mod.rs
@@ -0,0 +1,22 @@
+//! Lower-level client connection API.
+//!
+//! The types in this module are to provide a lower-level API based around a
+//! single connection. Connecting to a host, pooling connections, and the like
+//! are not handled at this level. This module provides the building blocks to
+//! customize those things externally.
+//!
+//! If you are looking for a convenient HTTP client, then you may wish to
+//! consider [reqwest](https://github.com/seanmonstar/reqwest) for a high level
+//! client or [`hyper-util`'s client](https://docs.rs/hyper-util/latest/hyper_util/client/index.html)
+//! if you want to keep it more low level / basic.
+//!
+//! ## Example
+//!
+//! See the [client guide](https://hyper.rs/guides/1/client/basic/).
+
+#[cfg(feature = "http1")]
+pub mod http1;
+#[cfg(feature = "http2")]
+pub mod http2;
+
+pub use super::dispatch::TrySendError;