//! HTTP/1 client connections

use std::error::Error as StdError;
use std::fmt;
use std::future::Future;
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::Bytes;
use http::{Request, Response};
use httparse::ParserConfig;
use tokio::io::{AsyncRead, AsyncWrite};

use super::super::dispatch;
use crate::body::{Body as IncomingBody, HttpBody as Body};
use crate::proto;
use crate::upgrade::Upgraded;

type Dispatcher<T, B> =
    proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;

/// The sender side of an established connection.
pub struct SendRequest<B> {
    dispatch: dispatch::Sender<Request<B>, Response<IncomingBody>>,
}

/// Deconstructed parts of a `Connection`.
///
/// This allows taking apart a `Connection` at a later time, in order to
/// reclaim the IO object, and additional related pieces.
#[derive(Debug)]
pub struct Parts<T> {
    /// The original IO object used in the handshake.
    pub io: T,
    /// A buffer of bytes that have been read but not processed as HTTP.
    ///
    /// For instance, if the `Connection` is used for an HTTP upgrade request,
    /// it is possible the server sent back the first bytes of the new protocol
    /// along with the response upgrade.
    ///
    /// You will want to check for any existing bytes if you plan to continue
    /// communicating on the IO object.
    pub read_buf: Bytes,
    _inner: (),
}

/// A future that processes all HTTP state for the IO object.
///
/// In most cases, this should just be spawned into an executor, so that it
/// can process incoming and outgoing messages, notice hangups, and the like.
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, B>
where
    T: AsyncRead + AsyncWrite + Send + 'static,
    B: Body + 'static,
{
    inner: Option<Dispatcher<T, B>>,
}

impl<T, B> Connection<T, B>
where
    T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    /// Return the inner IO object, and additional information.
    ///
    /// Only works for HTTP/1 connections. HTTP/2 connections will panic.
    pub fn into_parts(self) -> Parts<T> {
        let (io, read_buf, _) = self.inner.expect("already upgraded").into_inner();
        Parts {
            io,
            read_buf,
            _inner: (),
        }
    }

    /// Poll the connection for completion, but without calling `shutdown`
    /// on the underlying IO.
    ///
    /// This is useful to allow running a connection while doing an HTTP
    /// upgrade. Once the upgrade is completed, the connection would be "done",
    /// but it is not desired to actually shutdown the IO object. Instead you
    /// would take it back using `into_parts`.
    ///
    /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
    /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
    /// to work with this function; or use the `without_shutdown` wrapper.
    pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
        self.inner
            .as_mut()
            .expect("already upgraded")
            .poll_without_shutdown(cx)
    }

    /// Prevent shutdown of the underlying IO object at the end of servicing the
    /// request; instead, run `into_parts`. This is a convenience wrapper over
    /// `poll_without_shutdown`.
    pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<T>>> {
        let mut conn = Some(self);
        futures_util::future::poll_fn(move |cx| -> Poll<crate::Result<Parts<T>>> {
            ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
            Poll::Ready(Ok(conn.take().unwrap().into_parts()))
        })
    }
}
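// A minimal usage sketch (illustrative only, not part of hyper's API): given a
// `Connection` whose request/response exchange is being driven to completion
// elsewhere, `without_shutdown` finishes the HTTP state machine and hands back the
// IO object plus any bytes that were read past the end of the HTTP message.
#[allow(dead_code)]
async fn example_reclaim_io<T, B>(conn: Connection<T, B>) -> crate::Result<(T, Bytes)>
where
    T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    // Run the connection, but skip the final `shutdown` call on the IO object.
    let parts = conn.without_shutdown().await?;

    // `read_buf` may already contain the first bytes of a new protocol that
    // arrived together with an upgrade response.
    Ok((parts.io, parts.read_buf))
}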
/// A builder to configure an HTTP connection.
///
/// After setting options, the builder is used to create a handshake future.
#[derive(Clone, Debug)]
pub struct Builder {
    h09_responses: bool,
    h1_parser_config: ParserConfig,
    h1_writev: Option<bool>,
    h1_title_case_headers: bool,
    h1_preserve_header_case: bool,
    #[cfg(feature = "ffi")]
    h1_preserve_header_order: bool,
    h1_read_buf_exact_size: Option<usize>,
    h1_max_buf_size: Option<usize>,
}

/// Returns a handshake future over some IO.
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
pub async fn handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
    T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    B: Body + 'static,
    B::Data: Send,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    Builder::new().handshake(io).await
}

// ===== impl SendRequest

impl<B> SendRequest<B> {
    /// Polls to determine whether this sender can be used yet for a request.
    ///
    /// If the associated connection is closed, this returns an Error.
    pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
        self.dispatch.poll_ready(cx)
    }

    /// Waits until the dispatcher is ready.
    ///
    /// If the associated connection is closed, this returns an Error.
    pub async fn ready(&mut self) -> crate::Result<()> {
        futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
    }

    /*
    pub(super) async fn when_ready(self) -> crate::Result<Self> {
        let mut me = Some(self);
        future::poll_fn(move |cx| {
            ready!(me.as_mut().unwrap().poll_ready(cx))?;
            Poll::Ready(Ok(me.take().unwrap()))
        })
        .await
    }

    pub(super) fn is_ready(&self) -> bool {
        self.dispatch.is_ready()
    }

    pub(super) fn is_closed(&self) -> bool {
        self.dispatch.is_closed()
    }
    */
}

impl<B> SendRequest<B>
where
    B: Body + 'static,
{
    /// Sends a `Request` on the associated connection.
    ///
    /// Returns a future that, if successful, yields the `Response`.
    ///
    /// # Note
    ///
    /// There are some key differences in what automatic things the `Client`
    /// does for you that will not be done here:
    ///
    /// - `Client` requires absolute-form `Uri`s, since the scheme and
    ///   authority are needed to connect. They aren't required here.
    /// - Since the `Client` requires absolute-form `Uri`s, it can add
    ///   the `Host` header based on it. You must add a `Host` header yourself
    ///   before calling this method.
    /// - Since absolute-form `Uri`s are not required, if received, they will
    ///   be serialized as-is.
    pub fn send_request(
        &mut self,
        req: Request<B>,
    ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
        let sent = self.dispatch.send(req);

        async move {
            match sent {
                Ok(rx) => match rx.await {
                    Ok(Ok(resp)) => Ok(resp),
                    Ok(Err(err)) => Err(err),
                    // this is a definite bug if it happens, but it shouldn't happen!
                    Err(_canceled) => panic!("dispatch dropped without returning error"),
                },
                Err(_req) => {
                    tracing::debug!("connection was not ready");
                    Err(crate::Error::new_canceled().with("connection was not ready"))
                }
            }
        }
    }

    /*
    pub(super) fn send_request_retryable(
        &mut self,
        req: Request<B>,
    ) -> impl Future<Output = Result<Response<IncomingBody>, (crate::Error, Option<Request<B>>)>> + Unpin
    where
        B: Send,
    {
        match self.dispatch.try_send(req) {
            Ok(rx) => {
                Either::Left(rx.then(move |res| {
                    match res {
                        Ok(Ok(res)) => future::ok(res),
                        Ok(Err(err)) => future::err(err),
                        // this is a definite bug if it happens, but it shouldn't happen!
                        Err(_) => panic!("dispatch dropped without returning error"),
                    }
                }))
            }
            Err(req) => {
                tracing::debug!("connection was not ready");
                let err = crate::Error::new_canceled().with("connection was not ready");
                Either::Right(future::err((err, Some(req))))
            }
        }
    }
    */
}
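// A minimal usage sketch (illustrative only; `example.com` is a placeholder host):
// unlike `Client`, `send_request` does not add a `Host` header or require an
// absolute-form URI, so the request is built explicitly here. The returned future
// only makes progress while the corresponding `Connection` future is being polled,
// for example on a spawned task.
#[allow(dead_code)]
async fn example_send_get<B>(
    sender: &mut SendRequest<B>,
    body: B,
) -> crate::Result<Response<IncomingBody>>
where
    B: Body + 'static,
{
    // Wait until the connection is ready to accept a new request.
    sender.ready().await?;

    // `GET` is the builder default; origin-form URI plus an explicit `Host`
    // header, as described in the `send_request` notes above.
    let req = Request::builder()
        .uri("/")
        .header(http::header::HOST, "example.com")
        .body(body)
        .expect("valid request");

    sender.send_request(req).await
}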
impl<B> fmt::Debug for SendRequest<B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SendRequest").finish()
    }
}

// ===== impl Connection

impl<T, B> fmt::Debug for Connection<T, B>
where
    T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
    B: Body + 'static,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Connection").finish()
    }
}

impl<T, B> Future for Connection<T, B>
where
    T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    B: Body + Send + 'static,
    B::Data: Send,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Output = crate::Result<()>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? {
            proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
            proto::Dispatched::Upgrade(pending) => match self.inner.take() {
                Some(h1) => {
                    let (io, buf, _) = h1.into_inner();
                    pending.fulfill(Upgraded::new(io, buf));
                    Poll::Ready(Ok(()))
                }
                _ => {
                    drop(pending);
                    unreachable!("Upgraded twice");
                }
            },
        }
    }
}

// ===== impl Builder

impl Builder {
    /// Creates a new connection builder.
    #[inline]
    pub fn new() -> Builder {
        Builder {
            h09_responses: false,
            h1_writev: None,
            h1_read_buf_exact_size: None,
            h1_parser_config: Default::default(),
            h1_title_case_headers: false,
            h1_preserve_header_case: false,
            #[cfg(feature = "ffi")]
            h1_preserve_header_order: false,
            h1_max_buf_size: None,
        }
    }

    /// Set whether HTTP/0.9 responses should be tolerated.
    ///
    /// Default is false.
    pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder {
        self.h09_responses = enabled;
        self
    }

    /// Set whether HTTP/1 connections will accept spaces between header names
    /// and the colon that follows them in responses.
    ///
    /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
    /// to say about it:
    ///
    /// > No whitespace is allowed between the header field-name and colon. In
    /// > the past, differences in the handling of such whitespace have led to
    /// > security vulnerabilities in request routing and response handling. A
    /// > server MUST reject any received request message that contains
    /// > whitespace between a header field-name and colon with a response code
    /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
    /// > response message before forwarding the message downstream.
    ///
    /// Note that this setting does not affect HTTP/2.
    ///
    /// Default is false.
    ///
    /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
    pub fn allow_spaces_after_header_name_in_responses(&mut self, enabled: bool) -> &mut Builder {
        self.h1_parser_config
            .allow_spaces_after_header_name_in_responses(enabled);
        self
    }

    /// Set whether HTTP/1 connections will accept obsolete line folding for
    /// header values.
    ///
    /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
    /// parsing.
    ///
    /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.]
    /// has to say about it:
    ///
    /// > A server that receives an obs-fold in a request message that is not
    /// > within a message/http container MUST either reject the message by
    /// > sending a 400 (Bad Request), preferably with a representation
    /// > explaining that obsolete line folding is unacceptable, or replace
    /// > each received obs-fold with one or more SP octets prior to
    /// > interpreting the field value or forwarding the message downstream.
    ///
    /// > A proxy or gateway that receives an obs-fold in a response message
    /// > that is not within a message/http container MUST either discard the
    /// > message and replace it with a 502 (Bad Gateway) response, preferably
    /// > with a representation explaining that unacceptable line folding was
    /// > received, or replace each received obs-fold with one or more SP
    /// > octets prior to interpreting the field value or forwarding the
    /// > message downstream.
    ///
    /// > A user agent that receives an obs-fold in a response message that is
    /// > not within a message/http container MUST replace each received
    /// > obs-fold with one or more SP octets prior to interpreting the field
    /// > value.
    ///
    /// Default is false.
    ///
    /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
    pub fn allow_obsolete_multiline_headers_in_responses(
        &mut self,
        enabled: bool,
    ) -> &mut Builder {
        self.h1_parser_config
            .allow_obsolete_multiline_headers_in_responses(enabled);
        self
    }

    /// Set whether HTTP/1 connections will silently ignore malformed header lines.
    ///
    /// If this is enabled and a header line does not start with a valid header
    /// name, or does not include a colon at all, the line will be silently ignored
    /// and no error will be reported.
    ///
    /// Default is false.
    pub fn ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
        self.h1_parser_config
            .ignore_invalid_headers_in_responses(enabled);
        self
    }

    /// Set whether HTTP/1 connections should try to use vectored writes,
    /// or always flatten into a single buffer.
    ///
    /// Note that setting this to false may mean more copies of body data,
    /// but may also improve performance when an IO transport doesn't
    /// support vectored writes well, such as most TLS implementations.
    ///
    /// Setting this to true will force hyper to use the queued write strategy,
    /// which may eliminate unnecessary cloning on some TLS backends.
    ///
    /// Default is `auto`. In this mode, hyper will try to guess which
    /// mode to use.
    pub fn writev(&mut self, enabled: bool) -> &mut Builder {
        self.h1_writev = Some(enabled);
        self
    }

    /// Set whether HTTP/1 connections will write header names as title case at
    /// the socket level.
    ///
    /// Default is false.
    pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder {
        self.h1_title_case_headers = enabled;
        self
    }

    /// Set whether to support preserving original header cases.
    ///
    /// Currently, this will record the original cases received, and store them
    /// in a private extension on the `Response`. It will also look for and use
    /// such an extension in any provided `Request`.
    ///
    /// Since the relevant extension is still private, there is no way to
    /// interact with the original cases. The only effect this can have now is
    /// to forward the cases in a proxy-like fashion.
    ///
    /// Default is false.
    pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
        self.h1_preserve_header_case = enabled;
        self
    }
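    // A minimal sketch (illustrative only, not a real hyper constructor): combining
    // the response-parsing leniency options above into one pre-configured `Builder`.
    // Each setter returns `&mut Builder`, so the calls can be chained on one borrow.
    #[allow(dead_code)]
    fn example_lenient_builder() -> Builder {
        let mut builder = Builder::new();
        builder
            .allow_spaces_after_header_name_in_responses(true)
            .allow_obsolete_multiline_headers_in_responses(true)
            .ignore_invalid_headers_in_responses(true);
        builder
    }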
    /// Set whether to support preserving original header order.
    ///
    /// Currently, this will record the order in which headers are received, and store this
    /// ordering in a private extension on the `Response`. It will also look for and use
    /// such an extension in any provided `Request`.
    ///
    /// Default is false.
    #[cfg(feature = "ffi")]
    pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
        self.h1_preserve_header_order = enabled;
        self
    }

    /// Sets the exact size of the read buffer to *always* use.
    ///
    /// Note that setting this option unsets the `max_buf_size` option.
    ///
    /// Default is an adaptive read buffer.
    pub fn read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
        self.h1_read_buf_exact_size = sz;
        self.h1_max_buf_size = None;
        self
    }

    /// Set the maximum buffer size for the connection.
    ///
    /// Default is ~400kb.
    ///
    /// Note that setting this option unsets the `read_buf_exact_size` option.
    ///
    /// # Panics
    ///
    /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
    pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
        assert!(
            max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
            "the max_buf_size cannot be smaller than the minimum that h1 specifies."
        );

        self.h1_max_buf_size = Some(max);
        self.h1_read_buf_exact_size = None;
        self
    }

    /// Constructs a connection with the configured options and IO.
    /// See [`client::conn`](crate::client::conn) for more.
    ///
    /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
    /// do nothing.
    pub fn handshake<T, B>(
        &self,
        io: T,
    ) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
    where
        T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
        B: Body + 'static,
        B::Data: Send,
        B::Error: Into<Box<dyn StdError + Send + Sync>>,
    {
        let opts = self.clone();

        async move {
            tracing::trace!("client handshake HTTP/1");

            let (tx, rx) = dispatch::channel();
            let mut conn = proto::Conn::new(io);
            conn.set_h1_parser_config(opts.h1_parser_config);
            if let Some(writev) = opts.h1_writev {
                if writev {
                    conn.set_write_strategy_queue();
                } else {
                    conn.set_write_strategy_flatten();
                }
            }
            if opts.h1_title_case_headers {
                conn.set_title_case_headers();
            }
            if opts.h1_preserve_header_case {
                conn.set_preserve_header_case();
            }
            #[cfg(feature = "ffi")]
            if opts.h1_preserve_header_order {
                conn.set_preserve_header_order();
            }
            if opts.h09_responses {
                conn.set_h09_responses();
            }

            if let Some(sz) = opts.h1_read_buf_exact_size {
                conn.set_read_buf_exact_size(sz);
            }
            if let Some(max) = opts.h1_max_buf_size {
                conn.set_max_buf_size(max);
            }
            let cd = proto::h1::dispatch::Client::new(rx);
            let proto = proto::h1::Dispatcher::new(cd, conn);
            Ok((
                SendRequest { dispatch: tx },
                Connection { inner: Some(proto) },
            ))
        }
    }
}
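// A minimal sketch (illustrative only): performing the handshake through a configured
// `Builder` rather than the plain `handshake` shortcut. The returned `Connection`
// future still has to be polled (typically on its own task) before the `SendRequest`
// half will make any progress.
#[allow(dead_code)]
async fn example_builder_handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
    T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    B: Body + 'static,
    B::Data: Send,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    let mut builder = Builder::new();
    builder
        // Write header names in title case on the wire, e.g. `Content-Type`.
        .title_case_headers(true)
        // Cap the read buffer at 64kb instead of the ~400kb default.
        .max_buf_size(64 * 1024);
    builder.handshake(io).await
}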