diff --git a/Cargo.lock b/Cargo.lock index c9f6c4699..17c35ae44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,6 +108,15 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "buf-list" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4851afb0c681f0bf27d675eff71da4d659ea32949363b048470e69b11fdcd02f" +dependencies = [ + "bytes", +] + [[package]] name = "bumpalo" version = "3.9.1" @@ -285,6 +294,7 @@ dependencies = [ "async-stream", "async-trait", "base64 0.20.0", + "buf-list", "bytes", "chrono", "dropshot_endpoint", @@ -303,6 +313,7 @@ dependencies = [ "paste", "pem", "percent-encoding", + "pin-project-lite", "proc-macro2", "rcgen", "rustls", @@ -981,9 +992,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" diff --git a/dropshot/Cargo.toml b/dropshot/Cargo.toml index 0028e4d66..e1f6ed59f 100644 --- a/dropshot/Cargo.toml +++ b/dropshot/Cargo.toml @@ -14,6 +14,7 @@ categories = ["network-programming", "web-programming::http-server"] async-stream = "0.3.3" async-trait = "0.1.60" base64 = "0.20.0" +buf-list = "1.0.0" bytes = "1" futures = "0.3.25" hostname = "0.3.0" @@ -21,6 +22,7 @@ http = "0.2.8" indexmap = "1.9.2" paste = "1.0.11" percent-encoding = "2.2.0" +pin-project-lite = "0.2.9" proc-macro2 = "1.0.49" rustls = "0.20.7" rustls-pemfile = "1.0.1" diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index c05abc3ab..3c23b7b2a 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -35,7 +35,6 @@ use super::error::HttpError; use super::http_util::http_extract_path_params; -use super::http_util::http_read_body; use super::http_util::CONTENT_TYPE_JSON; use 
super::http_util::CONTENT_TYPE_OCTET_STREAM; use super::server::DropshotState; @@ -52,14 +51,25 @@ use crate::router::VariableSet; use crate::to_map::to_map; use crate::websocket::WEBSOCKET_PARAM_SENTINEL; +use async_stream::stream; use async_trait::async_trait; +use buf_list::BufList; +use bytes::BufMut; use bytes::Bytes; +use bytes::BytesMut; use futures::lock::Mutex; +use futures::ready; +use futures::stream::BoxStream; +use futures::Stream; +use futures::StreamExt; +use futures::TryStreamExt; use http::HeaderMap; use http::StatusCode; +use hyper::body::HttpBody; use hyper::Body; use hyper::Request; use hyper::Response; +use pin_project_lite::pin_project; use schemars::schema::InstanceType; use schemars::schema::SchemaObject; use schemars::JsonSchema; @@ -74,7 +84,9 @@ use std::fmt::Result as FmtResult; use std::future::Future; use std::marker::PhantomData; use std::num::NonZeroU32; +use std::pin::Pin; use std::sync::Arc; +use std::task::Poll; /** * Type alias for the result returned by HTTP handler functions. @@ -971,13 +983,15 @@ where BodyType: JsonSchema + DeserializeOwned + Send + Sync, { let server = &rqctx.server; - let mut request = rqctx.request.lock().await; - let body = http_read_body( - request.body_mut(), - server.config.request_body_max_bytes, - ) + let body = UntypedBody { + request: rqctx.request.clone(), + max_bytes: server.config.request_body_max_bytes, + } + .into_bytes() .await?; + let request = rqctx.request.lock().await; + // RFC 7231 §3.1.1.1: media types are case insensitive and may // be followed by whitespace and/or a parameter (e.g., charset), // which we currently ignore. @@ -1065,42 +1079,126 @@ where } } -/* - * UntypedBody: body extractor for a plain array of bytes of a body. - */ - -/** - * `UntypedBody` is an extractor for reading in the contents of the HTTP request - * body and making the raw bytes directly available to the consumer. - */ +/// An extractor for raw bytes. 
+/// +/// `UntypedBody` is meant to read in the contents of an HTTP request body as a +/// series of raw bytes. Unlike [`TypedBody`], an `UntypedBody` represents a +/// read that hasn't happened yet; a method like +/// [`into_bytes`](Self::into_bytes) or [`into_stream`](Self::into_stream) must +/// be called to read the full body. #[derive(Debug)] pub struct UntypedBody { - content: Bytes, + request: Arc>>, + max_bytes: usize, } impl UntypedBody { - /** - * Returns a byte slice of the underlying body content. - */ - /* - * TODO drop this in favor of Deref? + Display and Debug for convenience? - */ - pub fn as_bytes(&self) -> &[u8] { - &self.content + /// Reads the body into a single, contiguous [`Bytes`] chunk, up to the + /// server's configured maximum body size. + /// + /// Recommended for smaller request bodies. Constructing a single `Bytes` + /// chunk from larger request bodies might cause excessive copying and + /// reallocations. + /// + /// # Errors + /// + /// Errors if there's an underlying HTTP error. Returns a "400 Bad Request" + /// error if the request body is too large. + pub async fn into_bytes(self) -> Result { + let mut stream = self.into_stream(); + let mut bytes = BytesMut::new(); + + while let Some(data) = stream.next().await { + bytes.put(data?); + } + + Ok(bytes.freeze()) } - /** - * Convenience wrapper to convert the body to a UTF-8 string slice, - * returning a 400-level error if the body is not valid UTF-8. - */ - pub fn as_str(&self) -> Result<&str, HttpError> { - std::str::from_utf8(self.as_bytes()).map_err(|e| { + /// Reads the body into a `String`, up to the server's configured maximum + /// body size. + /// + /// # Errors + /// + /// In addition to the errors returned by [`into_bytes`](Self::into_bytes), + /// returns a "400 Bad Request" error if the body is not valid UTF-8. 
+ pub async fn into_string(self) -> Result { + let v = Vec::from(self.into_bytes().await?); + String::from_utf8(v).map_err(|e| { HttpError::for_bad_request( None, format!("failed to parse body as UTF-8 string: {}", e), ) }) } + + /// Reads the body into a [`BufList`], up to the server's configured maximum + /// body size. + /// + /// A `BufList` is a list of [`Bytes`] chunks that implements the + /// [`Buf`](bytes::Buf) trait. A `BufList`'s chunks can be operated on + /// as a unit. + /// + /// Recommended over [`into_bytes`](Self::into_bytes) or + /// [`into_string`](Self::into_string) for larger request bodies. Like those + /// functions, this function fails if the body exceeds the server's + /// configured maximum body size. + pub async fn into_buf_list(self) -> Result { + self.into_stream().try_collect().await + } + + /// Reads the body into a [`BufList`], with a custom limit for the maximum + /// body size. + /// + /// This method is similar to [`into_buf_list`](Self::into_buf_list), except + /// it specifies a custom limit. If this method is called, the default + /// [`request_body_max_bytes`](ServerConfig::request_body_max_bytes) limit + /// is ignored. + pub async fn into_buf_list_with_limit( + self, + max_bytes: usize, + ) -> Result { + self.into_stream().with_limit(max_bytes).try_collect().await + } + + /// Converts `self` into a [`Stream`] of `Result` chunks. + /// + /// By default, the stream is limited to the server's configured maximum + /// body size. To set a different maximum body size, call + /// [`with_limit`](UntypedBodyStream::with_limit) on the returned stream. + /// + /// # Errors + /// + /// The stream errors if there's an underlying HTTP error, or if the request + /// body exceeds the limit. + pub fn into_stream(self) -> UntypedBodyStream { + let max_bytes = self.max_bytes; + + let stream = stream! 
{ + let mut request = self.request.lock().await; + let body = request.body_mut(); + + 'outer: { + while let Some(data) = body.data().await { + match data { + Ok(data) => yield Ok(data), + Err(e) => { + yield Err(HttpError::from(e)); + break 'outer; + } + } + } + + // Read the trailers even though we aren't going to do anything + // with them. + if let Err(e) = body.trailers().await { + yield Err(HttpError::from(e)); + } + } + }; + + UntypedBodyStream::new(stream, max_bytes) + } } #[async_trait] @@ -1108,14 +1206,10 @@ impl Extractor for UntypedBody { async fn from_request( rqctx: Arc>, ) -> Result { - let server = &rqctx.server; - let mut request = rqctx.request.lock().await; - let body_bytes = http_read_body( - request.body_mut(), - server.config.request_body_max_bytes, - ) - .await?; - Ok(UntypedBody { content: body_bytes }) + Ok(Self { + request: rqctx.request.clone(), + max_bytes: rqctx.server.config.request_body_max_bytes, + }) } fn metadata( @@ -1143,6 +1237,94 @@ impl Extractor for UntypedBody { } } +pin_project! { + /// A stream over an HTTP body. This stream has a limit on the number of + /// bytes that can be read from it. + pub struct UntypedBodyStream { + // TODO: replace with concrete type once TAIT is stabilized. + #[pin] + stream: BoxStream<'static, Result>, + max_bytes: usize, + current_bytes: usize, + } +} + +impl UntypedBodyStream { + pub(crate) fn new( + stream: impl Stream> + Send + 'static, + max_bytes: usize, + ) -> Self { + Self { stream: stream.boxed(), max_bytes, current_bytes: 0 } + } + + /// Returns the maximum number of bytes that can be read from this stream. + pub fn max_bytes(&self) -> usize { + self.max_bytes + } + + /// Returns the current number of bytes read from the stream. + pub fn current_bytes(&self) -> usize { + self.current_bytes + } + + /// Sets a new limit on the stream. + /// + /// [`Self::current_bytes`] does not change. 
+ pub fn set_limit(&mut self, new_limit: usize) -> &mut Self { + self.max_bytes = new_limit; + self + } + + /// An owned version of [`set_limit`](Self::set_limit). + pub fn with_limit(mut self, new_limit: usize) -> Self { + self.set_limit(new_limit); + self + } +} + +impl Stream for UntypedBodyStream { + type Item = Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + let this = self.as_mut().project(); + let result = ready!(this.stream.poll_next(cx)); + let bytes = match result { + Some(Ok(bytes)) => bytes, + x @ None | x @ Some(Err(_)) => return Poll::Ready(x), + }; + + let max_bytes = *this.max_bytes; + + let is_too_large = + this.current_bytes.checked_add(bytes.len()) + .map_or(true, |x| x > max_bytes); + if is_too_large { + // The request was too large. Drain the rest of the stream. + while let Some(data) = + futures::ready!(self.as_mut().project().stream.poll_next(cx)) + { + if let Err(e) = data { + return Poll::Ready(Some(Err(e))); + } + } + return Poll::Ready(Some(Err(HttpError::for_bad_request( + None, + format!( + "request body exceeded maximum size of {} bytes", + max_bytes + ), + )))); + } + + *this.current_bytes += bytes.len(); + + Poll::Ready(Some(Ok(bytes))) + } +} + /* * Response Type Conversion * diff --git a/dropshot/src/http_util.rs b/dropshot/src/http_util.rs index c043ecb5a..d50885b81 100644 --- a/dropshot/src/http_util.rs +++ b/dropshot/src/http_util.rs @@ -3,9 +3,6 @@ * General-purpose HTTP-related facilities */ -use bytes::BufMut; -use bytes::Bytes; -use hyper::body::HttpBody; use serde::de::DeserializeOwned; use super::error::HttpError; @@ -23,93 +20,6 @@ pub const CONTENT_TYPE_NDJSON: &str = "application/x-ndjson"; /** MIME type for form/urlencoded data */ pub const CONTENT_TYPE_URL_ENCODED: &str = "application/x-www-form-urlencoded"; -/** - * Reads the rest of the body from the request up to the given number of bytes. 
- * If the body fits within the specified cap, a buffer is returned with all the - * bytes read. If not, an error is returned. - */ -pub async fn http_read_body( - body: &mut T, - cap: usize, -) -> Result -where - T: HttpBody + std::marker::Unpin, -{ - /* - * This looks a lot like the implementation of hyper::body::to_bytes(), but - * applies the requested cap. We've skipped the optimization for the - * 1-buffer case for now, as it seems likely this implementation will change - * anyway. - * TODO should this use some Stream interface instead? - * TODO why does this look so different in type signature (Data=Bytes, - * std::marker::Unpin, &mut T) - * TODO Error type shouldn't have to be hyper Error -- Into should - * work too? - * TODO do we need to use saturating_add() here? - */ - let mut parts = std::vec::Vec::new(); - let mut nbytesread: usize = 0; - while let Some(maybebuf) = body.data().await { - let buf = maybebuf?; - let bufsize = buf.len(); - - if nbytesread + bufsize > cap { - http_dump_body(body).await?; - // TODO-correctness check status code - return Err(HttpError::for_bad_request( - None, - format!("request body exceeded maximum size of {} bytes", cap), - )); - } - - nbytesread += bufsize; - parts.put(buf); - } - - /* - * Read the trailers as well, even though we're not going to do anything - * with them. - */ - body.trailers().await?; - /* - * TODO-correctness why does the is_end_stream() assertion fail and the next - * one panic? - */ - // assert!(body.is_end_stream()); - // assert!(body.data().await.is_none()); - // assert!(body.trailers().await?.is_none()); - Ok(parts.into()) -} - -/** - * Reads the rest of the body from the request, dropping all the bytes. This is - * useful after encountering error conditions. - */ -pub async fn http_dump_body(body: &mut T) -> Result -where - T: HttpBody + std::marker::Unpin, -{ - /* - * TODO should this use some Stream interface instead? 
- * TODO-hardening: does this actually cap the amount of data that will be - * read? What if the underlying implementation chooses to wait for a much - * larger number of bytes? - * TODO better understand pin_mut!() - * TODO do we need to use saturating_add() here? - */ - let mut nbytesread: usize = 0; - while let Some(maybebuf) = body.data().await { - let buf = maybebuf?; - nbytesread += buf.len(); - } - - /* - * TODO-correctness why does the is_end_stream() assertion fail? - */ - // assert!(body.is_end_stream()); - Ok(nbytesread) -} - /** * Given a set of variables (most immediately from a RequestContext, likely * generated by the HttpRouter when routing an incoming request), extract them diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 12fd1fb62..66d014024 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -214,7 +214,7 @@ * [query_params: Query,] * [path_params: Path

,] * [body_param: TypedBody,] - * [body_param: UntypedBody,] + * [body_param: UntypedBody,] * ) -> Result * ``` * @@ -239,9 +239,9 @@ * * [`UntypedBody`] extracts the raw bytes of the request body. * * If the handler takes a `Query`, `Path

`, `TypedBody`, or - * `UntypedBody`, and the corresponding extraction cannot be completed, the - * request fails with status code 400 and an error message reflecting a - * validation error. + * `UntypedBody`, and the corresponding extraction cannot be + * completed, the request fails with status code 400 and an error message + * reflecting a validation error. * * As with any serde-deserializable type, you can make fields optional by having * the corresponding property of the type be an `Option`. Here's an example of @@ -657,6 +657,7 @@ pub use handler::Query; pub use handler::RequestContext; pub use handler::TypedBody; pub use handler::UntypedBody; +pub use handler::UntypedBodyStream; pub use http_util::CONTENT_TYPE_JSON; pub use http_util::CONTENT_TYPE_NDJSON; pub use http_util::CONTENT_TYPE_OCTET_STREAM; diff --git a/dropshot/tests/common/mod.rs b/dropshot/tests/common/mod.rs index 93e8951d8..bda67bf8a 100644 --- a/dropshot/tests/common/mod.rs +++ b/dropshot/tests/common/mod.rs @@ -34,6 +34,19 @@ pub fn test_setup( TestContext::new(api, 0 as usize, &config_dropshot, Some(logctx), log) } +pub fn test_setup_with_large_request_bodies( + test_name: &str, + api: ApiDescription, + request_body_max_bytes: usize, +) -> TestContext { + let config_dropshot: ConfigDropshot = + ConfigDropshot { request_body_max_bytes, ..Default::default() }; + + let logctx = create_log_context(test_name); + let log = logctx.log.new(o!()); + TestContext::new(api, 0 as usize, &config_dropshot, Some(logctx), log) +} + pub fn create_log_context(test_name: &str) -> LogContext { let log_config = ConfigLogging::File { level: ConfigLoggingLevel::Debug, diff --git a/dropshot/tests/fail/bad_endpoint1.stderr b/dropshot/tests/fail/bad_endpoint1.stderr index 7d1989a1f..00ff29ad4 100644 --- a/dropshot/tests/fail/bad_endpoint1.stderr +++ b/dropshot/tests/fail/bad_endpoint1.stderr @@ -4,7 +4,7 @@ error: Endpoint handlers must have the following signature: [query_params: Query,] [path_params: Path

,] [body_param: TypedBody,] - [body_param: UntypedBody,] + [body_param: UntypedBody,] ) -> Result --> tests/fail/bad_endpoint1.rs:20:1 | diff --git a/dropshot/tests/fail/bad_endpoint11.stderr b/dropshot/tests/fail/bad_endpoint11.stderr index 1d4f19d8d..393a7379a 100644 --- a/dropshot/tests/fail/bad_endpoint11.stderr +++ b/dropshot/tests/fail/bad_endpoint11.stderr @@ -4,7 +4,7 @@ error: Endpoint handlers must have the following signature: [query_params: Query,] [path_params: Path

,] [body_param: TypedBody,] - [body_param: UntypedBody,] + [body_param: UntypedBody,] ) -> Result --> tests/fail/bad_endpoint11.rs:13:1 | diff --git a/dropshot/tests/fail/bad_endpoint13.stderr b/dropshot/tests/fail/bad_endpoint13.stderr index 1559b41d3..dcc582f53 100644 --- a/dropshot/tests/fail/bad_endpoint13.stderr +++ b/dropshot/tests/fail/bad_endpoint13.stderr @@ -4,7 +4,7 @@ error: Endpoint handlers must have the following signature: [query_params: Query,] [path_params: Path

,] [body_param: TypedBody,] - [body_param: UntypedBody,] + [body_param: UntypedBody,] ) -> Result --> tests/fail/bad_endpoint13.rs:19:1 | diff --git a/dropshot/tests/fail/bad_endpoint2.stderr b/dropshot/tests/fail/bad_endpoint2.stderr index c71207619..08a75a863 100644 --- a/dropshot/tests/fail/bad_endpoint2.stderr +++ b/dropshot/tests/fail/bad_endpoint2.stderr @@ -4,7 +4,7 @@ error: Endpoint handlers must have the following signature: [query_params: Query,] [path_params: Path

,] [body_param: TypedBody,] - [body_param: UntypedBody,] + [body_param: UntypedBody,] ) -> Result --> tests/fail/bad_endpoint2.rs:13:1 | diff --git a/dropshot/tests/fail/bad_endpoint8.stderr b/dropshot/tests/fail/bad_endpoint8.stderr index dc6067086..d5aad5d87 100644 --- a/dropshot/tests/fail/bad_endpoint8.stderr +++ b/dropshot/tests/fail/bad_endpoint8.stderr @@ -4,7 +4,7 @@ error: Endpoint handlers must have the following signature: [query_params: Query,] [path_params: Path

,] [body_param: TypedBody,] - [body_param: UntypedBody,] + [body_param: UntypedBody,] ) -> Result --> tests/fail/bad_endpoint8.rs:20:1 | diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index e0ed378f7..b3bbc75f8 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -646,7 +646,7 @@ async fn test_untyped_body() { let error = client .make_request_with_body( Method::PUT, - "/testing/untyped_body?parse_str=true", + "/testing/untyped_body?into=string", bad_body.clone().into(), StatusCode::BAD_REQUEST, ) @@ -676,7 +676,7 @@ async fn test_untyped_body() { let mut response = client .make_request_with_body( Method::PUT, - "/testing/untyped_body?parse_str=true", + "/testing/untyped_body?into=string", "".into(), StatusCode::OK, ) @@ -691,7 +691,7 @@ async fn test_untyped_body() { let mut response = client .make_request_with_body( Method::PUT, - "/testing/untyped_body?parse_str=true", + "/testing/untyped_body?into=string", body.into(), StatusCode::OK, ) @@ -701,6 +701,69 @@ async fn test_untyped_body() { assert_eq!(json.nbytes, 4); assert_eq!(json.as_utf8, Some(String::from("tμv"))); + /* Success case: into BufList, body under limit. */ + let big_body = vec![0u8; 1024]; + let mut response = client + .make_request_with_body( + Method::PUT, + "/testing/untyped_body?into=buf-list", + big_body.into(), + StatusCode::OK, + ) + .await + .unwrap(); + let json: DemoUntyped = read_json(&mut response).await; + assert_eq!(json.nbytes, 1024); + + /* Error case: into BufList, body over limit. */ + let big_body = vec![0u8; 1025]; + let error = client + .make_request_with_body( + Method::PUT, + "/testing/untyped_body?into=buf-list", + big_body.into(), + StatusCode::BAD_REQUEST, + ) + .await + .unwrap_err(); + assert_eq!( + error.message, + "request body exceeded maximum size of 1024 bytes" + ); + + /* Success case: into BufList with custom limit, body under limit. 
*/ + const MAX_BYTES: usize = 1 * 1024 * 1024; + + let very_big_body = vec![0u8; MAX_BYTES]; + let mut response = client + .make_request_with_body( + Method::PUT, + &format!("/testing/untyped_body?into=buf-list&limit={MAX_BYTES}"), + very_big_body.into(), + StatusCode::OK, + ) + .await + .unwrap(); + let json: DemoUntyped = read_json(&mut response).await; + println!("for very big body, got: {:?}", json); + assert_eq!(json.nbytes, MAX_BYTES); + + /* Error case: into BufList with custom limit, body over limit. */ + let very_big_body = vec![0u8; MAX_BYTES + 1]; + let error = client + .make_request_with_body( + Method::PUT, + &format!("/testing/untyped_body?into=buf-list&limit={MAX_BYTES}"), + very_big_body.into(), + StatusCode::BAD_REQUEST, + ) + .await + .unwrap_err(); + assert_eq!( + error.message, + format!("request body exceeded maximum size of {MAX_BYTES} bytes") + ); + testctx.teardown().await; } @@ -1001,15 +1064,27 @@ async fn demo_handler_path_param_u32( http_echo(&path_params.into_inner()) } -#[derive(Deserialize, Serialize, JsonSchema)] +#[derive(Debug, Deserialize, Serialize, JsonSchema)] pub struct DemoUntyped { pub nbytes: usize, + pub nchunks: usize, pub as_utf8: Option, } -#[derive(Deserialize, JsonSchema)] + +#[derive(Debug, Deserialize, JsonSchema)] pub struct DemoUntypedQuery { - pub parse_str: Option, + pub into: Option, + pub limit: Option, +} + +#[derive(Debug, Deserialize, JsonSchema)] +#[serde(rename_all = "kebab-case")] +pub enum UntypedQueryInto { + Bytes, + String, + BufList, } + #[endpoint { method = PUT, path = "/testing/untyped_body" @@ -1019,14 +1094,31 @@ async fn demo_handler_untyped_body( body: UntypedBody, query: Query, ) -> Result, HttpError> { - let nbytes = body.as_bytes().len(); - let as_utf8 = if query.into_inner().parse_str.unwrap_or(false) { - Some(String::from(body.as_str()?)) - } else { - None + let query = query.into_inner(); + let response = match query.into.unwrap_or(UntypedQueryInto::Bytes) { + UntypedQueryInto::Bytes => 
{ + let nbytes = body.into_bytes().await?.len(); + DemoUntyped { nbytes, nchunks: 1, as_utf8: None } + } + UntypedQueryInto::String => { + let s = body.into_string().await?; + DemoUntyped { nbytes: s.len(), nchunks: 1, as_utf8: Some(s) } + } + UntypedQueryInto::BufList => { + let buf_list = match query.limit { + Some(max_bytes) => { + body.into_buf_list_with_limit(max_bytes).await? + } + None => body.into_buf_list().await?, + }; + DemoUntyped { + nbytes: buf_list.num_bytes(), + nchunks: buf_list.num_chunks(), + as_utf8: None, + } + } }; - - Ok(HttpResponseOk(DemoUntyped { nbytes, as_utf8 })) + Ok(HttpResponseOk(response)) } #[derive(Deserialize, Serialize, JsonSchema)] diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index 5ee6709fa..7797cc237 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -87,7 +87,7 @@ const USAGE: &str = "Endpoint handlers must have the following signature: [query_params: Query,] [path_params: Path

,] [body_param: TypedBody,] - [body_param: UntypedBody,] + [body_param: UntypedBody,] ) -> Result"; /// This attribute transforms a handler function into a Dropshot endpoint