added streamreader

serve
Avril 4 years ago
parent ff42a0830c
commit df7c8df1b7
Signed by: flanchan
GPG Key ID: 284488987C31F630

Cargo.lock generated (2 additions)

@@ -645,9 +645,11 @@ dependencies = [
 "cfg-if 1.0.0",
 "futures",
 "hyper",
+"libc",
 "log",
 "lzzzz",
 "markov 1.1.0",
+"pin-project",
 "pretty_env_logger",
 "serde",
 "serde_cbor",

@@ -38,3 +38,5 @@ lzzzz = {version = "0.2", features=["tokio-io"], optional=true}
 serde = {version ="1.0", features=["derive"]}
 toml = "0.5.6"
 async-compression = {version = "0.3.5", features=["tokio-02", "bzip2"], optional=true}
+pin-project = "0.4.26"
+libc = "0.2.79"

@@ -0,0 +1,28 @@
use libc::{
    c_void,
};

/// Copy a slice of bytes only.
///
/// Copies `min(dst.len(), src.len())` bytes from `src` into `dst` and returns the number copied.
///
/// # Notes
/// `dst` and `src` must not overlap. See [move_slice].
pub fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize
{
    let sz = std::cmp::min(dst.len(), src.len());
    if sz > 0 {
        // Use the slice pointers directly; indexing `dst[0]`/`src[0]` would panic on empty slices.
        unsafe {
            libc::memcpy(dst.as_mut_ptr() as *mut c_void, src.as_ptr() as *const c_void, sz);
        }
    }
    sz
}

/// Move a slice of bytes only.
///
/// Copies `min(dst.len(), src.len())` bytes from `src` into `dst` and returns the number copied.
///
/// # Notes
/// `dst` and `src` can overlap.
pub fn move_slice(dst: &mut [u8], src: &[u8]) -> usize
{
    let sz = std::cmp::min(dst.len(), src.len());
    if sz > 0 {
        unsafe {
            libc::memmove(dst.as_mut_ptr() as *mut c_void, src.as_ptr() as *const c_void, sz);
        }
    }
    sz
}
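For reference, a small test sketch (hypothetical, not part of this commit) that could sit in this module to pin down the `min`-length contract of these helpers:

#[cfg(test)]
mod tests
{
    use super::*;

    #[test]
    fn length_is_bounded_by_shorter_slice()
    {
        let src = [1u8, 2, 3, 4, 5];
        let mut dst = [0u8; 3];
        // Only min(dst.len(), src.len()) == 3 bytes are copied.
        assert_eq!(copy_slice(&mut dst, &src), 3);
        assert_eq!(dst, [1, 2, 3]);

        // move_slice obeys the same length rule (overlap is additionally tolerated).
        let mut wide = [0u8; 8];
        assert_eq!(move_slice(&mut wide, &src), 5);
        assert_eq!(&wide[..5], &src[..]);
    }
}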

@@ -0,0 +1,173 @@
//! Stream related things
use super::*;
use std::{
    task::{
        Poll,
        Context,
    },
    pin::Pin,
};
use tokio::{
    io::{
        AsyncBufRead,
        AsyncRead,
    },
    prelude::*,
};
use futures::{
    stream::{
        Stream,
    },
};
use pin_project::pin_project;
/// Converts a stream of byte-containing objects into an `AsyncRead` + `AsyncBufRead` reader.
#[pin_project]
pub struct StreamReader<I, T>
where I: Stream<Item=T>
{
    #[pin]
    source: I,
    buffer: Vec<u8>,
}
impl<T, I> StreamReader<I, T>
where I: Stream<Item=T>,
      T: AsRef<[u8]>
{
    /// The currently buffered bytes
    pub fn buffer(&self) -> &[u8]
    {
        &self.buffer[..]
    }
    /// Consume this reader, returning the original stream
    pub fn into_inner(self) -> I
    {
        self.source
    }
    /// Create a new instance with the given buffer capacity
    pub fn with_capacity(source: I, cap: usize) -> Self
    {
        Self {
            source,
            buffer: Vec::with_capacity(cap),
        }
    }
    /// Create a new instance from this stream
    pub fn new(source: I) -> Self
    {
        Self {
            source,
            buffer: Vec::new(),
        }
    }
    /// Attempt to pull the next chunk from the stream and append it to the buffer.
    ///
    /// Returns how many bytes were appended (0 means the stream is exhausted).
    #[cold] fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<usize>
    {
        let this = self.project();
        match this.source.poll_next(cx) {
            Poll::Ready(None) => Poll::Ready(0),
            Poll::Ready(Some(chunk)) if chunk.as_ref().len() > 0 => {
                let chunk = chunk.as_ref();
                this.buffer.extend_from_slice(chunk);
                Poll::Ready(chunk.len())
            },
            _ => Poll::Pending,
        }
    }
}
impl<T: AsRef<[u8]>, I: Stream<Item=T>> AsyncRead for StreamReader<I,T>
{
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
        let this = self.project();
        if this.buffer.len() != 0 {
            // Serve as much of `buf` as the internal buffer can fill right now.
            let len = std::cmp::min(buf.len(), this.buffer.len());
            Poll::Ready(Ok(bytes::copy_slice(buf, this.buffer.drain(..len).as_slice())))
        } else {
            // Buffer is empty, try to fill it from the stream.
            match this.source.poll_next(cx) {
                Poll::Ready(None) => Poll::Ready(Ok(0)), // stream exhausted: EOF
                Poll::Ready(Some(chunk)) if chunk.as_ref().len() > 0 => {
                    // Stash the chunk, then hand back as much of it as fits in `buf`;
                    // anything left over stays buffered for the next read.
                    this.buffer.extend_from_slice(chunk.as_ref());
                    let len = std::cmp::min(buf.len(), this.buffer.len());
                    Poll::Ready(Ok(bytes::copy_slice(buf, this.buffer.drain(..len).as_slice())))
                },
                _ => Poll::Pending,
            }
        }
    }
}
impl<T: AsRef<[u8]>, I: Stream<Item=T>> AsyncBufRead for StreamReader<I,T>
{
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let this = self.project();
        if this.buffer.len() < 1 {
            // Buffer is empty, fetch more from the stream.
            match this.source.poll_next(cx) {
                // An empty slice is how `AsyncBufRead` signals EOF, so no error is needed here.
                Poll::Ready(None) => Poll::Ready(Ok(&[])),
                Poll::Ready(Some(chunk)) if chunk.as_ref().len() > 0 => {
                    this.buffer.extend_from_slice(chunk.as_ref());
                    Poll::Ready(Ok(&this.buffer[..]))
                },
                _ => Poll::Pending,
            }
        } else {
            Poll::Ready(Ok(&this.buffer[..]))
        }
    }
    fn consume(self: Pin<&mut Self>, amt: usize) {
        self.project().buffer.drain(..amt);
    }
}
#[cfg(test)]
mod tests
{
    use super::*;
    use tokio::{
        sync::{
            mpsc,
        },
    };
    #[tokio::test]
    async fn stream_of_vec()
    {
        let (mut tx, rx) = mpsc::channel(16);
        let sender = tokio::spawn(async move {
            tx.send("Hello ").await.unwrap();
            tx.send("world").await.unwrap();
            tx.send("\n").await.unwrap();
            tx.send("How ").await.unwrap();
            tx.send("are ").await.unwrap();
            tx.send("you").await.unwrap();
        });
        let mut reader = StreamReader::new(rx);
        let mut output = String::new();
        loop {
            let read = reader.read_line(&mut output).await.expect("Failed to read");
            if read == 0 { break; }
            println!("Read: {}", read);
        }
        println!("Done: {:?}", output);
        sender.await.expect("Child panic");
        assert_eq!(&output[..], "Hello world\nHow are you");
    }
}
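To complement the channel-based test above, a hedged usage sketch (hypothetical, not part of this commit; it assumes `StreamReader` is in scope and the crate's tokio 0.2 / futures dependencies): wrapping a plain in-memory stream and draining it through `AsyncReadExt`:

// Hypothetical usage sketch; `StreamReader` is the type defined above.
use futures::stream;
use tokio::prelude::*; // brings AsyncReadExt::read_to_end into scope (tokio 0.2)

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Any Stream whose items are AsRef<[u8]> works: &str, String, Vec<u8>, ...
    let chunks = stream::iter(vec!["Hello ", "world", "\n"]);
    let mut reader = StreamReader::new(chunks);

    let mut out = Vec::new();
    reader.read_to_end(&mut out).await?;
    assert_eq!(&out[..], b"Hello world\n");
    Ok(())
}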

@@ -46,6 +46,9 @@ macro_rules! status {
         ::warp::http::status::StatusCode::from_u16($code).unwrap()
     };
 }
+mod bytes;
+mod chunking;
 #[cfg(feature="api")]
 mod api;
 #[cfg(target_family="unix")]
