Compare commits


No commits in common. 'master' and 'old-dual' have entirely different histories.

@@ -7,10 +7,4 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = {version = "0.2", features=["full"]}
chacha20stream = {version = "1.0", features=["async"]}
openssl = "0.10.33"
stackalloc = "1.1.0"
pin-project = "1.0.6"
bytes = "0.5.6"
log = {version = "0.4.14", optional=true}

@@ -1,27 +0,0 @@
use super::*;
use std::io::{
self,
Write,
};
/// TODO: RSA private key
pub type RsaPrivateKey = ();
/// TODO: RSA public key
pub type RsaPublicKey = ();
pub(crate) fn rsa_encrypt<W: io::Write>(with: &RsaPublicKey, to: &mut W, buf: &[u8]) -> io::Result<usize>
{
todo!()
}
pub(crate) fn generate() -> RsaPrivateKey
{
todo!()
}
pub use chacha20stream::{
Key,
IV,
keygen as chacha_keygen,
};

@@ -1,364 +1,119 @@
//! Container for switching between un/encrypted stream
use super::*;
use std::mem::{self, MaybeUninit, ManuallyDrop};
use std::ops::{Drop, Deref, DerefMut};
use std::ptr;
use std::fmt;
use chacha20stream::AsyncSink;
use std::mem;
use chacha20stream::{
AsyncSink,
Key, IV,
};
use tokio::io::AsyncWrite;
use std::{
pin::Pin,
task::{Context, Poll},
io,
marker::Unpin,
};
bool_type!(pub Encrypted; "Is the value encrypted?");
bool_type!(pub Encryption; "What way are we en/decrypting?" => Encrypt, Decrypt);
/// A wrapper `AsyncWrite` stream that allows switching between encrypted (chacha20stream) and plain stream writing.
///
/// # Polymorphic dispatching
/// While this type implements `AsyncWrite` itself, it is recommended to use the polymorphic mutable reference returned by `as_dyn_unpin_mut()` (or `as_dyn_mut()` for non-`Unpin` values of `S`) for writing if lots of `AsyncWrite` methods will be dispatched on the instance.
/// This will prevent the need to check the discriminant of the enum each time the `DualStream`'s `AsyncWrite` methods are polled.
/// The type implements `AsRef/Mut` for streams `S` that are both `Unpin` and not `Unpin` for convenience.
#[derive(Debug)]
pub enum DualStream<S>
pub enum DualStreamKind<S>
{
/// If there is a panic while switching modes, the stream is left in this invariant state.
///
/// Stream `S` is dropped if the instance is in this state.
Poisoned,
/// Stream `S` is being written to through a chacha20stream `AsyncSink` stream cipher.
Encrypted(AsyncSink<S>),
/// Stream `S` is being written to directly.
Plain(S),
}
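// A minimal usage sketch of the polymorphic dispatching described in the doc comment above
// (a hedged example, assuming tokio 0.2's `AsyncWriteExt` via the prelude and the
// `as_dyn_unpin_mut()` accessor defined further down in this file):
async fn write_lines<S: AsyncWrite + Unpin>(stream: &mut DualStream<S>, lines: &[&str]) -> io::Result<()>
{
use tokio::prelude::*;
// Resolve the discriminant once and reuse the `dyn AsyncWrite + Unpin` reference,
// instead of re-checking it on every poll of the wrapper's own `AsyncWrite` impl.
let w = stream.as_dyn_unpin_mut();
for line in lines {
w.write_all(line.as_bytes()).await?;
}
w.flush().await
}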
// We can use dynamic dispatching to prevent the need to check the enum's discriminant each time.
// We have `AsRef/Mut`s for normal and `Unpin` polymorphic `AsyncWrite`s
impl<'a, S: AsyncWrite + Unpin> AsRef<dyn AsyncWrite + Unpin + 'a> for DualStream<S>
where S: 'a
{
fn as_ref(&self) -> &(dyn AsyncWrite + Unpin + 'a)
{
self.as_dyn_unpin()
}
}
impl<'a, S: AsyncWrite + Unpin> AsMut<dyn AsyncWrite + Unpin + 'a> for DualStream<S>
where S: 'a
{
fn as_mut(&mut self) -> &mut (dyn AsyncWrite + Unpin + 'a)
{
self.as_dyn_unpin_mut()
}
}
pub struct DualStream<S>(MaybeUninit<Box<DualStreamKind<S>>>);
impl<'a, S: AsyncWrite> AsRef<dyn AsyncWrite + 'a> for DualStream<S>
where S: 'a
impl<S: fmt::Debug> fmt::Debug for DualStream<S>
{
fn as_ref(&self) -> &(dyn AsyncWrite + 'a)
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
self.as_dyn()
fmt::Debug::fmt(self.as_ref(), f)
}
}
impl<'a, S: AsyncWrite> AsMut<dyn AsyncWrite + 'a> for DualStream<S>
where S: 'a
{
fn as_mut(&mut self) -> &mut (dyn AsyncWrite + 'a)
{
self.as_dyn_mut()
}
}
impl<'a, S: AsyncWrite + Unpin> DualStream<S>
where S: 'a
impl<S> DualStream<S>
{
/// Convert this stream into a plain (unencrypted) one.
///
/// # Notes
/// This method makes sure to `flush()` the encrypted stream before dropping the cipher.
///
/// # Panics
/// If dropping the cipher fails, this method will panic.
/// # Poisons
/// If this method panics, then the inner stream will be dropped and this instance will be set to `Poisoned`. This is (currently) irrecoverable.
pub async fn to_plain(&mut self) -> io::Result<()>
fn as_mut_ref(&mut self) -> &mut Box<DualStreamKind<S>>
{
if !self.is_encrypted() {
// No need to do anything
return Ok(());
// SAFETY: It is always initialised except exactly within the swap function
unsafe {
&mut *self.0.as_mut_ptr()
}
use tokio::prelude::*;
self.flush().await?;
self.to_plain_now();
Ok(())
}
/// Convert this stream into an encrypted one.
///
/// # Notes
/// This method makes sure to `flush()` the stream before constructing the cipher.
///
/// # Panics
/// If initialising the cipher fails, this method will panic.
/// # Poisons
/// If this method panics, then the inner stream will be dropped and this instance will be set to `Poisoned`. This is (currently) irrecoverable.
pub async fn to_crypt(&mut self, enc: Encryption, key: Key, iv: IV) -> io::Result<()>
fn as_ref(&self) -> &Box<DualStreamKind<S>>
{
// NOTE: We can't skip this like `to_plain()` does by checking if the instance is already at `Encrypted` as the key/IV may differ.
use tokio::prelude::*;
self.flush().await?;
self.to_crypt_now(enc, key, iv);
Ok(())
// SAFETY: It is always initialised except exactly within the swap function
unsafe {
&*self.0.as_ptr()
}
}
}
impl<'a, S: AsyncWrite> DualStream<S>
where S: Unpin + 'a
{
/// As an immutable dynamic object for `Unpin` streams.
pub fn as_dyn_unpin(&self) -> &(dyn AsyncWrite + Unpin + 'a)
/// Create explicit
pub fn new(k: DualStreamKind<S>) -> Self
{
match self {
Self::Plain(p) => p,
Self::Encrypted(e) => e,
_ => panic!("Poisoned")
}
Self(MaybeUninit::new(Box::new(k)))
}
/// As a mutable dynamic object for `Unpin` streams
pub fn as_dyn_unpin_mut(&mut self) -> &mut (dyn AsyncWrite + Unpin + 'a)
/// Consume into explicit (non-swappable) dual stream
pub fn into_inner(self) -> Box<DualStreamKind<S>>
{
match self {
Self::Plain(p) => p,
Self::Encrypted(e) => e,
_ => panic!("Poisoned")
let mut md = ManuallyDrop::new(self);
unsafe {
// We could just `read()` the pointer, but this is more semantically equivalent to moving the value, I think. I don't know whether it's more or less efficient.
mem::replace(&mut md.0, MaybeUninit::uninit()).assume_init()
}
}
}
impl<'a, S: AsyncWrite> DualStream<S>
where S: 'a
impl<S> Deref for DualStream<S>
{
/// Create a transparent wrapper
///
/// Identical to constructing the enum variant `Self::Plain` manually.
#[inline(always)] pub fn plain(stream: S) -> Self
{
Self::Plain(stream)
}
/// Construct an encrypting wrapper over this stream
///
/// # Panics
/// If constructing the cipher fails
#[inline] pub fn encrypt(stream: S, key: Key, iv: IV) -> Self
{
Self::Encrypted(AsyncSink::encrypt(stream, key, iv).expect("Initialising cipher failed"))
}
/// Construct a decrypting wrapper over this stream
///
/// # Panics
/// If constructing the cipher fails
#[inline] pub fn decrypt(stream: S, key: Key, iv: IV) -> Self
{
Self::Encrypted(AsyncSink::decrypt(stream, key, iv).expect("Initialising cipher failed"))
type Target = DualStreamKind<S>;
fn deref(&self) -> &Self::Target {
self.as_ref()
}
/// Construct an encrypting or decrypting wrapper over this stream
///
/// # Panics
/// If constructing the cipher fails
#[inline(always)] pub fn crypt(stream: S, enc: Encryption, key: Key, iv: IV) -> Self
{
match enc {
Encryption::Encrypt => Self::encrypt(stream, key, iv),
Encryption::Decrypt => Self::decrypt(stream, key, iv),
}
}
/// Is this stream set to encrypted?
#[inline] pub fn is_encrypted(&self) -> bool
{
if let Self::Encrypted(_) = self {
true
} else {
false
}
}
/// Is this stream in an invalid state?
#[inline(always)] pub fn is_poisoned(&self) -> bool {
if let Self::Poisoned = self {
true
} else {
false
}
}
impl<S> DerefMut for DualStream<S>
{
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut_ref()
}
}
/// Move out of self, and in turn poison this instance until self is assigned a proper value again.
#[inline(always)] fn poison(&mut self) -> Self
{
mem::replace(self, Self::Poisoned)
}
/// Immediately convert this stream into an encrypted one.
///
/// # Notes
/// Make sure to `flush()` the stream **before** calling this or data may be lost.
///
/// # Panics
/// If initialising the cipher fails, this method will panic.
/// # Poisons
/// If this method panics, then the inner stream will be dropped and this instance will be set to `Poisoned`. This is (currently) irrecoverable.
//TODO: in chacha20stream: Add `try_encrypt()`, `try_decrypt()` which both return `Result<Self, (S, Error)>`, to not lose the inner stream if the cipher fails to initialise.
#[inline] pub fn to_crypt_now(&mut self, enc: Encryption, key: Key, iv: IV)
impl<S> From<Box<DualStreamKind<S>>> for DualStream<S>
{
fn from(from: Box<DualStreamKind<S>>) -> Self
{
let inner = match self.poison() {
Self::Encrypted(enc) => enc.into_inner(),
Self::Plain(inner) => inner,
_ => panic!("Poisoned"),
};
*self = Self::Encrypted(match enc {
Encryption::Encrypt => AsyncSink::encrypt(inner, key, iv),
Encryption::Decrypt => AsyncSink::decrypt(inner, key, iv),
}.expect("Initialising cipher failed"));
Self(MaybeUninit::new(from))
}
}
/// Immediately convert this stream into a plain one
///
/// # Notes
/// Make sure to `flush()` the stream **before** calling this or encrypted data may be lost.
///
/// # Panics
/// If dropping the cipher fails, this method will panic.
/// # Poisons
/// If this method panics, then the inner stream will be dropped and this instance will be set to `Poisoned`. This is (currently) irrecoverable.
#[inline] pub fn to_plain_now(&mut self)
impl<S> From<DualStreamKind<S>> for DualStream<S>
{
fn from(from: DualStreamKind<S>) -> Self
{
*self = Self::Plain(match self.poison() {
Self::Plain(p) => p,
Self::Encrypted(e) => e.into_inner(),
_ => panic!("Poisoned"),
});
Self::new(from)
}
}
/// A mutable reference to the inner (plain) stream, whether this instance is set to encrypted or not.
pub fn inner_plain_mut(&mut self) -> &mut S
{
match self {
Self::Plain(p) => p,
Self::Encrypted(e) => e.inner_mut(),
_ => panic!("Poisoned")
}
}
/// A reference to the inner (plain) stream, whether this instance is set to encrypted or not.
pub fn inner_plain(&self) -> &S
impl<S> From<DualStream<S>> for Box<DualStreamKind<S>>
{
fn from(from: DualStream<S>) -> Self
{
match self {
Self::Plain(p) => p,
Self::Encrypted(e) => e.inner(),
_ => panic!("Poisoned")
}
from.into_inner()
}
}
/// Consume into the inner (plain) stream
#[inline] pub fn into_inner(self) -> S
{
match self {
Self::Plain(p) => p,
Self::Encrypted(e) => e.into_inner(),
_ => panic!("Poisoned")
}
}
/// As an immutable dynamic object
pub fn as_dyn(&self) -> &(dyn AsyncWrite + 'a)
{
match self {
Self::Plain(p) => p,
Self::Encrypted(e) => e,
_ => panic!("Poisoned")
}
}
/// As a mutable dynamic object
pub fn as_dyn_mut(&mut self) -> &mut (dyn AsyncWrite + 'a)
impl<S> From<DualStream<S>> for DualStreamKind<S>
{
fn from(from: DualStream<S>) -> Self
{
match self {
Self::Plain(p) => p,
Self::Encrypted(e) => e,
_ => panic!("Poisoned")
}
*from.into_inner()
}
}
impl<S: AsyncWrite> AsyncWrite for DualStream<S>
impl<S> Drop for DualStream<S>
{
fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize, io::Error>> {
let obj = unsafe {
self.map_unchecked_mut(|this| this.as_dyn_mut())
};
obj.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
let obj = unsafe {
self.map_unchecked_mut(|this| this.as_dyn_mut())
};
obj.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
let obj = unsafe {
self.map_unchecked_mut(|this| this.as_dyn_mut())
};
obj.poll_shutdown(cx)
fn drop(&mut self) {
// SAFETY: Value is always initialised except exactly within swap function
unsafe {
ptr::drop_in_place(self.0.as_mut_ptr())
}
}
}
#[cfg(test)]
mod tests
{
use tokio::prelude::*;
use chacha20stream::keygen;
#[tokio::test]
/// Write the whole `input` buffer encrypted, switch to plain, then write the first 5 bytes of `input` again.
async fn wrapper_construct()
{
let input = "Hello world!";
let backing = Vec::new();
let (key, iv) = keygen();
let written = {
let mut wrapper = super::DualStream::Plain(backing);
// Encrypting
wrapper.to_crypt(super::Encryption::Encrypt, key, iv).await.unwrap();
wrapper.write_all(input.as_bytes()).await.unwrap();
// Plain
wrapper.to_plain().await.unwrap();
wrapper.write_all(&input.as_bytes()[..5]).await.unwrap();
// Shutdown the stream and consume it.
wrapper.flush().await.unwrap();
wrapper.shutdown().await.unwrap();
wrapper.into_inner()
};
eprintln!("Output bytes: {:?}", written);
eprintln!("Output attempted string: {:?}", String::from_utf8_lossy(&written[..]));
}
// TODO: Write a test using Tokio's `duplex` to read and write encrypted bytes on 2 tasks, then compare the output to the input afterwards
}

@@ -1,279 +0,0 @@
//! Extensions and macros
use std::cell::RefCell;
use std::ptr;
#[macro_export] macro_rules! basic_enum {
($(#[$meta:meta])* $vis:vis $name:ident $(; $tcomment:literal)?: $($var:ident $(=> $comment:literal)?),+ $(,)?) => {
$(#[$meta])*
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)]
$(#[doc = $tcomment])?
$vis enum $name {
$(
$(#[doc = $comment])?
$var
),+
}
}
}
/// Create a `Yes` or `No` enum.
#[macro_export] macro_rules! bool_type {
($vis:vis $name:ident $(; $comment:literal)? => $yes:ident, $no:ident) => {
basic_enum!(#[repr(u8)] $vis $name $(; $comment)?: $yes => "# First variant\n\nYes/true", $no => "# Second variant\n\nNo/false");
impl From<bool> for $name
{
#[inline] fn from(from: bool) -> Self
{
if from {
Self::$yes
} else {
Self::$no
}
}
}
impl From<$name> for bool
{
#[inline] fn from(from: $name) -> Self
{
match from {
$name::$yes => true,
$name::$no => false,
}
}
}
impl $name
{
/// Create from a bool value.
#[inline] pub const fn new(from: bool) -> Self
{
if from {
Self::$yes
} else {
Self::$no
}
}
/// Is this false?
#[inline] pub const fn is_no(self) -> bool
{
!self.is_yes()
}
/// Is this true?
#[inline] pub const fn is_yes(self) -> bool
{
match self {
Self::$yes => true,
Self::$no => false,
}
}
/// Return Some(T) if self is true.
#[inline] pub fn some<T>(self, value: T) -> Option<T>
{
self.and_then(move || value)
}
/// Map this value
#[inline] pub fn map<F, T>(self, f: F) -> T
where F: FnOnce(bool) -> T
{
f(self.is_yes())
}
/// Run this closure if value is false
#[inline] pub fn or_else<F, T>(self, f: F) -> Option<T>
where F: FnOnce() -> T
{
if let Self::$no = self {
Some(f())
} else {
None
}
}
/// Run this closure if value is true
#[inline] pub fn and_then<F, T>(self, f: F) -> Option<T>
where F: FnOnce() -> T
{
if let Self::$yes = self {
Some(f())
} else {
None
}
}
/// Return `yes` if true and `no` if false
#[inline] pub fn either<T>(self, yes: T, no: T) -> T
{
self.and_either(move || yes, move || no)
}
/// Run closure `yes` if value is true, `no` if value is false.
#[inline] pub fn and_either<F, G, T>(self, yes: F, no: G) -> T
where F: FnOnce() -> T,
G: FnOnce() -> T,
{
match self {
Self::$yes => yes(),
Self::$no => no(),
}
}
}
};
($vis:vis $name:ident $(; $comment:literal)?) => {
$crate::bool_type!($vis $name $(; $comment)? => Yes, No);
}
}
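// A brief usage sketch of `bool_type!`; the `Ready` type below is hypothetical, not part of this diff.
bool_type!(pub Ready; "Is the value ready?");
fn describe(r: Ready) -> &'static str
{
// The generated type converts to/from `bool` and provides combinators such as `either`.
let _as_bool: bool = r.into();
r.either("ready", "not ready")
}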
/// Maximum number of bytes we'll allocate on the stack at runtime before using a heap-allocated buffer.
pub const STACK_SIZE_LIMIT: usize = 4096;
/// Allocate `size` bytes and pass the zeroed buffer to `f`. Allocates on the stack if `size` is at most `STACK_SIZE_LIMIT`, otherwise allocates on the heap.
pub fn alloca_limit<F, T>(size: usize, f: F) -> T
where F: FnOnce(&mut [u8]) -> T
{
if size > STACK_SIZE_LIMIT {
thread_local! {
static BUFFER: RefCell<Vec<u8>> = RefCell::new(vec![0u8; STACK_SIZE_LIMIT*2]);
}
BUFFER.with(move |buf| {
// If the borrow fails then `f` has recursively called into this function, so for that we allocate a new buffer instead of reusing this static one.
if let Ok(mut buf) = buf.try_borrow_mut() {
if buf.len() < size {
buf.resize(size, 0);
}
let res = f(&mut buf[..size]);
bytes::blank(&mut buf[..size]);
res
} else {
f(&mut vec![0u8; size])
}
})
} else {
stackalloc::alloca_zeroed(size, f)
// I don't think this is okay to do.
//stackalloc::alloca(size, move |buf| f(unsafe { stackalloc::helpers::slice_assume_init_mut(buf) }))
}
}
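// A minimal usage sketch of `alloca_limit` (hypothetical caller): requests of at most
// `STACK_SIZE_LIMIT` bytes are served from the stack, larger ones from the zeroed,
// reusable thread-local heap buffer.
fn checksum(data: &[u8]) -> u8
{
alloca_limit(data.len(), |scratch| {
// `scratch` is a zeroed buffer of exactly `data.len()` bytes.
scratch.copy_from_slice(data);
scratch.iter().fold(0u8, |acc, b| acc.wrapping_add(*b))
})
}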
/// Create an accessor method for a field in a structure.
///
/// The supported accessor types are: `ref`, `mut`, and `move`.
#[macro_export] macro_rules! accessor {
($vis:vis ref $name:ident -> $ty:ty => $internal:ident $(; $comment:literal)?) => {
$(#[doc=$comment])?
#[inline] $vis fn $name(&self) -> &$ty {
&self.$internal
}
};
($vis:vis ref $name:ident -> $ty:ty => $internal:tt $(; $comment:literal)?) => {
$(#[doc=$comment])?
#[inline] $vis fn $name(&self) -> &$ty {
&self.$internal
}
};
($vis:vis mut $name:ident -> $ty:ty => $internal:ident $(; $comment:literal)?) => {
$(#[doc=$comment])?
#[inline] $vis fn $name(&mut self) -> &mut $ty {
&mut self.$internal
}
};
($vis:vis mut $name:ident -> $ty:ty => $internal:tt $(; $comment:literal)?) => {
$(#[doc=$comment])?
#[inline] $vis fn $name(&mut self) -> &mut $ty {
&mut self.$internal
}
};
($vis:vis move $name:ident -> $ty:ty => $internal:ident $(; $comment:literal)?) => {
$(#[doc=$comment])?
#[inline] $vis fn $name(&self) -> $ty {
self.$internal
}
};
($vis:vis move $name:ident -> $ty:ty => $internal:tt $(; $comment:literal)?) => {
$(#[doc=$comment])?
#[inline] $vis fn $name(&self) -> $ty {
self.$internal
}
};
}
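// A short sketch of `accessor!`; the `Counter` struct is hypothetical and only illustrates the
// three accessor kinds (`ref`, `mut`, `move`).
struct Counter { count: usize }
impl Counter
{
accessor!(pub ref count_ref -> usize => count; "Borrow the counter.");
accessor!(pub mut count_mut -> usize => count; "Mutably borrow the counter.");
accessor!(pub move count -> usize => count; "Copy the counter out.");
}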
#[macro_export] macro_rules! lazy_format {
($msg:literal $($tt:tt)*) => {
{
use ::std::fmt::{self, Write, Formatter};
use ::std::sync::Mutex;
use ::std::io;
let pfn = move |fmt| {
write!(fmt, $msg $($tt)*)?;
let mut sfmt = String::new();
write!(&mut sfmt, $msg $($tt)*)?;
Ok(sfmt)
};
enum LazyFormatInner<F>
{
//todo: redo this entire thing
Pending(F),
Complete(String),
Error(fmt::Error),
Panicked,
}
struct LazyFormat<F>(Mutex<LazyFormatInner<F>>);
impl<F: FnOnce(&mut fmt::Formatter<'_>) -> io::Result<String>> fmt::Display for LazyFormat<F>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
//todo: redo this entire thing
/*
let mut inner = self.0.lock().unwrap();
let this = std::mem::replace(inner, LazyFormatInner::Panicked);
//TODO: impl fmt::Write wrapper that multi-writes to 2 outputs
let string = match this {
LazyFormatInner::Pending(func) => func(f),
LazyFormatInner::Complete(string) => write!(f, "{}", string).map(move |_| string),
LazyFormatInner::Error(err) => return Err(err),
LazyFormatInner::Panicked => panic!(),
};
match string {
Err(err) => {
*inner = LazyFormatInner::Error(err),
},
}*/
todo!("redo this entire thing")
}
}
}
}
}
#[cfg(not(feature="log"))] #[macro_export] macro_rules! trace {
($fmt:literal $($tt:tt)*) => {
{
((), $($tt)*);
}
}
}
pub mod bytes
{
use super::*;
/// `bzero` this slice
pub fn blank(slice: &mut [u8])
{
unsafe {
ptr::write_bytes(slice.as_mut_ptr(), 0, slice.len());
}
}
}
mod slice;
pub use slice::*;

@@ -1,200 +0,0 @@
//! Slice tools
use super::*;
use std::marker::PhantomData;
use std::{slice, ptr, mem};
/// For an untyped `SliceMeta<T>`, this is the default `T`.
///
/// `SliceMeta<AnySlice>` is slice metadata that can **unsafely** coerce to slice metadata for any other type.
/// And any `SliceMeta<T>` can **safely** coerce to `SliceMeta<AnySlice>`.
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)]
pub enum AnySlice { }
/// Metadata of a slice `[T]`.
///
/// # Usage
/// This may or may not point to a valid slice. The address may or may not be null.
/// It is intended for comparing pointer-identity of slice fat pointers.
///
/// # Layout
/// This type has a `repr(C)` memory layout of 2 `size_t`s and is safe for FFI use.
//TODO: Manually force impls of all these regardless of whether `T` impls them (derive does not do this)
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy, Default)]
#[repr(C)]
pub struct SliceMeta<T = AnySlice>
{
/// The address of the first element of the slice
/// # Notes
/// This can be `0` if the `SliceMeta` is initialised to `Default`, but a null-pointing `SliceMeta` is never safe to construct a real slice out of. The address may also be dangling, if the slice is 0 sized.
pub address: usize,
/// The length of the slice.
pub length: usize,
_slice: PhantomData<[T]>,
}
impl<T, U: ?Sized> PartialEq<U> for SliceMeta<T>
where U: AsRef<[T]>
{
#[inline] fn eq(&self, other: &U) -> bool
{
SliceMeta::from_slice_any(other.as_ref()) == self.as_any()
}
}
impl<T, U: AsRef<[T]>> From<U> for SliceMeta<T>
{
fn from(from: U) -> Self
{
Self::from_slice(from.as_ref())
}
}
impl SliceMeta<AnySlice>
{
/// Create a new `SliceMeta` representing this slice of any type.
pub fn from_slice_any<T>(slice: &[T]) -> Self
{
Self::new(slice.as_ptr() as usize, slice.len())
}
/// Is this instance referring to the data and length of this slice (regardless of type)?
pub fn is_slice_any<T>(&self, other: &[T]) -> bool
{
*self == Self::from_slice_any(other)
}
}
impl<T> SliceMeta<T>
{
#[inline(always)] fn assert_not_null(&self)
{
#[inline(never)] fn panic_null()
{
panic!("Address cannot be null");
}
if self.address == 0 {
panic_null();
}
}
/// Remove the type information from this instance.
#[inline(always)] pub const fn as_any(&self) -> SliceMeta<AnySlice>
{
SliceMeta {
address: self.address,
length: self.length,
_slice: PhantomData,
}
}
/// Is this slice metadata instance pointing to a null address? (i.e. undefined/empty).
#[inline(always)] pub const fn is_null(self) -> bool {
self.address == 0
}
/// The size in bytes of the slice pointed to at `address`.
#[inline] pub const fn length_bytes(self) -> usize
{
mem::size_of::<T>() * self.length
}
/// Is this instance referring to the data and length of this slice?
pub fn is_slice(&self, other: &[T]) -> bool
{
self.as_any() == Self::from_slice(other).as_any()
}
/// Create a new `SliceMeta` representing this slice.
#[inline] pub fn from_slice(slice: &[T]) -> Self
{
Self::new(slice.as_ptr() as usize, slice.len())
}
/// Create a new `SliceMeta` with these parameters.
#[inline] pub const fn new(address: usize, length: usize) -> Self
{
Self {
address,
length,
_slice: PhantomData,
}
}
/// Returns `address` as a raw pointer.
///
/// The pointer will be null if `address` is 0.
#[inline(always)] pub const fn as_ptr(self) -> *const T
{
self.address as *const T
}
/// Returns `address` as a mutable raw pointer.
///
/// The pointer will be null if `address` is 0.
#[inline(always)] pub const fn as_mut_ptr(self) -> *mut T
{
self.address as *mut T
}
/// Create a raw slice from the metadata.
///
/// # Panics
/// If `address` is 0.
#[inline] pub fn as_raw_slice(self) -> *const [T]
{
self.assert_not_null();
ptr::slice_from_raw_parts(self.address as *const T, self.length)
}
/// Create a mutable raw slice from the metadata.
///
/// # Panics
/// If `address` is 0.
#[inline] pub fn as_raw_slice_mut(self) -> *mut [T]
{
self.assert_not_null();
ptr::slice_from_raw_parts_mut(self.address as *mut T, self.length)
}
/// Create a slice from the metadata.
///
/// This operation is unsafe.
/// # Safety
/// * `address` must be properly aligned and point to a valid `length` number of elements of `T`. If `length` is 0, the pointer can be dangling, but it cannot be null.
/// * Rust's slice requirements must be met using this metadata as a real slice.
/// * The lifetime chosen for the returned slice must be valid for the data pointed to by `address`.
///
/// # Panics
/// If `address` is 0.
#[inline] pub unsafe fn as_slice<'a>(self) -> &'a [T]
{
self.assert_not_null();
slice::from_raw_parts(self.address as *const T, self.length)
}
/// Create a mutable slice from the metadata.
///
/// This operation is unsafe.
/// # Safety
/// * `address` must be properly aligned and point to a valid `length` number of elements of `T`. If `length` is 0, the pointer can be dangling, but it cannot be null.
/// * Rust's slice requirements must be met using this metadata as a real slice.
/// * The lifetime chosen for the returned slice must be valid for the data pointed to by `address`.
///
/// # Panics
/// If `address` is 0.
#[inline] pub unsafe fn as_slice_mut<'a>(self) -> &'a mut [T]
{
self.assert_not_null();
slice::from_raw_parts_mut(self.address as *mut T, self.length)
}
//accessor!(pub move address -> usize => 0; "The address of the first element of the slice\n\n# Notes\nThis can be `0` if the `SliceMeta` is initialised to `Default`, but a null-pointing `SliceMeta` is never safe to construct a real slice out of. The address may also be dangling, if the slice is 0 sized.");
}
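// A brief sketch of the intended pointer-identity check; the function below is illustrative,
// not part of the diff.
fn same_buffer(last: SliceMeta<u8>, buf: &[u8]) -> bool
{
// Only the address and length of the fat pointer are compared; the data is never
// dereferenced, so this is fine even if `last` refers to a buffer that no longer exists.
last.is_slice(buf)
}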

@@ -1,29 +1,4 @@
#![allow(dead_code)]
#[macro_use] extern crate pin_project;
#[cfg(feature="log")] #[macro_use] extern crate log;
// Extensions & macros
#[macro_use] mod ext;
#[allow(unused_imports)] use ext::*;
pub mod dual;
// RSA crypto shit
mod crypt;
// Stream impls
mod stream;
pub use stream::{
AsyncStream,
Stream,
// EncryptedStream,
WriteHalf,
EncryptedWriteHalf,
ReadHalf,
EncryptedReadHalf,
};
mod dual;

@@ -1,430 +0,0 @@
use super::*;
use tokio::io::{AsyncWrite, AsyncRead};
use std::sync::Arc;
use openssl::symm::Crypter;
use openssl::error::ErrorStack;
use ::bytes::{Buf, BufMut};
use std::{
pin::Pin,
task::{Poll, Context},
io,
};
use crypt::{
RsaPublicKey,
RsaPrivateKey,
};
mod traits;
pub use traits::*;
mod exchange;
/// Combined Read + Write encryptable async stream.
///
/// The `AsyncRead` and `AsyncWrite` impls of this type forward to the backing impls for `S`.
///
/// # Exchange
/// A combined stream is the only way to exchange pubkeys, enabling the creation of encrypted read/write wrappers on the combined stream or its split halves.
#[pin_project]
#[derive(Debug)]
pub struct Stream<S>
{
meta: EncryptedStreamMeta,
#[pin] stream: S,
}
/// `Stream` with enabled encryption.
pub struct EncryptedStream<'a, S>
{
read_cipher: Crypter,
write_cipher: Crypter,
write_crypt_buf_ptr: SliceMeta<u8>,
write_crypt_buffer: Vec<u8>,
backing: &'a mut Stream<S>,
}
impl<Tx, Rx> Stream<Merge<Tx, Rx>>
where Tx: AsyncWrite,
Rx: AsyncRead
{
/// Exchange RSA keys through this stream.
pub async fn exchange(&mut self) -> io::Result<()>
{
todo!()
}
/// Merge an `AsyncWrite`, and `AsyncRead` stream into `Stream`.
pub fn merged(tx: Tx, rx: Rx) -> Self
{
Self {
meta: EncryptedStreamMeta::new(),
stream: Merge(tx, rx),
}
}
}
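// A hedged sketch of the intended call order (`exchange()` above is still `todo!()`):
// merge the two halves, perform the key exchange, then split for independent use.
async fn setup<Tx: AsyncWrite, Rx: AsyncRead>(tx: Tx, rx: Rx) -> io::Result<(WriteHalf<Tx>, ReadHalf<Rx>)>
{
let mut stream = Stream::merged(tx, rx);
// Exchange must happen before splitting; the halves only hold `meta` immutably (see below).
stream.exchange().await?;
Ok(stream.split())
}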
/*
impl<S> Stream<S>
where S: Split,
S::First: AsyncWrite,
S::Second: AsyncRead
{
/// Create a new `Stream` from two streams: one implementor of `AsyncWrite`, and one of `AsyncRead`.
pub fn new(tx: S::First, rx: S::Second) -> Self
{
Self {
meta: EncryptedStreamMeta {
them: None,
us: crypt::generate(),
},
stream: S::unsplit(tx, rx),
}
}
}
impl<S: AsyncStream> Stream<S>
{
/// Create a new `Stream` from an implementor of both `AsyncRead` and `AsyncWrite`.
pub fn new_single(stream: S) -> Self
{
Self {
meta: EncryptedStreamMeta {
them: None,
us: crypt::generate(),
},
stream,
}
}
/// Create a split by cloning `S`.
pub fn split_clone(self) -> (WriteHalf<S>, ReadHalf<S>)
where S: Clone
{
Stream {
stream: (self.stream.clone(), self.stream),
meta: self.meta
}.split()
}
}*/
impl<S> Split for Stream<S>
where S: Split,
S::First: AsyncWrite,
S::Second: AsyncRead
{
type First = WriteHalf<S::First>;
type Second = ReadHalf<S::Second>;
#[inline] fn split(self) -> (Self::First, Self::Second) {
self.split()
}
#[inline] fn unsplit(a: Self::First, b: Self::Second) -> Self {
Self::unsplit(a, b)
}
}
impl<S> Stream<S>
where S: Split,
S::First: AsyncWrite,
S::Second: AsyncRead
{
/// Combine a previously split `EncryptedStream`'s halves back into a single type.
///
/// # Panics
/// If the two halves didn't originally come from the same `EncryptedStream`.
pub fn unsplit(tx: WriteHalf<S::First>, rx: ReadHalf<S::Second>) -> Self
{
#[inline(never)] fn panic_not_ptr_eq() -> !
{
panic!("Cannot join halves from different splits")
}
if !Arc::ptr_eq(&tx.meta, &rx.meta) {
panic_not_ptr_eq();
}
let WriteHalf { meta: _meta, backing_write: tx } = tx;
drop(_meta);
let ReadHalf { meta, backing_read: rx } = rx;
let meta = Arc::try_unwrap(meta).unwrap();
Self {
meta,
stream: S::unsplit(tx, rx),
}
}
/// Split this `EncryptedStream` into a read and a write half.
pub fn split(self) -> (WriteHalf<S::First>, ReadHalf<S::Second>)
{
let meta = Arc::new(self.meta);
let (tx, rx) = self.stream.split();
(WriteHalf {
meta: meta.clone(),
backing_write: tx,
}, ReadHalf {
meta,
backing_read: rx,
})
}
}
impl<S: AsyncRead> AsyncRead for Stream<S>
{
#[inline] fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
self.project().stream.poll_read(cx, buf)
}
#[inline] fn poll_read_buf<B: BufMut>(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut B) -> Poll<io::Result<usize>>
where
Self: Sized, {
self.project().stream.poll_read_buf(cx, buf)
}
}
impl<S: AsyncWrite> AsyncWrite for Stream<S>
{
#[inline] fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize, io::Error>> {
self.project().stream.poll_write(cx, buf)
}
#[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().stream.poll_flush(cx)
}
#[inline] fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().stream.poll_shutdown(cx)
}
#[inline] fn poll_write_buf<B: Buf>(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut B) -> Poll<Result<usize, io::Error>>
where
Self: Sized, {
self.project().stream.poll_write_buf(cx, buf)
}
}
/// Inner RSA data for the encrypted stream's read+write halves
///
/// # Exchange / mutation
/// For split streams, this becomes immutable. If exchange has not been performed by the combined stream before splitting, then it is impossible for the split Read and Write halves to form `EncryptedReadHalf` and `EncryptedWriteHalf` instances on top of themselves.
/// The stream must be re-joined, exchanged, and then split again in this case.
/// Therefore exchange should happen before the original stream is split at all.
///
/// Only the combined stream can mutate this structure. The halves hold it behind an immutable shared reference.
#[derive(Debug)]
struct EncryptedStreamMeta
{
us: RsaPrivateKey,
them: Option<RsaPublicKey>,
}
impl EncryptedStreamMeta
{
/// Create a new meta with a newly generated private key.
#[inline(always)] pub fn new() -> Self
{
Self {
them: None,
us: crypt::generate(),
}
}
}
/// Writable half of `EncryptedStream`.
#[pin_project]
#[derive(Debug)]
pub struct WriteHalf<S>
where S: AsyncWrite
{
/// Shared reference to the RSA data of the backing stream, held by both Write and Read halves.
///
/// # Immutability of this metadata
/// Exchange can only happen on the combined Read+Write stream, so we don't need any mutability of `meta` here. Mutating `meta` happens only when it's owned by the combined stream (not in an `Arc`, which is only used to share it between the Read and Write halves).
meta: Arc<EncryptedStreamMeta>,
#[pin] backing_write: S,//Box<dual::DualStream<S>>,
}
#[pin_project]
pub struct EncryptedWriteHalf<'a, S>
where S: AsyncWrite,
{
/// Used to transform input `buf` into `self.crypt_buffer` before polling a write to `backing_write` with the newly filled `self.crypt_buffer`.
/// See below 2 fields.
cipher: Crypter,
/// Slice pointer of the input `buf` that corresponds to the transformed data in `crypt_buffer`.
/// Used to check whether a `Pending` write was cancelled, by comparing the input `buf` slice of the next write against the last one (whose metadata is stored in this field after a poll returns `Pending`).
///
/// # Usage
/// Before checking whether `crypt_buffer` is empty (and so whether we should re-poll the backing stream with it), we check the input `buf` against this value.
/// If they differ, then the `Pending` result from the last poll was discarded, and we clear the `crypt_buffer` and re-encrypt the new `buf` into it.
///
/// After a `Pending` write to `backing_write`, a `SliceMeta` of the input `buf` is written to this field.
/// If it was *not* a `Pending` poll result, then this field is reset to `Default` (an invalid `null` value).
///
/// This compares **pointer** and **length** identity of the slice (see `SliceMeta` for more information),
/// which is a faster way to detect a changed buffer than applying `Hash` to the whole buffer on every call to `poll_write` just to compare.
///
/// # Initialised
/// Initialised as `Default` (`null`).
/// Will be `null` if `crypt_buffer` is empty (i.e. a non-`Pending` poll result).
crypt_buf_ptr: SliceMeta<u8>,
/// Buffer written to when encrypting the input `buf`.
///
/// It is cleared after a `Ready` `poll_write()` on `backing_write`.
/// On a `Pending` write, this buffer is resized to only fit the transformed data written to it, and left as it is until the next call to `poll_write`.
///
/// If the poll was not discarded (see the field above), on the next call to this instance's `poll_write` we just immediately re-poll `backing_write` with this buffer.
/// If it was discarded, we reset this buffer and transform the new input `buf` into it as if the previous poll had returned `Ready`.
///
/// This exists so we don't have to transform the entire `buf` on every poll. We can transform it once and then wait until the write is `Ready` before discarding the data (`.clear()`) and allowing new data to fill the buffer on the next, fresh `poll_write`.
crypt_buffer: Vec<u8>,
#[pin] backing: &'a mut WriteHalf<S>,
}
/// **Forcefully** transform `buf` into a transformed buffer.
///
/// # Does **not** do these things
/// Doesn't check pointer identity of `buf` against `crypt_buf_ptr`; you should do that yourself.
/// Doesn't truncate `crypt_buffer` after transformation.
fn transform_into(crypt_buffer: &mut Vec<u8>, cipher: &mut Crypter, buf: &[u8]) -> Result<usize, ErrorStack>
{
if crypt_buffer.len() < buf.len() {
crypt_buffer.resize(buf.len(), 0);
}
let n = cipher.update(buf, &mut crypt_buffer[..buf.len()])?;
let _f = cipher.finalize(&mut crypt_buffer[..n])?;
debug_assert_eq!(_f, 0);
Ok(n)
}
impl<'a, S: AsyncWrite> EncryptedWriteHalf<'a, S>
{
#[inline(always)] fn forward(self: Pin<&mut Self>) -> Pin<&mut WriteHalf<S>>
{
unsafe {self.map_unchecked_mut(|this| this.backing)}
}
}
impl<'a, S: AsyncWrite> AsyncWrite for EncryptedWriteHalf<'a, S>
{
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize, io::Error>> {
let this = self.as_mut().project();
if this.crypt_buffer.is_empty() || this.crypt_buf_ptr != buf {
// Transform `buf` into self.crypt_buffer
let n = transform_into(this.crypt_buffer, this.cipher, buf)?;
*this.crypt_buf_ptr = buf.into();
this.crypt_buffer.truncate(n);
} // else { /* No need to transform */ }
let poll = unsafe {this.backing.map_unchecked_mut(|this| *this)}.poll_write(cx, &this.crypt_buffer[..]);
if poll.is_ready()
{
*this.crypt_buf_ptr = Default::default();
this.crypt_buffer.clear();
}
poll
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
let this = self.project();
let poll = unsafe {this.backing.map_unchecked_mut(|this| *this)}.poll_flush(cx);
if poll.is_ready() {
this.crypt_buffer.clear();
*this.crypt_buf_ptr = Default::default();
}
poll
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
let this = self.project();
let poll = unsafe {this.backing.map_unchecked_mut(|this| *this)}.poll_shutdown(cx);
if poll.is_ready() {
bytes::blank(&mut this.crypt_buffer[..]);
this.crypt_buffer.clear();
*this.crypt_buf_ptr = Default::default();
}
poll
}
}
/// Readable half of `EncryptedStream`.
#[pin_project]
#[derive(Debug)]
pub struct ReadHalf<S>
where S: AsyncRead
{
meta: Arc<EncryptedStreamMeta>,
/// chacha20_poly1305 decrypter for incoming reads from `S`
#[pin] backing_read: S,
}
#[pin_project]
pub struct EncryptedReadHalf<'a, S>
where S: AsyncRead,
{
cipher: Crypter,
#[pin] backing: &'a mut ReadHalf<S>,
}
impl<'a, S: AsyncRead> AsyncRead for EncryptedReadHalf<'a, S>
{
fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
let this = self.project();
let cipher = this.cipher;
let stream = unsafe {this.backing.map_unchecked_mut(|f| &mut f.backing_read)};
let res = stream.poll_read(cx,buf);
// Decrypt the buffer if the read succeeded
res.map(move |res| res.and_then(move |sz| {
alloca_limit(sz, move |obuf| -> io::Result<usize> {
// This `sz` and old `sz` should always be the same.
let sz = cipher.update(&buf[..sz], &mut obuf[..])?;
let _f = cipher.finalize(&mut obuf[..sz])?;
debug_assert_eq!(_f, 0);
// Copy decrypted buffer into output buffer
buf[..sz].copy_from_slice(&obuf[..sz]);
Ok(sz)
})
}))
}
}
impl<S: AsyncRead> AsyncRead for ReadHalf<S>
{
#[inline] fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
self.project().backing_read.poll_read(cx, buf)
}
#[inline] fn poll_read_buf<B: BufMut>(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut B) -> Poll<io::Result<usize>>
where
Self: Sized, {
self.project().backing_read.poll_read_buf(cx, buf)
}
}
impl<S: AsyncWrite> AsyncWrite for WriteHalf<S>
{
#[inline] fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize, io::Error>> {
self.project().backing_write.poll_write(cx, buf)
}
#[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().backing_write.poll_flush(cx)
}
#[inline] fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().backing_write.poll_shutdown(cx)
}
#[inline] fn poll_write_buf<B: Buf>(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut B) -> Poll<Result<usize, io::Error>>
where
Self: Sized, {
self.project().backing_write.poll_write_buf(cx, buf)
}
}

@@ -1,40 +0,0 @@
//! RSA and chacha20 key exchange methods
use super::*;
/*
use tokio::prelude::*;
pub async fn write_cckey<W: AsyncWrite + Unpin>(mut to: W, rsa: &crypt::RsaPublicKey, key: &crypt::Key, iv: &crypt::IV) -> std::io::Result<()>
{
let key = {
let mut buf = Vec::with_capacity(chacha20stream::key::KEY_SIZE);
crypt::rsa_encrypt(rsa, &mut buf, key.as_ref())?;
buf
};
to.write_all(&key[..]).await?; //TODO: Find size of `key` here.
to.write_all(iv.as_ref()).await?;
Ok(())
}
*/
/// A future that writes an RSA encrypted chacha20 key to a stream when awaited
//TODO: Find size of RSA encrypted chacha `Key`.
#[pin_project]
struct CCKeyWrite<'a, W: AsyncWrite>{
/// The bytes of an **already encrypted** chacha20 key.
enc_key: Vec<u8>,
/// A non-encrypted chacha20 IV.
iv: [u8; chacha20stream::key::IV_SIZE],
/// Stream to write the data to when this future is awaited.
#[pin] stream: &'a mut W,
}
//TODO: impl `Future` for CCKeyWrite should `write_all` both `enc_key` and `iv` to `stream` when `.await`ed.
impl<'a, W: AsyncWrite> Future for CCKeyWrite<'a, W>
{
type Output = io::Result<()>;
//todo: how do we `write_all` in `poll`? Implement it ourselves with a looping `poll_write` and `futures::ready!()`? (See the sketch below.)
}
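// A hedged sketch of the looping-`poll_write` pattern the todo above asks about; the helper is
// illustrative (not the author's design) and assumes `Pin`, `Context`, `Poll`, `io` and
// `AsyncWrite` are in scope as in the rest of this module.
fn poll_write_all_from<W: AsyncWrite>(mut stream: Pin<&mut W>, cx: &mut Context<'_>, buf: &[u8], written: &mut usize) -> Poll<io::Result<()>>
{
while *written < buf.len() {
match stream.as_mut().poll_write(cx, &buf[*written..]) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
Poll::Ready(Ok(0)) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())),
Poll::Ready(Ok(n)) => *written += n,
}
}
Poll::Ready(Ok(()))
}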

@@ -1,153 +0,0 @@
//! Stream traits
use super::*;
/// A type that implements both `AsyncWrite` and `AsyncRead`
pub trait AsyncStream: AsyncRead + AsyncWrite {}
impl<T: AsyncRead + AsyncWrite + ?Sized> AsyncStream for T {}
/// A type that can split itself into other types, and combine back from those types.
pub trait Split: Sized
{
/// First half of the split
type First;
/// Second half of the split
type Second;
fn split(self) -> (Self::First, Self::Second);
fn unsplit(a: Self::First, b: Self::Second) -> Self;
#[inline(always)] fn split_reverse(self) -> (Self::Second, Self::First)
{
let (tx, rx) = self.split();
(rx, tx)
}
#[inline(always)] fn unsplit_reverse(b: Self::Second, a: Self::First) -> Self
{
Self::unsplit(a, b)
}
}
impl<T, U> Split for (T, U)
{
type First = T;
type Second = U;
#[inline(always)] fn split(self) -> (Self::First, Self::Second) {
self
}
#[inline(always)] fn unsplit(a: Self::First, b: Self::Second) -> Self {
(a, b)
}
}
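// A minimal sketch of `Split` round-tripping (illustrative only): any `(T, U)` tuple splits into
// its halves and reassembles, mirroring how `Stream` uses `split`/`unsplit`.
fn round_trip<T, U>(pair: (T, U)) -> (T, U)
{
let (first, second) = pair.split();
<(T, U)>::unsplit(first, second)
}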
//TODO: Add trait `SplitRef` for exchange, I guess?
/// Merges a Read and a Write stream into an implementor of `Split`, `AsyncRead`, and `AsyncWrite`.
///
/// Used internally by `Stream`.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(super) struct Merge<Tx, Rx>(pub Tx, pub Rx);
impl<Tx, Rx> Merge<Tx, Rx>
{
fn rx(self: Pin<&mut Self>) -> Pin<&mut Rx>
{
unsafe {self.map_unchecked_mut(|this| &mut this.1)}
}
fn tx(self: Pin<&mut Self>) -> Pin<&mut Tx>
{
unsafe {self.map_unchecked_mut(|this| &mut this.0)}
}
}
impl<Tx, Rx> Split for Merge<Tx, Rx>
{
type First = Tx;
type Second = Rx;
#[inline] fn split(self) -> (Self::First, Self::Second) {
(self.0, self.1)
}
#[inline] fn unsplit(a: Self::First, b: Self::Second) -> Self {
Self(a, b)
}
}
impl<Tx, Rx> AsyncWrite for Merge<Tx, Rx>
where Tx: AsyncWrite
{
#[inline] fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize, io::Error>> {
self.tx().poll_write(cx, buf)
}
#[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.tx().poll_flush(cx)
}
#[inline] fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.tx().poll_shutdown(cx)
}
#[inline] fn poll_write_buf<B: Buf>(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut B) -> Poll<Result<usize, io::Error>>
where
Self: Sized, {
self.tx().poll_write_buf(cx, buf)
}
}
impl<Tx, Rx> AsyncRead for Merge<Tx, Rx>
where Rx: AsyncRead
{
#[inline] fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
self.rx().poll_read(cx, buf)
}
#[inline] fn poll_read_buf<B: BufMut>(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut B) -> Poll<io::Result<usize>>
where
Self: Sized, {
self.rx().poll_read_buf(cx, buf)
}
}
/*
pub(super) enum MaybeFullWrite<'a, S: AsyncWrite>
{
Full(&'a mut Stream<S>),
Half(&'a mut WriteHalf<S>),
}
pub(super) enum MaybeFullRead<'a, S: AsyncRead>
{
Full(&'a mut Stream<S>),
Half(&'a mut ReadHalf<S>),
}
impl<'a, S: AsyncRead> AsMut<dyn AsyncRead + 'a> for MaybeFullRead<'a, S>
{
#[inline] fn as_mut(&mut self) -> &mut (dyn AsyncRead + 'a)
{
self.as_dyn()
}
}
impl<'a, S: AsyncWrite> AsMut<dyn AsyncWrite + 'a> for MaybeFullWrite<'a, S>
{
#[inline] fn as_mut(&mut self) -> &mut (dyn AsyncWrite + 'a)
{
self.as_dyn()
}
}
impl<'a, S: AsyncRead> MaybeFullRead<'a, S>
{
#[inline(always)] fn as_dyn(&mut self) -> &mut (dyn AsyncRead + 'a)
{
match self {
Self::Full(f) => f,
Self::Half(h) => h,
}
}
}
impl<'a, S: AsyncWrite> MaybeFullWrite<'a, S>
{
#[inline(always)] fn as_dyn(&mut self) -> &mut (dyn AsyncWrite + 'a)
{
match self {
Self::Full(f) => f,
Self::Half(h) => h,
}
}
}
*/