Maybe okay state

serial
Avril 3 years ago
parent 13980b8cd2
commit 81a40587cb
Signed by: flanchan
GPG Key ID: 284488987C31F630

@ -1,7 +1,18 @@
use super::*;
use std::fs::{
File, Metadata,
};
/// A single unit of work: writes data for one file into a region of the
/// shared output map.
///
/// NOTE(review): still a stub (see `//todo` below) — field roles are inferred
/// from names and should be confirmed once the worker logic lands.
#[derive(Debug)]
pub struct Job
{
    //todo
    /// Open handle to the input file (presumably the read side — TODO confirm).
    fd: File,
    /// Metadata for `fd`; presumably used for its length — TODO confirm.
    stat: Metadata,
    /// We grab the slice of memory we write to from here
    state: state::State,
    /// From this offset
    offset: usize,
}
//TODO: job's work :^)

@ -2,7 +2,7 @@ use std::ops::RangeBounds;
use std::slice::SliceIndex;
use std::{fs,io};
use std::path::Path;
use std::ops::Drop;
use memmap::MmapMut;
#[derive(Debug)]
@ -12,6 +12,14 @@ pub struct MemoryMapMut
map: MmapMut,
}
// Flush dirty pages back to the backing file when the handle goes away.
// `drop` has no way to report failure, so the flush result is discarded
// on purpose (best-effort durability).
impl Drop for MemoryMapMut
{
    fn drop(&mut self)
    {
        // Best-effort sync; deliberately ignore any I/O error.
        self.map.flush().ok();
    }
}
impl AsRef<[u8]> for MemoryMapMut
{
fn as_ref(&self) -> &[u8]
@ -28,7 +36,6 @@ impl AsMut<[u8]> for MemoryMapMut
}
}
impl MemoryMapMut
{
#[inline] pub fn as_slice_mut(&mut self) -> &mut [u8]

@ -2,11 +2,55 @@ use std::sync::{
mpsc,
Mutex,
Arc,
PoisonError,
};
use std::fmt;
use std::error;
use std::cell::UnsafeCell;
use std::{slice::SliceIndex, ops::RangeBounds};
/// Interior-mutability cell around the shared memory map.
///
/// The `UnsafeCell` is what lets `State::slice` hand out `&mut [u8]` from a
/// shared reference; the aliasing discipline is pushed onto callers of that
/// `unsafe` method.
#[derive(Debug)]
#[repr(transparent)]
struct StateInner
{
    map: UnsafeCell<super::map::MemoryMapMut>,
}
// SAFETY: The whole point of this is internal mutability across thread boundaries.
unsafe impl Sync for StateInner{}
/// Cheaply clonable (`Arc`-backed) shared handle to the writable memory map.
#[derive(Debug, Clone)]
pub struct State(Arc<StateInner>);
impl State
{
    /// Create a new state from this map
    #[inline] pub fn new(map: super::map::MemoryMapMut) -> Self
    {
        Self(Arc::new(StateInner{map: UnsafeCell::new(map)}))
    }
    /// Try to consume this instance into its map. This will only succeed if there are no more references to the state than this one.
    ///
    /// On failure, the still-shared handle is returned back to the caller unchanged.
    #[inline] pub fn try_into_inner(self) -> Result<super::map::MemoryMapMut, Self>
    {
        match Arc::try_unwrap(self.0) {
            // We held the last reference: move the map out of the cell by value.
            Ok(v) => Ok(v.map.into_inner()),
            // Other clones are still alive: rebuild the handle from the Arc we got back.
            Err(e) => Err(Self(e)),
        }
    }
    /// Slice the map directly.
    ///
    /// # Safety
    /// The caller must make sure *no* slices of this map overlap with each other.
    // SAFETY: The map structure itself is never mutated, only its backing memory is accessed. This is fine, I think. If not, we can switch to using raw pointers and volatile writes. The backing memory itself is flushed to file when the map is dropped.
    pub unsafe fn slice<R: RangeBounds<usize> + SliceIndex<[u8], Output = [u8]>>(&self, range: R) -> &mut [u8]
    {
        // NOTE(review): this mints a `&mut [u8]` from `&self` via `UnsafeCell`;
        // two overlapping calls would create aliasing mutable slices — hence the
        // `# Safety` contract above. Also panics if `range` is out of bounds.
        let slice = (*(self.0.map.get())).as_slice_mut();
        &mut slice[range]
    }
}
/// A multi-consumer message receiver
#[derive(Debug)]

Loading…
Cancel
Save