Added type_hash<T>(): Creates a (mostly) unique SHA256 hash at compile time for a given type. Only the hash is given static storage duration; nothing else used to compute it is kept in the executable.

XXX: Added new dependency: `sha256_literal`. (TODO: Grab this, manage dependencies somehow..)

Added id.hh: `uuid` and related types & helpers.

Fortune for readpass's current commit: Future small blessing − 末小吉
master
Avril 2 years ago
parent 09d29eba24
commit 18a854e480
Signed by: flanchan
GPG Key ID: 284488987C31F630

@ -4,6 +4,7 @@
#ifdef __cplusplus #ifdef __cplusplus
#include <memory> #include <memory>
#include <pair>
#endif #endif
#include "constraints.hh" #include "constraints.hh"
@ -38,80 +39,82 @@ namespace alloc {
constexpr deleter(deleter&&) noexcept = default; constexpr deleter(deleter&&) noexcept = default;
constexpr deleter& operator=(deleter&&) noexcept = default; constexpr deleter& operator=(deleter&&) noexcept = default;
constexpr void operator()(void* restrict p) const noexcept { return apply_delete(p); } [[gnu::nonnull(2)]]
constexpr void operator()(void* restrict p) const noexcept { apply_delete(p); }
virtual ~deleter(); virtual ~deleter();
protected: protected:
explicit deleter(std::shared_ptr<anon_raw_secmem>&& p); explicit deleter(std::shared_ptr<anon_raw_secmem>&& p);
template<where::is_polymorphic T> [[gnu::nonnull(2)]]
constexpr static void* erase_type(T* ptr) noexcept { return dynamic_cast<void*>(ptr); } virtual std::pair<void*> apply_delete(void* restrict, bool = true) const noexcept;
template<where::is_polymorphic T> virtual void scramble_memory(std::initializer_list<std::pair<void*>> ptrs) const noexcept;
constexpr static void const* erase_type(T const* ptr) noexcept { return dynamic_cast<void const*>(ptr); } inline void scramble_memory(std::pair<void*> ptr) const noexcept { return scramble_memory({ ptr }); }
template<typename T>
constexpr static void* erase_type_unsafe(T* ptr) noexcept { return static_cast<void*>(ptr); }
template<typename T>
constexpr static T* add_type_unsafe(void* ptr) noexcept { return static_cast<T*>(ptr); }
template<typename T>
constexpr static void const* erase_type_unsafe(T const* ptr) noexcept { return static_cast<void const*>(ptr); }
template<typename T>
constexpr static T const* add_type_unsafe(void const* ptr) noexcept { return static_cast<T const*>(ptr); }
virtual void apply_delete(void* restrict, bool = true) const noexcept;
inline deleter* finalizer_group_id() const noexcept {
return m_group_ptr ?: static_cast<deleter*>(this);
}
// See dtor impl
void apply_finalizer_group(std::initializer_list<void*> = {}) noexcept;
void apply_finalizer_now(std::initializer_list<void*> = {}) noexcept;
// To prevent anon_raw_secmem being destroyed while there are still allocated values, the base class for the deleter for those values contains a refcount. e.g: `std::unique_ptr<T, deleter_for<T>>` where: `deleter_for<T> final : public deleter { virtual ~deleter_for(); ... };`, or `std::shared_ptr<T>`, where: `std::shared_ptr<value_with_deleter<T>>` aliases-ctor(`old, old->value_ptr()`) -> `std::shared_ptr<T>` // To prevent anon_raw_secmem being destroyed while there are still allocated values, the base class for the deleter for those values contains a refcount. e.g: `std::unique_ptr<T, deleter_for<T>>` where: `deleter_for<T> final : public deleter { virtual ~deleter_for(); ... };`, or `std::shared_ptr<T>`, where: `std::shared_ptr<value_with_deleter<T>>` aliases-ctor(`old, old->value_ptr()`) -> `std::shared_ptr<T>`
std::shared_ptr<anon_raw_secmem> m_manager_ref; std::shared_ptr<anon_raw_secmem> m_manager_ref;
private:
// Used to sync `this`'s group across copies.
deleter* m_group_ptr;
}; };
struct alloc_vt;
struct alloc_info; struct alloc_info;
struct alloc_value; struct alloc_value;
template<typename>
struct deleter_for;
template<typename>
struct deleter_for_value;
public:
FrozenAllocator(FrozenAllocator &&) noexcept;
FrozenAllocator& operator=(FrozenAllocator &&);
FrozenAllocator(const FrozenAllocator&) = delete;
FrozenAllocator& operator=(const FrozenAllocator&) = delete;
virtual ~FrozenAllocator();
private:
std::unique_ptr<_impl> inner_;
/// Manages everything about the actual allocations
//std::shared_ptr<anon_raw_secmem> m_manager;
/// A map of values inside the allocator. This is destroyed in reverse order, meaning all the living values are destroyed *before* `m_manager`'d destruction deallocates them.
//std::map<alloc_info, std::unique_ptr<alloc_value, deleter>> m_values;
};
template<typename T> template<typename T>
struct deleter_for : virtual deleter { struct FrozenAllocator::deleter_for
: virtual deleter
{
inline deleter_for(std::shared_ptr<anon_raw_secmem>&& m) inline deleter_for(std::shared_ptr<anon_raw_secmem>&& m)
: deleter(std::move(m)) {} : deleter(std::move(m)) {}
virtual ~deleter_for() = default; // This will use deleter's dtor to remove allocations. virtual ~deleter_for() = default; // This will use deleter's dtor to remove allocations.
inline void operator()(T* ptr) const noexcept { return apply_delete(deleter::erase_type_unsafe(ptr)); } [[gnu::nonnull(2)]]
inline void operator()(T* ptr) const noexcept { apply_delete(deleter::erase_type_unsafe(ptr)); }
protected: protected:
inline virtual void apply_delete_typed(T* ptr) const noexcept { inline virtual void apply_delete_typed(T* ptr) const noexcept {
ptr->~T(); ptr->~T();
} }
private: private:
inline void apply_delete(void* restrict up) const noexcept override final { [[gnu::nonnull(2)]]
inline std::pair<void*> apply_delete(void* restrict up) const noexcept override final {
if constexpr(std::is_trivially_destructible_v<T>) { if constexpr(std::is_trivially_destructible_v<T>) {
deleter::apply_delete(up); // If the dtor is trivial, ignore it and use default behaviour. return deleter::apply_delete(up); // If the dtor is trivial, ignore it and use default behaviour.
} else { } else {
deleter::apply_delete(up, false); // Unlock the memory, but do *not* add it to this deleter's finalizer group. auto pair = deleter::apply_delete(up, false); // Unlock the memory and remove the allocation from `anon_raw_secmem`, but do *not* bzero it.
apply_delete_typed(static_cast<T*>(up)); // Apply the destructor for `T` apply_delete_typed(static_cast<T*>(up)); // Apply the destructor for `T` to the alligned pointer `up`.
deleter::apply_finalizer_now({up}); // Get `anon_raw_secmem` to deallocate the memory *now*, instead of at destruction. deleter::scramble_memory(pair); // *now* bzero the unaligned pointer (full range, including alignment padding.)
return pair;
} }
} }
}; };
public: template<typename T>
FrozenAllocator(FrozenAllocator &&) noexcept; struct FrozenAllocator::deleter_for_value final : deleter_for<T> {
FrozenAllocator& operator=(FrozenAllocator &&); //TODO: Re-work this? Is it useful? Is it needed?
FrozenAllocator(const FrozenAllocator&) = delete; inline virtual ~deleter_for_value() {
FrozenAllocator& operator=(const FrozenAllocator&) = delete; deleter_for<T>::apply_delete(m_value_ptr);
virtual ~FrozenAllocator(); }
private: private:
std::unique_ptr<_impl> inner_; T* m_value_ptr;
/// Manages everything about the actual allocations
//std::shared_ptr<anon_raw_secmem> m_manager;
/// A map of values inside the allocator. This is destroyed in reverse order, meaning all the living values are destroyed *before* `m_manager`'d destruction deallocates them.
//std::map<alloc_info, std::unique_ptr<alloc_value, deleter>> m_values;
}; };
} }
#endif #endif

@ -0,0 +1,45 @@
#pragma once
// id.hh: `uuid` and related unique-identity types & helpers.
#include <compare>  // defaulted operator<=>
#include <concepts> // std::derived_from, std::convertible_to
#include <memory>   // std::addressof
#include <utility>  // std::forward

namespace id {
	/// A UUID value type (v4 generation not yet implemented).
	struct uuid {
		//TODO: constexpr uuid_v4 impl
		/// Create a new v4 UUID.
		/// `static`: `unique`'s constructors call this as `uuid::v4()` with no instance.
		constexpr static uuid v4() noexcept {
			//TODO: Generate new UUID at compile time?
			return {}; // Placeholder value until generation is implemented.
		}
		// Member-wise ordering/equality; also enables `unique_ref`'s defaulted `<=>`
		// (without this, that defaulted operator would be deleted).
		constexpr friend auto operator<=>(uuid const&, uuid const&) noexcept = default;
	};

	/// A lightweight, comparable handle to some `unique` object's id.
	struct unique_ref {
		uuid id;
		constexpr friend auto operator<=>(unique_ref const& a, unique_ref const& b) noexcept = default;
	};

	/// Mixin base class: each instance carries its own `uuid`.
	/// Copy construction/assignment mints a *fresh* id; moves keep the source's id.
	struct unique {
		constexpr unique() noexcept
			: m_id(uuid::v4()) {}
		// Copies are distinct objects, so they get a new id rather than duplicating the source's.
		constexpr unique(const unique&) noexcept
			: unique() {}
		constexpr unique(unique&&) noexcept = default;
		constexpr unique& operator=(unique&&) noexcept = default;
		constexpr unique& operator=(unique const& b) noexcept
		{ if(this != std::addressof(b)) m_id = uuid::v4(); return *this; }
		constexpr virtual ~unique() = default;

		// Comparisons between any two `unique`-derived objects compare ids only.
		template<std::derived_from<unique> T, std::derived_from<unique> U>
		constexpr friend bool operator==(T const& a, U const& b) noexcept { return static_cast<unique const&>(a) == static_cast<unique const&>(b); }
		template<std::derived_from<unique> T, std::derived_from<unique> U>
		constexpr friend bool operator!=(T const& a, U const& b) noexcept { return !(a == b); }
		constexpr friend bool operator==(unique const& a, unique const& b) noexcept { return a.unique_id() == b.unique_id(); }
		constexpr friend bool operator!=(unique const& a, unique const& b) noexcept { return a.unique_id() != b.unique_id(); }

		/// A copyable reference token for this object's id.
		constexpr unique_ref unique_id() const noexcept { return { m_id }; }
	protected:
		constexpr uuid& raw_id() noexcept { return m_id; }
		constexpr uuid const& raw_id() const noexcept { return m_id; }
		// `std::forward` (not `std::move`) preserves the argument's value category:
		// an lvalue uuid is copied, an rvalue is moved.
		constexpr explicit unique(std::convertible_to<uuid> auto&& id)
			: m_id(std::forward<decltype(id)>(id)) {}
	private:
		uuid m_id;
	};
}

@ -41,6 +41,8 @@
#define unrestrict __attribute__((__may_alias__)) #define unrestrict __attribute__((__may_alias__))
#define ASSUME(X) ({ if(!(X)) __builtin_unreachable(); (void)0; })
#if $CXX #if $CXX
#define $READ_ONCE_FROM(...) [&] () noexcept -> auto { using cvx_t = ::std::add_pointer_t<::std::add_const_t<::std::add_volatile_t<decltype(__VA_ARGS__)>>>; \ #define $READ_ONCE_FROM(...) [&] () noexcept -> auto { using cvx_t = ::std::add_pointer_t<::std::add_const_t<::std::add_volatile_t<decltype(__VA_ARGS__)>>>; \
return *static_cast<cvx_t>(::std::addressof(__VA_ARGS__)); \ return *static_cast<cvx_t>(::std::addressof(__VA_ARGS__)); \

@ -1,3 +1,29 @@
#pragma once #pragma once
//TODO: A very basic typeid: Using the constexpr __PRETTY_FUNCTION__ array slicing trick we used for `exopt::util::type_name<T>()`, we can extract the unmangled, de-aliased type name T, we can then hash that at comptime, and give it static storage: Therefore __PRETTY_FUNCTION__ will not be given storage, but the resulting (far smaller, but still *almost* unique to each type name) hash, will be. #include <sha256_literal.h>
#include "util.hh"
namespace types {
// A very basic typeid: Using the constexpr __PRETTY_FUNCTION__ array slicing trick we used for `exopt::util::type_name<T>()`, we can extract the unmangled, de-aliased type name T, we can then hash that at comptime, and give it static storage: Therefore __PRETTY_FUNCTION__ will not be given storage, but the resulting (far smaller, but still *almost* unique to each type name) hash, will be.
// Re-export `util::type_name` so users of this header get name + hash from one place.
using util::type_name;
// Holder template: the hash lives here as a `static inline` member, so it has
// static storage duration and a single definition across all TUs.
template<typename T>
struct type_hash_of {
// Give *only* the computed hash static storage duration, not the type name.
// NOTE(review): `sha256::compute` comes from the external `sha256_literal` dependency;
// assumes it is consteval/constexpr over the char-array literal — confirm when vendored.
constexpr static inline auto value = sha256::compute(util::type_name_literal<T>());
};
/// Returns a (semi) unique SHA256 hash representing the type `T`.
///
/// NOTE: This hash is given static storage duration, but nothing else used to calculate it is. Therefore executable bloat is not a concern for values obtained from this function.
template<typename T>
constexpr const auto& type_hash() noexcept {
// Bind to the static member; the reference (not a copy) is returned.
constexpr const auto& value = type_hash_of<T>::value;
return value;
}
// Variable-template shorthand for `type_hash<T>()`; `auto&` deduces a const
// reference since the referent is a constexpr (hence const) object.
template<typename T>
constexpr inline auto& type_hash_v = type_hash_of<T>::value;
}

@ -0,0 +1,80 @@
#pragma once
#include <array>
#include <concepts>    // std::convertible_to
#include <cstddef>
#include <cstdint>     // intptr_t
#include <memory>      // std::to_address -- there is no standard <pointer_traits> header
#include <string_view>
#include <utility>
namespace util {
	/// Absolute distance between two pointers (in elements of the pointee type),
	/// regardless of argument order.
	///
	/// NOTE: pointer subtraction is only well-defined when both pointers point
	/// into the same array/allocation — the caller must guarantee that.
	[[gnu::const, gnu::always_inline]]
	constexpr ptrdiff_t ptr_diff(const auto *p1, const auto *p2) noexcept
	{
		auto a1 = std::to_address(p1);
		auto a2 = std::to_address(p2);
		return ptrdiff_t(a1 < a2 ? a2 - a1 : a1 - a2);
	}
	/// Absolute distance between two integral pointer values (in bytes).
	[[gnu::const, gnu::always_inline]]
	constexpr ptrdiff_t ptr_diff(intptr_t p1, intptr_t p2) noexcept
	{
		return ptrdiff_t(p1 < p2 ? p2 - p1 : p1 - p2);
	}

	/// Copy the characters of `str` selected by `Idx...` into a NUL-terminated
	/// `std::array<char, sizeof...(Idx) + 1>`.
	///
	/// Fix: the terminator used to be '\n', which leaked into the views built by
	/// `type_name()` below; a '\0' terminator also makes `.data()` a valid C string.
	template<size_t... Idx>
	constexpr auto substring_literal(const auto& str, std::index_sequence<Idx...>) noexcept
		requires(requires(size_t n) {
			{ str[n] } noexcept -> std::convertible_to<char>;
		})
	{
		return std::array{ str[Idx]..., '\0' };
	}

	/// Slice the unmangled, de-aliased name of `T` out of `__PRETTY_FUNCTION__`
	/// at compile time; returns a NUL-terminated `std::array<char, N>` so that
	/// `__PRETTY_FUNCTION__` itself never needs static storage.
	template<typename T>
	constexpr auto type_name_literal() noexcept
	{
		constexpr std::string_view prefix {
#if defined(__clang__)
			"[T = "
#elif defined(__GNUC__)
			"with T = "
#else
			// MSVC (and anything else) is deliberately unsupported.
#error Unsupported compiler
#endif
		};
		constexpr std::string_view suffix {"]"};
		constexpr std::string_view function {__PRETTY_FUNCTION__};
		constexpr auto start = function.find(prefix) + prefix.size();
		constexpr auto end = function.rfind(suffix);
		// Also trips if `prefix` was not found (npos + size wraps past `end`).
		static_assert(start < end);
		constexpr std::string_view name = function.substr(start, (end - start));
		return substring_literal(name, std::make_index_sequence<name.size()>{});
	}

	/// Holder giving the name array static storage duration (one copy per T).
	template<typename T>
	struct [[gnu::visibility("internal")]] type_name_of {
		constexpr static inline auto value = type_name_literal<T>();
		[[gnu::const]]
		consteval operator std::string_view() const noexcept {
			constexpr auto& v = value;
			// size() - 1: exclude the trailing NUL terminator from the view.
			return std::string_view { v.data(), v.size() - 1 };
		}
	};
	/// The unmangled name of `T` as a compile-time `string_view`
	/// (terminator excluded, so `.size()` is the name's true length).
	template<typename T>
	constexpr auto type_name() noexcept -> std::string_view
	{
		constexpr auto& value = type_name_of<T>::value;
		return std::string_view { value.data(), value.size() - 1 };
	}
	/// Variable-template shorthand for `type_name<T>()`.
	template<typename T>
	constexpr inline auto type_name_v = type_name<T>();
}

@ -2,8 +2,24 @@
#include <map> #include <map>
#include <cstring>
#include <alloc.h> #include <alloc.h>
#include <util.hh>
#include <types.hh>
#include <macros.h>
template<typename T = int>
using type_hash_t = decltype(types::type_hash<int>());
template<typename T = int>
using type_hash_ref = std::remove_reference_t<type_hash_t<T>> const&;
template<typename T = int>
using type_hash_ptr = std::remove_reference_t<type_hash_t<T>> const*;
extern "C" { extern "C" {
base_allocator::~base_allocator() {} base_allocator::~base_allocator() {}
} }
@ -12,59 +28,120 @@ namespace alloc {
// Base class for all deallocations that happen within an `anon_raw_secmem` (managed, static, polymorphic, unmanaged, etc.) (TODO: See below, the base should be moved into the header so typed children can be templated...) // Base class for all deallocations that happen within an `anon_raw_secmem` (managed, static, polymorphic, unmanaged, etc.) (TODO: See below, the base should be moved into the header so typed children can be templated...)
FrozenAllocator::deleter::~deleter() { FrozenAllocator::deleter::~deleter() {
apply_finalizer_group(); //apply_finalizer_group(); // XXX: Do we actually need finalizer groups now? (see note below about `m_values`.
}
FrozenAllocator::deleter::deleter(std::shared_ptr<anon_raw_secmem>&& p)
: m_manager_ref(std::move(p))
, m_gorup_ptr(static_cast<deleter*>(this)) {} //TODO:XXX: Do we actually need finalizer groups now? (see note below about `m_values`.
void FrozenAllocator::deleter::apply_finalizer_group(std::initializer_list<void*> ptrs) noexcept {
if(ptrs.empty()) {
//TODO: auto&& fgroup = std::move(m_manager_ref->get_finalizer_group_for(finalizer_group_id())); // The *whole* finalizer group, *moved* from `m_manager_ref`. (which leaves finalizer_group for `this` empty now.)
//TODO: `bzero_explicit()` the memory from the allocations, then tell `anon_raw_secmem` to mark is as free.
} else {
//TODO: auto& fgroup = m_manager_ref->get_finalizer_group_ref_for(finalizer_group_id()); // The whole finalizer group, *referenced* from `m_manager_ref`. (which leaves finalizer_group for `this` intact, removal of pointers from the group will have to happen below manually.)
for(void* p : ptrs) {
//TODO: Do the same as above, but only for pointers registered for finalizing in `ptrs`.
}
} }
FrozenAllocator::deleter::deleter(std::shared_ptr<anon_raw_secmem>&& p)
: m_manager_ref(std::move(p)) {}
// , m_gorup_ptr(static_cast<deleter*>(this)) {}
void scramble_memory(std::initializer_list<std::pair<void*>> ptrs) const noexcept
{
for(auto [ps, pe] : ptrs) {
// ps: start of range, pe: end of range (or nullptr, if range must be looked up in `m_manager_ref`.)
if(UNLIKELY(!pe)) {
if(const auto& alloc = m_manager_ref->lookup_alloc(ps))
pe = alloc.range().second;
else continue;
} }
else if(UNLIKELY(ps == pe)) continue;
/// Instead of acting on pointers in `this`'s finalizer group, immediately get `anon_raw_secmem` to perform the deallocation and map-removal on these `ptrs`. intptr_t psa = std::to_address(ps),
void apply_finalizer_now(std::initializer_list<void*> ptrs) noexcept { pea = std::to_address(pe);
for(void* p : ptrs) { ASSUME(pea > psa);
//TODO: `m_manager_ref->immediate_dealloc(p)` explicit_bzero(ps, util::ptr_diff(psa, pea));
} }
} }
void FrozenAllocator::deleter::apply_delete(void* restrict p, bool add_group) const noexcept {
// TODO: allow the allocation (the memory corresponding to `p` from `m_manager_ref`) to be mutable (`munlock()` it.)
// TODO: Then, again through `m_manager_ref`, add to sequence `->register_finalizer_grouped(this, p)`.
/// TODO: Unless `add_group` is `false`, in which case, `p` is left to dangle.
[[gnu::nonnull(2)]]
std::pair<void*> FrozenAllocator::deleter::apply_delete(void* restrict p, bool clean) const noexcept {
//(XXX: NOTE: `m_values` map removal *causes* this to be invoked, i.e: A value removed from the map calls `deleter::operator()(uniq_p)`, which then calls `apply_delete(uniq_p, true)` //(XXX: NOTE: `m_values` map removal *causes* this to be invoked, i.e: A value removed from the map calls `deleter::operator()(uniq_p)`, which then calls `apply_delete(uniq_p, true)`
// Lookup the allocation info for pointer `p`.
const auto& alloc = m_manager_ref->lookup_alloc(p);
if(UNLIKELY(!alloc)) return {p, p};
// TODO: allow the allocation (the memory corresponding to `p` from `m_manager_ref`) to be mutable (`munlock()` it.)
// Get the full range (including alignment padding)
auto al_range = alloc.range();
// Then, if `clean == true` (default), `bzero_explicit()` the memory (range for `p` obtained from `m_manager_ref`.)
if(LIKELY(clean)) scramble_memory(al_range);
// Return the range (or the end pointer of `p`'s range for use in / as an iterator.)
return al_range;
} }
// This is the *true manager* of the allocation arena, when it is destroyed, the memory is cleared // This is the *true manager* of the allocation arena, when it is destroyed, the memory is cleared
struct FrozenAllocator::anon_raw_secmem { struct FrozenAllocator::anon_raw_secmem final
typedef FrozenAllocator::deleter deleter; : virtual id::unique
{
// Deleter that works on `alloc_value`s directly
struct deleter final : public deleter_for<alloc_value> {
//TODO: This ^
private:
void apply_delete_typed(alloc_value* ptr) const noexcept override {
//TODO: Destroy the typed object inside `ptr`. (XXX: Where should destructor info be kept? In `alloc_value` or `alloc_info`? I think `alloc_value`.
// Run `alloc_value` itself's dtor now.
deleter_for<alloc_value>::apply_delete_typed(ptr);
}
};
~anon_raw_secmem() { virtual ~anon_raw_secmem() {
//TODO: Clear and `munmap()` the used page(s) //TODO: Clear and `munmap()` the used page(s)
//XXX: Due to how this is managed via `shared_ptr<>`, it is UB for this to be called *before* all actual allocation of the memory are unallocated (via `deleter`, which they must *all* be managed by. //XXX: Due to how this is managed via `shared_ptr<>`, it is UB for this to be called *before* all actual allocation of the memory are unallocated (via `deleter`, which they must *all* be managed by.
} }
}; };
struct FrozenAllocator::alloc_info { struct FrozenAllocator::alloc_info {
id::uuid alloc_id; // ID of this allocation
id::unique_ref owner; // Reference to the unique ID of the `anon_raw_secmem` that manages this allocation.
type_hash_ptr type; // Static pointer to the `types::type_hash<T>()` result of the `T` that this allocation is. (Basic RTTI: `type_hash_t` should only exist in static storage, otherwise we use `type_hash_ref` or `type_hash_ptr`.)
struct {
size_t size, align;
} meta;
void* area_start;
void* area;
}; };
struct FrozenAllocator::alloc_value { struct FrozenAllocator::alloc_value {
typedef bool (*vt_ctor)(alloc_value* to, ...);
typedef bool (*vt_copy)(const alloc_value& from, alloc_value* to);
typedef bool (*vt_move)(alloc_value&& from, alloc_value* to);
typedef bool (*vt_assign_copy)(const alloc_value& from, alloc_value* to);
typedef bool (*vt_assign_move)(alloc_value&& from, alloc_value* to);
typedef bool (*vt_destroy)(alloc_value* obj);
typedef void* (*vt_this)(const alloc_value& from);
typedef bool (*vt_cast_into)(const alloc_value& a, alloc_value* b);
/// vt_type_info: if info not null: return if `a` is type referred to in `info`, else if `type` not null: set `*type` to be type of `a`, return false if that is not possible with the other arguments given.
typedef bool (*vt_type_info)(const alloc_value& a, const alloc_info* info, type_hash_ptr *restrict type);
//TODO: How to create? Overloaded operator placement new, inside `alloc_info` or `anon_raw_secmem`? Since the storage for these are allocated and managed *by* `anon_raw_secmem`, that would make the most sense I think... `alloc_info` holds the pointer to the specific allocation, its ID, etc; stuff for ordered allocation lookup. This is managed (entirely) by `anon_raw_secmem`, and `std::unique_ptr<alloc_value, anon_raw_secmem::deleter>` ensures it is not deleted naturally, but only removed from `anon_raw_secmem`.
//! Basic RTTI impl that holds type-erased, alignment padded values and gives out aligned void* pointers to it.
//! NOTE: This class does *not* apply any destructor when destroyed, `anon_raw_secmem::deleter` should be used for that.
struct {
struct {
vt_ctor _create;
vt_copy _copy;
vt_move _move;
vt_destroy _destroy;
vt_this _this;
vt_type_info _typeinfo;
// We don't need the others, they can be constructed by combining calls to these. (e.g.: assign_copy(new, old) = `destroy(old), copy(new, old)`.)
} vt;
size_t size, align;
} meta;
unsigned char data[];
}; };
struct FrozenAllocator::_impl { struct FrozenAllocator::_impl {
std::shared_ptr<anon_raw_secmem> m_manager; std::shared_ptr<anon_raw_secmem> m_manager;
std::map<alloc_info, std::unique_ptr<alloc_value, deleter>> m_values; std::map<alloc_info, std::unique_ptr<alloc_value, anon_raw_secmem::deleter>> m_values;
}; };
#define $CLASS FrozenAllocator #define $CLASS FrozenAllocator

Loading…
Cancel
Save