#pragma once

#include <array>
#include <bit>
#include <compare>
#include <exception>

#ifndef _RNGXX_IMPL
#define _RNGXX_COMMON_MINIMAL
#endif
#include "rngxx/internal/common.h"
#ifndef _RNGXX_IMPL
#undef _RNGXX_COMMON_MINIMAL
#endif

#define RNGXX_DCTOR_NAME(T) _rng__init_for_ ## T

#define CTOR_COPY(name) name(const name& copy)
#define CTOR_MOVE(name) name(name&& move)

// Ugly hack to work around C++'s name-lookup rules when overriding: overriding
// one overload of a virtual function hides the rest of the overload set, so
// re-expose it with a using-declaration.
#define RNG_OVERRIDE(ty, nm, rest) using Random::nm; ty nm rest override

namespace _rng__util {
	struct divp {
		inline consteval divp(usize n, usize o) : d(n / o), r(n % o){}

		const usize d,r;

		inline consteval bool exact() const { return d && !r; }
		inline consteval bool none() const { return !d; }

		template<typename T>
		inline static consteval divp type(usize bytes) { return divp(bytes, sizeof(T)); }
	};
}

struct InvalidRandomSample final : public std::exception {
	inline explicit InvalidRandomSample(f64 s) : std::exception(), value(s){}
	inline CTOR_COPY(InvalidRandomSample) : std::exception(copy), value(copy.value){}

	const f64 value;
};

struct ObjectMoved final : public std::exception{};

/// Interface for a simple random number generator.
///
/// # Must override
/// f64 _sample(); // A single random sample. The value must be in `0..=1`.
///
/// # Should override
/// void next_bytes(u8* ptr, usize n); // Random bytes. The default implementation falls back to _sample() for each byte, which is very inefficient.
/// void next_v32(u32* ptr, usize n); // Vectorised random output (4 bytes per iteration). If 32-bit vectorised output is possible for your implementation, you should override this (the default falls back to next_bytes(ptr, n)). NOTE: `n` is the number of `u32`s `ptr` points to, **not** the number of bytes.
/// void next_v64(u64* ptr, usize n); // Same as above, but for 8-byte (64-bit) iterations.
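///
/// # Example
/// A minimal conforming backend, as a sketch only: `MyCounterRng` is a
/// hypothetical name, and the splitmix64-style scramble is just one of many
/// ways to satisfy the `0..=1` contract of `_sample()`.
///
///	struct MyCounterRng final : public Random {
///		u64 state = 1;
///	protected:
///		f64 _sample() override
///		{
///			u64 z = (state += 0x9e3779b97f4a7c15u);
///			z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9u;
///			z = (z ^ (z >> 27)) * 0x94d049bb133111ebu;
///			z ^= z >> 31;
///			return (f64)(z >> 11) / (f64)(1ull << 53); // top 53 bits -> [0, 1)
///		}
///	};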
struct Random {
	template<typename T>
	struct iterator {
		//TODO: Implement this in another file (it has to live in the header because of the template :/)
		//XXX: C++ iterators are absolute jank. This is far harder than it should be.
		friend class Random;
		//TODO: Make this work with foreach() and the STL iterator APIs somehow

		inline CTOR_COPY(iterator) : rng(copy.rng){}
		inline CTOR_MOVE(iterator) : rng(move.rng) { *const_cast<Random**>(&move.rng) = nullptr; }
		inline virtual ~iterator(){}

		inline T next() { return _sample(); }

		inline iterator& operator++() { return *this; }
		inline iterator& operator++(int) { return *this; }
		inline T operator*() { return next(); }

		friend auto operator<=>(const iterator&, const iterator&) = default;
	protected:
		virtual T _sample()
		{
			if (LIKELY(rng)) return rng->next<T>();
			else throw ObjectMoved();
		}
		virtual inline void _init() {}
	private:
		inline explicit iterator(Random& rng) : rng(&rng) { _init(); }

		Random* const rng;
	};
public:
	inline Random(){}
	inline virtual ~Random(){}

	inline f64 next_f64() { return sample(); }
	inline f32 next_f32() { return (f32)sample(); }

	virtual bool next_bool();

#define NDEF(t) inline virtual t next_ ## t() { return next_ ## t(_max_ ## t()); }
#define NDEFF(n) NDEF(i ## n) NDEF(u ## n)
	NDEFF(8)
	NDEFF(16)
	NDEFF(32)
	NDEFF(64)
#undef NDEFF
#undef NDEF

	virtual void next_u8(u8* a, usize n);
	virtual void next_i8(i8* a, usize n);
	virtual void next_u16(u16* a, usize n);
	virtual void next_i16(i16* a, usize n);
	virtual void next_u32(u32* a, usize n);
	virtual void next_i32(i32* a, usize n);
	virtual void next_u64(u64* a, usize n);
	virtual void next_i64(i64* a, usize n);

	virtual i8 next_i8(i8 max);
	i8 next_i8(i8 min, i8 max);
	virtual u8 next_u8(u8 max);
	u8 next_u8(u8 min, u8 max);
	virtual u16 next_u16(u16 max);
	u16 next_u16(u16 min, u16 max);
	virtual i16 next_i16(i16 max);
	i16 next_i16(i16 min, i16 max);
	virtual i32 next_i32(i32 max);
	i32 next_i32(i32 min, i32 max);
	virtual i64 next_i64(i64 max);
	i64 next_i64(i64 min, i64 max);
	virtual u32 next_u32(u32 max);
	u32 next_u32(u32 min, u32 max);
	virtual u64 next_u64(u64 max);
	u64 next_u64(u64 min, u64 max);

	virtual inline f32 next_f32(f32 max) { return next_f32() * max; }
	inline f32 next_f32(f32 min, f32 max) { return min + next_f32(max-min); }
	virtual inline f64 next_f64(f64 max) { return next_f64() * max; }
	inline f64 next_f64(f64 min, f64 max) { return min + next_f64(max-min); }

	virtual void next_bytes(u8* bytes, usize n);

	template<usize N>
	inline void next_bytes(u8 (&a)[N]) { _next_bytes<N>(a); }
	template<usize N>
	inline void next_bytes(std::array<u8, N>& ar)
	{
		// XXX: this doesn't seem to work (aliasing issues?)
		_next_bytes<N>(&ar[0]);
	}

	template<typename T>
	inline T next()
	{
		std::array<u8, sizeof(T)> arr;
		next_bytes(arr);
		return std::bit_cast<T>(arr);
	}
	template<typename T>
	inline T next(T min, T max);
	template<typename T>
	inline T next(T max);

	//TODO: An iterator that yields `next()` forever.
	template<typename T>
	inline iterator<T> iter() { return iterator<T>(*this); }

	template<typename T>
	constexpr inline T max_for() const;
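	// For reference: max_for<T>() (specialised after the class body) forwards to
	// the matching _max_* member below, so e.g. next_u16() with no arguments is
	// equivalent to next_u16(UINT16_MAX).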
protected:
	//TODO: Should we have _min_* functions too? or just keep using 0 as the lower bound for next_*(..max)? I think use 0...
#define MAX(T, M) constexpr inline virtual T _max_ ## T() const { return M; }
#define MAXX(n) MAX(u ## n, UINT ## n ## _MAX) MAX(i ## n, INT ## n ## _MAX)
	MAXX(8)
	MAXX(16)
	MAXX(32)
	MAXX(64)
#undef MAXX
#undef MAX
protected:
	//constexpr inline virtual i8 _max() const { return 100; } // use the limits.h constants instead.

	// Vectorised versions of `next_bytes()`. These fall back to it if they are
	// not overridden, but if the implementation has a more efficient way of
	// generating 4/8 bytes of random data at a time it should override these.
	//
	// These should produce fully random (not bounded or weighted) output
	// spanning the whole width of the integer type they take.
	virtual void next_v64(u64* p, usize n);
	virtual void next_v32(u32* p, usize n);

	// Main sample function. Must return a value in 0..=1.
	// If nothing else is overridden, this value is used to derive everything else.
	// It is recommended to also override `next_bytes()`, however.
	virtual f64 _sample() = 0;
	f64 sample();
private:
	// Fill the N-byte buffer `a` in the largest chunks available: whole u64
	// blocks first, then a u32 block, then single bytes for the remainder.
	// e.g. N == 13 -> one next_v64() block (8 bytes), one next_v32() block
	// (4 bytes), then next_bytes() for the final byte.
	template<usize N>
	inline void _next_bytes(u8* a)
	{
		using namespace _rng__util;

		constexpr const auto rem64 = divp::type<u64>(N);
		u8* ptr = a;
		if constexpr(rem64.d) {
			next_v64(reinterpret_cast<u64*>(ptr), rem64.d);
			ptr += rem64.d * sizeof(u64);
		}
		constexpr const auto rem32 = divp::type<u32>(rem64.r);
		if constexpr(rem32.d) {
			next_v32(reinterpret_cast<u32*>(ptr), rem32.d);
			ptr += rem32.d * sizeof(u32);
		}
		if constexpr(rem32.r) {
			next_bytes(ptr, rem32.r);
		}
	}
};

#define DEFTT(n) DEFT(i ## n) DEFT(u ## n)

#define DEFT(T) template<> inline T Random::max_for< T >() const { return _max_ ## T(); }
#define DEF \
	DEFTT(8) \
	DEFTT(16) \
	DEFTT(32) \
	DEFTT(64) \
	//DEFTT(128)
DEF
#undef DEFT

#define DEFT(T) template<> inline T Random::next< T >() { return next_ ## T(); } \
	template<> inline T Random::next< T >(T min, T max) { return next_ ## T(min, max); } \
	template<> inline T Random::next< T >(T max) { return next_ ## T(max); }

template<> inline bool Random::next<bool>() { return next_bool(); }
//template<> inline f64 Random::next<f64>() { return next_f64(); }
//template<> inline f32 Random::next<f32>() { return next_f32(); }
DEFT(f32)
DEFT(f64)
DEF
#undef DEF
#undef DEFTT
#undef DEFT

#undef CTOR_COPY
#undef CTOR_MOVE
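// Usage sketch (deliberately disabled): how the typed helpers compose for any
// backend. `rngxx_example` is illustrative only and assumes the integer/float
// typedefs (u8, u16, u32, u64, f64) provided by rngxx/internal/common.h.
#if 0
void rngxx_example(Random& rng)
{
	f64 unit = rng.next_f64();        // uniform sample in 0..=1 via sample()
	u32 bounded = rng.next_u32(1, 6); // bounded draw between 1 and 6
	u64 full = rng.next<u64>();       // forwards to next_u64() (effectively full range)

	u8 buf[13];
	rng.next_bytes(buf); // one next_v64() block, one next_v32() block, 1 byte

	auto it = rng.iter<u16>(); // endless stream; each *it is a fresh next<u16>()
	u16 first = *it;

	(void)unit; (void)bounded; (void)full; (void)first;
}
#endif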