C API start

capi
Avril 3 years ago
parent f193acd2ea
commit d51eb30b6b
Signed by: flanchan
GPG Key ID: 284488987C31F630

@ -1,10 +1,5 @@
-#pragma once
-#include <bit>
-#include <stdexcept>
-#include <array>
-#include <climits>
+#ifndef _RNGXX_H
+#define _RNGXX_H
 #ifndef _RNGXX_IMPL
 #define _RNGXX_COMMON_MINIMAL
@ -14,207 +9,20 @@
 #undef _RNGXX_COMMON_MINIMAL
 #endif
[… remaining removed lines: the old C++ Random interface, moved verbatim into the new rngxx.hpp shown below …]
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct Random rng_t;
+
+enum rng_kind {
+	RNG_KIND_CRAND,
+};
+
+extern rng_t* rng_new(enum rng_kind kind, u64 seed[static restrict 1]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RNGXX_H */
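After this change the header is the C-facing half of the split (its guard is _RNGXX_H, and the C API implementation further down includes it as <rngxx.h>): it exposes an opaque rng_t handle (an incomplete struct Random from C's point of view), a backend selector enum rng_kind whose only member so far is RNG_KIND_CRAND, and a single factory, rng_new(). The parameter written as u64 seed[static restrict 1] is C99 notation documenting that the caller must pass a valid pointer, not aliased elsewhere, to at least one u64. A minimal consumption sketch from the C side, assuming the u64 alias comes from rngxx's common header; note that this commit exports no destructor or next-value functions yet, so the handle is simply leaked here:

#include <rngxx.h>

int main(void)
{
	u64 seed = 12345;                            /* rng_new() reads exactly seed[0] */
	rng_t* rng = rng_new(RNG_KIND_CRAND, &seed); /* returns NULL for an unknown kind */
	return rng ? 0 : 1;                          /* no destructor is exported yet, so the handle is deliberately leaked */
}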

@ -0,0 +1,220 @@
#pragma once
#include <bit>
#include <stdexcept>
#include <array>
#include <climits>
#ifndef _RNGXX_IMPL
#define _RNGXX_COMMON_MINIMAL
#endif
#include "rngxx/internal/common.h"
#ifndef _RNGXX_IMPL
#undef _RNGXX_COMMON_MINIMAL
#endif
#define CTOR_COPY(name) name(const name& copy)
#define CTOR_MOVE(name) name(name&& move)
// ugly hack to get around the absolutely retarded name lookup restrictions when overriding
#define RNG_OVERRIDE(ty, nm, rest) using Random::nm; ty nm rest override
namespace _rng__util {
	struct divp
	{
		inline consteval divp(usize n, usize o)
			: d(n / o),
			  r(n % o){}
		const usize d,r;

		inline consteval bool exact() const { return d && !r; }
		inline consteval bool none() const { return !d; }

		template<typename T>
		inline static consteval divp type(usize bytes) { return divp(bytes, sizeof(T)); }
	};
}

struct InvalidRandomSample final : public std::exception {
	inline explicit InvalidRandomSample(f64 s) : std::exception(), value(s){}
	inline CTOR_COPY(InvalidRandomSample) : std::exception(copy), value(copy.value){}

	const f64 value;
};

struct ObjectMoved final : public std::exception{};

/// Interface for a simple random number generator
///
/// # Must override
/// f64 _sample() // A representation of a random range. The value must be between `0..=1`
///
/// # Should override
/// void next_bytes(u8* ptr, usize n); // Random bytes. The default implementation falls back to _sample() for each byte. This is very inefficient.
///
/// void next_v32(u32* ptr, usize n); // Vectorised random bytes (4 bytes per iteration.) If 32-bit vectorised outputs are possible for your implementation, you should override this (falls back to next_bytes(ptr, n)). NOTE: `n` is the number of `u32`s `ptr` points to, **not** the number of bytes.
/// void next_v64(u64* ptr, usize n); // Same as above, but for 8-byte iterations (64 bits.)
struct Random
{
	template<typename T>
	struct iterator { //TODO: Implement this in another file (has to be header because of template :/)
		friend class Random;
		//TODO: Make this work with foreach(), and STL iterator APIs somehow

		inline CTOR_COPY(iterator<T>) : rng(copy.rng){}
		inline CTOR_MOVE(iterator<T>) : rng(move.rng) {
			*const_cast<Random**>(&move.rng) = nullptr;
		}
		inline virtual ~iterator(){}
	protected:
		virtual T _sample() { if (rng) return rng->next<T>(); else throw ObjectMoved(); }
		virtual inline void _init() {}
	private:
		inline explicit iterator(Random& rng) : rng(&rng){ _init(); }

		Random* const rng;
	};
public:
	inline Random(){}
	inline virtual ~Random(){}

	inline f64 next_f64() { return sample(); }
	inline f32 next_f32() { return (f32)sample(); }

	virtual bool next_bool();

#define NDEF(t) inline virtual t next_ ## t() { return next_ ## t(_max_ ## t()); }
#define NDEFF(n) NDEF(i ## n) NDEF(u ## n)
	NDEFF(8)
	NDEFF(16)
	NDEFF(32)
	NDEFF(64)
#undef NDEFF
#undef NDEF

	virtual void next_u8(u8* a, usize n);
	virtual void next_i8(i8* a, usize n);
	virtual void next_u16(u16* a, usize n);
	virtual void next_i16(i16* a, usize n);
	virtual void next_u32(u32* a, usize n);
	virtual void next_i32(i32* a, usize n);
	virtual void next_u64(u64* a, usize n);
	virtual void next_i64(i64* a, usize n);

	virtual i8 next_i8(i8 max);
	i8 next_i8(i8 min, i8 max);
	virtual u8 next_u8(u8 max);
	u8 next_u8(u8 min, u8 max);
	virtual u16 next_u16(u16 max);
	u16 next_u16(u16 min, u16 max);
	virtual i16 next_i16(i16 max);
	i16 next_i16(i16 min, i16 max);
	virtual i32 next_i32(i32 max);
	i32 next_i32(i32 min, i32 max);
	virtual i64 next_i64(i64 max);
	i64 next_i64(i64 min, i64 max);
	virtual u32 next_u32(u32 max);
	u32 next_u32(u32 min, u32 max);
	virtual u64 next_u64(u64 max);
	u64 next_u64(u64 min, u64 max);

	virtual inline f32 next_f32(f32 max) { return next_f32() * max; }
	inline f32 next_f32(f32 min, f32 max) { return min + next_f32(max-min); }
	virtual inline f64 next_f64(f64 max) { return next_f64() * max; }
	inline f64 next_f64(f64 min, f64 max) { return min + next_f64(max-min); }

	virtual void next_bytes(u8* bytes, usize n);

	template<usize N>
	inline void next_bytes(u8 (&a)[N]) {
		_next_bytes<N>(a);
	}
	template<usize N>
	inline void next_bytes(std::array<u8, N>& ar)
	{
		// XXX: this doesn't seem to work (aliasing issues?)
		_next_bytes<N>(&ar[0]);
	}

	template<typename T>
	inline T next() {
		std::array<u8, sizeof(T)> arr;
		next_bytes(arr);
		return std::bit_cast<T>(arr);
	}

	template<typename T>
	inline iterator<T> iter() { return iterator(*this); } //TODO: An iterator that yields `next<T>()` forever.

protected:
	//TODO: Should we have _min_* functions too? or just continue to use 0 as the lower bound for next_*(..max)? I think use 0...
#define MAX(T, M) constexpr inline virtual T _max_ ## T() const { return M; }
#define MAXX(n) MAX(u ## n, UINT ## n ## _MAX) MAX(i ## n, INT ## n ## _MAX)
	MAXX(8)
	MAXX(16)
	MAXX(32)
	MAXX(64)
#undef MAXX
#undef MAX
	//constexpr inline virtual i8 _max() const { return 100; } // use limits.h stuff instead.

	// Vectorised versions of `next_bytes()`. These will fall back to that if they are not overridden, but if the implementation has a more efficient way of generating 4/8 bytes of random data it should override these.
	//
	// These should produce entirely random (not bounded or weighted) results, not confined to the integer types they take.
	virtual void next_v64(u64* p, usize n);
	virtual void next_v32(u32* p, usize n);

	// Main sample function. Must return between 0..=1
	// If nothing else is overridden, this value is used for everything else.
	// It is recommended to override `next_bytes()` too however.
	virtual f64 _sample() = 0;
	f64 sample();
private:
	template<usize N>
	inline void _next_bytes(u8* a)
	{
		using namespace _rng__util;
		constexpr const auto rem64 = divp::type<u64>(N);

		u8* ptr = a;
		if constexpr(rem64.d) {
			next_v64(reinterpret_cast<u64*>(ptr), rem64.d); ptr += rem64.d * sizeof(u64);
		}
		constexpr const auto rem32 = divp::type<u32>(rem64.r);
		if constexpr(rem32.d) {
			next_v32(reinterpret_cast<u32*>(ptr), rem32.d); ptr += rem32.d * sizeof(u32);
		}
		if constexpr(rem32.r) {
			next_bytes(ptr, rem32.r);
		}
	}
};

#define DEFT(T) template<> inline T Random::next< T >() { return next_ ## T(); }
#define DEFTT(n) DEFT(i ## n) DEFT(u ## n)
DEFT(bool)
DEFTT(8)
DEFTT(16)
DEFTT(32)
DEFTT(64)
//DEFTT(128)
#undef DEFTT
#undef DEFT

#undef CTOR_COPY
#undef CTOR_MOVE
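The doc comment on Random above pins down the implementer contract: _sample() is the only mandatory override and must land in 0..=1, while next_bytes() (and the vectorised next_v32()/next_v64()) are optional but strongly recommended for throughput. A rough sketch of what a conforming implementation outside this commit could look like, assuming the u8/u64/f64/usize aliases come from the common header pulled in by rngxx.hpp and that the program links against the library for the non-virtual helpers (sample(), the bounded next_* overloads):

#include <rngxx.hpp>
#include <random>

// Hypothetical backend, not part of rngxx: wraps std::mt19937_64.
struct StdRandom final : public Random
{
	explicit StdRandom(u64 seed) : gen(seed) {}

	// Recommended override: bulk bytes without paying one _sample() call per byte.
	// RNG_OVERRIDE() re-exposes the other next_bytes() overloads (the array and
	// std::array templates) that this declaration would otherwise hide.
	RNG_OVERRIDE(void, next_bytes, (u8* bytes, usize n))
	{
		for (usize i = 0; i < n; i++)
			bytes[i] = (u8)(gen() & 0xffu);
	}

protected:
	// Required override: a uniform sample in 0..=1.
	f64 _sample() override
	{
		return std::uniform_real_distribution<f64>(0.0, 1.0)(gen);
	}

private:
	std::mt19937_64 gen;
};

Because next_v64() and next_v32() fall back to next_bytes(), this single override also feeds the templated next<T>() path, whose _next_bytes() splits each request into 64-bit, 32-bit and single-byte chunks via _rng__util::divp.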

@ -2,7 +2,7 @@
#include "internal/common.h" #include "internal/common.h"
#include <rng.h> #include <rngxx.hpp>
#include "internal/mem.h" #include "internal/mem.h"
namespace rng namespace rng

@ -0,0 +1,17 @@
#include <rngxx.hpp>
#include <rngxx/crand.h>
#include <rngxx.h>

extern "C" {
	//TODO: Make these C++-compiled ones intermediate, so the C interface used here can be used; use internal linkage for the C++ intermediates.
	rng_t* rng_new(rng_kind kind, u64 seed[static restrict 1])
	{
		switch(kind)
		{
			case RNG_KIND_CRAND:
				return new rng::crand(seed[0]);
			default: return NULL;
		}
	}
}
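One possible reading of the TODO above, offered only as a sketch (the factory name and layout here are hypothetical, not part of this commit): keep rng_new() as a thin extern "C" dispatcher and move the actual construction into internal-linkage C++ intermediates, one per rng_kind, which the switch calls instead of invoking the constructors directly.

#include <rngxx.hpp>
#include <rngxx/crand.h>

// Hypothetical internal-linkage intermediate (anonymous namespace): it is not
// exported from this translation unit, so only the C-visible rng_new() symbol
// remains in the public ABI.
// rng_new()'s RNG_KIND_CRAND case would then become: return new_crand(seed[0]);
namespace {
	Random* new_crand(u64 seed) { return new rng::crand(seed); }
}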

@ -1,5 +1,6 @@
-#include <rngxx.h>
+#include <rngxx.hpp>
 #include <range.h>

 constexpr const static util::range<f64> SAMPLE_RANGE { 0.0, 1.0 };
