Compare commits

...

41 Commits

Author SHA1 Message Date
Avril 1212bfdb12
version API
4 years ago
Avril eeea6a13b1
test routing
4 years ago
Avril c51f87e8cb
begin web server
4 years ago
Avril 4e71299f37
Freeze tests
4 years ago
Avril b753923bf0
Freeze read-write and from-into
4 years ago
Avril 6973bddf41
post and user lookups
4 years ago
Avril 8f3a030c6e
start: rework freeze
4 years ago
Avril 85e6df6dc5
added configurable message passing buffer size
4 years ago
Avril 519cd814eb
begin concept: insert_new_post
4 years ago
Avril e6aa5da965
added jemallocator as default allocator
4 years ago
Avril f8efb25c30
start major state rework
4 years ago
Avril a057022d48
state post get
4 years ago
Avril 5327dadddf
freeze reading and writing via CBOR
4 years ago
Avril 0a6c1304dc
freeze: create, from
4 years ago
Avril 31ecef8954
post ID mnemonics
4 years ago
Avril e43dee3322
state user
4 years ago
Avril af3979e6a2
PEM format recognised
4 years ago
Avril b2caa126f9
start: templating
4 years ago
Avril bca5c2264a
aes keygen tests
4 years ago
Avril cd8973f45e
added Makefile to install client deps
4 years ago
Avril 6265a8f41d
client-side encryption working
4 years ago
Avril 0a1f7e2293
multiple RSA ciphertexts of post body's AES key
4 years ago
Avril ab05c34637
more verbose error message
4 years ago
Avril 3bad812765
user info serialisable
4 years ago
Avril 3b9ffd8ead
user info serialisable
4 years ago
Avril d72a52a988
user trust
4 years ago
Avril cda9470f86
update doc
4 years ago
Avril 878a2435c5
post body internals
4 years ago
Avril 9ba00f6722
post body format okay
4 years ago
Avril a85286edb9
more post expiry functions
4 years ago
Avril d72df0c9b2
seperated ident info from post to struct
4 years ago
Avril 4e813abed5
compile-time limit the size of post body
4 years ago
Avril b07a320234
post timestamps
4 years ago
Avril 5b3aeadb6b
improve test
4 years ago
Avril 8a1ccd55d9
improve test
4 years ago
Avril 055039d2e0
hard formatted strings also validate during serde deserialisation
4 years ago
Avril 80bd992e72
post serialise okay
4 years ago
Avril 4e0f6945bf
limit size of ID fields at type level
4 years ago
Avril 3a2e30486a
tripcode
4 years ago
Avril 7b71b4fdcb
post
4 years ago
Avril 572bb748a3
restart state
4 years ago

10
.gitignore vendored

@ -1,9 +1,5 @@
/target
*~
# Added by cargo
#
# already existing elements were commented out
#/target
bower_components/
node_modules/
vendor/

282
Cargo.lock generated

@ -1,5 +1,11 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "ad-hoc-iter"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90a8dd76beceb5313687262230fcbaaf8d4e25c37541350cf0932e9adb8309c8"
[[package]]
name = "addr2line"
version = "0.14.1"
@ -67,6 +73,12 @@ version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"
[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
[[package]]
name = "bitflags"
version = "1.2.1"
@ -155,6 +167,20 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
dependencies = [
"libc",
"num-integer",
"num-traits",
"serde",
"time",
"winapi 0.3.9",
]
[[package]]
name = "cloudabi"
version = "0.0.3"
@ -204,14 +230,14 @@ dependencies = [
[[package]]
name = "cryptohelpers"
version = "1.7.1"
version = "1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "705d37f9525e6e677af724ec414c7cb712e8576dbdd11bafbcfbcea2f468bcd9"
checksum = "33c34ac8437a348d0c23e71327d6d8affe4509cc91e33dd22c1e38f7c9da8070"
dependencies = [
"crc",
"futures",
"getrandom 0.1.16",
"hex-literal",
"hex-literal 0.3.1",
"hmac",
"libc",
"openssl",
@ -312,6 +338,12 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "fs_extra"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394"
[[package]]
name = "fuchsia-cprng"
version = "0.1.1"
@ -336,9 +368,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
[[package]]
name = "futures"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0"
checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150"
dependencies = [
"futures-channel",
"futures-core",
@ -351,9 +383,9 @@ dependencies = [
[[package]]
name = "futures-channel"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64"
checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846"
dependencies = [
"futures-core",
"futures-sink",
@ -361,15 +393,15 @@ dependencies = [
[[package]]
name = "futures-core"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748"
checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65"
[[package]]
name = "futures-executor"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65"
checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9"
dependencies = [
"futures-core",
"futures-task",
@ -378,15 +410,15 @@ dependencies = [
[[package]]
name = "futures-io"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb"
checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500"
[[package]]
name = "futures-macro"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556"
checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd"
dependencies = [
"proc-macro-hack",
"proc-macro2",
@ -396,24 +428,24 @@ dependencies = [
[[package]]
name = "futures-sink"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d"
checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6"
[[package]]
name = "futures-task"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d"
checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86"
dependencies = [
"once_cell",
]
[[package]]
name = "futures-util"
version = "0.3.8"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2"
checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b"
dependencies = [
"futures-channel",
"futures-core",
@ -422,7 +454,7 @@ dependencies = [
"futures-sink",
"futures-task",
"memchr",
"pin-project 1.0.2",
"pin-project-lite 0.2.4",
"pin-utils",
"proc-macro-hack",
"proc-macro-nested",
@ -505,6 +537,12 @@ dependencies = [
"tracing-futures",
]
[[package]]
name = "half"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3"
[[package]]
name = "hashbrown"
version = "0.9.1"
@ -517,7 +555,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f"
dependencies = [
"base64",
"base64 0.12.3",
"bitflags",
"bytes",
"headers-core",
@ -545,12 +583,31 @@ dependencies = [
"libc",
]
[[package]]
name = "hex-literal"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0"
dependencies = [
"hex-literal-impl",
"proc-macro-hack",
]
[[package]]
name = "hex-literal"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8"
[[package]]
name = "hex-literal-impl"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46"
dependencies = [
"proc-macro-hack",
]
[[package]]
name = "hmac"
version = "0.9.0"
@ -678,6 +735,27 @@ version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
[[package]]
name = "jemalloc-sys"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d3b9f3f5c9b31aa0f5ed3260385ac205db665baa41d49bb8338008ae94ede45"
dependencies = [
"cc",
"fs_extra",
"libc",
]
[[package]]
name = "jemallocator"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43ae63fcfc45e99ab3d1b29a46782ad679e98436c3169d15a167a1108a724b69"
dependencies = [
"jemalloc-sys",
"libc",
]
[[package]]
name = "kernel32-sys"
version = "0.2.2"
@ -688,6 +766,20 @@ dependencies = [
"winapi-build",
]
[[package]]
name = "khash"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "592c0325561e878d43491e8ef0fed4ecb2b84ddbb1d668405914c50400ba1b9e"
dependencies = [
"crc",
"getrandom 0.1.16",
"hex-literal 0.2.1",
"libc",
"malloc-array",
"sha2",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
@ -709,12 +801,50 @@ dependencies = [
"cfg-if 0.1.10",
]
[[package]]
name = "malloc-array"
version = "1.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f994770c7bb3f8f7db7c4160665fc8814c8c705c10ae59a3d7354f0b8838f5c"
dependencies = [
"libc",
]
[[package]]
name = "matches"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
[[package]]
name = "maud"
version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0784808b0c06f80365c1071048df9b6414a83fc56b8d4b305859e39f5162fa"
dependencies = [
"maud_htmlescape",
"maud_macros",
]
[[package]]
name = "maud_htmlescape"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0fb85bccffc42302ad1e1ed8679f6a39d1317f775a37fbc3f79bdfbe054bfb7"
[[package]]
name = "maud_macros"
version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73a00158abd4671407a3d8bd2c8577e9c7dc8d0b8a8e5523bdaba7d486655b1e"
dependencies = [
"maud_htmlescape",
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "memchr"
version = "2.3.4"
@ -846,6 +976,25 @@ dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "num-integer"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
dependencies = [
"autocfg 1.0.1",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
"autocfg 1.0.1",
]
[[package]]
name = "num_cpus"
version = "1.13.0"
@ -919,7 +1068,7 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7170d73bf11f39b4ce1809aabc95bf5c33564cdc16fc3200ddda17a5f6e5e48b"
dependencies = [
"base64",
"base64 0.12.3",
"crypto-mac",
"hmac",
"rand 0.7.3",
@ -982,9 +1131,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b"
[[package]]
name = "pin-project-lite"
version = "0.2.0"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c"
checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827"
[[package]]
name = "pin-utils"
@ -1014,6 +1163,30 @@ dependencies = [
"log",
]
[[package]]
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
"proc-macro2",
"quote",
"version_check",
]
[[package]]
name = "proc-macro-hack"
version = "0.5.19"
@ -1245,6 +1418,15 @@ version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232"
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
dependencies = [
"semver",
]
[[package]]
name = "ryu"
version = "1.0.5"
@ -1263,6 +1445,21 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
dependencies = [
"semver-parser",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "serde"
version = "1.0.118"
@ -1272,6 +1469,16 @@ dependencies = [
"serde_derive",
]
[[package]]
name = "serde_cbor"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622"
dependencies = [
"half",
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.118"
@ -1359,6 +1566,15 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
[[package]]
name = "smallmap"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d61d73d5986b7f728a76234ca60f7826faa44dd9043d2954ca6583bfc7b875d"
dependencies = [
"rustc_version",
]
[[package]]
name = "smallvec"
version = "1.6.0"
@ -1539,7 +1755,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3"
dependencies = [
"cfg-if 1.0.0",
"log",
"pin-project-lite 0.2.0",
"pin-project-lite 0.2.4",
"tracing-core",
]
@ -1574,7 +1790,7 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0308d80d86700c5878b9ef6321f020f29b1bb9d5ff3cab25e75e23f3a492a23"
dependencies = [
"base64",
"base64 0.12.3",
"byteorder",
"bytes",
"http",
@ -1788,20 +2004,30 @@ dependencies = [
name = "yuurei"
version = "0.1.0"
dependencies = [
"ad-hoc-iter",
"base64 0.13.0",
"cfg-if 1.0.0",
"chrono",
"color-eyre",
"cryptohelpers",
"difference",
"futures",
"generational-arena",
"getrandom 0.2.1",
"hex-literal 0.3.1",
"jemallocator",
"khash",
"lazy_static",
"log",
"maud",
"mopa",
"once_cell",
"pretty_env_logger",
"serde",
"serde_cbor",
"serde_json",
"sha2",
"smallmap",
"smallvec",
"smolset",
"tokio",

@ -6,25 +6,38 @@ authors = ["Avril <flanchan@cumallover.me>"]
edition = "2018"
[features]
default = ["nightly"]
default = ["nightly", "short-mnemonics"]
nightly = ["smallvec/const_generics"]
# Use shortened mnemonics for post IDs
short-mnemonics = []
[dependencies]
ad-hoc-iter = "0.2.3"
base64 = "0.13.0"
cfg-if = "1.0.0"
chrono = {version = "0.4", features=["serde"]}
color-eyre = {version = "0.5.10", default-features=false}
cryptohelpers = {version = "1.7.1", features=["full"]}
cryptohelpers = {version = "1.7.2", features=["full"]}
difference = "2.0.0"
futures = "0.3.8"
futures = "0.3.12"
generational-arena = "0.2.8"
getrandom = "0.2.1"
hex-literal = "0.3.1"
jemallocator = "0.3.2"
khash = "2.0.0"
lazy_static = "1.4.0"
log = "0.4.11"
maud = "0.22.2"
mopa = "0.2.2"
once_cell = "1.5.2"
pretty_env_logger = "0.4.0"
serde = {version = "1.0.118", features=["derive"]}
serde_cbor = "0.11.1"
serde_json = "1.0.61"
sha2 = "0.9.2"
smallmap = "1.3.0"
smallvec = {version = "1.6.0", features= ["union", "serde", "write"]}
smolset = "1.1"
tokio = {version = "0.2", features=["full"] }

@ -0,0 +1,10 @@
# Builds the client-side JS dependencies (npm + bower) and bundles NodeRSA for the browser.
.PHONY: all
all: client
# Ensure the output directory for bundled vendor scripts exists.
dirs:
@mkdir -p vendor
# Install deps, then bundle node-rsa with browserify into vendor/NodeRSA.js.
client: dirs
npm install
bower install crypto-js
cd node_modules/node-rsa/src && browserify -r ./NodeRSA.js:node-rsa > ../../../vendor/NodeRSA.js

39
package-lock.json generated

@ -0,0 +1,39 @@
{
"name": "yuurei",
"version": "0.1.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"asn1": {
"version": "0.2.4",
"resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
"integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
"requires": {
"safer-buffer": "~2.1.0"
}
},
"crypto-js": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.0.0.tgz",
"integrity": "sha512-bzHZN8Pn+gS7DQA6n+iUmBfl0hO5DJq++QP3U6uTucDtk/0iGpXd/Gg7CGR0p8tJhofJyaKoWBuJI4eAO00BBg=="
},
"jsencrypt": {
"version": "3.0.0-rc.1",
"resolved": "https://registry.npmjs.org/jsencrypt/-/jsencrypt-3.0.0-rc.1.tgz",
"integrity": "sha512-gcvGaqerlUJy1Kq6tNgPYteVEoWNemu+9hBe2CdsCIz4rVcwjoTQ72iD1W76/PRMlnkzG0yVh7nwOOMOOUfKmg=="
},
"node-rsa": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/node-rsa/-/node-rsa-1.1.1.tgz",
"integrity": "sha512-Jd4cvbJMryN21r5HgxQOpMEqv+ooke/korixNNK3mGqfGJmy0M77WDDzo/05969+OkMy3XW1UuZsSmW9KQm7Fw==",
"requires": {
"asn1": "^0.2.4"
}
},
"safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
}
}
}

@ -0,0 +1,21 @@
{
"name": "yuurei",
"version": "0.1.0",
"description": "",
"main": "index.js",
"dependencies": {
"crypto-js": "^4.0.0",
"jsencrypt": "^3.0.0-rc.1",
"node-rsa": "^1.1.1"
},
"devDependencies": {},
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {
"type": "git",
"url": "git@git.flanchan.moe:flanchan/yuurei.git"
},
"author": "",
"license": "GPL-3.0-or-later"
}

@ -0,0 +1,62 @@
use crate::mnemonic::MnemonicSaltKind;
use crate::{version, version::Version};
/// Default anonymous name
pub const ANON_NAME: &'static str = "名無し";
/// Max length of `name` and `email` fields in posts.
///
/// NOTE(review): the constant is named `POST_ID_MAX_LEN` but the doc says it limits
/// name/email — confirm which it actually bounds.
pub const POST_ID_MAX_LEN: usize = 32;
/// The salt to use for tripcode hashes
pub const TRIPCODE_SALT: khash::salt::Salt = khash::salt::Salt::internal();
/// The tripcode algorithm to use
pub const TRIPCODE_ALGO: khash::ctx::Algorithm = khash::ctx::Algorithm::Sha256Truncated;
/// What timezone to represent data in.
pub type Timezone = chrono::offset::Utc;
/// Default post expiry duration (120 days)
///
/// # Notes
/// This is specified as a std `Duration`, the chrono `Duration` calculated from the post's offset must be converted into this type to be used for anything.
/// This conversion can fail.
/// If it does fail, then the post should just be treated as expired.
pub const POST_EXPIRE: tokio::time::Duration = tokio::time::Duration::from_secs(
60 // Minute
* 60 // Hour
* 24 // Day
* 120 // 120 days
);
/// Max size of encrypted body
pub const POST_BODY_MAX_SIZE: usize = (1024 * 1024) * 20; // 20MB
/// What encoded PEM formats are recognised
pub const RECOGNISABLE_PEM_ENCODES: &'static [&'static str] = &[
"PUBLIC KEY",
"RSA PUBLIC KEY",
];
/// Size of the mnemonic salt
pub const MNEMONIC_SALT_SIZE: usize = 16;
/// Mnemonic salt to use
pub const MNEMONIC_SALT: MnemonicSaltKind = MnemonicSaltKind::Random;
/// Max state image read size
///
/// Set to `0` for unlimited.
pub const MAX_IMAGE_READ_SIZE: usize = (1024 * 1024 * 1024) * 3; // 3GB
/// The size of state stream message passing buffers.
///
/// Must be 1 or larger (enforced at compile time by the assert below).
pub const STATE_STREAM_BUFFER_SIZE: usize = 1;
static_assert!(STATE_STREAM_BUFFER_SIZE > 0);
/// The current version of the file formats for saving state
///
/// TODO: Create `Version` type that takes from `env!(version)` at compile time.
pub const VERSION: Version = version!(0);
/// Max size of a HTTP request body to process.
pub const MAX_CONTENT_LENGTH: u64 = (1024 * 1024) * 15; //15MB

@ -352,7 +352,7 @@ mod tests
#[macro_export] macro_rules! id_type {
($name:ident $(; $doc:literal)?) => {
$(#[doc(comment=$doc)])?
#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, ::serde::Serialize, ::serde::Deserialize)]
#[derive(Copy, Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, ::serde::Serialize, ::serde::Deserialize)]
pub struct $name(uuid::Uuid);
impl $name
@ -367,6 +367,18 @@ mod tests
{
Self(from)
}
/// As bytes
#[inline] fn id_as_bytes(&self) -> &[u8]
{
self.0.as_bytes()
}
/// The inner UUID
#[inline] fn id_inner(&self) -> &::uuid::Uuid
{
&self.0
}
}
};
@ -691,3 +703,173 @@ mod maybe_iter
where I: ExactSizeIterator{}
}
pub use maybe_iter::MaybeIter;
/// Task-cancellation primitives used by the `with_cancel!` macro.
mod cancel {
use std::{
pin::Pin,
task::{Context, Poll},
convert::Infallible,
};
use std::{error,fmt};
use futures::Future;
/// A future that never completes
#[derive(Debug, Clone, Copy)]
pub struct NeverFuture;
impl Future for NeverFuture
{
// `Infallible` because this future can never yield a value.
type Output = Infallible;
// Always pending: polling never resolves.
#[inline] fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Pending
}
}
/// Default error when a task is cancelled.
#[derive(Debug)]
pub struct CancellationError;
impl error::Error for CancellationError{}
impl fmt::Display for CancellationError
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
write!(f, "task was cancelled")
}
}
}
pub use cancel::{
NeverFuture,
CancellationError,
};
/// Run a future until another one completes, 'cancelling' the running task.
///
/// # Usage
/// Normal usage to return `CancellationError`s.
/// ```
/// # use futures::Future;
/// # use crate::ext::*;
/// async fn do_something(cancel: impl Future) -> Result<(), CancellationError> {
/// with_cancel!(cancel => async move {
/// // do some async work
/// })?;
/// Ok(())
/// }
/// ```
/// Return specific errors
/// ```
/// # use futures::Future;
/// # use crate::ext::*;
/// # struct SomeError;
/// async fn do_something(cancel: impl Future) -> Result<(), SomeError> {
/// with_cancel!(cancel, SomeError => async move {
/// // do some async work
/// })?;
/// Ok(())
/// }
/// ```
/// Return the return of the cancel future itself
/// ```
/// # use futures::Future;
/// # use crate::ext::*;
/// # struct SomeError;
/// async fn do_something(cancel: impl Future<Output=String>) -> Result<(), String> {
/// with_cancel!(use cancel => async move {
/// // do some async work
/// })?;
/// Ok(())
/// }
/// ```
// NOTE(review): the first arm names `$crate::ext::cancel::CancellationError`, but
// `mod cancel` is not `pub` — confirm this path resolves where the exported macro is used.
#[macro_export] macro_rules! with_cancel {
($cancel:expr => $normal:expr) => {
tokio::select! {
// Cancel future won the race: bail out with the default cancellation error.
_ = $cancel => {
return Err($crate::ext::cancel::CancellationError.into());
}
x = $normal => {
x
}
}
};
($cancel:expr, $or:expr => $normal:expr) => {
tokio::select! {
// Cancel future won the race: bail out with the caller-supplied error value.
_ = $cancel => {
return Err($or);
}
x = $normal => {
x
}
}
};
(use $cancel:expr => $normal:expr) => {
tokio::select! {
// Cancel future won the race: convert its own output into the error.
y = $cancel => {
return Err(y.into());
}
x = $normal => {
x
}
}
}
}
cfg_if!{
if #[cfg(feature="unlimited-buffers")] {
//TODO: Create mpsc wrapper for unlimited channel that works like normal mpsc channel
// NOTE(review): both cfg branches currently expand to the same bounded channel;
// the `unlimited-buffers` feature has no effect until the TODO above is done.
#[macro_export] macro_rules! mpsc_channel {
($sz:expr) => (::tokio::sync::mpsc::channel($sz));
}
} else {
/// Create an mpsc channel
///
/// # Intended for state
#[macro_export] macro_rules! mpsc_channel {
($sz:expr) => (::tokio::sync::mpsc::channel($sz));
}
}
}
/// A static assertion: fails compilation if `$expr` is `false`.
///
/// Casts the boolean to a `usize` array length: `true` yields the expected
/// length 1, `false` yields 0, which mismatches the `[(); 1]` type at compile time.
#[macro_export] macro_rules! static_assert {
($expr:expr) => {
const _:[();1] = [(); (($expr) as bool) as usize];
}
}
/// Clone this object with the same name.
///
/// Shadows `$ident` with a clone of itself — typically used right before moving
/// the binding into a closure or async block.
#[macro_export] macro_rules! clone {
($ident:ident) => {let $ident = $ident.clone();}
}
/// Report this `Result`'s `Err` if it is one. Ignore if not.
///
/// `report!(try expr)` evaluates to an `Option<T>` (logging the error and yielding `None`);
/// `report!(expr)` logs any error and discards the value entirely.
#[macro_export] macro_rules! report {
(try $result:expr) => {
match $result
{
Ok(v) => Some(v),
Err(err) => {
// Log the error with its source location; trace records the offending expression.
error!("{}:{} error: {:?}", file!(), line!(), err);
trace!(concat!("In statement ", stringify!($result)));
None
}
}
};
($result:expr) => {
{
let _ = report!(try $result);
}
};
}
/// Move this value into this block.
///
/// The rebinding forces a move of the named value(s) into the current scope
/// (useful inside `async move`/closure bodies).
#[macro_export] macro_rules! mv {
($ident:ident) => (let $ident = $ident;);
($($ident:ident),+ $(,)?) => {
$(
mv!($ident);
)+
}
}

@ -0,0 +1,583 @@
use super::*;
use std::marker::PhantomData;
use std::{fmt,error};
use std::ops::Deref;
use std::borrow::{Borrow, ToOwned};
/// A spec to validate the formatting of a string for.
pub trait FormatSpec
{
/// Error produced when validation fails; must convert into an `eyre::Report`.
type Error: Into<eyre::Report>;
/// Check that `s` satisfies this format, returning `Err` if it does not.
fn validate(s: &str) -> Result<(), Self::Error>;
}
/// A strongly validated string slice
///
/// `#[repr(transparent)]` guarantees the same layout as `str`, which the unsafe
/// `new_unchecked` transmute relies on. The `PhantomData<*const F>` marker suppresses
/// the auto `Send`/`Sync`/`Unpin` impls, so they are manually restored below —
/// sound because `F` is only a compile-time marker and no pointer is ever stored.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct FormattedStr<F: FormatSpec + ?Sized>(PhantomData<*const F>, str);
impl<F: FormatSpec + ?Sized> std::marker::Unpin for FormattedStr<F>{}
unsafe impl<F: FormatSpec + ?Sized> std::marker::Send for FormattedStr<F>{}
unsafe impl<F: FormatSpec + ?Sized> std::marker::Sync for FormattedStr<F>{}
/// A strongly validated string
///
/// Owned counterpart of `FormattedStr`; the inner `String` has always passed `F::validate`.
// TODO: How to perform validation on deserialise? Custom `Deserialize` impl? might have to.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct FormattedString<F: FormatSpec + ?Sized>(String, PhantomData<F>);
/// Display the underlying validated string.
///
/// Implemented on `FormattedStr<F>` itself rather than only on `&'a FormattedStr<F>`
/// as before: this is strictly more general, and every existing `&FormattedStr`
/// formatting site keeps working through std's blanket `impl Display for &T`.
impl<F> fmt::Display for FormattedStr<F>
where F: ?Sized + FormatSpec
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        // Delegate straight to the inner `str`.
        f.write_str(self.as_str())
    }
}
// Deserialising
// Serde support: serialisation writes the raw string; deserialisation re-runs
// `F::validate` (via `FormattedStr::new` / `FormattedString::new`), so an invalid
// string can never enter the type through serde.
const _:() = {
use serde::{
de::Visitor,
de::Error,
de,
Serialize,
ser::Serializer,
};
impl<F: FormatSpec + ?Sized> Serialize for FormattedStr<F>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.1)
}
}
impl<F: FormatSpec + ?Sized> Serialize for FormattedString<F>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.0)
}
}
/// Visitor that validates incoming strings against `F` before constructing the value.
#[derive(Debug, Clone, Copy)]
struct FormattedStringVisitor<F: FormatSpec + ?Sized>(PhantomData<F>);
impl<'de, F: FormatSpec + ?Sized> Visitor<'de> for FormattedStringVisitor<F>
{
type Value = FormattedString<F>;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
write!(f, "a string satisfying the requirements of {}", std::any::type_name::<F>())
}
// Borrowed input: validate first, then copy into an owned value.
fn visit_str<E: Error>(self, s: &str) -> Result<Self::Value, E>
{
FormattedStr::<F>::new(s).map_err(|x| E::custom(x.into())).map(ToOwned::to_owned)
}
// Owned input: validate and take ownership without an extra copy.
fn visit_string<E: Error>(self, s: String) -> Result<Self::Value, E>
{
FormattedString::<F>::new(s).map_err(|x| E::custom(x.into()))
}
}
impl<'de, F: FormatSpec + ?Sized> de::Deserialize<'de> for FormattedString<F> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: de::Deserializer<'de>,
{
let vis = FormattedStringVisitor::<F>(PhantomData);
deserializer.deserialize_string(vis)
}
}
};
impl<F: FormatSpec + ?Sized> FormattedStr<F>
{
/// Create a new instance without validating the input.
///
/// # Safety
/// You must be sure the input is of a valid state to `F`.
#[inline(always)] pub unsafe fn new_unchecked<'a>(s: &'a str) -> &'a Self
{
// Sound: `FormattedStr` is `#[repr(transparent)]` over `str`, so
// `&str` and `&FormattedStr<F>` share the same layout.
std::mem::transmute(s)
}
/// Create and validate the format of a new instance.
pub fn new<'a>(s: &'a str) -> Result<&'a Self, F::Error>
{
F::validate(s).map(move |_| unsafe {Self::new_unchecked(s)})
}
/// Get the inner str
#[inline] pub fn as_str<'a>(&'a self) -> &'a str
{
&self.1
}
}
impl<F: FormatSpec + ?Sized> FormattedString<F>
{
/// Create a new instance without validating the input.
///
/// # Safety
/// You must be sure the input is of a valid state to `F`.
#[inline(always)] pub unsafe fn new_unchecked(s: String) -> Self
{
std::mem::transmute(s)
}
/// Create and validate a the format of a new instance.
pub fn new(s: String) -> Result<Self, F::Error>
{
F::validate(&s)
.map(move |_| unsafe {Self::new_unchecked(s)})
}
}
impl<F: FormatSpec + ?Sized> FormattedString<F>
{
/// As a formatted str
// Sound: `self.0` was validated against `F` when this value was constructed.
#[inline] pub fn as_ref<'a>(&'a self) -> &'a FormattedStr<F>
{
unsafe { FormattedStr::new_unchecked(&self.0) }
}
}
impl<F: FormatSpec + ?Sized> AsRef<str> for FormattedStr<F>
{
#[inline] fn as_ref(&self) -> &str
{
self.as_str()
}
}
/// `Borrow` allows `FormattedString` map keys to be looked up by `&FormattedStr`.
impl<F: FormatSpec + ?Sized> Borrow<FormattedStr<F>> for FormattedString<F>
{
#[inline] fn borrow(&self) -> &FormattedStr<F> {
self.as_ref()
}
}
impl<F: FormatSpec + ?Sized> ToOwned for FormattedStr<F>
{
type Owned = FormattedString<F>;
// No re-validation needed: `self` is already known-valid for `F`.
#[inline] fn to_owned(&self) -> Self::Owned {
FormattedString(String::from(&self.1), PhantomData)
}
}
impl<F: FormatSpec + ?Sized> AsRef<FormattedStr<F>> for FormattedString<F>
{
// Resolves to the inherent `FormattedString::as_ref` (inherent methods take
// precedence over trait methods), so this delegates rather than recursing.
#[inline] fn as_ref(&self) -> &FormattedStr<F> {
self.as_ref()
}
}
impl<F: FormatSpec + ?Sized> Deref for FormattedString<F>
{
type Target = FormattedStr<F>;
// Deref to the borrowed form so all `FormattedStr` methods are available directly.
#[inline] fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl<'a,F: FormatSpec + ?Sized> From<&'a FormattedStr<F>> for &'a str
{
#[inline] fn from(from: &'a FormattedStr<F>) -> Self
{
&from.1
}
}
// NOTE(review): this crate is edition 2018, where `TryFrom` is not in the prelude;
// confirm that `use super::*` (or another import) brings `std::convert::TryFrom` into scope.
impl<'a,F: FormatSpec + ?Sized> TryFrom<&'a str> for &'a FormattedStr<F>
{
type Error = F::Error;
#[inline] fn try_from(from: &'a str) -> Result<Self, Self::Error>
{
FormattedStr::new(from)
}
}
impl<'a, F: FormatSpec + ?Sized> From<FormattedString<F>> for String
{
// Unwraps the validated wrapper; no copy.
#[inline] fn from(from: FormattedString<F>) -> Self
{
from.0
}
}
impl<F: FormatSpec + ?Sized> std::str::FromStr for FormattedString<F>
{
type Err = F::Error;
// Validate first, then copy the slice into an owned value.
#[inline] fn from_str(s: &str) -> Result<Self, Self::Err> {
F::validate(s)?;
Ok(Self(s.to_owned(), PhantomData))
}
}
impl<'a, F: FormatSpec + ?Sized> TryFrom<String> for FormattedString<F>
{
type Error = F::Error;
fn try_from(from: String) -> Result<Self, Self::Error>
{
Self::new(from)
}
}
}
pub mod formats
{
use super::*;
cfg_if! {
if #[cfg(feature="nightly")] {
/// A format valid for any string
// On nightly, the uninhabited never type serves as the marker.
pub type AnyFormat = !;
} else {
/// A format valid for any string
// On stable, an empty enum is the equivalent uninhabited marker type.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum AnyFormat{}
}
}
impl FormatSpec for AnyFormat
{
// Validation can never fail, hence `Infallible`.
type Error = std::convert::Infallible;
#[inline(always)] fn validate(_: &str) -> Result<(), Self::Error>
{
Ok(())
}
}
/// A format spec that must satisfy both these format specs in order
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct BothFormat<T, U = AnyFormat>(PhantomData<(T, U)>)
where T: FormatSpec, U: FormatSpec;
/// A formatting error coming from an instance of `BothFormat<T,U>`.
///
/// This type's `T` and `U` correspond to the `Error` type of the `BothFormat`'s `T` and `U`.
#[derive(Debug)]
pub enum MultiFormatError<T,U>
{
First(T),
Second(U),
}
impl<T, U> FormatSpec for BothFormat<T,U>
where T: FormatSpec,
U: FormatSpec,
T::Error : error::Error + 'static + Send + Sync,
U::Error : error::Error + 'static + Send + Sync,
{
type Error = MultiFormatError<T::Error, U::Error>;
// Short-circuits: `U` is only checked if `T` passes.
#[inline] fn validate(s: &str) -> Result<(), Self::Error> {
T::validate(s).map_err(MultiFormatError::First)?;
U::validate(s).map_err(MultiFormatError::Second)?;
Ok(())
}
}
impl<T: 'static, U: 'static> error::Error for MultiFormatError<T,U>
where T: error::Error,
U: error::Error
{
// Expose the failing inner error as the source for error-chain reporting.
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match &self {
Self::First(f) => Some(f),
Self::Second(n) => Some(n),
}
}
}
impl<T, U> fmt::Display for MultiFormatError<T,U>
where T: fmt::Display,
U: fmt::Display
{
// The inner error is deliberately not printed here; it is reachable via `source()`.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
match self {
Self::First(_) => write!(f, "the first condition failed"),
Self::Second(_) => write!(f, "the second condition failed"),
}
}
}
/// A hex string format specifier
///
/// Accepts only ASCII hex digits (`0-9`, `a-f`, `A-F`); see the `FormatSpec` impl.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum HexFormat{}
/// A string with a constant max length
///
/// `MAX` is measured in *bytes* (`str::len`), not characters.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum MaxLenFormat<const MAX: usize>{}
impl<const MAX: usize> FormatSpec for MaxLenFormat<MAX>
{
    type Error = MaxLenExceededError<MAX>;

    /// Accept any string whose byte length does not exceed `MAX`.
    #[inline] fn validate(s: &str) -> Result<(), Self::Error> {
        match s.len() {
            length if length > MAX => Err(MaxLenExceededError),
            _ => Ok(()),
        }
    }
}
/// Error returned when a string is longer than `MaxLenFormat`'s `MAX` bytes.
#[derive(Debug)]
pub struct MaxLenExceededError<const MAX: usize>;
impl<const MAX: usize> error::Error for MaxLenExceededError<MAX>{}
impl<const MAX: usize> fmt::Display for MaxLenExceededError<MAX>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        write!(f, "string length exceeds {}", MAX)
    }
}
impl HexFormat
{
    // Every byte a hex string may contain: digits plus both cases of a-f.
    const HEX_MAP: &'static [u8] = b"1234567890abcdefABCDEF";
}
impl FormatSpec for HexFormat
{
    type Error = HexFormatError;

    /// Validate that every character is an ASCII hex digit.
    ///
    /// On failure, reports the offending character and its *byte* offset.
    fn validate(s: &str) -> Result<(), Self::Error> {
        for (i, chr) in s.char_indices()
        {
            // `is_ascii_hexdigit()` accepts exactly the bytes in `HEX_MAP`
            // (0-9, a-f, A-F); this replaces the old two-step
            // alphanumeric + table-lookup check with the same accepted set.
            if !chr.is_ascii_hexdigit() {
                return Err(HexFormatError(chr, i));
            }
        }
        Ok(())
    }
}
/// Error for an invalid hex string.
///
/// Fields: the offending character and its byte offset within the input.
#[derive(Debug)]
pub struct HexFormatError(char, usize);
impl error::Error for HexFormatError{}
impl fmt::Display for HexFormatError
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        write!(f, "invalid hex char {:?} at index {}", self.0, self.1)
    }
}
/// A PEM formatted string.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PEMFormat{}
/// Error produced when a string is not valid PEM (carries no detail).
#[derive(Debug)]
pub struct PEMFormatError;
impl error::Error for PEMFormatError{}
impl fmt::Display for PEMFormatError
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        write!(f, "invalid PEM format")
    }
}
impl PEMFormat
{
    /// Is `string` one of the recognised PEM block labels (e.g. "PUBLIC KEY")?
    #[inline] fn is_allowed(string: &str) -> bool
    {
        lazy_static! {
            // Built once from `defaults::RECOGNISABLE_PEM_ENCODES`; used as a set.
            static ref ALLOWED_MAP: smallmap::Map<&'static str,()> = defaults::RECOGNISABLE_PEM_ENCODES.iter().map(|&x| (x, ())).collect();
        }
        ALLOWED_MAP.contains_key(&string)
    }
    // Delimiters of a PEM document: "-----BEGIN <label>-----" ... "-----END <label>-----".
    const BEGIN_BLOCK: &'static str = "-----BEGIN ";
    const END_BLOCK: &'static str = "-----END ";
    const END_ANY_BLOCK: &'static str = "-----";
}
impl FormatSpec for PEMFormat
{
    type Error = PEMFormatError;

    /// Validate a PEM document: a recognised BEGIN line, base64 body lines,
    /// and a matching END line with nothing after it.
    fn validate(s: &str) -> Result<(), Self::Error> {
        macro_rules! enforce {
            ($e:expr) => {
                if !$e {
                    return Err(PEMFormatError);
                }
            }
        }
        let s = s.trim();
        let mut lines = s.lines();
        let begin = lines.next().ok_or(PEMFormatError)?.trim();
        enforce!(begin.starts_with(Self::BEGIN_BLOCK));
        enforce!(begin.ends_with(Self::END_ANY_BLOCK));
        // Defensive: label slice below must not underflow.
        enforce!(begin.len() >= Self::BEGIN_BLOCK.len() + Self::END_ANY_BLOCK.len());
        let val = &begin[Self::BEGIN_BLOCK.len()..];
        let val = &val[..(val.len()-Self::END_ANY_BLOCK.len())];
        enforce!(Self::is_allowed(val));
        let mut lines = lines.map(|x| x.trim()).fuse();
        let mut found_end = false;
        while let Some(line) = lines.next()
        {
            if line.starts_with(Self::END_BLOCK) {
                // BUGFIX: previously the END line's "-----" suffix was never
                // checked, so `eval.len() - 5` could underflow and panic on
                // malformed input such as "-----END X".
                enforce!(line.ends_with(Self::END_ANY_BLOCK));
                enforce!(line.len() >= Self::END_BLOCK.len() + Self::END_ANY_BLOCK.len());
                let eval = &line[Self::END_BLOCK.len()..];
                let eval = &eval[..(eval.len()-Self::END_ANY_BLOCK.len())];
                // END label must match the BEGIN label.
                enforce!(eval == val);
                found_end = true;
                break;
            } else {
                enforce!(Base64Format::validate(line).is_ok());
            }
        }
        // BUGFIX: a document with no END block at all was previously accepted.
        enforce!(found_end);
        // Nothing may follow the END line.
        enforce!(lines.next().is_none());
        Ok(())
    }
}
/// A base64 string format specifier (standard alphabet with `+` and `/`).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy)]
pub enum Base64Format{}
impl Base64Format
{
    // Every character of the standard base64 alphabet; '=' padding is
    // handled separately by `validate`.
    const CHARSET_STR: &'static str = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/";
}
impl FormatSpec for Base64Format
{
    type Error = InvalidBase64Error;

    /// Check every byte is in the base64 alphabet; `=` padding is only
    /// tolerated inside the final 4-byte group.
    fn validate(s: &str) -> Result<(), Self::Error> {
        lazy_static! {
            // 256-entry membership table for the base64 alphabet.
            static ref CHARSET: [bool; 256] = {
                let mut map = [false; 256];
                for byte in Base64Format::CHARSET_STR.bytes()
                {
                    map[byte as usize] = true;
                }
                map
            };
        }
        let mut groups = s.as_bytes().chunks(4).peekable();
        while let Some(group) = groups.next()
        {
            let final_group = groups.peek().is_none();
            for &byte in group
            {
                let allowed = CHARSET[byte as usize] || (final_group && byte == b'=');
                if !allowed {
                    return Err(InvalidBase64Error(byte as char));
                }
            }
        }
        Ok(())
    }
}
/// On invalid base64 string
///
/// Carries the first offending character encountered.
#[derive(Debug)]
pub struct InvalidBase64Error(char);
impl error::Error for InvalidBase64Error{}
impl fmt::Display for InvalidBase64Error
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        write!(f, "invalid char {:?} in base64 string", self.0)
    }
}
/// Borrowed string validated to be at most `MAX` bytes long.
pub type MaxLenStr<const MAX: usize> = FormattedStr<MaxLenFormat<MAX>>;
/// Owned string validated to be at most `MAX` bytes long.
pub type MaxLenString<const MAX: usize> = FormattedString<MaxLenFormat<MAX>>;
/// Borrowed string validated as hex.
pub type HexFormattedStr = FormattedStr<HexFormat>;
/// Owned string validated as hex.
pub type HexFormattedString = FormattedString<HexFormat>;
/// Borrowed string validated as base64.
pub type Base64FormattedStr = FormattedStr<Base64Format>;
/// Owned string validated as base64.
pub type Base64FormattedString = FormattedString<Base64Format>;
impl Base64FormattedString
{
    /// Encode some data as base64
    pub fn encode(data: impl AsRef<[u8]>) -> Self
    {
        // SAFETY: `base64::encode` only emits characters of the base64
        // alphabet (plus trailing '=' padding), so the result is valid under
        // `Base64Format` and validation can be skipped.
        unsafe { Self::new_unchecked(base64::encode(data)) }
    }
}
impl Base64FormattedStr
{
    /// Decode this base64 string to a byte slice, returning the length of the written bytes.
    ///
    /// # Panics
    /// If the slice is not large enough
    pub fn decode_slice(&self, mut to: impl AsMut<[u8]>) -> usize
    {
        // unwrap(): the string was validated on construction; the remaining
        // failure mode is an undersized output buffer (documented panic).
        base64::decode_config_slice(self.as_str(), base64::STANDARD, to.as_mut()).unwrap()
    }
    /// Decode this base64 string to a `Vec<u8>`
    pub fn decode(&self) -> Vec<u8>
    {
        // unwrap(): assumes `Base64Format`'s validation only accepts input
        // `base64::decode` can decode — TODO confirm (validation is laxer
        // about '=' placement within the final 4-byte group).
        base64::decode(self.as_str()).unwrap()
    }
}
pub type PEMFormattedStr = FormattedStr<PEMFormat>;
pub type PEMFormattedString = FormattedString<PEMFormat>;
#[cfg(test)]
mod tests
{
    use super::*;
    /// Hex validation rejects a string containing a non-hex char and accepts
    /// a mixed-case hex string.
    #[test]
    fn hex_format()
    {
        let _invalid = HexFormattedStr::new("ab120982039840i ").expect_err("Invalidation");
        let _valid = HexFormattedStr::new("abc123982095830495adcfDD").expect("Validation");
    }
    /// Round-trip: whatever `base64::encode` produces must pass validation,
    /// across a range of input lengths (exercises padding in the last group).
    #[test]
    fn base64()
    {
        let mut random = [0u8; 256];
        for len in 9..101
        {
            getrandom::getrandom(&mut random[..len]).expect("rng");
            let encoded = base64::encode(&random[..len]);
            println!("String: {}", encoded);
            let _encoded = Base64FormattedString::new(encoded).expect("Encode validate failed");
        }
    }
    /// A real RSA public key in PEM form must validate.
    #[test]
    fn pem_format()
    {
        const PUBKEY: &'static str = r#"-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQD2vGFdJ+KCK6Ot8sDcaCXk/9D8
S1InEHi5vGSLdGKJpaRdHhQ08NL5+fv6B9FGKV5KARCYXeW1JGnGNzhRXHhNyOSm
KNi2T+L84xBCTEazlLnnnvqKGaD95rtjwMmkhsErRMfavqUMThEmVca5fwP30Sqm
StF6Y2cSO2eUjqTeUQIDAQAB
-----END PUBLIC KEY-----"#;
        let pem = PEMFormattedStr::new(PUBKEY).expect("PEM format");
        println!("PEM: {}", pem);
    }
}
}

@ -2,6 +2,7 @@
#![cfg_attr(feature="nightly", feature(never_type))]
#![allow(dead_code)]
#![allow(unused_macros)]
#[cfg(all(feature="nightly", test))] extern crate test;
#[macro_use] extern crate serde;
@ -9,6 +10,9 @@
#[macro_use] extern crate log;
#[macro_use] extern crate mopa;
#[macro_use] extern crate cfg_if;
#[macro_use] extern crate ad_hoc_iter;
#[macro_use] extern crate maud;
#[macro_use] extern crate hex_literal;
#[allow(unused_imports)]
use std::convert::{TryFrom, TryInto};
@ -22,13 +26,28 @@ use color_eyre::{
SectionExt, Help
};
use jemallocator::Jemalloc;
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
#[macro_use] mod ext; use ext::*;
mod version;
mod service;
mod bytes;
mod delta; //unused now, but tests still use it, so...
mod hard_format;
mod mnemonic;
mod defaults;
mod template;
mod delta;
mod user;
mod post;
mod state;
mod service;
mod web;
fn install() -> eyre::Result<()>
{
@ -38,10 +57,14 @@ fn install() -> eyre::Result<()>
Ok(())
}
//pub use defaults::VERSION;
#[tokio::main]
async fn main() -> eyre::Result<()> {
install()?;
println!("Hello, world!");
trace!("log initialised");
Ok(())
}

@ -0,0 +1,261 @@
//! Mnemonic hashes, incl. tripcode.
use super::*;
use std::borrow::Borrow;
use khash::ctx::Context;
use cryptohelpers::sha256::{self, Sha256Hash};
use std::hash::{Hash, Hasher};
use std::cmp::Ordering;
use std::str;
use std::fmt;
/// A newtype tripcode string
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct Tripcode(String);
lazy_static!{
    // Shared khash context: algorithm and salt come from `defaults`.
    static ref CONTEXT: Context = Context::new(defaults::TRIPCODE_ALGO, defaults::TRIPCODE_SALT);
}
impl Tripcode
{
    /// Generate a tripcode from this string.
    pub fn generate(from: impl AsRef<[u8]>) -> Result<Self, khash::error::Error>
    {
        let code = khash::generate(&CONTEXT, from)?;
        Ok(Self(code))
    }
    /// Create a tripcode that *is* this string
    #[inline] pub fn special(string: String) -> Self
    {
        Self(string)
    }
    /// As a string
    #[inline] pub fn as_str(&self) -> &str
    {
        self.0.as_str()
    }
    /// Consume into regular string
    #[inline] pub fn into_inner(self) -> String
    {
        self.0
    }
}
impl From<Tripcode> for String
{
    // Unwrap the newtype.
    #[inline] fn from(from: Tripcode) -> Self
    {
        from.0
    }
}
impl fmt::Display for Tripcode
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        write!(f, "{}", self.0)
    }
}
impl AsRef<str> for Tripcode
{
    fn as_ref(&self) -> &str
    {
        &self.0[..]
    }
}
// Lets `Tripcode` map keys be looked up with a `&String`.
impl Borrow<String> for Tripcode
{
    fn borrow(&self) -> &String
    {
        &self.0
    }
}
use once_cell::sync::OnceCell;
/// A mnemonic base64 hash
///
/// Field 0 is the (salted) SHA-256 digest; field 1 lazily caches the rendered
/// mnemonic string. Equality, ordering and hashing use the digest only.
#[derive(Debug, Clone)]
pub struct MnemonicHash(Sha256Hash, OnceCell<String>);
// Comparison, equality and hashing all delegate to the digest (field 0) only;
// the lazily-cached render string never affects identity.
impl Ord for MnemonicHash
{
    fn cmp(&self, other: &Self) -> Ordering
    {
        self.0.cmp(&other.0)
    }
}
impl PartialOrd for MnemonicHash
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Canonical impl for a type that is `Ord`: derive from `cmp` so the
        // two orderings can never disagree (previously delegated separately
        // to the inner type's `partial_cmp`).
        Some(self.cmp(other))
    }
}
impl Eq for MnemonicHash{}
impl PartialEq for MnemonicHash
{
    fn eq(&self, other: &Self) -> bool
    {
        self.0 == other.0
    }
}
// Hashes only the digest, consistent with `PartialEq`.
impl Hash for MnemonicHash {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}
impl str::FromStr for MnemonicHash
{
    type Err = std::convert::Infallible;
    /// Hash the input string; this can never fail.
    #[inline] fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Call the hashing constructor directly. The original wrote
        // `Ok(Self::from_str(s))`, which only avoided infinite recursion
        // because the *inherent* `from_str` shadows this trait method —
        // needlessly fragile and confusing to read.
        Ok(Self::from_slice(s.as_bytes()))
    }
}
impl MnemonicHash
{
    /// Create mnemonic hash from this string slice
    ///
    /// NOTE: this inherent method shadows `<Self as str::FromStr>::from_str`.
    #[inline] pub fn from_str(string: impl AsRef<str>) -> Self
    {
        Self::from_slice(string.as_ref().as_bytes())
    }
    /// Create mnemonic hash from this slice
    ///
    /// The digest is computed over the input followed by `defaults::MNEMONIC_SALT`.
    pub fn from_slice(data: impl AsRef<[u8]>) -> Self
    {
        Self(sha256::compute_slice_iter(iter![data.as_ref(), defaults::MNEMONIC_SALT.as_ref()]), OnceCell::new())
    }
    /// The inner hash
    #[inline] pub fn as_hash(&self) -> &Sha256Hash
    {
        &self.0
    }
    /// Create a mnemonic from this hash
    ///
    /// # Notes
    /// This does not salt the hash, the hasher is responsible for salting the hash with `defaults::MNEMONIC_SALT`.
    #[inline] pub fn from_hash(hash: Sha256Hash) -> Self
    {
        Self(hash, OnceCell::new())
    }
    /// Render the digest as a mnemonic string (base64 with substitutions,
    /// padding stripped).
    fn render(&self) -> String
    {
        #[allow(unused_mut)] let mut end;
        cfg_if! {
            if #[cfg(feature="short-mnemonics")] {
                // Fold the digest in half by XOR-ing the two halves,
                // halving the rendered length.
                let last = &self.0.as_ref()[..(sha256::SIZE/2)];
                let first = &self.0.as_ref()[(sha256::SIZE/2)..];
                end = [0u8; sha256::SIZE/2];
                for (e, (f, l)) in end.iter_mut().zip(first.iter().copied().zip(last.iter().copied()))
                {
                    *e = f ^ l;
                }
            } else {
                // Full digest.
                end = &self.0.as_ref()[..];
            }
        }
        // base64, with the non-alphanumeric chars '/' and '+' swapped for
        // distinctive letters and '=' padding dropped.
        base64::encode(end).chars().filter_map(|x| {
            Some(match x {
                '/' => 'ł',
                '+' => 'þ',
                '=' => return None,
                x => x
            })
        }).collect()
    }
    // Render once, then serve the cached string on subsequent calls.
    #[inline] fn render_cache(&self) -> &String
    {
        self.1.get_or_init(|| {
            self.render()
        })
    }
}
impl From<MnemonicHash> for Sha256Hash
{
    // Discard the render cache, keep the digest.
    fn from(from: MnemonicHash) -> Self
    {
        from.0
    }
}
impl AsRef<Sha256Hash> for MnemonicHash
{
    fn as_ref(&self) -> &Sha256Hash
    {
        &self.0
    }
}
// Lets map lookups be keyed by the digest alone.
impl Borrow<Sha256Hash> for MnemonicHash
{
    fn borrow(&self) -> &Sha256Hash {
        &self.0
    }
}
impl fmt::Display for MnemonicHash
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        // Renders (and caches) the mnemonic on first use.
        write!(f, "{}", self.render_cache())
    }
}
/// What kind of salt to use for hashing `MnemonicHash`.
///
/// This is intended to be global state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum MnemonicSaltKind
{
    /// No salt at all.
    None,
    /// A caller-provided fixed salt.
    Specific([u8; defaults::MNEMONIC_SALT_SIZE]),
    /// A process-wide salt drawn from the OS RNG on first use.
    Random,
}
impl MnemonicSaltKind
{
    /// Get as a slice.
    ///
    /// Returns `None` for `Self::None`. The `Random` salt is generated once
    /// per process and reused thereafter.
    pub fn as_slice(&self) -> Option<&[u8]>
    {
        lazy_static! {
            static ref RANDOM_SALT: [u8; defaults::MNEMONIC_SALT_SIZE] = {
                let mut output = [0u8; defaults::MNEMONIC_SALT_SIZE];
                getrandom::getrandom(&mut output[..]).expect("rng fatal");
                output
            };
        }
        match self {
            Self::None => None,
            Self::Random => Some(&RANDOM_SALT[..]),
            Self::Specific(salt) => Some(&salt[..]),
        }
    }
}
impl AsRef<[u8]> for MnemonicSaltKind
{
    // `None` degrades to the empty slice so callers can always concatenate.
    #[inline] fn as_ref(&self) -> &[u8]
    {
        self.as_slice().unwrap_or(&[])
    }
}
#[cfg(test)]
mod tests
{
    /// Smoke test: hashing a byte slice must not panic.
    #[test]
    fn mnemonics()
    {
        let _mnemonic = super::MnemonicHash::from_slice(b"hello world");
    }
}

@ -0,0 +1,256 @@
use super::*;
use cryptohelpers::sha256::Sha256Hash;
mod render;
pub use render::*;
use hard_format::formats::{
MaxLenString,
Base64FormattedString,
Base64FormattedStr,
self,
};
use mnemonic::Tripcode;
id_type!(PostID; "A unique post ID");
/// String type that limits its bytes to the ID string max limit.
pub type IDMaxString = MaxLenString<{defaults::POST_ID_MAX_LEN}>;
/// The timestamp type used in posts
pub type PostTimestamp = chrono::DateTime<defaults::Timezone>;
/// A size limited base64 formatting specifier
type PostBodyFormat = formats::BothFormat<formats::MaxLenFormat<{defaults::POST_BODY_MAX_SIZE}>, formats::Base64Format>;
/// A size limited base64 string
pub type PostBodyString = hard_format::FormattedString<PostBodyFormat>;
/// A size limited base64 string
pub type PostBodyStr = hard_format::FormattedStr<PostBodyFormat>;
/// Identifiers for a post
///
/// All three fields are optional; `Default` yields a fully-anonymous ident.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default)]
pub struct Ident
{
    /// Display name, length-capped; `None` renders as the anon default.
    name: Option<IDMaxString>,
    /// Optional tripcode (see `mnemonic::Tripcode`).
    tripcode: Option<Tripcode>,
    /// Optional email, length-capped like `name`.
    email: Option<IDMaxString>,
}
impl Ident
{
/// Create a new ident object
///
/// # Panics
/// If `name` or `email` are longer than `defaults::POST_ID_MAX_LEN`.
pub fn new(name: Option<&str>, tripcode: Option<Tripcode>, email: Option<&str>) -> Self
{
Self {
name: name.map(|x| IDMaxString::new(x.to_owned()).expect("Name too long")),
email: email.map(|x| IDMaxString::new(x.to_owned()).expect("Email too long")),
tripcode
}
}
/// The name of this user ident
pub fn name(&self) -> &str
{
self.name.as_ref().map(|x| x.as_str()).unwrap_or(defaults::ANON_NAME)
}
/// The tripcode of this user ident
pub fn tripcode(&self) -> Option<&Tripcode>
{
self.tripcode.as_ref()
}
/// The email of this user ident
pub fn email(&self) -> Option<&str>
{
self.email.as_ref().map(|x| x.as_str())
}
}
/// A single completed post.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct Post
{
    /// Unique ID for each post
    id: PostID,
    /// Who created the post, (if specified)
    owner: Option<user::UserID>,
    /// Identifiers for this post
    ident: Ident,
    /// The client-side generated AES key used for decrypting the body's text.
    ///
    /// This AES key can be encrypted with any number of RSA public keys on the client side. There must be at least one, or the post body is lost and the post should be removed.
    body_aes_keys: Vec<PostBodyString>,
    /// The client-side encrypted body string
    body: PostBodyString,
    /// Signature of the body (optional).
    signature: Option<Base64FormattedString>,
    /// Hash of the body
    hash: Sha256Hash,
    /// When the post was created
    created: PostTimestamp,
    /// When the post was last edited.
    ///
    /// # Notes
    /// Each new edit is pushed to the end of the vec, creation does not count as an edit.
    edited: Vec<PostTimestamp>,
    /// Optional dynamic expiry duration.
    ///
    /// `None` means the default lifetime (`defaults::POST_EXPIRE`) applies.
    expires_in: Option<tokio::time::Duration>,
}
// Time based functions
impl Post
{
    /// The timestamp when the post should expire.
    ///
    /// # Notes
    /// If the conversion overflows, then the expiry time is treated as 0. (i.e. the post expires immediately).
    pub fn expires_at(&self) -> PostTimestamp
    {
        let lifetime = self.expires_in.unwrap_or(defaults::POST_EXPIRE);
        let lifetime = chrono::Duration::from_std(lifetime)
            .unwrap_or_else(|_| chrono::Duration::zero());
        self.created + lifetime
    }
    /// How long the post has, from its creation time, until it reaches its expiry time.
    ///
    /// If this value is *lower* than the value of `time_since_creation`, then the post has expired.
    pub fn expires_in(&self) -> chrono::Duration
    {
        self.expires_at() - self.created
    }
    /// Time passed since this post was created as a Chrono `Duration`.
    pub fn time_since_creation(&self) -> chrono::Duration
    {
        defaults::Timezone::now() - self.created
    }
    /// The timestamp for when this post was created
    pub fn created(&self) -> PostTimestamp
    {
        self.created
    }
    /// A slice of timestamps showing when this post was edited, in order of those edits.
    pub fn edited(&self) -> &[PostTimestamp]
    {
        self.edited.as_slice()
    }
    /// Has this post expired?
    ///
    /// Expired posts should be removed
    pub fn expired(&self) -> bool
    {
        match self.time_since_creation().to_std() {
            // Expired once the elapsed time reaches the configured lifetime.
            Ok(elapsed) => &elapsed >= self.expires_in.as_ref().unwrap_or(&defaults::POST_EXPIRE),
            // Conversion failed; treat the post as expired.
            Err(_) => true,
        }
    }
}
// Ident based functions
impl Post
{
    /// Get a mnemonic for this post's ID.
    #[inline] pub fn post_id_mnemonic(&self) -> mnemonic::MnemonicHash
    {
        mnemonic::MnemonicHash::from_slice(self.id.id_as_bytes())
    }
    /// This post's unique identifier
    #[inline] pub fn post_id(&self) -> &PostID
    {
        &self.id
    }
    /// `UserID` of the post owner, if there is one.
    #[inline] pub fn owner_id(&self) -> Option<&user::UserID>
    {
        self.owner.as_ref()
    }
    /// The user-set name for this post if there is one.
    #[inline] pub fn own_name(&self) -> Option<&str>
    {
        match &self.ident.name {
            Some(name) => Some(name.as_str()),
            None => None,
        }
    }
    /// The name for this post.
    ///
    /// If no name is set, returns the default anon name.
    pub fn name(&self) -> &str
    {
        match self.own_name() {
            Some(name) => name,
            None => defaults::ANON_NAME,
        }
    }
    /// The email set for this post, if there is one.
    pub fn email(&self) -> Option<&str>
    {
        match &self.ident.email {
            Some(email) => Some(email.as_str()),
            None => None,
        }
    }
    /// Get the tripcode of this post, if there is one.
    pub fn tripcode(&self) -> Option<&Tripcode>
    {
        self.ident.tripcode.as_ref()
    }
    /// The AES encrypted body of this post
    pub fn body(&self) -> &PostBodyStr
    {
        self.body.as_ref()
    }
    /// An iterator of RSA ciphertexts of the AES key used to encrypt the body text.
    pub fn body_keys<'a>(&'a self) -> impl Iterator<Item = &'a PostBodyStr> + ExactSizeIterator + 'a
    {
        self.body_aes_keys.iter().map(|key| key.as_ref())
    }
    /// The PEM formatted signature of this post, if there is one.
    pub fn signature(&self) -> Option<&Base64FormattedStr>
    {
        match &self.signature {
            Some(sig) => Some(sig.as_ref()),
            None => None,
        }
    }
}
#[cfg(test)]
mod tests
{
    /// Round-trip a fully-populated `Post` through JSON and require equality.
    /// Also smoke-tests the maud HTML `Render` impl.
    #[test]
    fn post_serialise()
    {
        use std::convert::TryInto;
        let post = super::Post {
            owner: None,
            id: super::PostID::id_new(),
            ident: super::Ident {
                name: Some("Some name".to_owned().try_into().unwrap()),
                email: None,
                tripcode: Some(super::Tripcode::generate("uhh hello").unwrap()),
            },
            body: super::PostBodyString::new(crate::hard_format::formats::Base64FormattedString::encode("Hello world").into()).unwrap(),
            body_aes_keys: vec![super::PostBodyString::new(crate::hard_format::formats::Base64FormattedString::encode("TODO").into()).unwrap()],
            signature: None,
            hash: Default::default(),
            created: crate::defaults::Timezone::now(),
            edited: Default::default(),
            expires_in: None,
        };
        eprintln!("Post as html: {}", html! { body { (post) } }.into_string());
        println!("Post is: {:?}", post);
        let post_json = serde_json::to_vec(&post).expect("Serialise");
        println!("Post json: {}", std::str::from_utf8(&post_json[..]).unwrap());
        let post2: super::Post = serde_json::from_slice(&post_json[..]).expect("Deserialise");
        assert_eq!(post, post2);
        println!("Post was: {:?}", post2);
    }
}

@ -0,0 +1,64 @@
use super::*;
use maud::{html, Markup, Render};
/// Render an `Ident` as inline HTML: the name (mailto-linked when an email is
/// present) followed by the tripcode, prefixed "!", inside `<code>`.
impl Render for Ident
{
    fn render(&self) -> Markup
    {
        html! {
            b.ident {
                @if let Some(email) = self.email() {
                    a href=(format!("mailto:{}", email)) { (self.name()) }
                } @else {
                    (self.name())
                }
                @if let Some(tripcode) = self.tripcode() {
                    " "
                    code {
                        "!" (tripcode)
                    }
                }
            }
        }
    }
}
/// Render a full `Post` as an HTML `<article>`: header (ident, timestamp,
/// anchor links keyed by the post-ID mnemonic) and the encrypted body in a
/// `<blockquote>`.
impl Render for Post
{
    fn render(&self) -> Markup
    {
        let id = self.post_id_mnemonic();
        html! {
            article#(id) {
                header {
                    (self.ident)
                    " "
                    time datetime=(self.created()) {
                        (self.created()) //TODO format the DateTime properly
                    }
                    nav {
                        a href=(format!("#{}", id)) { "No." }
                        a href=(format!("#q_{}", id)) { (id) }
                    }
                }
                blockquote {
                    (self.body())
                }
                //TODO: Signature and hash and other things
            }
        }
    }
}
#[cfg(test)]
mod tests
{
    /// The rendered ident must match the expected HTML byte-for-byte
    /// (the tripcode is deterministic for a fixed input and salt).
    #[test]
    fn ident_htmlrender()
    {
        let ident = super::Ident::new(Some("Name"), Some(crate::mnemonic::Tripcode::generate("mwee").unwrap()), Some("user@example.com"));
        use maud::Render;
        assert_eq!(r#"<b class="ident"><a href="mailto:user@example.com">Name</a> <code>!えナセッゲよラで</code></b>"#, &ident.render().into_string());
    }
}

@ -1,147 +1 @@
//! Session services
use super::*;
use tokio::{
task::JoinHandle,
sync::{
mpsc,
broadcast,
},
};
use std::{fmt,error};
use std::marker::{Send,Sync};
///// A boxed message that can be downcasted.
//pub type BoxedMessage = Box<dyn std::any::Any + Send + Sync + 'static>;
/// A handle to a service.
pub trait Service<T=ExitStatus>
where T: Send + 'static
{
/// The message type to send to the service.
type Message: Send + Sync + 'static;
/// The response to expect from the service.
type Response: Send + Sync + 'static;
/// Return the wait handle.
///
/// This method should drop the message pipeline.
fn wait_on(self) -> JoinHandle<T>;
/// An immutable reference to the pipe for sending message. Useful for service handle detaching (i.e. cloning the message input pipe).
fn message_in_ref(&self) -> &mpsc::Sender<Self::Message>;
/// The message pipe for sending messages to the service.
fn message_in(&mut self) -> &mut mpsc::Sender<Self::Message>;
/// The message pipe for receiving messages from the service.
fn message_out(&mut self) -> &mut broadcast::Receiver<Self::Response>;
/// Is the service alive? A `None` value means 'maybe', and is the default return.
///
/// # Note
/// This should not be considered an infallible indicator of if the service has crashed or not. A better method is attempting to send or receive a message and the sender/receiver returning an error.
#[inline] fn is_alive(&self) -> Option<bool>
{
None
}
}
/// Error returned when subscribing to a service
#[derive(Debug)]
#[non_exhaustive]
pub enum SubscribeError
{
    /// The service's receive has already been dropped.
    SenderDropped,
    /// The service dropped the response oneshot channel.
    NoResponse,
}
impl error::Error for SubscribeError{}
impl fmt::Display for SubscribeError
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        match self {
            Self::SenderDropped => write!(f, "the service has already stopped"),
            Self::NoResponse => write!(f, "the service declined to, or was unable to respond"),
        }
    }
}
// Await a service join handle, mapping a join failure to `ExitStatus::Abnormal`.
#[macro_export] macro_rules! join_service {
    ($serv:expr) => ($serv.await.map_err(|_| $crate::service::ExitStatus::Abnormal)?)
}
/// How a service exited
#[derive(Debug)]
#[non_exhaustive]
pub enum ExitStatus<T=()>
{
    /// A graceful exit with value
    Graceful(T),
    /// An abnormal exit (counted as error)
    ///
    /// # Usage
    /// Usually for panicked services, otherwise use `Self::Error`.
    /// The macro `join_service!()` can be used to convert handle join errors into this
    Abnormal,
    /// Exit on an error report (counted as error)
    Error(eyre::Report),
}
impl<T,E: Into<eyre::Report>> From<Result<T, E>> for ExitStatus<T>
{
    /// `Ok` maps to a graceful exit; `Err` is converted into an error report.
    fn from(from: Result<T, E>) -> Self
    {
        match from {
            Ok(value) => Self::Graceful(value),
            Err(error) => Self::Error(error.into()),
        }
    }
}
#[derive(Debug)]
/// The error `ExitStatus::Abnormal` converts to.
///
/// Unit struct: carries no detail beyond its `Display` message.
pub struct AbnormalExitError;
impl error::Error for AbnormalExitError{}
impl fmt::Display for AbnormalExitError
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        write!(f, "service terminated in an abnormal way")
    }
}
/*
impl<U, T: error::Error + Send+Sync+'static> From<T> for ExitStatus<U>
{
fn from(from: T) -> Self
{
Self::Error(from.into())
}
}*/
impl<T: Default> Default for ExitStatus<T>
{
    /// The default exit status is a graceful exit carrying `T::default()`.
    #[inline]
    fn default() -> Self
    {
        Self::Graceful(Default::default())
    }
}
impl<T> From<ExitStatus<T>> for eyre::Result<T>
{
    fn from(from: ExitStatus<T>) -> Self
    {
        match from {
            // Panicked/aborted service: surface as `AbnormalExitError` with a hint.
            ExitStatus::Abnormal => Err(AbnormalExitError).with_note(|| "The background worker likely panicked"),
            ExitStatus::Graceful(t) => Ok(t),
            // Wrap the service's own report so abnormal-exit context is kept.
            ExitStatus::Error(rep) => Err(rep).wrap_err(AbnormalExitError),
        }
    }
}
//! Services

@ -1,33 +0,0 @@
//! Open post body
use super::*;
use tokio::{
sync::{
watch,
mpsc,
},
};
use once_cell::sync::OnceCell;
/// An open post body
#[derive(Debug)]
pub struct Karada
{
    /// The live character buffer that deltas are applied to.
    scape: Vec<char>,
    /// Cached `String` image of `scape`; rebuilt after every delta.
    body_cache: String,
    //TODO: Pub/sub? Or should that be in some other place? Probably.
}
impl Karada
{
    /// Rebuild the cached string image from the character buffer.
    fn invalidate_cache(&mut self)
    {
        self.body_cache = self.scape.iter().collect();
    }
    /// Apply this delta to the body.
    pub fn apply_delta(&mut self, delta: &delta::Delta)
    {
        delta.insert(&mut self.scape);
        self.invalidate_cache();
    }
}

@ -0,0 +1,65 @@
use super::*;
use std::collections::HashMap;
use tokio::time::{
DelayQueue,
delay_queue,
};
/// Time-expiring cache of user lookups.
#[derive(Debug)]
pub struct Cache
{
    //TODO: HashMap and DelayQueue of mapping public keys to user IDs, etc.
    /// Public key -> (user ID, handle into `pkey_rems` for that entry).
    pkey_maps: HashMap<String, (UserID, delay_queue::Key)>,
    /// Expiry queue; yields public keys whose entries should be evicted.
    pkey_rems: DelayQueue<String>,
}
impl Cache
{
    /// Create a new empty cache
    pub fn new() -> Self
    {
        Self {
            pkey_maps: HashMap::new(),
            pkey_rems: DelayQueue::new(),
        }
    }
    //how tf do we get this to run concurrently with insertions??? it holds `&mut self` forever!
    //redesign required. maybe using Arc and RwLock or Mutex and interrupting the purge task when something needs to be inserted. i.e: give this task a stream that it can `select!` along with calling `next()`, if the other future completes first, we return? but wouldn't that lose us an item in `next()`? is there a `select with priority` in `futures`? i think there is. eh...
    /// Run a purge on this cache.
    ///
    /// Drains expired keys from the delay queue, removing the matching map
    /// entries. Removal errors are collected and reported together at the end.
    pub async fn purge(&mut self) -> eyre::Result<()>
    {
        let mut errors = Vec::default();
        while let Some(entry) = self.pkey_rems.next().await
        {
            match entry {
                Ok(entry) => {
                    self.pkey_maps.remove(entry.get_ref());
                },
                Err(err) => {
                    errors.push(err);
                }
            }
        }
        // Idiom: `is_empty()` over `len() > 0`; early return removes the
        // redundant else-branch after `return`.
        if errors.is_empty() {
            return Ok(());
        }
        let mut err = Err(eyre!("One or more removals failed"))
            .with_note(|| errors.len().to_string().header("Number of failed removals"));
        for e in errors
        {
            err = err.with_error(move || e);
        }
        err
    }
    /// Purge all values available to be purged now.
    ///
    /// If the purge future is not immediately ready, this is a no-op `Ok`.
    pub fn purge_now(&mut self) -> eyre::Result<()>
    {
        // `now_or_never()` polls exactly once; `None` means nothing was ready.
        self.purge().now_or_never().unwrap_or(Ok(()))
    }
}

@ -1,155 +1,391 @@
//! Creating immutable images of state.
//! Frozen, serialisable state
use super::*;
use std::{error,fmt};
use cryptohelpers::sha256::{Sha256Hash, self};
//use futures::prelude::*;
use std::io;
use tokio::prelude::*;
/// An image of the entire post container
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Freeze
/// An immutable image of `State`.
//TODO: Implement this when `State` is solidified and working
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct FreezeInner
{
posts: Vec<post::Post>,
users: HashSet<User>,
posts: HashSet<(UserID, Post)>,
}
impl From<Freeze> for Imouto
const FREEZE_CHK: &[u8; 4] = b"REI\0";
/// Metadata derived from `FreezeInner`'s CBOR serialisation.
///
/// This is written and read as-is.
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)]
struct FreezeMetadata
{
#[inline] fn from(from: Freeze) -> Self
{
Self::from_freeze(from)
}
chk: [u8; 4],
version: u32, //Version, //TODO: Convert env!(version) into type
body_size: u64,
compressed: bool, //TODO: Should body be compressed? If so, with what?
body_hash: Sha256Hash,
}
impl TryFrom<Imouto> for Freeze
impl FreezeMetadata
{
type Error = FreezeError;
#[inline] pub fn len(&self) -> usize
{
self.body_size.try_into().expect("Length exceeded limit of `usize`")
}
#[inline] fn new(from: &[u8]) -> Self
{
Self {
chk: *FREEZE_CHK,
version: defaults::VERSION.to_u32(),
body_size: from.len().try_into().unwrap(), //this should never fail, right?
compressed: false,
body_hash: sha256::compute_slice(from),
}
}
/// Write this metadata to an async stream, return the number of bytes written.
pub async fn write_to(&self, mut to: impl tokio::io::AsyncWrite + Unpin) -> io::Result<usize>
{
macro_rules! write_all
{
($expr:expr) => {
{
let bytes = $expr;
let bytes = &bytes[..];
to.write_all(bytes).await?;
bytes.len()
}
}
}
let done =
write_all!(self.chk) +
write_all!(u32::to_le_bytes(self.version)) +
write_all!(u64::to_le_bytes(self.body_size)) +
write_all!(if self.compressed {[1]} else {[0]}) +
write_all!(self.body_hash.as_ref());
#[inline] fn try_from(from: Imouto) -> Result<Self, Self::Error>
Ok(done)
}
/// Read a metadata object from an aynsc stream, without verifying any of its fields
async fn read_from_unchecked(mut from: impl tokio::io::AsyncRead + Unpin) -> io::Result<Self>
{
from.try_into_freeze()
macro_rules! read_exact
{
($num:expr) => {
{
let mut buf = [0u8; $num];
from.read_exact(&mut buf[..]).await?;
buf
}
};
(type $type:ty) => {
{
read_exact!(std::mem::size_of::<$type>())
}
};
}
Ok(
Self {
chk: read_exact!(4),
version: u32::from_le_bytes(read_exact!(type u32)),
body_size: u64::from_le_bytes(read_exact!(type u64)),
compressed: if read_exact!(1)[0] == 0 { false } else { true },
body_hash: Sha256Hash::from(read_exact!(type Sha256Hash)),
}
)
}
}
/// Read a metadata object from an async stream, verifying its fields.
///
/// # Note
/// The `body_hash` field must be verified *after* reading the body.
pub async fn read_from(from: impl tokio::io::AsyncRead + Unpin) -> eyre::Result<Self>
{
macro_rules! enforce {
($expr:expr; $err:expr) => {
{
if ! $expr {
return Err($err)
}
}
}
}
let this = Self::read_from_unchecked(from).await
.wrap_err(eyre!("Failed to read data from stream"))?;
macro_rules! enforce {
($expr:expr, $err:expr) => {
{
if ! $expr {
Err(eyre!($err))
.with_section(|| format!("{:?}", this).header("Metadata was"))
} else {
Ok(())
}
}
}
}
/// Error returned when a freeze operation fails
#[derive(Debug)]
pub struct FreezeError{
held: Option<Arc<RwLock<post::Post>>>,
}
enforce!(&this.chk == FREEZE_CHK, "Check value was invalid")
.with_section(|| format!("{:?}", FREEZE_CHK).header("Expected"))
.with_section(|| format!("{:?}", &this.chk).header("Got"))
.with_suggestion(|| "Was this the correct type of file you wanted to load?")?;
enforce!(this.body_size <= defaults::MAX_IMAGE_READ_SIZE as u64, "Body size exceeded max")
.with_section(|| format!("{}", &this.body_size).header("Size read was"))
.with_section(|| format!("{}", defaults::MAX_IMAGE_READ_SIZE).header("Max size allowed is"))
.with_warning(|| "This may indicate file corruption")?;
enforce!(this.version <= defaults::VERSION, "Unsupported version")
.with_section(|| this.version.to_string().header("Read version was"))
.with_section(|| defaults::VERSION.to_string().header("Current version is"))
.with_suggestion(|| "This file may have been created with a newer version of the program. Try updating the program.")?;
Ok(this)
}
impl FreezeError
{
/// The post associated with this error, if there is one.
pub fn post(&self) -> Option<&Arc<RwLock<post::Post>>>
/// Verify the hash of this metadata by computing the hash of `from` and checking.
///
/// # Notes
/// It is recommended to to this within a tokio `spawn_blocking` or `block_in_place` closure, as the hashing operation may take a while.
pub fn verify_hash_blocking(&self, from: impl AsRef<[u8]>) -> bool
{
self.held.as_ref()
sha256::compute_slice(from) == self.body_hash
}
}
impl error::Error for FreezeError{}
impl fmt::Display for FreezeError
/// An immutable `State` image
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Freeze
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
write!(f,"Failed to create freeze image")?;
if let Some(aref) = &self.held
{
let cnt = Arc::strong_count(&aref) - 1;
if cnt > 0 {
write!(f, "Post reference still held in {} other places.", cnt)
} else {
write!(f, "Post reference was still held in another place at the time, but no longer is.")
}
} else {
Ok(())
}
}
//metadata: FreezeMetadata, //written as-is, calculated from body
//body: Vec<u8>, // `FreezeInner` written with CBOR
inner: FreezeInner, // Dumped to Vec<u8> as CBOR, then FreezeMetadata is calculated from this binary data. Then metadata is written, then the binary blob is written.
}
impl Imouto
impl Freeze
{
/// Create a serialisable image of this store by cloning each post into it.
pub async fn freeze(&self) -> Freeze
/// Generate the output to write.
///
/// This might take a while, and is done synchronously. Recommended to run on blocking task.
fn gen_output(&self) -> serde_cbor::Result<(FreezeMetadata, Vec<u8>)>
{
let read = &self.all;
let mut sis = Freeze{
posts: Vec::with_capacity(read.len()),
};
for (_, post) in read.iter()
{
sis.posts.push(post.read().await.clone());
}
sis
let body = serde_cbor::to_vec(&self.inner)?;
Ok((FreezeMetadata::new(&body[..]), body))
}
/// Consume into a serialisable image of this store.
///
/// # Fails
/// If references to any posts are still held elsewhere.
pub fn try_into_freeze(self) -> Result<Freeze, FreezeError>
/// Read `Freeze` from an input stream
pub async fn read_async(mut input: impl tokio::io::AsyncRead + Unpin) -> eyre::Result<Self>
{
let read = self.all;
let mut sis = Freeze{
posts: Vec::with_capacity(read.len()),
let inner: FreezeInner = {
let unchecked_meta = FreezeMetadata::read_from(&mut input)
.await
.wrap_err(eyre!("Failed to read metadata from stream"))?;
let mut body: Vec<u8> = vec![0; unchecked_meta.len()];
input.read_exact(&mut body[..]).await
.wrap_err(eyre!("Failed to read body from stream"))
.with_section(|| format!("{:?}", unchecked_meta).header("Metadata was"))?;
tokio::task::spawn_blocking(move || {
if !unchecked_meta.verify_hash_blocking(&body) {
Err(eyre!("Failed to verify metadata hash"))
} else {
serde_cbor::from_slice(&body)
.wrap_err(eyre!("Failed to deserialise body"))
}
}).await
.wrap_err("Background task panic")?
.with_section(|| format!("{:?}", unchecked_meta).header("Metadata was"))?
};
for post in read.into_iter()
{
sis.posts.push(match Arc::try_unwrap(post) {
Ok(val) => val.into_inner(),
Err(arc) => return Err(FreezeError{held: Some(arc)}),
// Err(_arc) => panic!("Reference to post is still being used"),//arc.read().await.clone(), // something else holds the reference, we can't consume it.
});
}
Ok(Self {
inner,
})
}
Ok(sis)
/// Write `Freeze` into this output stream on the current task.
///
/// This function runs the serialisation and hashing on the current task, which is synchronous. Recommended to use `into_write_async` instead.
///
/// # Errors
/// If serialising the image, or writing the metadata or body to `output`, fails.
pub async fn write_async(&self, mut output: impl tokio::io::AsyncWrite + Unpin) -> eyre::Result<()>
{
// Serialise `inner` to CBOR and compute its metadata (hash + length) synchronously.
let (meta, body) = self.gen_output()
.wrap_err(eyre!("Failed to generate write body"))?;
// Metadata goes first so a reader can validate the body that follows it.
meta.write_to(&mut output).await
.wrap_err(eyre!("Failed to write metadata to output stream"))
.with_section(|| format!("{:?}", meta).header("Metadata was"))?;
output.write_all(&body[..]).await
.wrap_err(eyre!("Failed to write whole body to output stream"))?;
Ok(())
}
/// Consume this `Freeze` into this output stream.
///
/// This function runs the serialisation and hashing on a background blocking task instead of on the current one.
///
/// # Errors
/// If the background task panics, serialisation fails, or writing to `output` fails.
pub async fn into_write_async(self, mut output: impl tokio::io::AsyncWrite + Unpin) -> eyre::Result<()>
{
// Offload the CPU-bound CBOR serialisation + hashing to the blocking thread-pool;
// `self` is moved into the closure, consuming the image.
let (meta, body) = tokio::task::spawn_blocking(move || self.gen_output())
.await
.wrap_err(eyre!("Background task panic"))?
.wrap_err(eyre!("Failed to generate write body"))?;
meta.write_to(&mut output).await
.wrap_err(eyre!("Failed to write metadata to output stream"))
.with_section(|| format!("{:?}", meta).header("Metadata was"))?;
output.write_all(&body[..]).await
.wrap_err(eyre!("Failed to write whole body to output stream"))?;
Ok(())
}
}
impl State
{
/// Create an image from the state.
///
/// # Locks
/// This method holds the read lock of Oneesan, it also holds the read lock of all posts and users.
/// This will prevent any writes while the `Freeze` is being created, and will also yield the current task until all write operations on `State` are completed.
///
/// # Panics
/// If references to any posts are still held elsewhere.
pub fn into_freeze(self) -> Freeze
/// If the internal state is incorrect.
pub async fn freeze(&self) -> Freeze
{
self.try_into_freeze().expect("Failed to consume into freeze")
let onee = self.0.oneesan.read().await;
use std::ops::Deref;
// this might be kinda expensive. should we offload this?
let post_owner_reverse_lookup: HashMap<Index, UserID> = onee.posts_user_map.iter()
.map(|(&y,x)| x.iter().map(move |idx| (*idx,y)))
.flatten()
.collect();
let (posts, users) = tokio::join!(
stream::iter(onee.posts.iter()).then(|(post_idx, shared)| {
let owner_id = *post_owner_reverse_lookup.get(&post_idx).unwrap();
async move { (owner_id, Post::clone(shared.read().await.deref())) }
}).collect(),
stream::iter(onee.users.iter()).then(|(_, shared)| async move { User::clone(shared.read().await.deref()) }).collect()
);
let inner = FreezeInner {
posts, users
};
Freeze {
inner,
}
}
}
/// Create a new store from a serialisable image of one by cloning each post in it
pub fn unfreeze(freeze: &Freeze) -> Self
impl Freeze
{
/// Create a working `State` from this image.
///
/// This clones all posts and users in the image. Use `into_state` to move into a state.
pub fn unfreeze(&self) -> State
{
let mut posts = Arena::new();
let mut user_map = HashMap::new();
let mut users = Arena::with_capacity(self.inner.users.len());
let mut posts = Arena::with_capacity(self.inner.posts.len());
let mut posts_map = HashMap::with_capacity(self.inner.posts.len());
let mut users_map = HashMap::with_capacity(self.inner.users.len());
let mut posts_user_map = HashMap::with_capacity(self.inner.users.len());
for post in freeze.posts.iter()
{
for (owner_id, post) in self.inner.posts.iter()
{
let idx = posts.insert(Arc::new(RwLock::new(post.clone())));
user_map.entry(post.owner().clone())
.or_insert_with(|| MaybeVec::new())
.push(idx);
posts_user_map.entry(*owner_id).or_insert_with(|| MaybeVec::new()).push(idx);
posts_map.insert(*post.post_id(), idx);
}
Self {
all: posts,
user_map,
for user in self.inner.users.iter()
{
let idx = users.insert(Arc::new(RwLock::new(user.clone())));
users_map.insert(*user.id(), idx);
}
State(Arc::new(Inner {
oneesan: RwLock::new(Oneesan {
users,
posts,
posts_map,
users_map,
posts_user_map
}),
cache: Cache::new(),
}))
}
/// Create a new store by consuming serialisable image of one by cloning each post in it
pub fn from_freeze(freeze: Freeze) -> Self
/// Convert this image into a new `State`.
pub fn into_state(self) -> State
{
let mut posts = Arena::new();
let mut user_map = HashMap::new();
let mut users = Arena::with_capacity(self.inner.users.len());
let mut posts = Arena::with_capacity(self.inner.posts.len());
let mut posts_map = HashMap::with_capacity(self.inner.posts.len());
let mut users_map = HashMap::with_capacity(self.inner.users.len());
let mut posts_user_map = HashMap::with_capacity(self.inner.users.len());
for post in freeze.posts.into_iter()
for (owner_id, post) in self.inner.posts.into_iter()
{
let mapping = user_map.entry(post.owner().clone())
.or_insert_with(|| MaybeVec::new());
let post_id = *post.post_id();
let idx = posts.insert(Arc::new(RwLock::new(post)));
mapping.push(idx);
posts_user_map.entry(owner_id).or_insert_with(|| MaybeVec::new()).push(idx);
posts_map.insert(post_id, idx);
}
Self {
all: posts,
user_map,
for user in self.inner.users.into_iter()
{
let user_id = *user.id();
let idx = users.insert(Arc::new(RwLock::new(user)));
users_map.insert(user_id, idx);
}
State(Arc::new(Inner {
cache: Cache::new(),
oneesan: RwLock::new(Oneesan {
users,
posts,
posts_map,
users_map,
posts_user_map
})
}))
}
}
impl From<Freeze> for State
{
/// Thin delegation to [`Freeze::into_state`].
#[inline] fn from(freeze: Freeze) -> Self
{
freeze.into_state()
}
}
#[cfg(test)]
mod tests
{
/// Round-trips an empty state image through `write_async`/`read_async`, then
/// checks `into_write_async` produces byte-identical output for the same image.
#[tokio::test]
async fn read_write()
{
let mut buffer = Vec::new();
let state = super::State::new();
let freeze = state.freeze().await;
freeze.write_async(&mut buffer).await.unwrap();
let freeze2 = super::Freeze::read_async(&mut &buffer[..]).await.unwrap();
assert_eq!(freeze, freeze2);
let mut buffer2 = Vec::new();
freeze2.into_write_async(&mut buffer2).await.unwrap();
assert_eq!(buffer, buffer2);
}
/// Freeze -> unfreeze -> freeze must yield an equal image; `into_state`
/// must also consume the image cleanly.
#[tokio::test]
async fn creation()
{
let state = super::State::new();
let freeze = state.freeze().await;
let state2 = freeze.unfreeze();
let freeze2 = state2.freeze().await;
assert_eq!(freeze, freeze2);
let _state3 = freeze2.into_state();
}
}

@ -1,69 +1,196 @@
use super::*;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::sync::{
RwLock,
mpsc,
};
use generational_arena::{
Arena, Index as ArenaIndex,
Arena,Index,
};
use std::sync::Arc;
use tokio::sync::RwLock;
use std::ops::{Deref, DerefMut};
use futures::prelude::*;
use user::{User, UserID};
use post::{Post, PostID};
pub mod session;
pub mod user;
pub mod post;
pub mod body;
mod cache;
pub use cache::*;
mod service; pub use service::*;
mod freeze; pub use freeze::*;
mod freeze;
pub use freeze::*;
/// An `Arc<RwLock<T>>` wrapper
type SharedMut<T> = Arc<RwLock<T>>;
/// A shared pointer to a post
pub type SharedPost = SharedMut<Post>;
/// A shared pointer to a user
pub type SharedUser = SharedMut<User>;
/// Entire post state container
#[derive(Debug)]
pub struct Imouto
struct Oneesan
{
all: Arena<Arc<RwLock<post::Post>>>,
user_map: HashMap<user::UserID, MaybeVec<ArenaIndex>>,
}
users: Arena<Arc<RwLock<User>>>,
posts: Arena<Arc<RwLock<Post>>>,
impl Imouto
{
/// Create a new empty container
pub fn new() -> Self
{
Self {
all: Arena::new(),
user_map: HashMap::new(),
}
}
/// Maps `UserID`s to indexes in the `users` arena.
users_map: HashMap<UserID, Index>,
/// Maps `PostID`s to indexies in the `posts` arena.
posts_map: HashMap<PostID, Index>,
/// Maps `UserID`s to the user's owned posts in the `posts` arena.
posts_user_map: HashMap<UserID, MaybeVec<Index>>,
}
#[derive(Debug)]
/// Entire program state
struct Oneesan
struct Inner
{
posts: RwLock<Imouto>,
/// The posts and user state.
oneesan: RwLock<Oneesan>,
/// A non-persistant cache
cache: Cache,
}
/// Shares whole program state
/// Contains all posts and users
#[derive(Debug, Clone)]
pub struct State(Arc<Oneesan>);
pub struct State(Arc<Inner>);
impl State
{
/// Create a new empty state.
/// Create a new empty state
pub fn new() -> Self
{
Self(Arc::new(Oneesan {
posts: RwLock::new(Imouto::new()),
}))
Self(Arc::new(
Inner {
oneesan: RwLock::new(Oneesan {
users: Arena::new(),
posts: Arena::new(),
users_map: HashMap::new(),
posts_map: HashMap::new(),
posts_user_map: HashMap::new(),
}),
cache: Cache::new(),
}
))
}
/// Get a reference to the post state container
pub async fn imouto(&self) -> tokio::sync::RwLockReadGuard<'_, Imouto>
/// Insert a new user into state.
///
/// # Locks
/// This function holds the state write lock while performing insertions.
pub async fn insert_user(&self, user: User) -> SharedUser
{
self.0.posts.read().await
let user_id = *user.id();
let nuser = Arc::new(RwLock::new(user));
let mut write = self.0.oneesan.write().await;
let idx = write.users.insert(Arc::clone(&nuser));
write.users_map.insert(user_id, idx);
nuser
}
/// Get a mutable reference to the post state container
pub async fn imouto_mut(&self) -> tokio::sync::RwLockWriteGuard<'_, Imouto>
/// Insert a new post into state.
///
/// # Locks
/// This function holds the state write lock while performing insertions.
pub async fn insert_post(&self, owner: UserID, post: Post) -> SharedPost
{
self.0.posts.write().await
let post_id =*post.post_id();
let npost = Arc::new(RwLock::new(post));
let mut write = self.0.oneesan.write().await;
let idx = write.posts.insert(Arc::clone(&npost));
write.posts_map.insert(post_id, idx);
write.posts_user_map.entry(owner).or_insert_with(|| MaybeVec::new()).push(idx);
npost
}
}
/// Get a shared reference to a user by this ID, if it exists.
///
/// # Locks
/// Holds the state read lock for the duration of the lookup.
/// # Panics
/// If the internal ID mappings are invalid
pub async fn get_user_by_id(&self, id: UserID) -> Option<SharedUser>
{
let onee = self.0.oneesan.read().await;
// Resolve the ID to an arena index first; bail out early when unknown.
let idx = *onee.users_map.get(&id)?;
Some(onee.users.get(idx).unwrap().clone())
}
/// Get a shared reference to a post by this ID, if it exists.
///
/// # Locks
/// Holds the state read lock for the duration of the lookup.
/// # Panics
/// If the internal ID mappings are invalid
pub async fn get_post_by_id(&self, id: PostID) -> Option<SharedPost>
{
let onee = self.0.oneesan.read().await;
// Resolve the ID to an arena index first; bail out early when unknown.
let idx = *onee.posts_map.get(&id)?;
Some(onee.posts.get(idx).unwrap().clone())
}
/// Consume into a stream that yields all users lazily.
///
/// # Locks
/// The stream producer holds the Oneesan *read* lock until the stream is consumed or dropped.
pub fn all_users_stream(self: Arc<Self>) -> impl Stream<Item=SharedUser>
{
// Bounded channel: the producer only reads ahead of the consumer by the buffer size.
let (mut tx, rx) = mpsc_channel!(defaults::STATE_STREAM_BUFFER_SIZE);
tokio::spawn(async move {
let onee = self.0.oneesan.read().await;
for (_, user) in onee.users.iter()
{
// A send error means the receiver was dropped; stop producing.
if tx.send(user.clone()).await.is_err() {
break;
}
}
});
rx
}
/// Consume into a stream that yields all posts created by this user lazily.
///
/// # Locks
/// The stream producer holds the Oneesan *read* lock until the stream is consumed or dropped.
///
/// # Panics
/// The background task will panic and drop the producer if the internal ID mappings are invalid
pub fn all_posts_by_user_stream(self: Arc<Self>, user: UserID) -> impl Stream<Item=SharedPost>
{
// Bounded channel: the producer only reads ahead of the consumer by the buffer size.
let (mut tx, rx) = mpsc_channel!(defaults::STATE_STREAM_BUFFER_SIZE);
tokio::spawn(async move {
let onee = self.0.oneesan.read().await;
// Unknown user: produce nothing; the receiver simply yields an empty stream.
if let Some(map) = onee.posts_user_map.get(&user) {
for &post in map.iter()
{
// `unwrap` panics on a dangling index (invalid internal mapping, see above).
if tx.send(onee.posts.get(post).unwrap().clone()).await.is_err() {
break;
}
}
}
});
rx
}
/// Consume into a stream that yields all posts lazily.
///
/// # Locks
/// The stream producer holds the Oneesan *read* lock until the stream is consumed or dropped.
pub fn all_posts_stream(self: Arc<Self>) -> impl Stream<Item=SharedPost>
{
// Bounded channel: the producer only reads ahead of the consumer by the buffer size.
let (mut tx, rx) = mpsc_channel!(defaults::STATE_STREAM_BUFFER_SIZE);
tokio::spawn(async move {
let onee = self.0.oneesan.read().await;
for (_, post) in onee.posts.iter()
{
// A send error means the receiver was dropped; stop producing.
if tx.send(post.clone()).await.is_err() {
break;
}
}
});
rx
}
}

@ -1,37 +0,0 @@
use super::*;
/// An open post. No fields are defined yet.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct OpenPost
{
}
/// A closed post. No fields are defined yet.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ClosedPost
{
}
/// Whether a post is open or closed, carrying the kind-specific data.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum PostKind
{
Open(OpenPost),
Closed(ClosedPost),
}
/// A post
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Post
{
// ID of the user/session that owns this post.
owner: user::UserID,
// Open/closed state plus its associated data.
kind: PostKind
}
impl Post
{
/// The ID of the owning user & session of this post.
#[inline] pub fn owner(&self) -> &user::UserID
{
&self.owner
}
}

@ -1,141 +0,0 @@
//! Controls the broadcasting of events sent from the state service task
use super::*;
/// The kind of event outputted from a state service's broadcast stream
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)]
pub enum ServiceEventKind
{
/// Does nothing.
///
/// # Associated object
/// `()`.
Ping,
/// Does nothing.
///
/// # Associated object
/// None.
KeepAlive,
}
cfg_if!{
if #[cfg(debug_assertions)] {
/// Type used for directed array.
/// Currently testing `smolset` over eagerly allocating.
pub(super) type SESet<T> = smolset::SmolSet<[T; 1]>;
} else {
// Release builds fall back to an ordinary HashSet.
pub(super) type SESet<T> = std::collections::HashSet<T>;
}
}
/// An event outputted from a state service's broadcast stream
#[derive(Debug, Clone)]
pub struct ServiceEvent
{
// Unique ID for this broadcast.
bc_id: BroadcastID,
// What kind of event this is.
kind: ServiceEventKind,
// When `Some`, the set of subscribers this event targets; `None` = undirected.
directed: Option<SESet<ServiceSubID>>,
// Optional type-erased payload attached to the event.
obj: Option<ServiceEventObject>,
}
impl ServiceEvent
{
/// Create a new event to be broadcast.
///
/// `directed`, when present and non-empty, limits which subscribers the event
/// targets; an empty target list is normalised to `None` (undirected), so
/// `is_directed()` stays `false` for it.
/// `obj` is an optional type-erased payload attached to the event.
fn new<T>(kind: ServiceEventKind, directed: Option<impl IntoIterator<Item=ServiceSubID>>, obj: Option<T>) -> Self
where T: Any + Send + Sync + 'static
{
Self {
bc_id: BroadcastID::id_new(),
kind,
// `filter` replaces the old `and_then(|n| if n.len() < 1 …)` dance:
// discard the collected set entirely when it is empty.
directed: directed.map(|x| x.into_iter().collect::<SESet<_>>())
.filter(|set| !set.is_empty()),
obj: obj.map(|x| ServiceEventObject(Arc::new(x))),
}
}
/// The unique ID of this broadcast event.
#[inline] pub fn id(&self) -> &BroadcastID
{
&self.bc_id
}
/// The kind of this event.
#[inline] pub fn kind(&self) -> ServiceEventKind
{
self.kind
}
/// Check if this event is directed at `whom`.
///
/// Undirected events return `false` for every subscriber.
pub fn is_directed_for(&self, whom: &ServiceSubID) -> bool
{
self.directed.as_ref().map_or(false, |targets| targets.contains(whom))
}
/// Check if this event is directed to anyone
pub fn is_directed(&self) -> bool
{
self.directed.is_some()
}
/// Check who this event is directed to.
///
/// If it is not directed, an empty iterator will be returned.
pub fn directed_to(&self) -> impl Iterator<Item = &'_ ServiceSubID> + '_
{
match self.directed.as_ref()
{
Some(yes) => MaybeIter::many(yes.iter()),
None => MaybeIter::none(),
}
}
/// Get a reference to the object, if there is one.
pub fn obj_ref(&self) -> Option<&ServiceEventObject>
{
self.obj.as_ref()
}
/// Get a mutable reference to the object, if there is one.
pub fn obj_mut(&mut self) -> Option<&mut ServiceEventObject>
{
self.obj.as_mut()
}
/// Try to consume into the inner object. If there is no object, return self.
pub fn try_into_object(self) -> Result<ServiceEventObject, Self>
{
match self.obj
{
Some(obj) => Ok(obj),
None => Err(self),
}
}
}
impl From<ServiceEvent> for Option<ServiceEventObject>
{
/// Discard the event envelope, keeping only its (optional) payload.
#[inline] fn from(from: ServiceEvent) -> Self
{
from.obj
}
}
impl TryFrom<ServiceEvent> for ServiceEventObject
{
type Error = NoObjectError;
/// Extract the event's payload, failing with `NoObjectError` when the event
/// carried none. Uses `Option::ok_or` instead of an explicit match.
#[inline] fn try_from(from: ServiceEvent) -> Result<Self, Self::Error>
{
from.obj.ok_or(NoObjectError)
}
}

@ -1,28 +0,0 @@
//! Global state service
use super::*;
use tokio::{
sync::{
watch,
mpsc,
oneshot,
broadcast,
},
task::JoinHandle,
};
use crate::service::{
ExitStatus,
};
use std::{error, fmt};
use std::sync::Weak;
use std::any::Any;
use std::collections::{BTreeMap};
id_type!(ServiceSubID; "Optional ID for filtering directed broadcast messages");
id_type!(BroadcastID; "Each broadcast message has a unique ID.");
mod supervisor; pub use supervisor::*;
mod resreq; pub use resreq::*;
mod obj; pub use obj::*;
mod events; pub use events::*;

@ -1,114 +0,0 @@
//! broadcast object definitions
use super::*;
/// Object sent through the broadcast channel.
///
/// These objects can be cloned and downcasted, becaause they are atomically refcounted if that is more desireable.
#[derive(Clone)]
#[repr(transparent)]
pub struct ServiceEventObject(pub(super) Arc<dyn Any + Send + Sync + 'static>);
shim_debug!(ServiceEventObject);
/// A weak reference to a `ServiceEventObject`.
///
/// Does not keep the object alive; upgrade it to access the value.
#[derive(Clone)]
#[repr(transparent)]
pub struct ServiceEventObjectRef(pub(super) Weak<dyn Any + Send + Sync + 'static>);
shim_debug!(ServiceEventObjectRef);
impl ServiceEventObjectRef
{
/// Try to upgrade to a concrete reference, and then clone the inner object.
pub fn try_clone(&self) -> Option<ServiceEventObject>
{
match self.0.upgrade()
{
Some(arc) => Some(ServiceEventObject(arc).clone()),
None => None
}
}
/// Try to upgrade to a concrete reference.
pub fn upgrade(self) -> Result<ServiceEventObject, Self>
{
match self.0.upgrade()
{
Some(arc) => Ok(ServiceEventObject(arc)),
None => Err(self),
}
}
/// Check if the object has not been destroyed yet.
pub fn is_alive(&self) -> bool
{
self.0.strong_count() > 0
}
}
impl ServiceEventObject
{
/// Clone the held value itself into a new, independent handle.
// NOTE(review): relies on the project's `clone_dyn_any_sync` extension —
// presumably a deep clone of the erased value; confirm against its definition.
pub fn clone_inner(&self) -> Self
{
Self(Arc::from(self.0.clone_dyn_any_sync()))
}
/// Get a weak reference counted handle to the object, without cloning the object itself.
pub fn clone_weak(&self) -> ServiceEventObjectRef
{
ServiceEventObjectRef(Arc::downgrade(&self.0))
}
/// Try to downcast the inner object to a concrete type and then clone it.
///
/// This will fail if:
/// * The downcasted type is invalid
#[inline] pub fn downcast_clone<T: Any + Clone + Send + Sync + 'static>(&self) -> Option<T>
{
// The inner unwrap cannot fail: `clone_dyn_any` preserves the concrete type
// that `downcast_ref` just verified.
self.downcast_ref::<T>().map(|x| *x.clone_dyn_any().downcast().unwrap())
}
/// Try to consume this instance into downcast.
///
/// This will fail if:
/// * The downcasted type is invalid
/// * There are other references to this object (created through `clone_ref()`.).
pub fn try_into_downcast<T: Any + Send + Sync + 'static>(self) -> Result<T, Self>
{
// Two-stage fallibility: first the type check, then sole-ownership check.
match Arc::downcast(self.0)
{
Ok(v) => match Arc::try_unwrap(v) {
Ok(v) => Ok(v),
Err(s) => Err(Self(s)),
},
Err(e) => Err(Self(e)),
}
}
/// Check if there are any other references to this object
#[inline] pub fn is_unique(&self) -> bool
{
Arc::strong_count(&self.0) == 1
}
/// Check whether the held object is of concrete type `T`.
#[inline] pub fn is<T: Any + Send + Sync + 'static>(&self) -> bool
{
self.0.as_ref().is::<T>()
}
/// Try to downcast the object into a concrete type
#[inline] pub fn downcast_ref<T: Any + Send + Sync + 'static>(&self) -> Option<&T>
{
self.0.as_ref().downcast_ref::<T>()
}
}
#[derive(Debug)]
/// Error returned from trying to extract an object from a `ServiceEvent` which has none.
pub struct NoObjectError;
impl error::Error for NoObjectError{}
impl fmt::Display for NoObjectError
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
// The previous message was cut off after "this "; complete the sentence.
write!(f, "there was no object broadcasted along with this event")
}
}

@ -1,65 +0,0 @@
//! Responses and requests for the state service(s).
//!
//! These are sent to `Supervisor` which then dispatches them accordingly.
use super::*;
/// The kind of request to send to the the service
#[derive(Debug)]
#[non_exhaustive]
pub enum ServiceRequestKind
{
/// A no-op request.
None,
/// Test request.
#[cfg(debug_assertions)] EchoRequest(String),
}
/// The kind of response to expect from a service query, if any.
#[derive(Debug)]
#[non_exhaustive]
pub enum ServiceResponseKind
{
/// Test response.
#[cfg(debug_assertions)] EchoResponse(String),
/// Empty response
None,
}
/// A response from a service to a specific query.
///
/// It is sent through the `output` oneshot channel in the `ServiceCommand` struct.
// NOTE(review): this wraps `ServiceRequestKind`, yet `ServiceResponseKind` exists
// with matching variants — likely intended to wrap the response kind; confirm.
#[derive(Debug)]
pub struct ServiceResponse(ServiceRequestKind);
impl ServiceResponse
{
/// An empty (default) response
#[inline] pub const fn none() -> Self
{
Self(ServiceRequestKind::None)
}
}
/// A formed service request.
#[derive(Debug)]
pub struct ServiceRequest
{
// What is being asked of the service.
kind: ServiceRequestKind,
output: oneshot::Sender<ServiceResponse>, // If there is no response, this sender will just be dropped and the future impl can return `None` instead of `Some(response)`.
}
impl ServiceRequest
{
/// Create a new request
///
/// Returns the request plus the receiver on which its response (if any) arrives.
pub(in super) fn new(kind: ServiceRequestKind) -> (Self, oneshot::Receiver<ServiceResponse>)
{
let (tx, rx) = oneshot::channel();
(Self {
kind,
output: tx
}, rx)
}
}

@ -1,92 +0,0 @@
//! Dispatching to state service task(s) through a supervisor
use super::*;
use tokio::time;
use std::{fmt, error};
use futures::prelude::*;
impl Supervisor
{
/// Dispatch a request to the supervisor to be passed through to a subtask.
///
/// # Returns
/// Returns a `Future` that can be awaited on to produce the value sent back by the task (if there is one).
///
/// # Errors
/// * The first failure will be caused if sending to the supervisor fails.
/// * The 2nd failure will be caused if either the supervisor, or its delegated task panics before being able to respond, or if the task simply does not respond.
pub async fn dispatch_req(&mut self, kind: ServiceRequestKind) -> Result<impl Future<Output=Result<ServiceResponse, SupervisorDispatchError>> + 'static, SupervisorDispatchError>
{
let (req, rx) = ServiceRequest::new(kind);
// A send failure means the supervisor's receiver is gone (task died).
self.pipe.send(req).await.map_err(|_| SupervisorDispatchError::Send)?;
// A recv failure means the response sender was dropped without replying.
Ok(rx.map_err(|_| SupervisorDispatchError::Recv))
}
/// Dispatch a request to the supervisor to be passed through to a subtask and then wait for a response from it.
///
/// # Returns
/// Returns the value sent back by the task, if there is one
pub async fn dispatch_and_wait(&mut self, kind: ServiceRequestKind) -> Result<ServiceResponse, SupervisorDispatchError>
{
Ok(self.dispatch_req(kind)
.await?
.await?)
}
/// Dispatch a request to the supervisor to be passed through to a subtask and then wait for a response from it.
/// If the timeout expires before a response from the server is received, then the operation will cancel and the error returned will be `SupervisorDispatchError::Timeout`.
///
/// # Returns
/// Returns the value sent back by the task, if there is one
pub async fn dispatch_and_wait_timeout(&mut self, kind: ServiceRequestKind, timeout: time::Duration) -> Result<ServiceResponse, SupervisorDispatchError>
{
let resp_wait = self.dispatch_req(kind)
.await?;
// Race the response against the timeout; whichever finishes first wins.
tokio::select! {
val = resp_wait => {
return Ok(val?);
}
_ = time::delay_for(timeout) => {
return Err(SupervisorDispatchError::Timeout("receiving response", Some(timeout)))
}
}
}
/// Dispatch a request to the supervisor to be passed through to a subtask and then wait for a response from it.
/// If the future `until` completes before a response from the server is received, then the operation will cancel and the error returned will be `SupervisorDispatchError::Timeout`.
///
/// # Returns
/// Returns the value sent back by the task, if there is one
pub async fn dispatch_and_wait_until(&mut self, kind: ServiceRequestKind, until: impl Future) -> Result<ServiceResponse, SupervisorDispatchError>
{
let resp_wait = self.dispatch_req(kind)
.await?;
// Race the response against the caller-supplied cancellation future.
tokio::select! {
val = resp_wait => {
return Ok(val?);
}
_ = until => {
return Err(SupervisorDispatchError::Timeout("receiving response", None))
}
}
}
}
/// Error when dispatching a request to the supervisor
#[derive(Debug)]
pub enum SupervisorDispatchError
{
// Send: the request could not be delivered.
// Recv: the response channel closed without a reply.
// Timeout: stage name + optional duration that elapsed.
Send, Recv, Timeout(&'static str, Option<tokio::time::Duration>),
}
impl error::Error for SupervisorDispatchError{}
impl fmt::Display for SupervisorDispatchError
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
match self {
Self::Send => write!(f, "dispatching the request failed"),
Self::Recv => write!(f, "receiving the response failed"),
Self::Timeout(msg, Some(duration)) => write!(f, "timeout on {} was reached ({:?})", msg, duration),
Self::Timeout(msg, _) => write!(f, "timeout on {} was reached", msg),
}
}
}

@ -1,309 +0,0 @@
//! Handles spawning and restarting service task(s)
use super::*;
use tokio::sync::RwLock;
use std::mem::MaybeUninit;
use std::ops;
use futures::prelude::*;
use tokio::time;
use std::{fmt, error};
const SUPERVISOR_BACKLOG: usize = 32;
mod dispatch; pub use dispatch::*;
// TODO: This all needs redoing when i'm actually lucid. This part seems okay but the rest of `service` needs to go and be replaced by something like this
/// Control messages understood by the supervisor task (initialise, shutdown,
/// restart, task limits).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy)]
pub enum SupervisorControl
{
/// Normal working
Initialise,
/// Signal the subtask(s) to shut down, then wait for them and exit, with an optional timeout.
///
/// # Notes
/// If the timeout expires while waiting, then the mode is switched to `Drop`.
Signal(Option<time::Duration>),
/// Drop all handles and pipes to subtask(s) then immediately exit.
Drop,
/// Restart any and all subtask(s)
Restart,
/// Set the max task limit. Default is 0.
TaskLimit(usize),
}
impl Default for SupervisorControl
{
/// The default control state is `Initialise` (normal working).
#[inline]
fn default() -> Self
{
Self::Initialise
}
}
/// Supervisor responsible for spawning the state handler service.
#[derive(Debug)]
pub(super) struct Supervisor
{
/// Handle for the supervisor task itself
handle: JoinHandle<ExitStatus>,
/// Watch sender for signalling shutdowns the supervisor task itself
shutdown: watch::Sender<SupervisorControl>,
/// The pipe to send requests to the supervisor's subtasks
pipe: mpsc::Sender<ServiceRequest>,
/// The initial receiver created from `broadcast_root`.
broadcast_receiver: broadcast::Receiver<ServiceEvent>,
/// Data shared between the supervisor's task and its controller instance here.
shared: Arc<SupervisorShared>,
}
/// Object shared btweeen the Supervisor control instance and its supervisor task.
#[derive(Debug)]
struct SupervisorShared
{
/// this is for filtering specific messages to specific subscribers
sub: RwLock<BTreeMap<ServiceEventKind, SESet<ServiceSubID>>>,
// Root sender all broadcast receivers are subscribed from.
broadcast_root: broadcast::Sender<ServiceEvent>,
// The program state the service operates on.
state: state::State,
}
/// A subscriber to supervisor task(s) event pump
#[derive(Debug)]
pub struct Subscriber
{
// Identifies this subscriber for directed messages.
id: ServiceSubID,
/// For directed messages
spec: mpsc::Receiver<ServiceEvent>,
/// For broadcast messages
broad: broadcast::Receiver<ServiceEvent>,
}
impl Supervisor
{
/// Attempt to send a control signal to the supervisor itself
pub fn signal_control(&self, sig: SupervisorControl) -> Result<(), watch::error::SendError<SupervisorControl>>
{
self.shutdown.broadcast(sig)?;
Ok(())
}
/// Drop all communications with background worker and wait for it to complete
pub async fn join_now(self) -> eyre::Result<()>
{
let handle = {
let Self { handle, ..} = self; // drop everything else
handle
};
handle.await?;
Ok(())
}
/// Check if the background worker has not been dropped.
///
/// If this returns false it usually indicates a fatal error.
pub fn is_alive(&self) -> bool
{
// The task holds one strong ref; >1 means it is still running.
Arc::strong_count(&self.shared) > 1
}
/// Create a new supervisor for this state.
///
/// Spawns the supervisor background task immediately.
pub fn new(state: state::State) -> Self
{
let shutdown = watch::channel(Default::default());
let pipe = mpsc::channel(SUPERVISOR_BACKLOG);
let (broadcast_root, broadcast_receiver) = broadcast::channel(SUPERVISOR_BACKLOG);
let shared = Arc::new(SupervisorShared{
broadcast_root,
state,
});
let (shutdown_0, shutdown_1) = shutdown;
let (pipe_0, pipe_1) = pipe;
Self {
shutdown: shutdown_0,
pipe: pipe_0,
broadcast_receiver,
shared: Arc::clone(&shared),
// The spawned task owns the receiving halves; `ExitStatus` is derived
// from the service function's termination result.
handle: tokio::spawn(async move {
let shared = shared;
ExitStatus::from(service_fn(SupervisorTaskState {
shared,
recv: pipe_1,
shutdown: shutdown_1,
}).await.or_else(|err| err.into_own_result()))
}),
}
}
}
/// The state held by the running superviser service
#[derive(Debug)]
struct SupervisorTaskState
{
// Receives control signals from the `Supervisor` handle.
shutdown: watch::Receiver<SupervisorControl>,
// Receives requests to dispatch to worker subtasks.
recv: mpsc::Receiver<ServiceRequest>,
// Shared with the `Supervisor` control instance.
shared: Arc<SupervisorShared>,
}
/// Detached supervisor server
///
/// Runs until either the command pipe closes (graceful) or a shutdown
/// signal arrives, and reports how it terminated.
async fn service_fn(SupervisorTaskState {shared, mut recv, mut shutdown}: SupervisorTaskState) -> Result<(), ServiceTerminationError>
{
// Item impls are allowed inside fn bodies; this keeps the Default
// for `TerminationKind` next to its only point of relevance.
impl Default for TerminationKind
{
#[inline]
fn default() -> Self
{
Self::Graceful
}
}
// The command stream to dispatch to the worker tasks
let command_dispatch = async {
while let Some(req) = recv.recv().await {
todo!("Dispatch to child(s)");
}
// Pipe closed: all request senders dropped.
TerminationKind::Graceful
};
tokio::pin!(command_dispatch);
// The signal stream to be handled here
let signal_stream = async {
while let Some(value) = shutdown.recv().await
{
use SupervisorControl::*;
match value {
Initialise => info!("Initialised"),
Signal(None) => return TerminationKind::SignalHup,
Signal(Some(to)) => return TerminationKind::SignalTimeout(to),
Drop => return TerminationKind::Immediate,
Restart => todo!("not implemented"),
TaskLimit(_limit) => todo!("not implemented"),
}
}
TerminationKind::Graceful
};
tokio::pin!(signal_stream);
//loop {
// Whichever stream finishes first decides the termination result.
tokio::select! {
sd_kind = &mut signal_stream => {
// We received a signal
Err(ServiceTerminationError::Signal(sd_kind))
}
disp_end = &mut command_dispatch => {
// The command dispatch exited first, the logical error is `Graceful`. But it's not really an error, so...
disp_end.into()
}
}
// }
}
/// The manner in which the supervisor exited.
#[derive(Debug)]
pub enum TerminationKind
{
/// The child task(s) were signalled to stop and they were waited on.
SignalHup,
/// If there was a timeout specified, and that timeout expired, the message will be `SignalTimeout` instead of `SignalHup`.
SignalTimeout(time::Duration),
/// Immediately drop everything and exit
Immediate,
/// A non-signalled shutdown. There were no more watchers for the shutdown channel.
Graceful,
}
impl TerminationKind
{
/// Convert `TerminationKind::Graceful` into a non-error
fn strip_grace(self) -> Result<(), Self>
{
match self {
Self::Graceful => Ok(()),
e => Err(e),
}
}
}
impl error::Error for TerminationKind{}
impl fmt::Display for TerminationKind
{
/// Human-readable description of how the supervisor terminated.
// Fixes typos in the previous messages: "compiled" -> "complied",
// "to shut but" -> "to shut down but".
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
{
match self {
Self::SignalHup => write!(f, "children were signalled to shut down and complied"),
Self::SignalTimeout(to) => write!(f, "children were signalled to shut down but did not do so within the {:?} timeout", to),
Self::Immediate => write!(f, "children were dropped and an immediate exit was made"),
Self::Graceful => write!(f, "a graceful shutdown order was issued and complied with"),
}
}
}
/// The error returned on a failed service termination.
///
/// A graceful termination is represented as `Ok(())` (see `into_own_result`),
/// never as a variant of this error.
#[derive(Debug)]
#[non_exhaustive]
pub enum ServiceTerminationError
{
/// Was terminated by a signal.
Signal(TerminationKind),
/// Was terminated by a panic.
Panic,
/// There were no more commands being sent through, and the worker gracefully shut down.
Interest,
}
impl From<TerminationKind> for Result<(), ServiceTerminationError>
{
    /// Wrap the termination kind as a signal error, then strip the
    /// graceful (non-error) case via `into_own_result`.
    fn from(kind: TerminationKind) -> Self
    {
        let err = ServiceTerminationError::Signal(kind);
        err.into_own_result()
    }
}
impl ServiceTerminationError
{
    /// Collapse a graceful signal termination into `Ok(())`; anything
    /// else remains an error.
    fn into_own_result(self) -> Result<(), Self>
    {
        if let Self::Signal(kind) = self {
            kind.strip_grace().map_err(Self::Signal)
        } else {
            Err(self)
        }
    }
}
impl error::Error for ServiceTerminationError
{
    /// Only the `Signal` variant carries an underlying error.
    fn source(&self) -> Option<&(dyn error::Error + 'static)>
    {
        if let Self::Signal(kind) = self {
            Some(kind)
        } else {
            None
        }
    }
}
impl fmt::Display for ServiceTerminationError
{
    /// Short human-readable reason for the termination.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        let msg = match self {
            Self::Signal(_) => "shut down by signal",
            Self::Panic => "shut down by panic. this is usually fatal",
            Self::Interest => "all communications with this service stopped",
        };
        f.write_str(msg)
    }
}

@ -1,145 +0,0 @@
//! Session for each connected user
use super::*;
use tokio::{
sync::{
mpsc,
broadcast,
oneshot,
},
task::JoinHandle,
};
use std::sync::Arc;
use crate::service::{self, SubscribeError};
id_type!(SessionID; "A unique session ID, not bound to a user.");
impl SessionID
{
/// Generate a random session ID.
#[inline] fn generate() -> Self
{
Self::id_new()
}
}
/// Responses broadcast by a session's service task.
#[derive(Debug)]
pub enum SessionResponse
{
/// The session with this ID has closed (its service task exited).
Closed(SessionID),
}
/// Commands accepted by a session's service task.
#[derive(Debug)]
pub enum SessionCommand
{
/// Stop the service task for this session.
Shutdown,
/// Subscribe to the session's message pump.
Subscribe(oneshot::Sender<broadcast::Receiver<SessionResponse>>),
/// Take this websocket connection.
//TODO: websockets
// NOTE(review): the never type `!` makes this variant unconstructable
// until the websocket payload type is decided.
Connect(!),
}
/// Metadata for a session, shared across its service and handle(s)
// (was "scored across" — assumed typo for "shared", matching the
// `Arc<SessionMetadata>` usage below)
#[derive(Debug)]
struct SessionMetadata
{
// Unique, randomly generated session identifier.
id: SessionID,
// The user this session belongs to.
user: user::User,
}
/// A single connected session.
/// Hold the service for this session, its ID, and (TODO) its websocket connection.
#[derive(Debug)]
pub struct Session
{
// Shared with the spawned service task.
meta: Arc<SessionMetadata>,
// Command channel into the service task.
tx: mpsc::Sender<SessionCommand>,
// Broadcast of responses from the service task.
rx: broadcast::Receiver<SessionResponse>,
// Join handle of the spawned service task.
handle: JoinHandle<()>,
}
/// Service loop for one session: drain commands until the channel closes
/// or a `Shutdown` arrives, then announce `Closed` to all subscribers.
async fn service_task(meta: Arc<SessionMetadata>, state: state::State, response_pair: (broadcast::Sender<SessionResponse>, mpsc::Receiver<SessionCommand>))
{
    let (broadcast_tx, mut command_rx) = response_pair;
    loop {
        let command = match command_rx.recv().await {
            Some(c) => c,
            None => break,
        };
        match command {
            SessionCommand::Shutdown => break,
            SessionCommand::Subscribe(reply) => ignore!(reply.send(broadcast_tx.subscribe())),
            _ => todo!(),
        }
    }
    // Best-effort: there may be no live receivers left.
    let _ = broadcast_tx.send(SessionResponse::Closed(meta.id.clone()));
}
impl Session
{
/// Create a new session object
///
/// Spawns a background `service_task` and wires up its command (mpsc)
/// and response (broadcast) channels, each with a buffer of 16.
pub fn create(user: user::User, state: state::State) -> Self
{
let id = SessionID::generate();
let meta =Arc::new(SessionMetadata{
user,
id,
});
let (handle, tx, rx) = {
// Responses are broadcast; commands go through a single mpsc queue.
let (s_tx, s_rx) = broadcast::channel(16);
let (r_tx, r_rx) = mpsc::channel(16);
(tokio::spawn(service_task(Arc::clone(&meta), state, (s_tx, r_rx))),
r_tx, s_rx)
};
Self {
meta,
handle,
tx, rx,
}
}
/// The randomly generated ID of this session, irrespective of the user of this session.
#[inline] pub fn session_id(&self) -> &SessionID
{
&self.meta.id
}
/// The unique user ID of this session
pub fn user_id(&self) -> user::UserID
{
self.meta.user.id_for_session(self)
}
/// Ask the service to subscribe to it.
///
/// Returns the service's broadcast receiver, or `SenderDropped` if the
/// service has stopped, or `NoResponse` if it dropped the reply channel.
pub async fn subscribe(&mut self) -> Result<broadcast::Receiver<SessionResponse>, SubscribeError>
{
let (tx, rx) = oneshot::channel();
self.tx.send(SessionCommand::Subscribe(tx)).await.map_err(|_| SubscribeError::SenderDropped)?;
rx.await.map_err(|_| SubscribeError::NoResponse)
}
}
// impl service::Service for Session
// {
// type Message = SessionCommand;
// type Response = SessionResponse;
// #[inline] fn wait_on(self) -> JoinHandle<()> {
// self.handle
// }
// #[inline] fn message_in_ref(&self) -> &mpsc::Sender<Self::Message> {
// &self.tx
// }
// #[inline] fn message_in(&mut self) -> &mut mpsc::Sender<Self::Message> {
// &mut self.tx
// }
// #[inline] fn message_out(&mut self) -> &mut broadcast::Receiver<Self::Response> {
// &mut self.rx
// }
// #[inline] fn is_alive(&self) -> Option<bool> {
// Some(Arc::strong_count(&self.meta) > 1)
// }
// }

@ -1,106 +0,0 @@
//! Used to determine which post belongs to who.
//!
//! Mostly for determining if a poster owns a post.
use super::*;
use std::{
net::SocketAddr,
};
use cryptohelpers::sha256;
/// A user's unique ID.
///
/// This is composed by the user's address and their session ID.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct UserID(SocketAddr, session::SessionID);
// Process-wide counter XOR-mixed into generated tokens (see
// `generate_token`/`validate_token`).
// NOTE(review): `GlobalCounter`'s get/valid semantics are defined
// elsewhere in the crate — verify token-expiry behaviour there.
static COUNTER: GlobalCounter = GlobalCounter::new();
impl UserID
{
    /// Compute the truncated (64-bit, little-endian) globally-salted SHA256
    /// hash of this ID.
    ///
    /// This is the shared, deterministic basis for both token generation and
    /// validation (previously duplicated in both methods).
    fn token_hash_base(&self) -> u64
    {
        let mut trunc = [0u8; std::mem::size_of::<u64>()];
        let hash = GloballySalted::new(self).compute_sha256_hash();
        bytes::move_slice(&mut trunc[..], hash.as_ref());
        u64::from_le_bytes(trunc)
    }
    /// Generate a token from this instance.
    ///
    /// User tokens are deterministically generated and can be deterministically verified.
    pub fn generate_token(&self) -> u64
    {
        let cnt = COUNTER.get();
        self.token_hash_base() ^ cnt
    }
    /// Validate a token for this instance created with `generate_token`.
    pub fn validate_token(&self, val: u64) -> bool
    {
        // XOR-ing the token with the hash base recovers the counter value,
        // which `COUNTER.valid` then checks.
        COUNTER.valid(self.token_hash_base() ^ val)
    }
}
/// A user not bound to a session.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct User
{
// The network address this user connects from.
addr: SocketAddr,
}
impl User
{
    /// Get the user ID for this session.
    pub fn id_for_session(&self, session: &session::Session) -> UserID
    {
        let sid = session.session_id().clone();
        UserID(self.addr, sid)
    }
}
#[cfg(test)]
mod tests
{
use super::*;
use tokio::sync::mpsc;
use tokio::time;
use std::net::SocketAddrV4;
use std::net::Ipv4Addr;
// Round-trip check: tokens generated on one task must validate on
// another task holding the same `UserID`, even with delays in between.
#[tokio::test]
async fn counter_tokens()
{
let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 80));
let usr = User{addr};
let ses = session::Session::create(usr);
let id = ses.user_id();
let (mut tx, mut rx) = mpsc::channel(5);
let task = tokio::spawn(async move {
let id = ses.user_id();
while let Some(token) = rx.recv().await {
if !id.validate_token(token) {
panic!("Failed to validate token {:x} for id {:?}", token, id);
} else {
eprintln!("Token {:x} valid for id {:?}", token, id);
}
}
});
for x in 1..=10
{
// Sleep on every other iteration so the global counter can advance
// between token generations.
// NOTE(review): `time::delay_for` is the tokio 0.2/0.3 API (renamed
// `sleep` in tokio 1.x) — confirm the pinned tokio version.
if x % 2 == 0 {
time::delay_for(time::Duration::from_millis(10 * x)).await;
}
if tx.send(id.generate_token()).await.is_err() {
eprintln!("Failed to send to task");
break;
}
}
drop(tx);
task.await.expect("Background validate task failed");
}
}

@ -0,0 +1,49 @@
use super::*;
use maud::{
Markup,
Render,
};
use maud::DOCTYPE;
/// Posts view page template
mod view
{
use super::*;
/// Head for the view page template
pub fn head() -> Markup
{
html! {
//TODO
}
}
/// Body for the view page template, rendered from the current state.
// NOTE(review): `state` is currently unused; the template is a stub.
pub fn body(state: &state::State) -> Markup
{
html! {
//TODO
}
}
}
/// Render a complete HTML page: the doctype, the given `head` contents,
/// and `body` wrapped in a `<main>` element.
#[inline] fn page(head: impl Render, body: impl Render) -> Markup
{
html! {
(DOCTYPE)
head {
(head)
}
body {
main {
(body)
}
}
}
}
/// Post view page
///
/// Composes the `view` module's head and body templates into a full page.
pub fn view(state: &state::State) -> Markup
{
//TODO: Create one-time-use token system for rendering page. Insert into state's one-time-use tokens
page(view::head(), view::body(state))
}

@ -0,0 +1,129 @@
//! User related things
use super::*;
use cryptohelpers::{
rsa::{
RsaPublicKey,
Signature,
},
sha256::{self, Sha256Hash},
};
use std::borrow::Borrow;
use std::hash::{Hasher, Hash};
id_type!(UserID; "A unique user iD");
/// The salt added to the user ID hash to be signed by the user's private key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct UserSalt([u8; 16]);
impl UserSalt
{
/// Generate a new random salt.
pub fn generate() -> Self
{
let mut ar = [0u8; 16];
getrandom::getrandom(&mut ar[..]).expect("rng fatal");
Self(ar)
}
}
impl UserID
{
/// SHA256 hash this ID with a salt
pub fn hash_with_salt(&self, salt: &UserSalt) -> Sha256Hash
{
sha256::compute_slice_iter(iter![&self.0.as_bytes()[..], &salt.0[..]])
}
/// Generate a new salt and then return that salt and this ID hashed with that new salt.
///
/// This salt should be stored alongside the signature it is used to
/// produce (cf. `User::id_sig`).
// NOTE(review): the original doc comment was cut short ("This salt should
// be") — confirm the intended guidance.
pub fn generate_hash(&self) -> (UserSalt, Sha256Hash)
{
let salt = UserSalt::generate();
let hash = self.hash_with_salt(&salt);
(salt, hash)
}
}
/// A user identifier.
///
/// Contains the user's unique ID, their public key(s), and a valid signature of the sha256 hash of the user's ID + a random salt.
///
/// # Hash
/// This type hashes to its unique ID, and also borrows to its unique ID.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct User
{
/// The user's unique ID.
id: UserID,
/// An optional set of identifiers given by the user. The user must be trusted to set/edit this value.
ident: post::Ident,
/// The public keys associated with this user.
///
/// # Trust
/// Each public key must have a corresponding signature in its complemental entry in `id_sig` to be considered trusted.
/// A user with no trusted public keys can be anyone or multiple people. This is not *disallowed* usually but should be discouraged.
///
/// Users are only considered trusted if they have at least one trusted public key.
pubkey: Vec<RsaPublicKey>,
/// This vector contains the complemental signature (and salt used with `id` to produce the signed hash) to the public keys in `pubkey`. Each element of `pubkey` must have a complemental element in this vector.
///
/// # Trusted public keys
/// `None` values for this are signatures that have not yet been produced for a given salt, and do not count as complete. Public keys in `pubkey` that do not have a corresponding `Some` signature value in this field should not be trusted.
id_sig: Vec<(UserSalt, Option<Signature>)>,
}
impl Hash for User {
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state)
}
}
// Allows a `User` to be looked up by its `UserID` in keyed collections.
impl Borrow<UserID> for User
{
fn borrow(&self) -> &UserID
{
&self.id
}
}
impl User
{
    /// Is this user a trusted user?
    ///
    /// Does this user have at least one trusted public key (they have produced a valid signature specified in `id_sig`).
    ///
    /// Returns `Ok(true)` on the first key whose complemental signature
    /// verifies; `Ok(false)` if none do; `Err` if verification itself fails.
    // Only change to behaviour-affecting text: fixed typos in the report
    // strings ("verifyable" -> "verifiable", "haiving" -> "having").
    pub fn is_trusted(&self) -> eyre::Result<bool>
    {
        for (i, (key, (salt, sig))) in (0..).zip(self.pubkey.iter().zip(self.id_sig.iter()))
        {
            if let Some(sig) = sig {
                let hash = self.id.hash_with_salt(salt);
                if sig.verify_slice(&hash, key)
                    .with_section(move || format!("{:?}", key).header("Public key was"))
                    .with_section(move || format!("{:?}", sig).header("Signature was"))
                    .with_section(move || format!("{:?}", salt).header("Salt was"))
                    .with_section(move || format!("{:?}", hash).header("Hash was"))
                    .with_note(|| i.to_string().header("For pubkey"))
                    .with_note(|| format!("{:?} ({:?})", self.id, self.ident).header("For user"))
                    .with_warning(|| "This could indicate key or signature corruption. This key or signature may need to be removed.")
                    .with_suggestion(|| "If the user is unable to produce a verifiable signature for this public key despite having access to the private key, the key may be corrupted and may need to be removed and replaced.")
                    .wrap_err(eyre!("Failed to verify embedded signature of salted+hashed ID to complementary public key"))? {
                        return Ok(true);
                    }
            }
        }
        Ok(false)
    }
    /// This user's unique ID
    pub fn id(&self) -> &UserID
    {
        &self.id
    }
}

@ -0,0 +1,207 @@
use std::fmt;
use std::cmp::Ordering;
/// Represents a semver version number of the order `major.minor.bugfix`.
///
/// Stored packed as `(u8 major, u8 minor, u16 bugfix)` — 4 bytes total,
/// round-trippable through `to_u32`/`from_u32`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] //TODO: Make these impls instead of derives, because safe packed borrows.
#[repr(C, packed)]
pub struct Version(u8,u8,u16);
impl fmt::Display for Version
{
    /// Formats as `major.minor.bugfix`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
    {
        let (ma, mi, bf) = (self.major(), self.minor(), self.bugfix());
        write!(f, "{}.{}.{}", ma, mi, bf)
    }
}
impl From<u32> for Version
{
#[inline] fn from(from: u32) -> Self
{
Self::from_u32(from)
}
}
impl From<Version> for u32
{
    /// Encode the version into its packed 32-bit representation.
    #[inline]
    fn from(v: Version) -> Self
    {
        v.to_u32()
    }
}
impl PartialEq<Version> for u32
{
    /// A raw `u32` equals a `Version` when it matches the packed encoding.
    #[inline]
    fn eq(&self, other: &Version) -> bool
    {
        other.to_u32() == *self
    }
}
impl PartialEq<u32> for Version
{
    /// Mirror of `PartialEq<Version> for u32`.
    #[inline]
    fn eq(&self, other: &u32) -> bool
    {
        *other == self.to_u32()
    }
}
impl PartialOrd<u32> for Version
{
    /// Order by the packed 32-bit encoding, which preserves semver order.
    #[inline]
    fn partial_cmp(&self, other: &u32) -> Option<Ordering> {
        // `u32` ordering is total, so this is always `Some`.
        Some(self.to_u32().cmp(other))
    }
}
impl PartialOrd<Version> for u32
{
    /// Mirror of `PartialOrd<u32> for Version`.
    #[inline]
    fn partial_cmp(&self, other: &Version) -> Option<Ordering> {
        Some(self.cmp(&other.to_u32()))
    }
}
impl From<()> for Version
{
    /// The unit type maps to version `0.0.0`.
    // The binding was `from: ()`, which is never read and triggers an
    // unused-variable warning; replaced with `_`.
    #[inline] fn from(_: ()) -> Self
    {
        Self(0,0,0)
    }
}
impl From<(usize,)> for Version
{
    /// A 1-tuple sets the major component; minor and bugfix are zero.
    #[inline]
    fn from((major,): (usize,)) -> Self
    {
        Self::new(major, 0, 0)
    }
}
impl From<(usize, usize)> for Version
{
    /// A 2-tuple sets major and minor; bugfix is zero.
    #[inline]
    fn from((major, minor): (usize, usize)) -> Self
    {
        Self::new(major, minor, 0)
    }
}
impl From<(usize, usize, usize)> for Version
{
    /// All three components given as `usize`; panics if any are out of range
    /// (see `Version::new`).
    #[inline]
    fn from((major, minor, bugfix): (usize, usize, usize)) -> Self
    {
        Self::new(major, minor, bugfix)
    }
}
impl From<(u8, u8, u16)> for Version
{
#[inline] fn from((ma, mi, bu): (u8, u8, u16)) -> Self
{
Self(ma,mi,bu)
}
}
impl From<Version> for (u8, u8, u16)
{
#[inline] fn from(from: Version) -> Self
{
(from.0, from.1, from.2)
}
}
impl From<Version> for (usize, usize, usize)
{
    /// Widened `(major, minor, bugfix)` components.
    #[inline]
    fn from(v: Version) -> Self
    {
        (v.major(), v.minor(), v.bugfix())
    }
}
impl Version
{
/// The major component of this `Version`.
#[inline] pub const fn major(self) -> usize
{
self.0 as usize
}
/// The minor component of this `Version`.
#[inline] pub const fn minor(self) -> usize
{
self.1 as usize
}
/// The bugfix component of this `Version`.
#[inline] pub const fn bugfix(self) -> usize
{
self.2 as usize
}
/// Convert to a 32 bit integer representation.
///
/// Layout is big-endian `[major, minor, bugfix_hi, bugfix_lo]`, so numeric
/// order of the `u32` matches semver order (relied on by the `PartialOrd`
/// impls and the tests below).
#[inline] pub const fn to_u32(self) -> u32
{
let mb = self.2.to_be_bytes();
u32::from_be_bytes([
self.0,
self.1,
mb[0],
mb[1],
])
}
/// Construct from a 32 bit integer representation (inverse of `to_u32`).
// (original doc comment said "Convert to" — copy-paste from `to_u32`)
#[inline] pub const fn from_u32(from: u32) -> Self
{
let bytes = from.to_be_bytes();
Self(
bytes[0],
bytes[1],
u16::from_be_bytes([bytes[2], bytes[3]]),
)
}
/// Create a new version object from exact-width components (no checks needed).
#[inline] pub const fn new_exact(major: u8, minor: u8, bugfix: u16) -> Self
{
Self(major,minor,bugfix)
}
/// Create a new version object
///
/// # Panics
/// If any of the components do not fit within their bounds.
#[inline] pub fn new(major: usize, minor: usize, bugfix: usize) -> Self
{
use std::convert::TryInto;
Self::new_exact(major.try_into().expect("Major exceeded limit of u8"),
minor.try_into().expect("Minor exceeded limit of u8"),
bugfix.try_into().expect("Bugfix exceeded limit of u16"),
)
}
}
/// Construct a `version::Version` from 1–3 components
/// (`major[, minor[, bugfix]]`); missing components default to zero.
/// Components are cast with `as`, so out-of-range values silently truncate.
#[macro_export] macro_rules! version {
($maj:expr, $min:expr, $bfx:expr) => ($crate::version::Version::new_exact($maj as u8, $min as u8, $bfx as u16));
($maj:expr, $min:expr) => ($crate::version::Version::new_exact($maj as u8, $min as u8, 0));
($maj:expr) => ($crate::version::Version::new_exact($maj as u8, 0, 0));
}
#[cfg(test)]
mod tests
{
use super::*;
// Checks that `Version`'s derived ordering agrees with the ordering of
// its packed `to_u32` encoding.
#[test]
fn ordinal()
{
assert!( version!(1) > version!(0, 9, 1));
assert!( version!(2) > version!(1, 9, 300));
assert!( (version!(1)..version!(1, 9)).contains(&version!(1, 8)));
assert!( !(version!(1)..version!(2)).contains(&version!(2, 10, 432)));
println!("{}: {}", version!(1), version!(1).to_u32());
println!("{}: {}", version!(0,9,1), version!(0,9,1).to_u32());
// Same comparisons, but on the packed u32 encoding.
assert!( version!(1).to_u32() > version!(0, 9, 1).to_u32());
assert!( version!(2).to_u32() > version!(1, 9, 300).to_u32());
assert!( (version!(1).to_u32()..version!(1, 9).to_u32()).contains(&version!(1, 8).to_u32()));
assert!( !(version!(1).to_u32()..version!(2).to_u32()).contains(&version!(2, 10, 432).to_u32()));
}
}

@ -0,0 +1,28 @@
use super::*;
use std::any::Any;
/// Kinds of command accepted by the web background task.
#[derive(Debug)]
pub enum CommandKind
{
/// Shutdown gracefully.
///
/// # Response
/// None.
GracefulShutdown,
}
/// A response from the interrupt channel.
/// Some command kinds may warrant a response.
// Boxed `Any` so each command kind can answer with its own type.
pub type CommandResponse = Box<dyn Any + Send + 'static>;
/// A command to interrupt the web background task.
#[derive(Debug)]
pub(super) struct Command
{
// What the task is being asked to do.
pub(super) kind: CommandKind,
pub(super) response: oneshot::Sender<CommandResponse>, // If the interrupt stream produces no response for this query, the sender will just be dropped and the receiver will `Err`.
}
/// A channel to communicate with background task
#[derive(Debug, Clone)]
pub struct InterruptChannel(pub(super) mpsc::Sender<Command>);

@ -0,0 +1,86 @@
use super::*;
use state::State;
use futures::prelude::*;
use tokio::{
sync::{
mpsc,
oneshot,
},
};
mod command;
pub use command::*;
mod route;
/// Serve this state with this interrupt signal
///
/// Returns the `InterruptChannel` used to command the background task, and
/// a future that drives both the warp server (stub) and the interrupt
/// handler to completion.
pub fn serve(state: State) -> (InterruptChannel, impl Future<Output = eyre::Result<()>> + 'static)
{
// interrupt handler
let (int_tx, mut int_rx) = mpsc::channel::<Command>(16);
// Fires once to order the server's graceful shutdown.
let (grace_tx, grace_rx) = oneshot::channel::<()>();
// Fires once at the end to stop the interrupt handler itself.
let (s_int_tx, s_int_rx) = oneshot::channel::<()>();
// When this future completes, the server will initiate a graceful shutdown.
let graceful_shutdown = async move {
tokio::select!{
//_ = tokio::signal::ctrl_c() =>{} //The caller should handle this and then send `InterruptChannel` a `GracefulShutdown` event.
_ = grace_rx => {}
}
};
let h_int = tokio::spawn(async move {
let work = async {
// Handle commands from interrupt channel.
while let Some(com) = int_rx.recv().await
{
let resp = com.response; //sender for response
// NOTE(review): `resp` is currently never answered — `GracefulShutdown`
// documents "Response: None", so the sender is dropped and the
// receiver sees `Err`.
match com.kind {
CommandKind::GracefulShutdown => {
report!(grace_tx.send(()));
return;
},
}
}
};
let int = async {
let _ = tokio::join![
//tokio::signal::ctrl_c(),
s_int_rx,
];
};
// Run until either the interrupt fires or the work stream ends.
tokio::select!{
_ = int => {info!("h_int shutdown due to interrupt");},
_ = work => {info!("h_int shutdown due to exhausted work stream or shutdown signal");},
}
});
let command_channel = InterruptChannel(int_tx);
// setup server
let server = {
// TODO: warp routing paths
let routes = route::setup(state.clone());
clone!(command_channel);
async move {
mv![command_channel, // If we need to send commands to our own stream
state, // The program state
graceful_shutdown, // The graceful shutdown Future for warp.
routes,
];
// TODO: warp::try_serve... `routes`.
}
};
(command_channel,
async move {
info!("Waiting on server future");
// Wait for both the server and the interrupt handler; `?` propagates
// a panic/cancellation of `h_int` as an error.
tokio::join![
server,
h_int,
].1?;
report!(s_int_tx.send(()));
Ok(())
})
}

@ -0,0 +1,63 @@
use super::*;
use warp::Filter;
use std::convert::Infallible;
use hard_format::{
FormattedString,
formats,
};
/// Build the warp filter tree for the API, rooted at `/yuurei`:
/// `POST create` (JSON post body), `GET get/post/<hex>`, `GET get/user/<hex>`,
/// and a fallback `GET` that renders the view template. All handlers are
/// currently stubs returning `"test"`.
pub fn setup(state: State) -> impl warp::Filter //TODO: What output should this have?
{
let root = warp::path("yuurei"); //TODO: configurable
// Clone the shared state into each request.
let state = warp::any().map(move || state.clone());
let post_api = warp::post()
.and(warp::path("create"))
.and(state.clone())
//TODO: Filter to extract `User`. How? Dunno. Maybe cookies + IP or in the body itself.
.and(warp::body::content_length_limit(defaults::MAX_CONTENT_LENGTH)) //TODO: configurable
.and(warp::body::json())
.and_then(|state: State, body: post::Post| { //TODO: post read is not this type, but another more restricted one
async move {
Ok::<_, Infallible>("test")
}
});
let get_api = warp::get()
.and(warp::path("get"))
.and({
let get_post_by_id = warp::any()
.and(warp::path("post"))
.and(state.clone())
.and(warp::path::param().map(|opt: formats::HexFormattedString| opt)) //TODO: Convert to `PostID`.
.and_then(|state: State, id: formats::HexFormattedString| {
async move {
Ok::<_, Infallible>("test")
}
});
let get_posts_by_user_id = warp::any()
.and(warp::path("user"))
.and(state.clone())
.and(warp::path::param().map(|opt: formats::HexFormattedString| opt)) //TODO: Convert to `UserID`.
.and_then(|state: State, id: formats::HexFormattedString| {
async move {
Ok::<_, Infallible>("test")
}
});
// Try post lookup first, then user lookup.
get_post_by_id
.or(get_posts_by_user_id)
});
let render_api = warp::get()
.and(state.clone())
.and_then(|state: State| {
async move {
Ok::<_, std::convert::Infallible>(template::view(&state).into_string())
}
});
root.and(post_api
.or(get_api)
.or(render_api))
}

@ -0,0 +1,86 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
</head>
<body>
<script src="js/crypto-js.js"></script>
<script src="js/jsencrypt.min.js"></script>
<script src="js/NodeRSA.js"></script>
<script type="text/javascript">
// Map each byte to its two-character lowercase hex representation.
function toHex(buffer) {
    const pairs = Array.from(buffer, (byte) => ('00' + byte.toString(16)).slice(-2));
    return pairs.join('');
}
// Return `sz` cryptographically-secure random bytes.
// Bug fix: `aes_genkey()` calls this with no argument, and
// `new Uint8Array(undefined)` is a ZERO-length array — so the derived
// PBKDF2 password was the empty string. Default to 32 bytes (256 bits).
function secure_rng(sz = 32) {
    const array = new Uint8Array(sz);
    window.crypto.getRandomValues(array);
    return array;
}
// Derive an AES key via PBKDF2 (CryptoJS). If no password is given, a
// random hex password is generated from `secure_rng()`.
function aes_genkey(password) {
password = password || toHex(secure_rng());
console.log(`AES PBKDF2 Password: ${password}`);
//TODO: Generate custom random Key and IV of the correct size instead of this
const KEY_BITS = 256;
// NOTE(review): the salt is generated but not returned, so the derived
// key cannot be re-derived later — confirm this is intended (ephemeral use).
const salt = CryptoJS.lib.WordArray.random(KEY_BITS /8);
return CryptoJS.PBKDF2(password, salt, { keySize: KEY_BITS / 32 });
}
// Round-trip self-test of the client crypto stack: RSA encrypt/decrypt,
// RSA sign/verify (SHA256), and AES encrypt/decrypt. Results are only
// logged to the console, not asserted.
function test(priv, pub) {
//const priv = priv || document.getElementById("privkey").textContent;
//const pub = pub || document.getElementById("pubkey").textContent;
console.log(`Priv: ${priv}`);
console.log(`Pub: ${pub}`);
// RSA encrypt with the public key, decrypt with the private key.
const encrypt = new JSEncrypt();
encrypt.setPublicKey(pub);
const ciphertext = encrypt.encrypt("test input");
console.log(`Ciphertext: ${ciphertext}`);
const decrypt = new JSEncrypt();
decrypt.setPrivateKey(priv);
const plaintext = decrypt.decrypt(ciphertext);
console.log(`Plaintext: ${plaintext}`);
// Sign with the private key, verify with the public key.
const sign = new JSEncrypt();
sign.setPrivateKey(priv);
const signature = sign.sign("test input", CryptoJS.SHA256, "sha256");
console.log(`Signature: ${signature}`);
const verify = new JSEncrypt();
verify.setPublicKey(pub);
const verified = verify.verify("test input", signature, CryptoJS.SHA256);
console.log(`Verified: ${verified}`);
// Symmetric AES round trip with a freshly derived key.
const key = aes_genkey().toString();
console.log(`AES key: ${key}`);
const aes_ciphertext = CryptoJS.AES.encrypt("test input", key).toString();
console.log(`AES ciphertext: ${aes_ciphertext}`);
const bytes = CryptoJS.AES.decrypt(aes_ciphertext, key);
const aes_plaintext = bytes.toString(CryptoJS.enc.Utf8);
console.log(`AES plaintext: ${aes_plaintext}`);
}
// On load: generate a throwaway 1024-bit RSA keypair, publish it to the
// two textareas, and run the round-trip crypto self-test.
// NOTE(review): `require("node-rsa")` in a browser relies on the included
// NodeRSA.js bundle providing a `require` shim — confirm. 1024-bit RSA is
// test-strength only.
window.onload = (async() => {
const NodeRSA = require("node-rsa");
const key = new NodeRSA({b: 1024});
//key.generateKeyPair(); //unneeded I think
const pub = key.exportKey("public");
const priv = key.exportKey("private");
//console.log(`Pub: ${pub}, priv: ${priv}`);
document.getElementById("privkey").textContent = priv;
document.getElementById("pubkey").textContent = pub;
test(priv, pub);
});
</script>
<textarea id="privkey" rows="15" cols="65">(unbound)</textarea>
<textarea id="pubkey" rows="15" cols="65">(unbound)</textarea>
</body>
</html>

@ -0,0 +1 @@
../../vendor/NodeRSA.js

@ -0,0 +1 @@
../../bower_components/crypto-js/crypto-js.js

@ -0,0 +1 @@
../../node_modules/jsencrypt/bin/jsencrypt.min.js
Loading…
Cancel
Save