feat: Phase 9 — developer experience, extensibility, and community growth

New crates:
- quicproquo-bot: Bot SDK with polling API + JSON pipe mode
- quicproquo-kt: Key Transparency Merkle log (RFC 9162 subset)
- quicproquo-plugin-api: no_std C-compatible plugin vtable API
- quicproquo-gen: scaffolding tool (qpq-gen plugin/bot/rpc/hook)

Server features:
- ServerHooks trait wired into all RPC handlers (enqueue, fetch, auth,
  channel, registration) with plugin rejection support
- Dynamic plugin loader (libloading) with --plugin-dir config
- Delivery proof canary tokens (Ed25519 server signatures on enqueue)
- Key Transparency Merkle log with inclusion proofs on resolveUser

Core library:
- Safety numbers (60-digit HMAC-SHA256 key verification codes)
- Verifiable transcript archive (CBOR + ChaCha20-Poly1305 + hash chain)
- Delivery proof verification utility
- Criterion benchmarks (hybrid KEM, MLS, identity, sealed sender, padding)

Client:
- /verify REPL command for out-of-band key verification
- Full-screen TUI via Ratatui (feature-gated --features tui)
- qpq export / qpq export-verify CLI subcommands
- KT inclusion proof verification on user resolution

Also: ROADMAP Phase 9 added, bot SDK docs, server hooks docs,
crate-responsibilities updated, example plugins (rate_limit, logging).
2026-03-03 22:47:38 +01:00
parent b6483dedbc
commit dc4e4e49a0
62 changed files with 6959 additions and 62 deletions

Cargo.lock (generated; 315 changed lines)

@@ -656,12 +656,27 @@ dependencies = [
"toml 0.9.12+spec-1.1.0", "toml 0.9.12+spec-1.1.0",
] ]
[[package]]
name = "cassowary"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "castaway"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a"
dependencies = [
"rustversion",
]
[[package]]
name = "cc"
version = "1.2.56"
@@ -895,6 +910,20 @@ dependencies = [
"memchr",
]
[[package]]
name = "compact_str"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b79c4069c6cad78e2e0cdfcbd26275770669fb39fd308a752dc110e83b9af32"
dependencies = [
"castaway",
"cfg-if",
"itoa",
"rustversion",
"ryu",
"static_assertions",
]
[[package]]
name = "concurrent-queue"
version = "2.5.0"
@@ -1091,6 +1120,31 @@ version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crossterm"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6"
dependencies = [
"bitflags 2.11.0",
"crossterm_winapi",
"mio",
"parking_lot",
"rustix 0.38.44",
"signal-hook",
"signal-hook-mio",
"winapi",
]
[[package]]
name = "crossterm_winapi"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b"
dependencies = [
"winapi",
]
[[package]]
name = "crunchy"
version = "0.2.4"
@@ -1277,6 +1331,16 @@ dependencies = [
"darling_macro 0.21.3",
]
[[package]]
name = "darling"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d"
dependencies = [
"darling_core 0.23.0",
"darling_macro 0.23.0",
]
[[package]]
name = "darling_core"
version = "0.20.11"
@@ -1305,6 +1369,19 @@ dependencies = [
"syn 2.0.117",
]
[[package]]
name = "darling_core"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0"
dependencies = [
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn 2.0.117",
]
[[package]]
name = "darling_macro"
version = "0.20.11"
@@ -1327,6 +1404,17 @@ dependencies = [
"syn 2.0.117",
]
[[package]]
name = "darling_macro"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d"
dependencies = [
"darling_core 0.23.0",
"quote",
"syn 2.0.117",
]
[[package]]
name = "dashmap"
version = "5.5.3"
@@ -2558,6 +2646,8 @@ version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash 0.1.5",
]
@@ -3084,6 +3174,15 @@ dependencies = [
"serde_core", "serde_core",
] ]
[[package]]
name = "indoc"
version = "2.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79cf5c93f93228cf8efb3ba362535fb11199ac548a09ce117c9b1adc3030d706"
dependencies = [
"rustversion",
]
[[package]]
name = "infer"
version = "0.19.0"
@@ -3102,6 +3201,19 @@ dependencies = [
"generic-array",
]
[[package]]
name = "instability"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "357b7205c6cd18dd2c86ed312d1e70add149aea98e7ef72b9fdf0270e555c11d"
dependencies = [
"darling 0.23.0",
"indoc",
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]]
name = "ipconfig"
version = "0.3.2"
@@ -3171,7 +3283,7 @@ dependencies = [
"rustls-webpki",
"serde",
"smallvec",
-"strum",
+"strum 0.27.2",
"sync_wrapper",
"time",
"tokio",
@@ -3311,7 +3423,7 @@ dependencies = [
"iroh-metrics", "iroh-metrics",
"iroh-quinn", "iroh-quinn",
"iroh-quinn-proto", "iroh-quinn-proto",
"lru", "lru 0.16.3",
"n0-error", "n0-error",
"n0-future", "n0-future",
"num_enum", "num_enum",
@@ -3324,7 +3436,7 @@ dependencies = [
"rustls-pki-types", "rustls-pki-types",
"serde", "serde",
"serde_bytes", "serde_bytes",
"strum", "strum 0.27.2",
"tokio", "tokio",
"tokio-rustls", "tokio-rustls",
"tokio-util", "tokio-util",
@@ -3363,6 +3475,15 @@ dependencies = [
"either", "either",
] ]
[[package]]
name = "itertools"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.14.0"
@@ -3539,7 +3660,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e9ec52138abedcc58dc17a7c6c0c00a2bdb4f3427c7f63fa97fd0d859155caf"
dependencies = [
"gtk-sys",
-"libloading",
+"libloading 0.7.4",
"once_cell",
]
@@ -3559,6 +3680,16 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "libloading"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55"
dependencies = [
"cfg-if",
"windows-link 0.2.1",
]
[[package]]
name = "libm"
version = "0.2.16"
@@ -3585,6 +3716,12 @@ dependencies = [
"vcpkg",
]
[[package]]
name = "linux-raw-sys"
version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
[[package]]
name = "linux-raw-sys"
version = "0.12.1"
@@ -3631,6 +3768,15 @@ dependencies = [
"tracing-subscriber",
]
[[package]]
name = "lru"
version = "0.12.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
dependencies = [
"hashbrown 0.15.5",
]
[[package]]
name = "lru"
version = "0.16.3"
@@ -3806,6 +3952,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
dependencies = [
"libc",
"log",
"wasi 0.11.1+wasi-snapshot-preview1",
"windows-sys 0.61.2",
]
@@ -4847,7 +4994,7 @@ dependencies = [
"futures-lite", "futures-lite",
"getrandom 0.4.1", "getrandom 0.4.1",
"log", "log",
"lru", "lru 0.16.3",
"ntimestamp", "ntimestamp",
"reqwest 0.13.2", "reqwest 0.13.2",
"self_cell", "self_cell",
@@ -5263,6 +5410,22 @@ dependencies = [
"memchr", "memchr",
] ]
[[package]]
name = "quicproquo-bot"
version = "0.1.0"
dependencies = [
"anyhow",
"hex",
"openmls_rust_crypto",
"quicproquo-client",
"quicproquo-core",
"quicproquo-proto",
"serde",
"serde_json",
"tokio",
"tracing",
]
[[package]]
name = "quicproquo-client"
version = "0.1.0"
@@ -5274,7 +5437,9 @@ dependencies = [
"capnp",
"capnp-rpc",
"chacha20poly1305 0.10.1",
"ciborium",
"clap",
"crossterm",
"dashmap",
"futures",
"hex",
@@ -5283,11 +5448,13 @@ dependencies = [
"openmls_rust_crypto", "openmls_rust_crypto",
"portpicker", "portpicker",
"quicproquo-core", "quicproquo-core",
"quicproquo-kt",
"quicproquo-p2p", "quicproquo-p2p",
"quicproquo-proto", "quicproquo-proto",
"quinn", "quinn",
"quinn-proto", "quinn-proto",
"rand 0.8.5", "rand 0.8.5",
"ratatui",
"rpassword", "rpassword",
"rusqlite", "rusqlite",
"rustls", "rustls",
@@ -5311,9 +5478,11 @@ dependencies = [
"bincode", "bincode",
"capnp", "capnp",
"chacha20poly1305 0.10.1", "chacha20poly1305 0.10.1",
"ciborium",
"criterion", "criterion",
"ed25519-dalek 2.2.0", "ed25519-dalek 2.2.0",
"hkdf", "hkdf",
"hmac",
"ml-kem", "ml-kem",
"opaque-ke", "opaque-ke",
"openmls", "openmls",
@@ -5332,6 +5501,13 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "quicproquo-gen"
version = "0.1.0"
dependencies = [
"clap",
]
[[package]]
name = "quicproquo-gui"
version = "0.1.0"
@@ -5346,6 +5522,16 @@ dependencies = [
"tokio",
]
[[package]]
name = "quicproquo-kt"
version = "0.1.0"
dependencies = [
"bincode",
"serde",
"sha2 0.10.9",
"thiserror 1.0.69",
]
[[package]]
name = "quicproquo-mobile"
version = "0.1.0"
@@ -5367,6 +5553,10 @@ dependencies = [
"tracing",
]
[[package]]
name = "quicproquo-plugin-api"
version = "0.1.0"
[[package]]
name = "quicproquo-proto"
version = "0.1.0"
@@ -5387,11 +5577,14 @@ dependencies = [
"dashmap",
"futures",
"hex",
"libloading 0.8.9",
"mdns-sd",
"metrics 0.22.4",
"metrics-exporter-prometheus",
"opaque-ke",
"quicproquo-core",
"quicproquo-kt",
"quicproquo-plugin-api",
"quicproquo-proto",
"quinn",
"quinn-proto",
@@ -5595,6 +5788,27 @@ dependencies = [
"rand_core 0.5.1", "rand_core 0.5.1",
] ]
[[package]]
name = "ratatui"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eabd94c2f37801c20583fc49dd5cd6b0ba68c716787c2dd6ed18571e1e63117b"
dependencies = [
"bitflags 2.11.0",
"cassowary",
"compact_str",
"crossterm",
"indoc",
"instability",
"itertools 0.13.0",
"lru 0.12.5",
"paste",
"strum 0.26.3",
"unicode-segmentation",
"unicode-truncate",
"unicode-width 0.2.0",
]
[[package]]
name = "raw-cpuid"
version = "11.6.0"
@@ -5868,6 +6082,19 @@ dependencies = [
"semver",
]
[[package]]
name = "rustix"
version = "0.38.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154"
dependencies = [
"bitflags 2.11.0",
"errno",
"libc",
"linux-raw-sys 0.4.15",
"windows-sys 0.59.0",
]
[[package]]
name = "rustix"
version = "1.1.4"
@@ -5877,7 +6104,7 @@ dependencies = [
"bitflags 2.11.0",
"errno",
"libc",
-"linux-raw-sys",
+"linux-raw-sys 0.12.1",
"windows-sys 0.61.2",
]
@@ -6395,6 +6622,27 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "signal-hook"
version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2"
dependencies = [
"libc",
"signal-hook-registry",
]
[[package]]
name = "signal-hook-mio"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b75a19a7a740b25bc7944bdee6172368f988763b744e3d4dfe753f6b4ece40cc"
dependencies = [
"libc",
"mio",
"signal-hook",
]
[[package]]
name = "signal-hook-registry"
version = "1.4.8"
@@ -6604,6 +6852,12 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "string_cache"
version = "0.8.9"
@@ -6635,13 +6889,35 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "strum"
version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
dependencies = [
"strum_macros 0.26.4",
]
[[package]]
name = "strum"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
dependencies = [
-"strum_macros",
+"strum_macros 0.27.2",
]
[[package]]
name = "strum_macros"
version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be"
dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.117",
]
[[package]]
@@ -7021,7 +7297,7 @@ dependencies = [
"fastrand",
"getrandom 0.4.1",
"once_cell",
-"rustix",
+"rustix 1.1.4",
"windows-sys 0.61.2",
]
@@ -7604,6 +7880,29 @@ version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "unicode-truncate"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf"
dependencies = [
"itertools 0.13.0",
"unicode-segmentation",
"unicode-width 0.1.14",
]
[[package]]
name = "unicode-width"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
[[package]]
name = "unicode-width"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd"
[[package]]
name = "unicode-xid"
version = "0.2.6"


@@ -3,8 +3,12 @@ resolver = "2"
members = [
"crates/quicproquo-core",
"crates/quicproquo-proto",
"crates/quicproquo-plugin-api",
"crates/quicproquo-kt",
"crates/quicproquo-server",
"crates/quicproquo-client",
"crates/quicproquo-bot",
"crates/quicproquo-gen",
"crates/quicproquo-gui",
"crates/quicproquo-mobile",
# P2P crate uses iroh (~90 extra deps). Kept in the workspace so it can be
@@ -29,7 +33,9 @@ ml-kem = { version = "0.2" }
x25519-dalek = { version = "2", features = ["static_secrets"] } x25519-dalek = { version = "2", features = ["static_secrets"] }
ed25519-dalek = { version = "2", features = ["rand_core"] } ed25519-dalek = { version = "2", features = ["rand_core"] }
sha2 = { version = "0.10" } sha2 = { version = "0.10" }
hmac = { version = "0.12" }
hkdf = { version = "0.12" } hkdf = { version = "0.12" }
ciborium = { version = "0.2" }
chacha20poly1305 = { version = "0.10" } chacha20poly1305 = { version = "0.10" }
opaque-ke = { version = "4", features = ["ristretto255", "argon2"] } opaque-ke = { version = "4", features = ["ristretto255", "argon2"] }
zeroize = { version = "1", features = ["derive", "serde"] } zeroize = { version = "1", features = ["derive", "serde"] }


@@ -425,6 +425,59 @@ functions without any central infrastructure or internet uplink.
---
## Phase 9 — Developer Experience & Community Growth
Features designed to attract contributors, create demo/showcase potential,
and lower the barrier to entry for non-crypto developers.
- [ ] **9.1 Criterion Benchmark Suite (`qpq-bench`)**
- Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake
- CI publishes HTML benchmark reports as GitHub Actions artifacts
- Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust
- [ ] **9.2 Safety Numbers (key verification)**
- Derive a 60-digit numeric code from two identity keys (Signal-style)
- REPL `/verify <username>` command for out-of-band key verification
- Pure client-side — no server or wire format changes needed
- [ ] **9.3 Full-Screen TUI (Ratatui + Crossterm)**
- `qpq tui` launches a full-screen terminal UI: message pane, input bar,
channel sidebar with unread counts, MLS epoch indicator
- Feature-gated `--features tui` to keep ratatui/crossterm out of default builds
- Existing REPL and CLI subcommands are unaffected
- [ ] **9.4 Delivery Proof Canary Tokens**
- Server signs `Ed25519(SHA-256(message_id || recipient || timestamp))` on enqueue
- Sender stores proof locally — cryptographic evidence the server queued the message
  - Cap'n Proto schema gains optional `deliveryProof: Data` on enqueue response (a verification sketch follows this list)
- [ ] **9.5 Verifiable Transcript Archive**
- `GroupMember::export_transcript(path, password)` writes encrypted, tamper-evident
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)
- `qpq export verify` CLI command independently verifies chain integrity
- Useful for legal discovery, audit, or personal backup
- [ ] **9.6 Key Transparency (Merkle-Log Identity Binding)**
- Append-only Merkle log of (username, identity_key) bindings in the AS
- Clients receive inclusion proofs alongside key fetches
- Any client can independently audit the full identity history
- Lightweight subset of RFC 9162 adapted for identity keys
- [ ] **9.7 Dynamic Server Plugin System**
- Server loads `.so`/`.dylib` plugins at runtime from config `[plugins]` section
- C-compatible `HookVTable` via `extern "C"` — plugins in any language
- Ships with Rust reference plugin + Python ctypes example
- Extends existing `ServerHooks` trait with dynamic dispatch
- [ ] **9.8 PQ Noise Transport Layer**
- Hybrid `Noise_XX + ML-KEM-768` handshake for post-quantum transport security
- Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)
- Feature-gated `--features pq-noise`; classical Noise_XX default preserved
- May require extending or forking `snow` crate's `CryptoResolver`
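
A hedged sketch of the 9.4 delivery-proof check, taken literally from the
construction above (`Ed25519(SHA-256(message_id || recipient || timestamp))`);
the function name and field encoding here are illustrative, not the shipped
`quicproquo-core` API:

```rust
use ed25519_dalek::{Signature, Verifier, VerifyingKey};
use sha2::{Digest, Sha256};

/// Verify a server-issued delivery proof against the server's signing key.
fn verify_delivery_proof(
    server_pk: &VerifyingKey,
    message_id: &[u8],
    recipient: &[u8],
    timestamp_ms: u64,
    proof: &Signature,
) -> bool {
    // Hash the enqueue facts in a fixed order, then check the signature.
    let mut h = Sha256::new();
    h.update(message_id);
    h.update(recipient);
    h.update(timestamp_ms.to_be_bytes());
    server_pk.verify(h.finalize().as_slice(), proof).is_ok()
}
```

Because the sender stores the proof locally, a later "we never queued it"
claim can be contested with nothing more than the server's public key.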
---
## Summary Timeline
| Phase | Focus | Estimated Effort |
@@ -436,6 +489,8 @@ functions without any central infrastructure or internet uplink.
| **5** | Features & UX | 57 days |
| **6** | Scale & Operations | 35 days |
| **7** | Platform Expansion & Research | ongoing |
| **8** | Freifunk / Community Mesh | ongoing |
| **9** | Developer Experience & Community Growth | 35 days |
---


@@ -0,0 +1,19 @@
[package]
name = "quicproquo-bot"
version = "0.1.0"
edition = "2021"
description = "Bot SDK for quicproquo — build automated agents on E2E encrypted messaging."
license = "MIT"
[dependencies]
quicproquo-core = { path = "../quicproquo-core" }
quicproquo-proto = { path = "../quicproquo-proto" }
quicproquo-client = { path = "../quicproquo-client" }
openmls_rust_crypto = { workspace = true }
tokio = { workspace = true }
anyhow = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
hex = { workspace = true }


@@ -0,0 +1,353 @@
//! # quicproquo-bot — Bot SDK for E2E encrypted messaging
//!
//! Build automated agents that run on the quicproquo network with full MLS
//! end-to-end encryption. The bot SDK wraps the client library into a simple
//! polling-based API: connect, authenticate, send, receive.
//!
//! ## Quick start
//!
//! ```rust,no_run
//! use quicproquo_bot::{Bot, BotConfig};
//!
//! #[tokio::main]
//! async fn main() -> anyhow::Result<()> {
//! let config = BotConfig::new("127.0.0.1:7000", "bot-user", "bot-password")
//! .ca_cert("server-cert.der")
//! .state_path("bot-state.bin");
//!
//! let bot = Bot::connect(config).await?;
//!
//! // Send a DM
//! bot.send_dm("alice", "Hello from bot!").await?;
//!
//! // Poll for messages
//! loop {
//! for msg in bot.receive(5000).await? {
//! println!("{}: {}", msg.sender, msg.text);
//! if msg.text.starts_with("!echo ") {
//! bot.send_dm(&msg.sender, &msg.text[6..]).await?;
//! }
//! }
//! }
//! }
//! ```
//!
//! ## Pipe mode (stdin/stdout JSON lines)
//!
//! The bot SDK also supports non-interactive pipe mode for shell integration:
//!
//! ```bash
//! # Send via pipe
//! echo '{"to":"alice","text":"hello"}' | qpq pipe --state bot.bin
//!
//! # Receive via pipe (JSON lines to stdout)
//! qpq pipe --recv --state bot.bin
//! ```
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::Context;
use tokio::task::LocalSet;
use quicproquo_client::{connect_node, init_auth, opaque_login, resolve_user, ClientAuth};
use quicproquo_core::IdentityKeypair;
/// Configuration for connecting a bot to a quicproquo server.
#[derive(Clone, Debug)]
pub struct BotConfig {
/// Server address (host:port).
pub server: String,
/// Path to the server's CA certificate (DER format).
pub ca_cert: PathBuf,
/// TLS server name (defaults to "localhost").
pub server_name: String,
/// Bot's username for OPAQUE authentication.
pub username: String,
/// Bot's password for OPAQUE authentication.
pub password: String,
/// Path to the bot's encrypted state file.
pub state_path: PathBuf,
/// Password for the encrypted state file (None = unencrypted).
pub state_password: Option<String>,
/// Device ID reported to the server.
pub device_id: Option<String>,
}
impl BotConfig {
/// Create a new bot configuration with required fields.
pub fn new(server: &str, username: &str, password: &str) -> Self {
Self {
server: server.to_string(),
ca_cert: PathBuf::from("server-cert.der"),
server_name: "localhost".to_string(),
username: username.to_string(),
password: password.to_string(),
state_path: PathBuf::from("bot-state.bin"),
state_password: None,
device_id: None,
}
}
/// Set the CA certificate path.
pub fn ca_cert(mut self, path: &str) -> Self {
self.ca_cert = PathBuf::from(path);
self
}
/// Set the TLS server name for certificate validation.
pub fn server_name(mut self, name: &str) -> Self {
self.server_name = name.to_string();
self
}
/// Set the state file path.
pub fn state_path(mut self, path: &str) -> Self {
self.state_path = PathBuf::from(path);
self
}
/// Set the state file encryption password.
pub fn state_password(mut self, pwd: &str) -> Self {
self.state_password = Some(pwd.to_string());
self
}
/// Set the device ID.
pub fn device_id(mut self, id: &str) -> Self {
self.device_id = Some(id.to_string());
self
}
}
/// A received message from the quicproquo network.
#[derive(Clone, Debug, serde::Serialize)]
pub struct Message {
/// The sender's username (or "unknown" if resolution failed).
pub sender: String,
/// The decrypted plaintext message content.
pub text: String,
/// Index of the message within this receive batch (not a server-assigned sequence number).
pub seq: u64,
}
/// A bot connected to a quicproquo server.
///
/// The bot maintains its identity and MLS group state. Each call to
/// `send_dm` or `receive` opens a fresh QUIC connection (stateless
/// reconnect pattern — same as the CLI client).
pub struct Bot {
config: BotConfig,
identity: Arc<IdentityKeypair>,
}
impl Bot {
/// Connect to a quicproquo server and authenticate.
///
/// Loads or creates an identity from the state file, connects via QUIC/TLS,
/// and performs OPAQUE password authentication.
pub async fn connect(config: BotConfig) -> anyhow::Result<Self> {
let state = quicproquo_client::client::state::load_or_init_state(
&config.state_path,
config.state_password.as_deref(),
)
.context("load or init bot state")?;
let identity = Arc::new(IdentityKeypair::from_seed(state.identity_seed));
// Authenticate on the first connection.
let local = LocalSet::new();
let cfg = config.clone();
let id = Arc::clone(&identity);
local
.run_until(async {
let client =
connect_node(&cfg.server, &cfg.ca_cert, &cfg.server_name).await?;
let pk = id.public_key_bytes();
let token = opaque_login(
&client,
&cfg.username,
&cfg.password,
&pk,
)
.await
.context("OPAQUE login")?;
init_auth(ClientAuth::from_raw(token, cfg.device_id.clone()));
tracing::info!(username = %cfg.username, server = %cfg.server, "bot authenticated");
Ok::<(), anyhow::Error>(())
})
.await?;
Ok(Self { config, identity })
}
/// Send a plaintext message to a peer by username.
///
/// Resolves the username to an identity key, then encrypts via MLS
/// and delivers through the server.
pub async fn send_dm(&self, peer_username: &str, text: &str) -> anyhow::Result<()> {
// Resolve username → identity key hex so we send to the specific peer.
let peer_key = self
.resolve_user(peer_username)
.await
.context("resolve peer username")?;
let peer_key_hex = hex::encode(&peer_key);
quicproquo_client::cmd_send(
&self.config.state_path,
&self.config.server,
&self.config.ca_cert,
&self.config.server_name,
Some(&peer_key_hex),
false,
text,
self.config.state_password.as_deref(),
)
.await
.context("send message")?;
Ok(())
}
/// Receive pending messages, waiting up to `timeout_ms` milliseconds.
///
/// Returns decrypted application messages. MLS control messages (commits,
/// welcomes) are processed internally but not returned.
pub async fn receive(&self, timeout_ms: u64) -> anyhow::Result<Vec<Message>> {
let plaintexts = quicproquo_client::receive_pending_plaintexts(
&self.config.state_path,
&self.config.server,
&self.config.ca_cert,
&self.config.server_name,
timeout_ms,
self.config.state_password.as_deref(),
)
.await?;
let messages: Vec<Message> = plaintexts
.into_iter()
.enumerate()
.map(|(i, plaintext)| Message {
sender: "peer".to_string(), // TODO: resolve from MLS group roster
text: String::from_utf8_lossy(&plaintext).to_string(),
seq: i as u64,
})
.collect();
Ok(messages)
}
/// Receive raw plaintext bytes (for binary protocols or non-UTF-8 content).
pub async fn receive_raw(&self, timeout_ms: u64) -> anyhow::Result<Vec<Vec<u8>>> {
quicproquo_client::receive_pending_plaintexts(
&self.config.state_path,
&self.config.server,
&self.config.ca_cert,
&self.config.server_name,
timeout_ms,
self.config.state_password.as_deref(),
)
.await
}
/// Resolve a username to a 32-byte identity key.
pub async fn resolve_user(&self, username: &str) -> anyhow::Result<Vec<u8>> {
let local = LocalSet::new();
let cfg = self.config.clone();
let username = username.to_string();
local
.run_until(async {
let client = connect_node(&cfg.server, &cfg.ca_cert, &cfg.server_name).await?;
let key = resolve_user(&client, &username)
.await?
.ok_or_else(|| anyhow::anyhow!("user not found: {username}"))?;
Ok(key)
})
.await
}
/// Get the bot's own username.
pub fn username(&self) -> &str {
&self.config.username
}
/// Get the bot's identity public key (32 bytes, Ed25519).
pub fn identity_key(&self) -> [u8; 32] {
self.identity.public_key_bytes()
}
/// Get the bot's identity key as a hex string.
pub fn identity_key_hex(&self) -> String {
hex::encode(self.identity.public_key_bytes())
}
}
/// Read JSON commands from stdin and process them.
///
/// Each line should be a JSON object with:
/// - `{"action": "send", "to": "username", "text": "message"}`
/// - `{"action": "recv", "timeout_ms": 5000}`
/// - `{"action": "resolve", "username": "alice"}`
///
/// Results are written to stdout as JSON lines.
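///
/// Example exchange (illustrative values; the shapes match the handlers below):
///
/// ```text
/// > {"action": "resolve", "username": "alice"}
/// < {"status":"ok","identity_key":"9f3a..."}
/// > {"action": "send", "to": "alice", "text": "hi"}
/// < {"status":"ok","action":"send"}
/// ```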
pub async fn run_pipe_mode(bot: &Bot) -> anyhow::Result<()> {
use tokio::io::{AsyncBufReadExt, BufReader};
let stdin = BufReader::new(tokio::io::stdin());
let mut lines = stdin.lines();
while let Ok(Some(line)) = lines.next_line().await {
let line = line.trim().to_string();
if line.is_empty() {
continue;
}
let cmd: serde_json::Value = match serde_json::from_str(&line) {
Ok(v) => v,
Err(e) => {
let err = serde_json::json!({"error": format!("invalid JSON: {e}")});
println!("{err}");
continue;
}
};
let action = cmd["action"].as_str().unwrap_or("");
let result = match action {
"send" => {
let to = cmd["to"].as_str().unwrap_or("");
let text = cmd["text"].as_str().unwrap_or("");
match bot.send_dm(to, text).await {
Ok(()) => serde_json::json!({"status": "ok", "action": "send"}),
Err(e) => serde_json::json!({"error": format!("{e:#}")}),
}
}
"recv" => {
let timeout = cmd["timeout_ms"].as_u64().unwrap_or(5000);
match bot.receive(timeout).await {
Ok(msgs) => serde_json::json!({"status": "ok", "messages": msgs}),
Err(e) => serde_json::json!({"error": format!("{e:#}")}),
}
}
"resolve" => {
let username = cmd["username"].as_str().unwrap_or("");
match bot.resolve_user(username).await {
Ok(key) => serde_json::json!({
"status": "ok",
"identity_key": hex::encode(&key),
}),
Err(e) => serde_json::json!({"error": format!("{e:#}")}),
}
}
_ => serde_json::json!({"error": format!("unknown action: {action}")}),
};
println!("{result}");
}
Ok(())
}
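// A minimal pipe-mode entrypoint, sketched from the API above; the real
// `qpq pipe` binary wires this up with CLI flags, so treat the literals
// here as placeholders:
//
//     #[tokio::main(flavor = "current_thread")]
//     async fn main() -> anyhow::Result<()> {
//         let cfg = quicproquo_bot::BotConfig::new("127.0.0.1:7000", "bot", "pw")
//             .state_path("bot.bin");
//         let bot = quicproquo_bot::Bot::connect(cfg).await?;
//         quicproquo_bot::run_pipe_mode(&bot).await
//     }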


@@ -12,6 +12,7 @@ path = "src/main.rs"
[dependencies]
quicproquo-core = { path = "../quicproquo-core" }
quicproquo-proto = { path = "../quicproquo-proto" }
quicproquo-kt = { path = "../quicproquo-kt" }
openmls_rust_crypto = { workspace = true }
# Serialisation + RPC
@@ -38,6 +39,7 @@ thiserror = { workspace = true }
sha2 = { workspace = true }
argon2 = { workspace = true }
chacha20poly1305 = { workspace = true }
ciborium = { workspace = true }
zeroize = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
@@ -65,10 +67,16 @@ mdns-sd = { version = "0.12", optional = true }
# Optional P2P transport for direct node-to-node messaging.
quicproquo-p2p = { path = "../quicproquo-p2p", optional = true }
# Optional TUI dependencies (Ratatui full-screen interface).
ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] }
crossterm = { version = "0.28", optional = true }
[features]
# Enable mesh-mode features: mDNS local peer discovery + P2P transport.
# Build: cargo build -p quicproquo-client --features mesh
mesh = ["dep:mdns-sd", "dep:quicproquo-p2p"]
# Enable full-screen Ratatui TUI: cargo build -p quicproquo-client --features tui
tui = ["dep:ratatui", "dep:crossterm"]
[dev-dependencies]
dashmap = { workspace = true }


@@ -1288,3 +1288,111 @@ pub async fn cmd_chat(
println!();
Ok(())
}
// ── Transcript export ─────────────────────────────────────────────────────────
/// Export the message history for a conversation to an encrypted, tamper-evident
/// transcript file.
///
/// `conv_db` is the path to the conversation SQLite database (`.convdb` file).
/// `conv_id_hex` is the 32-hex-character conversation ID to export.
/// `output` is the path for the `.qpqt` transcript file to write.
/// `transcript_password` is used to derive the encryption key (Argon2id).
/// `db_password` is the optional SQLCipher password for the conversation database.
pub fn cmd_export(
conv_db: &Path,
conv_id_hex: &str,
output: &Path,
transcript_password: &str,
db_password: Option<&str>,
) -> anyhow::Result<()> {
use quicproquo_core::{TranscriptRecord, TranscriptWriter};
use super::conversation::{ConversationId, ConversationStore};
// Decode conversation ID from hex.
let id_bytes = hex::decode(conv_id_hex)
.map_err(|e| anyhow::anyhow!("conv-id must be 32 hex characters (16 bytes): {e}"))?;
let conv_id = ConversationId::from_slice(&id_bytes)
.ok_or_else(|| anyhow::anyhow!("conv-id must be exactly 16 bytes (32 hex chars), got {} bytes", id_bytes.len()))?;
// Open conversation database.
let store = ConversationStore::open(conv_db, db_password)
.context("open conversation database")?;
// Load conversation metadata (to display name in output).
let conv = store
.load_conversation(&conv_id)?
.with_context(|| format!("conversation '{conv_id_hex}' not found in database"))?;
// Load all messages (oldest first).
let messages = store.load_all_messages(&conv_id)?;
if messages.is_empty() {
println!("No messages in conversation '{}'.", conv.display_name);
return Ok(());
}
// Create output file.
if let Some(parent) = output.parent() {
std::fs::create_dir_all(parent).ok();
}
let mut file = std::fs::File::create(output)
.with_context(|| format!("create transcript file '{}'", output.display()))?;
// Write transcript header + records.
let mut writer = TranscriptWriter::new(transcript_password, &mut file)
.context("initialise transcript writer")?;
let mut written = 0u64;
for (seq, msg) in messages.iter().enumerate() {
writer
.write_record(
&TranscriptRecord {
seq: seq as u64,
sender_identity: &msg.sender_key,
timestamp_ms: msg.timestamp_ms,
plaintext: &msg.body,
},
&mut file,
)
.context("write transcript record")?;
written += 1;
}
println!(
"Exported {} message(s) from '{}' to '{}'.",
written,
conv.display_name,
output.display()
);
println!("Decrypt with: qpq export verify --input <file> --password <password>");
Ok(())
}
/// Verify the hash-chain integrity of a transcript file without decrypting content.
///
/// Prints a summary. Does not require the encryption password (structural check only).
pub fn cmd_export_verify(input: &Path) -> anyhow::Result<()> {
use quicproquo_core::{verify_transcript_chain, ChainVerdict};
let data = std::fs::read(input)
.with_context(|| format!("read transcript file '{}'", input.display()))?;
match verify_transcript_chain(&data)? {
ChainVerdict::Ok { records } => {
println!(
"OK: transcript '{}' is structurally valid. {} record(s) found, hash chain intact.",
input.display(),
records
);
}
ChainVerdict::Broken => {
anyhow::bail!(
"FAIL: hash chain is broken in '{}' — file may have been tampered with.",
input.display()
);
}
}
Ok(())
}
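// The structural check above walks a hash chain over the encrypted records.
// A minimal sketch of one link, assuming SHA-256 and the linkage
// h_i = SHA-256(h_{i-1} || ciphertext_i); the authoritative record encoding
// lives in quicproquo-core, not here:
//
//     use sha2::{Digest, Sha256};
//
//     fn chain_next(prev: &[u8; 32], record_ct: &[u8]) -> [u8; 32] {
//         let mut h = Sha256::new();
//         h.update(prev);
//         h.update(record_ct);
//         h.finalize().into()
//     }
//
// Every link is recomputable from the stored ciphertexts alone, which is why
// `cmd_export_verify` needs no password.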


@@ -587,6 +587,55 @@ impl ConversationStore {
Ok(msgs)
}
/// Load all messages for a conversation, oldest first (no limit).
pub fn load_all_messages(&self, conv_id: &ConversationId) -> anyhow::Result<Vec<StoredMessage>> {
let mut stmt = self.conn.prepare(
"SELECT message_id, sender_key, sender_name, body, msg_type,
ref_msg_id, timestamp_ms, is_outgoing
FROM messages
WHERE conversation_id = ?1
ORDER BY timestamp_ms ASC, id ASC",
)?;
let rows = stmt.query_map(params![conv_id.0.as_slice()], |row| {
let message_id: Option<Vec<u8>> = row.get(0)?;
let sender_key: Vec<u8> = row.get(1)?;
let sender_name: Option<String> = row.get(2)?;
let body: String = row.get(3)?;
let msg_type: String = row.get(4)?;
let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
let timestamp_ms: u64 = row.get(6)?;
let is_outgoing: i32 = row.get(7)?;
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
if v.len() == 16 {
let mut buf = [0u8; 16];
buf.copy_from_slice(v);
Some(buf)
} else {
None
}
}
Ok(StoredMessage {
conversation_id: conv_id.clone(),
message_id: message_id.as_deref().and_then(to_16),
sender_key,
sender_name,
body,
msg_type,
ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
timestamp_ms,
is_outgoing: is_outgoing != 0,
})
})?;
let mut msgs = Vec::new();
for row in rows {
msgs.push(row?);
}
Ok(msgs)
}
/// Save a message, deduplicating by message_id within the same conversation.
/// Returns `true` if the message was saved (new), `false` if it was a duplicate.
pub fn save_message_dedup(&self, msg: &StoredMessage) -> anyhow::Result<bool> {


@@ -9,6 +9,8 @@ pub mod rpc;
pub mod session;
pub mod state;
pub mod token_cache;
#[cfg(feature = "tui")]
pub mod tui;
pub use commands::*;
pub use rpc::{connect_node, enqueue, fetch_all, fetch_hybrid_key, fetch_key_package, fetch_wait, upload_hybrid_key, upload_key_package};


@@ -11,7 +11,7 @@ use std::time::Duration;
use anyhow::Context;
use quicproquo_core::{
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
-hybrid_encrypt, parse as parse_app_msg, serialize_chat,
+hybrid_encrypt, parse as parse_app_msg, serialize_chat, compute_safety_number,
};
use quicproquo_proto::node_capnp::node_service;
use tokio::sync::mpsc;
@@ -57,6 +57,8 @@ enum SlashCommand {
/// Mesh subcommands: /mesh peers, /mesh server <addr>
MeshPeers,
MeshServer { addr: String },
/// Display safety number for out-of-band key verification with a contact.
Verify { username: String },
}
fn parse_input(line: &str) -> Input {
@@ -135,6 +137,13 @@ fn parse_input(line: &str) -> Input {
Input::Empty
}
},
"/verify" => match arg {
Some(username) => Input::Slash(SlashCommand::Verify { username }),
None => {
display::print_error("usage: /verify <username>");
Input::Empty
}
},
_ => {
display::print_error(&format!("unknown command: {cmd}. Try /help"));
Input::Empty
@@ -601,6 +610,7 @@ async fn handle_slash(
));
Ok(())
}
SlashCommand::Verify { username } => cmd_verify(session, client, &username).await,
};
if let Err(e) = result {
display::print_error(&format!("{e:#}"));
@@ -622,6 +632,7 @@ fn print_help() {
display::print_status(" /whoami - Show your identity"); display::print_status(" /whoami - Show your identity");
display::print_status(" /mesh peers - Discover nearby qpq nodes via mDNS"); display::print_status(" /mesh peers - Discover nearby qpq nodes via mDNS");
display::print_status(" /mesh server <host:port> - Show how to reconnect to a mesh node"); display::print_status(" /mesh server <host:port> - Show how to reconnect to a mesh node");
display::print_status(" /verify <username> - Show safety number for key verification");
display::print_status(" /quit - Exit"); display::print_status(" /quit - Exit");
} }
@@ -1200,6 +1211,43 @@ fn cmd_history(session: &SessionState, count: usize) -> anyhow::Result<()> {
Ok(())
}
async fn cmd_verify(
session: &SessionState,
client: &node_service::Client,
username: &str,
) -> anyhow::Result<()> {
// Resolve contact's identity key from the server.
display::print_status(&format!("resolving {username}..."));
let peer_key_vec = resolve_user(client, username)
.await?
.with_context(|| format!("user '{username}' not found"))?;
anyhow::ensure!(
peer_key_vec.len() == 32,
"server returned an identity key with unexpected length ({}); expected 32 bytes",
peer_key_vec.len()
);
let peer_key: [u8; 32] = peer_key_vec
.as_slice()
.try_into()
.expect("length checked above");
let my_key: [u8; 32] = session.identity.public_key_bytes();
let safety_number = compute_safety_number(&my_key, &peer_key);
display::print_status(&format!("Safety number with @{username}:"));
display::print_status("");
display::print_status(&format!(" {safety_number}"));
display::print_status("");
display::print_status("Compare this number with your contact via a separate channel");
display::print_status("(voice call, in person, or any out-of-band means).");
display::print_status("If the numbers match, the connection has not been tampered with.");
Ok(())
}
// ── Sending ──────────────────────────────────────────────────────────────────
async fn handle_send(


@@ -576,6 +576,13 @@ pub async fn batch_enqueue(
}
/// Resolve a username to its Ed25519 identity key (32 bytes).
///
/// When the server returns a non-empty `inclusionProof`, the client verifies it
/// against the identity key using the Key Transparency Merkle proof. Proof
/// verification failure is treated as a hard error (the server is misbehaving).
/// If the server sends no proof (empty field), the key is returned as-is —
/// callers can decide whether to require proofs for security-critical flows.
///
/// Returns `None` if the username is not registered.
pub async fn resolve_user(
client: &node_service::Client,
@@ -595,18 +602,31 @@ pub async fn resolve_user(
.await
.context("resolve_user RPC failed")?;
-let key = resp
-.get()
-.context("resolve_user: bad response")?
+let reader = resp.get().context("resolve_user: bad response")?;
+let key = reader
.get_identity_key()
-.context("resolve_user: missing field")?
+.context("resolve_user: missing identity_key field")?
.to_vec();
if key.is_empty() {
-Ok(None)
-} else {
-Ok(Some(key))
+return Ok(None);
}
// Verify the KT inclusion proof when the server sends one.
let proof_bytes = reader
.get_inclusion_proof()
.context("resolve_user: missing inclusion_proof field")?
.to_vec();
if !proof_bytes.is_empty() {
let proof = quicproquo_kt::InclusionProof::from_bytes(&proof_bytes)
.context("resolve_user: inclusion proof deserialise failed")?;
quicproquo_kt::verify_inclusion(&proof, username, &key)
.context("resolve_user: KT inclusion proof verification FAILED — possible key mislabelling")?;
}
Ok(Some(key))
}
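// What `quicproquo_kt::verify_inclusion` has to establish, sketched here as
// commented pseudocode; the leaf encoding and domain-separation bytes are
// assumptions in RFC 9162 style, and the authoritative definitions live in
// quicproquo-kt:
//
//     leaf = SHA-256(0x00 || username || identity_key)
//     node = SHA-256(0x01 || left_child || right_child)
//
// The proof carries the sibling hashes from the leaf up to the root; the
// client recomputes the path and compares the result against the Merkle root
// it trusts for the log. Any mismatch means the server vouched for a
// (username, key) binding that is not in the append-only log.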
/// Reverse lookup: resolve an identity key to the registered username.


@@ -0,0 +1,807 @@
//! Full-screen Ratatui TUI for quicproquo.
//!
//! Layout:
//! ┌──────────────┬──────────────────────────────────────────┐
//! │ Channels │ Messages │
//! │ (20%) │ (80%) │
//! │ │ │
//! │ ├──────────────────────────────────────────┤
//! │ │ Input bar │
//! └──────────────┴──────────────────────────────────────────┘
//!
//! Keyboard:
//! Enter — send message
//! Up / Down — scroll message history
//! Tab — next channel
//! Shift+Tab — prev channel
//! Ctrl+C / q — quit
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use crossterm::{
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyModifiers},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{
backend::CrosstermBackend,
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
Frame, Terminal,
};
use tokio::sync::mpsc;
use tokio::time::interval;
use crate::{ClientAuth, init_auth};
use super::commands::{opaque_login, opaque_register};
use super::conversation::{now_ms, ConversationId, StoredMessage};
use super::rpc::{
connect_node, enqueue, fetch_hybrid_key, fetch_wait, try_hybrid_decrypt, upload_hybrid_key,
upload_key_package,
};
use super::session::SessionState;
use super::state::load_or_init_state;
use super::token_cache::{load_cached_session, save_cached_session};
use quicproquo_core::{
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
hybrid_encrypt, parse as parse_app_msg, serialize_chat,
};
use quicproquo_proto::node_capnp::node_service;
// ── App events ───────────────────────────────────────────────────────────────
/// Events sent from background tasks into the main TUI loop.
enum TuiEvent {
/// A key event from the terminal.
Key(event::KeyEvent),
/// New messages received from the server (conv_id, sender_short, body).
NewMessages(Vec<(ConversationId, String, String)>),
/// Tick — redraw periodically even if nothing happened.
Tick,
}
// ── Display message ───────────────────────────────────────────────────────────
#[derive(Clone)]
struct DisplayMessage {
sender: String,
body: String,
timestamp_ms: u64,
is_outgoing: bool,
}
// ── App state ─────────────────────────────────────────────────────────────────
struct App {
/// Channel (conversation) names shown in the sidebar.
channel_names: Vec<String>,
/// Conversation IDs, parallel to `channel_names`.
channel_ids: Vec<ConversationId>,
/// Index of the selected channel in the sidebar.
selected_channel: usize,
/// Messages for the currently active channel.
messages: Vec<DisplayMessage>,
/// Current input buffer.
input: String,
/// Scroll offset (0 = bottom).
scroll_offset: usize,
/// Whether the user has requested quit.
should_quit: bool,
/// Short identity string for display.
identity_short: String,
}
impl App {
fn new(session: &SessionState) -> anyhow::Result<Self> {
let convs = session.conv_store.list_conversations()?;
let channel_names: Vec<String> = convs.iter().map(|c| c.display_name.clone()).collect();
let channel_ids: Vec<ConversationId> = convs.iter().map(|c| c.id.clone()).collect();
Ok(Self {
channel_names,
channel_ids,
selected_channel: 0,
messages: Vec::new(),
input: String::new(),
scroll_offset: 0,
should_quit: false,
identity_short: session.identity_short(),
})
}
fn active_conv_id(&self) -> Option<&ConversationId> {
self.channel_ids.get(self.selected_channel)
}
/// Reload messages for the currently selected channel from the session store.
fn reload_messages(&mut self, session: &SessionState) -> anyhow::Result<()> {
let conv_id = match self.active_conv_id() {
Some(id) => id.clone(),
None => {
self.messages.clear();
return Ok(());
}
};
let stored = session.conv_store.load_recent_messages(&conv_id, 200)?;
self.messages = stored
.into_iter()
.map(|m| {
let sender = if m.is_outgoing {
format!("me({})", &self.identity_short)
} else if let Some(name) = &m.sender_name {
name.clone()
} else {
// Shorten sender key to 8 hex chars.
hex::encode(&m.sender_key[..m.sender_key.len().min(4)])
};
DisplayMessage {
sender,
body: m.body,
timestamp_ms: m.timestamp_ms,
is_outgoing: m.is_outgoing,
}
})
.collect();
// Reset scroll to bottom on channel switch.
self.scroll_offset = 0;
Ok(())
}
fn select_next_channel(&mut self, session: &SessionState) {
if self.channel_names.is_empty() {
return;
}
self.selected_channel = (self.selected_channel + 1) % self.channel_names.len();
let _ = self.reload_messages(session);
}
fn select_prev_channel(&mut self, session: &SessionState) {
if self.channel_names.is_empty() {
return;
}
if self.selected_channel == 0 {
self.selected_channel = self.channel_names.len() - 1;
} else {
self.selected_channel -= 1;
}
let _ = self.reload_messages(session);
}
fn scroll_up(&mut self) {
self.scroll_offset = self.scroll_offset.saturating_add(1);
}
fn scroll_down(&mut self) {
self.scroll_offset = self.scroll_offset.saturating_sub(1);
}
/// Append newly received messages to the in-memory list (no DB reload needed
/// since we already have them from the poll task, but we also save them via
/// the session so they appear on reload).
fn append_messages(&mut self, msgs: Vec<(ConversationId, String, String)>) {
let active = self.active_conv_id().cloned();
for (conv_id, sender, body) in msgs {
if active.as_ref() == Some(&conv_id) {
self.messages.push(DisplayMessage {
sender,
body,
timestamp_ms: now_ms(),
is_outgoing: false,
});
// scroll_offset == 0 already pins the view to the bottom, so the
// new message becomes visible without any scroll adjustment.
}
}
}
}
// ── Drawing ───────────────────────────────────────────────────────────────────
fn ui(frame: &mut Frame, app: &App) {
let size = frame.area();
// Top-level split: sidebar | main area.
let h_chunks = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(20), Constraint::Percentage(80)])
.split(size);
// Main area split: messages | input bar.
let v_chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Min(3), Constraint::Length(3)])
.split(h_chunks[1]);
draw_sidebar(frame, app, h_chunks[0]);
draw_messages(frame, app, v_chunks[0]);
draw_input(frame, app, v_chunks[1]);
}
fn draw_sidebar(frame: &mut Frame, app: &App, area: Rect) {
let items: Vec<ListItem> = app
.channel_names
.iter()
.enumerate()
.map(|(i, name)| {
let style = if i == app.selected_channel {
Style::default()
.fg(Color::Cyan)
.add_modifier(Modifier::BOLD | Modifier::REVERSED)
} else {
Style::default().fg(Color::Cyan)
};
ListItem::new(Line::from(Span::styled(name.clone(), style)))
})
.collect();
let block = Block::default()
.title(" Channels ")
.borders(Borders::ALL)
.style(Style::default().fg(Color::DarkGray));
let mut list_state = ListState::default();
if !app.channel_names.is_empty() {
list_state.select(Some(app.selected_channel));
}
frame.render_stateful_widget(
List::new(items).block(block),
area,
&mut list_state,
);
}
fn draw_messages(frame: &mut Frame, app: &App, area: Rect) {
let channel_title = app
.channel_names
.get(app.selected_channel)
.map(|n| format!(" {n} "))
.unwrap_or_else(|| " Messages ".to_string());
let block = Block::default()
.title(channel_title)
.borders(Borders::ALL)
.style(Style::default().fg(Color::DarkGray));
let inner_height = area.height.saturating_sub(2) as usize;
// Build lines from messages (newest at bottom).
let mut lines: Vec<Line> = app
.messages
.iter()
.map(|m| {
let ts = format_timestamp(m.timestamp_ms);
let ts_span = Span::styled(ts, Style::default().fg(Color::DarkGray));
let sender_style = if m.is_outgoing {
Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)
};
let sender_span = Span::styled(format!(" {} ", m.sender), sender_style);
let body_span = Span::raw(m.body.clone());
Line::from(vec![ts_span, sender_span, body_span])
})
.collect();
// Apply scroll: scroll_offset=0 means newest at bottom.
let total = lines.len();
let visible_start = if total > inner_height {
let bottom = total - app.scroll_offset.min(total);
bottom.saturating_sub(inner_height)
} else {
0
};
let visible_end = if total > inner_height {
total - app.scroll_offset.min(total)
} else {
total
};
let visible_lines: Vec<Line> = lines
.drain(visible_start..visible_end.min(lines.len()))
.collect();
let paragraph = Paragraph::new(visible_lines)
.block(block)
.wrap(Wrap { trim: false });
frame.render_widget(paragraph, area);
}
fn draw_input(frame: &mut Frame, app: &App, area: Rect) {
let block = Block::default()
.title(" Input (Enter=send, Tab=switch channel, q/Ctrl+C=quit) ")
.borders(Borders::ALL)
.style(Style::default().fg(Color::DarkGray));
let input_text = Paragraph::new(app.input.as_str())
.block(block)
.style(Style::default().fg(Color::White));
frame.render_widget(input_text, area);
// Position cursor at end of input.
let cursor_x = area.x + 1 + app.input.len() as u16;
let cursor_y = area.y + 1;
if cursor_x < area.x + area.width - 1 {
frame.set_cursor_position((cursor_x, cursor_y));
}
}
fn format_timestamp(ms: u64) -> String {
// Simple HH:MM format from epoch ms.
let secs = ms / 1000;
let hours = (secs / 3600) % 24;
let minutes = (secs / 60) % 60;
format!("[{:02}:{:02}] ", hours, minutes)
}
// ── Message polling task ──────────────────────────────────────────────────────
/// Background task that polls the server for new messages and sends them via `tx`.
async fn poll_task(
mut session: SessionState,
client: node_service::Client,
tx: mpsc::Sender<TuiEvent>,
) {
let mut poll_interval = interval(Duration::from_millis(1000));
poll_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
loop {
poll_interval.tick().await;
let identity_bytes = session.identity_bytes();
let payloads = match fetch_wait(&client, &identity_bytes, 0).await {
Ok(p) => p,
Err(_) => continue,
};
if payloads.is_empty() {
continue;
}
let mut new_msgs: Vec<(ConversationId, String, String)> = Vec::new();
let my_key = session.identity_bytes();
let mut sorted = payloads;
sorted.sort_by_key(|(seq, _)| *seq);
for (_seq, payload) in &sorted {
let mls_payload = match try_hybrid_decrypt(session.hybrid_kp.as_ref(), payload) {
Ok(b) => b,
Err(_) => payload.clone(),
};
let conv_ids: Vec<ConversationId> = session.members.keys().cloned().collect();
for conv_id in &conv_ids {
let member = match session.members.get_mut(conv_id) {
Some(m) => m,
None => continue,
};
match member.receive_message(&mls_payload) {
Ok(ReceivedMessage::Application(plaintext)) => {
let (sender_key, app_bytes) = {
let after_unpad = quicproquo_core::padding::unpad(&plaintext)
.unwrap_or_else(|_| plaintext.clone());
if quicproquo_core::sealed_sender::is_sealed(&after_unpad) {
match quicproquo_core::sealed_sender::unseal(&after_unpad) {
Ok((sk, inner)) => (sk.to_vec(), inner),
Err(_) => (my_key.clone(), after_unpad),
}
} else {
(my_key.clone(), after_unpad)
}
};
let (body, msg_id, msg_type, ref_msg_id) =
match parse_app_msg(&app_bytes) {
Ok((_, AppMessage::Chat { message_id, body })) => (
String::from_utf8_lossy(&body).to_string(),
Some(message_id),
"chat",
None,
),
Ok((_, AppMessage::Reply { ref_msg_id, body })) => (
String::from_utf8_lossy(&body).to_string(),
None,
"reply",
Some(ref_msg_id),
),
Ok((_, AppMessage::Reaction { ref_msg_id, emoji })) => (
String::from_utf8_lossy(&emoji).to_string(),
None,
"reaction",
Some(ref_msg_id),
),
_ => (
String::from_utf8_lossy(&app_bytes).to_string(),
None,
"chat",
None,
),
};
let stored = StoredMessage {
conversation_id: conv_id.clone(),
message_id: msg_id,
sender_key: sender_key.clone(),
sender_name: None,
body: body.clone(),
msg_type: msg_type.into(),
ref_msg_id,
timestamp_ms: now_ms(),
is_outgoing: false,
};
if session.conv_store.save_message(&stored).is_ok() {
let sender_short = hex::encode(&sender_key[..sender_key.len().min(4)]);
new_msgs.push((conv_id.clone(), sender_short, body));
}
let _ = session.conv_store.update_activity(conv_id, now_ms());
let _ = session.save_member(conv_id);
break;
}
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
let _ = session.save_member(conv_id);
break;
}
_ => {}
}
}
}
if !new_msgs.is_empty() {
if tx.send(TuiEvent::NewMessages(new_msgs)).await.is_err() {
break;
}
}
}
}
// ── Send message ──────────────────────────────────────────────────────────────
async fn send_message(
session: &mut SessionState,
client: &node_service::Client,
conv_id: &ConversationId,
text: &str,
) -> anyhow::Result<()> {
let my_key = session.identity_bytes();
let identity = Arc::clone(&session.identity);
let member = session
.members
.get_mut(conv_id)
.context("no GroupMember for this conversation")?;
// Wrap in structured AppMessage format.
let app_payload = serialize_chat(text.as_bytes(), None)
.context("serialize app message")?;
// Metadata protection: seal + pad.
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
let padded = quicproquo_core::padding::pad(&sealed);
let ct = member.send_message(&padded).context("MLS encrypt")?;
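    // Resulting onion, inner to outer: AppMessage bytes → sealed-sender envelope
    // → padded bucket → MLS ciphertext → optional per-recipient hybrid-KEM wrap below.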
let recipients: Vec<Vec<u8>> = member
.member_identities()
.into_iter()
.filter(|id| id.as_slice() != my_key.as_slice())
.collect();
for recipient_key in &recipients {
let peer_hybrid_pk = fetch_hybrid_key(client, recipient_key).await?;
let payload = if let Some(ref pk) = peer_hybrid_pk {
hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt")?
} else {
ct.clone()
};
enqueue(client, recipient_key, &payload).await?;
}
// Extract message_id from what we just serialized.
let msg_id = parse_app_msg(&app_payload)
.ok()
.and_then(|(_, m)| match m {
AppMessage::Chat { message_id, .. } => Some(message_id),
_ => None,
});
// Save outgoing message.
let stored = StoredMessage {
conversation_id: conv_id.clone(),
message_id: msg_id,
sender_key: my_key,
sender_name: Some("you".into()),
body: text.to_string(),
msg_type: "chat".into(),
ref_msg_id: None,
timestamp_ms: now_ms(),
is_outgoing: true,
};
session.conv_store.save_message(&stored)?;
session.conv_store.update_activity(conv_id, now_ms())?;
session.save_member(conv_id)?;
Ok(())
}
// ── TUI entry point ───────────────────────────────────────────────────────────
/// Entry point for `qpq tui`. Sets up the terminal, runs the event loop, and
/// restores the terminal on exit.
pub async fn run_tui(
state_path: &Path,
server: &str,
ca_cert: &Path,
server_name: &str,
password: Option<&str>,
username: Option<&str>,
opaque_password: Option<&str>,
access_token: &str,
device_id: Option<&str>,
) -> anyhow::Result<()> {
// ── Auth ──────────────────────────────────────────────────────────────────
let resolved_token = resolve_tui_access_token(
state_path,
server,
ca_cert,
server_name,
password,
username,
opaque_password,
access_token,
)
.await?;
let token_bytes = hex::decode(&resolved_token)
.unwrap_or_else(|_| resolved_token.into_bytes());
let auth_ctx = ClientAuth::from_raw(token_bytes, device_id.map(String::from));
init_auth(auth_ctx);
// ── Session + RPC ─────────────────────────────────────────────────────────
let mut session = SessionState::load(state_path, password)?;
let client = connect_node(server, ca_cert, server_name).await?;
// Auto-upload KeyPackage.
let _ = auto_upload_keys_tui(&session, &client).await;
// ── Terminal setup ────────────────────────────────────────────────────────
enable_raw_mode().context("enable raw mode")?;
let mut stdout = std::io::stdout();
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)
.context("enter alternate screen")?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend).context("create terminal")?;
let result = tui_loop(&mut terminal, &mut session, client).await;
// ── Terminal cleanup (always restore, even on error) ───────────────────
disable_raw_mode().ok();
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture
)
.ok();
terminal.show_cursor().ok();
session.save_all()?;
result
}
async fn tui_loop(
terminal: &mut Terminal<CrosstermBackend<std::io::Stdout>>,
session: &mut SessionState,
client: node_service::Client,
) -> anyhow::Result<()> {
let mut app = App::new(session)?;
app.reload_messages(session)?;
let (event_tx, mut event_rx) = mpsc::channel::<TuiEvent>(256);
// ── Keyboard event task ───────────────────────────────────────────────────
let key_tx = event_tx.clone();
tokio::task::spawn_local(async move {
loop {
// crossterm event polling — 50ms timeout so we can tick.
match event::poll(Duration::from_millis(50)) {
Ok(true) => {
if let Ok(Event::Key(key)) = event::read() {
if key_tx.send(TuiEvent::Key(key)).await.is_err() {
break;
}
}
}
Ok(false) => {
// No event — send a tick so the UI redraws.
if key_tx.send(TuiEvent::Tick).await.is_err() {
break;
}
}
Err(_) => break,
}
}
});
// ── Message poll task ─────────────────────────────────────────────────────
// Clone session state for the poll task (it needs its own SessionState).
    let poll_session = SessionState::load(&session.state_path, session.password.as_deref())?;
let poll_tx = event_tx.clone();
tokio::task::spawn_local(poll_task(poll_session, client.clone(), poll_tx));
// ── Main loop ─────────────────────────────────────────────────────────────
loop {
terminal.draw(|f| ui(f, &app)).context("draw")?;
match event_rx.recv().await {
None => break,
Some(TuiEvent::Tick) => {
// Just redraw.
}
Some(TuiEvent::NewMessages(msgs)) => {
app.append_messages(msgs);
}
Some(TuiEvent::Key(key)) => {
match key.code {
KeyCode::Char('c') if key.modifiers.contains(KeyModifiers::CONTROL) => {
app.should_quit = true;
}
KeyCode::Char('q') if app.input.is_empty() => {
app.should_quit = true;
}
KeyCode::Enter => {
let text = app.input.trim().to_string();
if !text.is_empty() {
app.input.clear();
if let Some(conv_id) = app.active_conv_id().cloned() {
match send_message(session, &client, &conv_id, &text).await {
Ok(()) => {
// Add to in-memory list immediately.
app.messages.push(DisplayMessage {
sender: format!("me({})", app.identity_short),
body: text,
timestamp_ms: now_ms(),
is_outgoing: true,
});
}
Err(_e) => {
// Silently drop — user will see nothing happened.
}
}
}
}
}
KeyCode::Char(c) => {
app.input.push(c);
}
KeyCode::Backspace => {
app.input.pop();
}
KeyCode::Up => {
app.scroll_up();
}
KeyCode::Down => {
app.scroll_down();
}
KeyCode::Tab => {
if key.modifiers.contains(KeyModifiers::SHIFT) {
app.select_prev_channel(session);
} else {
app.select_next_channel(session);
}
app.reload_messages(session)?;
}
_ => {}
}
}
}
if app.should_quit {
break;
}
}
Ok(())
}
// ── Startup helpers ───────────────────────────────────────────────────────────
async fn auto_upload_keys_tui(
session: &SessionState,
client: &node_service::Client,
) -> anyhow::Result<()> {
let ks_path = session.state_path.with_extension("pending.ks");
let ks = DiskKeyStore::persistent(&ks_path).unwrap_or_else(|_| DiskKeyStore::ephemeral());
let mut member = GroupMember::new_with_state(
Arc::clone(&session.identity),
ks,
None,
false,
);
let kp_bytes = member.generate_key_package().context("generate KeyPackage")?;
let id_key = session.identity.public_key_bytes();
upload_key_package(client, &id_key, &kp_bytes).await?;
if let Some(ref hkp) = session.hybrid_kp {
upload_hybrid_key(client, &id_key, &hkp.public_key()).await?;
}
Ok(())
}
async fn resolve_tui_access_token(
state_path: &Path,
server: &str,
ca_cert: &Path,
server_name: &str,
state_password: Option<&str>,
username: Option<&str>,
opaque_password: Option<&str>,
cli_access_token: &str,
) -> anyhow::Result<String> {
if !cli_access_token.is_empty() {
return Ok(cli_access_token.to_string());
}
if let Some(cached) = load_cached_session(state_path, state_password) {
return Ok(cached.token_hex);
}
let username = match username {
Some(u) => u.to_string(),
None => {
use std::io::Write;
eprint!("Username: ");
std::io::stderr().flush().ok();
let mut input = String::new();
std::io::stdin()
.read_line(&mut input)
.context("failed to read username")?;
let trimmed = input.trim().to_string();
anyhow::ensure!(!trimmed.is_empty(), "username is required");
trimmed
}
};
let opaque_password = match opaque_password {
Some(p) => p.to_string(),
None => rpassword::read_password().context("failed to read password")?,
};
let state = load_or_init_state(state_path, state_password)?;
let identity = IdentityKeypair::from_seed(state.identity_seed);
let identity_key = identity.public_key_bytes().to_vec();
let node_client = connect_node(server, ca_cert, server_name).await?;
    // Best-effort registration; ignore the result since the user may already exist.
    let _ = opaque_register(&node_client, &username, &opaque_password, Some(&identity_key)).await;
let token_bytes = opaque_login(&node_client, &username, &opaque_password, &identity_key)
.await
.context("OPAQUE login failed")?;
let token_hex = hex::encode(&token_bytes);
save_cached_session(state_path, &username, &token_hex, state_password)?;
Ok(token_hex)
}

View File

@@ -19,10 +19,10 @@ use std::sync::RwLock;
pub mod client;
pub use client::commands::{
    cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
    cmd_fetch_key, cmd_health, cmd_health_json, cmd_invite, cmd_join, cmd_login, cmd_ping,
    cmd_recv, cmd_register, cmd_register_state, cmd_refresh_keypackage, cmd_register_user,
    cmd_send, cmd_whoami, opaque_login, receive_pending_plaintexts, whoami_json,
};
pub use client::repl::run_repl;

View File

@@ -2,14 +2,17 @@
use std::path::PathBuf;
use anyhow::Context;
use clap::{Parser, Subcommand};
use quicproquo_client::{
    cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
    cmd_fetch_key, cmd_health, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register,
    cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami,
    init_auth, run_repl, ClientAuth,
};
#[cfg(feature = "tui")]
use quicproquo_client::client::tui::run_tui;
// ── CLI ───────────────────────────────────────────────────────────────────────
@@ -310,6 +313,26 @@ enum Command {
        no_server: bool,
    },
/// Full-screen Ratatui TUI (requires --features tui).
/// Channels sidebar, scrollable message view, and inline input bar.
#[cfg(feature = "tui")]
Tui {
#[arg(
long,
default_value = "qpq-state.bin",
env = "QPQ_STATE"
)]
state: PathBuf,
#[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
server: String,
/// OPAQUE username for automatic registration/login.
#[arg(long, env = "QPQ_USERNAME")]
username: Option<String>,
/// OPAQUE password (prompted securely if --username is set but --password is not).
#[arg(long, env = "QPQ_PASSWORD")]
password: Option<String>,
},
    /// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
    /// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
    Chat {
@@ -328,6 +351,39 @@ enum Command {
        #[arg(long, default_value_t = 500)]
        poll_interval_ms: u64,
    },
/// Export a conversation's message history to an encrypted, tamper-evident transcript file.
///
    /// The output file uses Argon2id + ChaCha20-Poly1305 encryption with a SHA-256 hash chain
    /// linking every record. Use `qpq export-verify` to check the file's structural integrity
    /// without decrypting; the full hash chain is verified whenever the transcript is decrypted.
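    ///
    /// Example (illustrative ID and output path):
    ///   qpq export --conv-id 00112233445566778899aabbccddeeff --output chat.qpqt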
Export {
        /// Path to the conversation database (SQLite file).
#[arg(long, default_value = "qpq-convdb.sqlite", env = "QPQ_CONV_DB")]
conv_db: PathBuf,
/// Conversation ID to export (32 hex chars = 16 bytes).
#[arg(long)]
conv_id: String,
/// Output path for the .qpqt transcript file.
#[arg(long, default_value = "transcript.qpqt")]
output: PathBuf,
/// Password used to encrypt the transcript (separate from the state/DB password).
#[arg(long, env = "QPQ_TRANSCRIPT_PASSWORD")]
transcript_password: Option<String>,
/// Password for the encrypted conversation database (if any).
#[arg(long, env = "QPQ_STATE_PASSWORD")]
db_password: Option<String>,
},
/// Verify the hash-chain integrity of a transcript file without decrypting content.
ExportVerify {
/// Path to the .qpqt transcript file to verify.
#[arg(long)]
input: PathBuf,
},
}
// ── Helpers ───────────────────────────────────────────────────────────────────
@@ -361,9 +417,12 @@ async fn main() -> anyhow::Result<()> {
    let args = Args::parse();
    // For the REPL and TUI, defer init_auth so they can resolve their own token via OPAQUE.
    // For all other subcommands, initialize auth immediately.
    #[cfg(not(feature = "tui"))]
    let is_repl = matches!(args.command, None | Some(Command::Repl { .. }));
    #[cfg(feature = "tui")]
    let is_repl = matches!(args.command, None | Some(Command::Repl { .. }) | Some(Command::Tui { .. }));
    if !is_repl {
        let auth_ctx = ClientAuth::from_parts(args.access_token.clone(), args.device_id.clone());
        init_auth(auth_ctx);
@@ -615,5 +674,53 @@ async fn main() -> anyhow::Result<()> {
            ))
            .await
        }
#[cfg(feature = "tui")]
Command::Tui {
state,
server,
username,
password,
} => {
let state = derive_state_path(state, username.as_deref());
let local = tokio::task::LocalSet::new();
local
.run_until(run_tui(
&state,
&server,
&args.ca_cert,
&args.server_name,
state_pw,
username.as_deref(),
password.as_deref(),
&args.access_token,
args.device_id.as_deref(),
))
.await
}
Command::Export {
conv_db,
conv_id,
output,
transcript_password,
db_password,
} => {
// Prompt for transcript password if not provided.
let tp = match transcript_password {
Some(p) => p,
None => {
eprint!("Transcript password: ");
rpassword::read_password()
.context("failed to read transcript password")?
}
};
cmd_export(
&conv_db,
&conv_id,
&output,
&tp,
db_password.as_deref().or(state_pw),
)
}
Command::ExportVerify { input } => cmd_export_verify(&input),
    }
}

View File

@@ -10,7 +10,9 @@ license = "MIT"
x25519-dalek = { workspace = true }
ed25519-dalek = { workspace = true }
sha2 = { workspace = true }
hmac = { workspace = true }
hkdf = { workspace = true }
ciborium = { workspace = true }
chacha20poly1305 = { workspace = true }
zeroize = { workspace = true }
rand = { workspace = true }
@@ -57,3 +59,7 @@ harness = false
[[bench]]
name = "hybrid_kem_bench"
harness = false
[[bench]]
name = "crypto_benchmarks"
harness = false

View File

@@ -0,0 +1,139 @@
//! Benchmark: Identity keypair operations, sealed sender, and message padding.
//!
//! Covers:
//! - [`IdentityKeypair`] generation, signing, and signature verification
//! - Sealed sender `seal` / `unseal` (Ed25519 sign + verify overhead)
//! - Message padding `pad` / `unpad` at various payload sizes
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use quicproquo_core::{IdentityKeypair, padding};
// ── Identity keypair benchmarks ──────────────────────────────────────────────
fn bench_identity_keygen(c: &mut Criterion) {
c.bench_function("identity_keygen", |b| {
b.iter(|| black_box(IdentityKeypair::generate()));
});
}
fn bench_identity_sign(c: &mut Criterion) {
let identity = IdentityKeypair::generate();
let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";
c.bench_function("identity_sign", |b| {
b.iter(|| black_box(identity.sign_raw(black_box(payload))));
});
}
fn bench_identity_verify(c: &mut Criterion) {
let identity = IdentityKeypair::generate();
let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";
let sig = identity.sign_raw(payload);
let pk = identity.public_key_bytes();
c.bench_function("identity_verify", |b| {
b.iter(|| {
black_box(
IdentityKeypair::verify_raw(
black_box(&pk),
black_box(payload),
black_box(&sig),
)
.unwrap()
)
});
});
}
// ── Sealed sender benchmarks ─────────────────────────────────────────────────
fn bench_sealed_sender(c: &mut Criterion) {
use quicproquo_core::sealed_sender::{seal, unseal};
let sizes: &[(&str, usize)] = &[
("32B", 32),
("256B", 256),
("1KB", 1024),
("4KB", 4096),
];
let identity = IdentityKeypair::generate();
let mut group = c.benchmark_group("sealed_sender_seal");
for (label, size) in sizes {
let payload = vec![0xABu8; *size];
group.bench_with_input(
BenchmarkId::from_parameter(label),
&payload,
|b, payload| {
b.iter(|| black_box(seal(black_box(&identity), black_box(payload))));
},
);
}
group.finish();
let mut group = c.benchmark_group("sealed_sender_unseal");
for (label, size) in sizes {
let payload = vec![0xABu8; *size];
let sealed = seal(&identity, &payload);
group.bench_with_input(
BenchmarkId::from_parameter(label),
&sealed,
|b, sealed| {
b.iter(|| black_box(unseal(black_box(sealed)).unwrap()));
},
);
}
group.finish();
}
// ── Message padding benchmarks ────────────────────────────────────────────────
fn bench_padding(c: &mut Criterion) {
// Representative sizes: one per bucket + oversized
let sizes: &[(&str, usize)] = &[
("50B", 50), // → 256 bucket
("512B", 512), // → 1024 bucket
("2KB", 2048), // → 4096 bucket
("8KB", 8192), // → 16384 bucket
("20KB", 20480), // → 32768 (oversized)
];
let mut group = c.benchmark_group("padding_pad");
for (label, size) in sizes {
let payload = vec![0xABu8; *size];
group.bench_with_input(
BenchmarkId::from_parameter(label),
&payload,
|b, payload| {
b.iter(|| black_box(padding::pad(black_box(payload))));
},
);
}
group.finish();
let mut group = c.benchmark_group("padding_unpad");
for (label, size) in sizes {
let payload = vec![0xABu8; *size];
let padded = padding::pad(&payload);
group.bench_with_input(
BenchmarkId::from_parameter(label),
&padded,
|b, padded| {
b.iter(|| black_box(padding::unpad(black_box(padded)).unwrap()));
},
);
}
group.finish();
}
criterion_group!(
benches,
bench_identity_keygen,
bench_identity_sign,
bench_identity_verify,
bench_sealed_sender,
bench_padding,
);
criterion_main!(benches);

View File

@@ -125,6 +125,87 @@ impl IdentityKeypair {
    }
}
/// Verify a 96-byte delivery proof produced by the server's `build_delivery_proof`.
///
/// # Layout
/// ```text
/// bytes 0..32 — SHA-256(seq_le || recipient_key || timestamp_ms_le)
/// bytes 32..96 — Ed25519 signature over those 32 bytes
/// ```
///
/// Returns `Ok(true)` when the proof is structurally valid and the signature verifies,
/// `Ok(false)` when the proof length is wrong (graceful degradation for old servers),
/// or `Err` when the signature is structurally invalid / verification fails.
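///
/// # Example
///
/// A minimal doctest sketch of the graceful-degradation path; the all-zero
/// key and short proof are placeholders, not real server output:
///
/// ```
/// use quicproquo_core::verify_delivery_proof;
///
/// let server_pk = [0u8; 32];
/// // Anything that is not exactly 96 bytes is reported as `Ok(false)`.
/// assert!(!verify_delivery_proof(&server_pk, &[0u8; 10]).unwrap());
/// ```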
pub fn verify_delivery_proof(
server_pubkey: &[u8; 32],
proof: &[u8],
) -> Result<bool, crate::error::CoreError> {
if proof.len() != 96 {
return Ok(false);
}
let hash: [u8; 32] = proof[..32].try_into().expect("slice is 32 bytes");
let sig: [u8; 64] = proof[32..96].try_into().expect("slice is 64 bytes");
IdentityKeypair::verify_raw(server_pubkey, &hash, &sig)?;
Ok(true)
}
#[cfg(test)]
mod proof_tests {
use super::*;
use sha2::{Digest, Sha256};
fn make_proof(kp: &IdentityKeypair, seq: u64, recipient_key: &[u8], timestamp_ms: u64) -> Vec<u8> {
let mut hasher = Sha256::new();
hasher.update(seq.to_le_bytes());
hasher.update(recipient_key);
hasher.update(timestamp_ms.to_le_bytes());
let hash: [u8; 32] = hasher.finalize().into();
let sig = kp.sign_raw(&hash);
let mut proof = vec![0u8; 96];
proof[..32].copy_from_slice(&hash);
proof[32..].copy_from_slice(&sig);
proof
}
#[test]
fn verify_valid_proof() {
let kp = IdentityKeypair::generate();
let pk = kp.public_key_bytes();
let rk = [0xabu8; 32];
let proof = make_proof(&kp, 42, &rk, 1_700_000_000_000);
assert!(verify_delivery_proof(&pk, &proof).unwrap());
}
#[test]
fn reject_wrong_length() {
let kp = IdentityKeypair::generate();
let pk = kp.public_key_bytes();
assert!(!verify_delivery_proof(&pk, &[0u8; 64]).unwrap());
assert!(!verify_delivery_proof(&pk, &[]).unwrap());
assert!(!verify_delivery_proof(&pk, &[0u8; 97]).unwrap());
}
#[test]
fn reject_tampered_hash() {
let kp = IdentityKeypair::generate();
let pk = kp.public_key_bytes();
let rk = [0x01u8; 32];
let mut proof = make_proof(&kp, 1, &rk, 999);
proof[0] ^= 0xff; // corrupt the hash bytes
assert!(verify_delivery_proof(&pk, &proof).is_err());
}
#[test]
fn reject_wrong_pubkey() {
let kp = IdentityKeypair::generate();
let other = IdentityKeypair::generate();
let pk = other.public_key_bytes();
let rk = [0x02u8; 32];
let proof = make_proof(&kp, 5, &rk, 0);
assert!(verify_delivery_proof(&pk, &proof).is_err());
}
}
impl Serialize for IdentityKeypair {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where

View File

@@ -23,7 +23,9 @@ mod keypackage;
mod keystore;
pub mod opaque_auth;
pub mod padding;
pub mod safety_numbers;
pub mod sealed_sender;
pub mod transcript;
// ── Public API ────────────────────────────────────────────────────────────────
@@ -38,6 +40,11 @@ pub use hybrid_kem::{
    HybridPublicKey,
};
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
pub use identity::{verify_delivery_proof, IdentityKeypair};
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
pub use keystore::DiskKeyStore;
pub use safety_numbers::compute_safety_number;
pub use transcript::{
    read_transcript, verify_transcript_chain, ChainVerdict, DecodedRecord, TranscriptRecord,
    TranscriptWriter,
};

View File

@@ -0,0 +1,153 @@
//! Signal-style safety numbers for out-of-band identity key verification.
//!
//! # Algorithm
//!
//! Given two 32-byte Ed25519 public keys, safety numbers are computed as:
//!
//! 1. Sort the keys lexicographically so the result is symmetric.
//! 2. Concatenate: `input = key_lo || key_hi` (64 bytes).
//! 3. Compute HMAC-SHA256(key=info, data=input) where
//! `info = b"quicproquo-safety-number-v1"`.
//! 4. Re-apply the HMAC until it has run 5200 times in total:
//!    `hash = HMAC-SHA256(key=info, data=hash)`.
//! 5. Interpret the 32-byte result as 4× 64-bit big-endian integers
//! (= 256 bits → 4 groups of 64 bits). Extract 3 decimal groups per
//! 64-bit chunk using `% 100_000` three times, giving 12 groups total.
//! 6. Format as 12 space-separated 5-digit strings.
//!
//! The 5200-iteration stretch mirrors Signal's implementation cost.
//! The result is the same regardless of argument order.
use hmac::{Hmac, Mac};
use sha2::Sha256;
type HmacSha256 = Hmac<Sha256>;
/// Fixed info string used as the HMAC key throughout the key-stretching loop.
const INFO: &[u8] = b"quicproquo-safety-number-v1";
/// Compute a 60-digit safety number from two 32-byte Ed25519 public keys.
///
/// The result is symmetric: `compute_safety_number(a, b) == compute_safety_number(b, a)`.
///
/// # Format
///
/// Returns a `String` of 12 space-separated 5-digit groups, e.g.:
/// `"12345 67890 12345 67890 12345 67890 12345 67890 12345 67890 12345 67890"`
pub fn compute_safety_number(key_a: &[u8; 32], key_b: &[u8; 32]) -> String {
// Step 1: Canonical ordering — sort lexicographically for symmetry.
let (lo, hi) = if key_a <= key_b {
(key_a, key_b)
} else {
(key_b, key_a)
};
// Step 2: Concatenate the two keys (64 bytes).
let mut input = [0u8; 64];
input[..32].copy_from_slice(lo);
input[32..].copy_from_slice(hi);
// Step 3: First HMAC iteration.
let mut hash: [u8; 32] = {
let mut mac = HmacSha256::new_from_slice(INFO).expect("HMAC accepts any key length");
mac.update(&input);
mac.finalize().into_bytes().into()
};
// Step 4: Iterate 5199 more times (5200 total).
for _ in 1..5200 {
let mut mac = HmacSha256::new_from_slice(INFO).expect("HMAC accepts any key length");
mac.update(&hash);
hash = mac.finalize().into_bytes().into();
}
// Step 5: Extract 12 five-digit groups.
// We have 32 bytes = 4 × u64 (big-endian). Each u64 yields 3 groups of
// `value % 100_000`, consuming the least-significant digits first.
let mut groups = [0u32; 12];
for chunk_idx in 0..4 {
let offset = chunk_idx * 8;
let chunk = u64::from_be_bytes(
hash[offset..offset + 8]
.try_into()
.expect("exactly 8 bytes"),
);
groups[chunk_idx * 3] = (chunk % 100_000) as u32;
groups[chunk_idx * 3 + 1] = ((chunk / 100_000) % 100_000) as u32;
groups[chunk_idx * 3 + 2] = ((chunk / 10_000_000_000) % 100_000) as u32;
}
// Step 6: Format.
groups
.iter()
.map(|g| format!("{g:05}"))
.collect::<Vec<_>>()
.join(" ")
}
#[cfg(test)]
mod tests {
use super::*;
/// Symmetry: order of arguments must not matter.
#[test]
fn symmetric() {
let key_a = [0x1au8; 32];
let key_b = [0x2bu8; 32];
assert_eq!(
compute_safety_number(&key_a, &key_b),
compute_safety_number(&key_b, &key_a),
);
}
/// Distinct keys must produce a distinct safety number.
#[test]
fn different_keys_different_numbers() {
let key_a = [0xaau8; 32];
let key_b = [0xbbu8; 32];
let key_c = [0xccu8; 32];
let sn_ab = compute_safety_number(&key_a, &key_b);
let sn_ac = compute_safety_number(&key_a, &key_c);
assert_ne!(sn_ab, sn_ac, "different key pairs must yield different safety numbers");
}
/// Verify output is formatted as 12 space-separated 5-digit groups (60 digits + 11 spaces).
#[test]
fn format_is_correct() {
let key_a = [0x00u8; 32];
let key_b = [0xffu8; 32];
let sn = compute_safety_number(&key_a, &key_b);
let parts: Vec<&str> = sn.split(' ').collect();
assert_eq!(parts.len(), 12, "must have 12 groups");
for part in &parts {
assert_eq!(part.len(), 5, "each group must be exactly 5 digits");
assert!(part.chars().all(|c| c.is_ascii_digit()), "groups must be numeric");
}
}
    /// Regression test on a fixed key pair: guards symmetry and output shape.
    ///
    /// No exact digit string is pinned yet; to pin one, print the result once
    /// and assert the literal here. Any change to the algorithm or constants
    /// MUST then update that vector.
#[test]
fn known_vector() {
let key_a = [
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
];
let key_b = [
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
];
        // To pin an exact vector, print `result` once with
        // `cargo test known_vector -- --nocapture` and assert the literal here.
        let result = compute_safety_number(&key_a, &key_b);
        // Symmetry check on fixed keys.
assert_eq!(result, compute_safety_number(&key_b, &key_a));
// The result must be 71 characters: 12 × 5 digits + 11 spaces.
assert_eq!(result.len(), 71, "output length must be 71 chars");
}
}

View File

@@ -0,0 +1,543 @@
//! Encrypted, tamper-evident message transcript archive.
//!
//! # File format
//!
//! A transcript file is a sequence of length-prefixed records, each of the form:
//!
//! ```text
//! [ u32 len (BE) ][ ChaCha20-Poly1305 ciphertext ]
//! ```
//!
//! Each record contains a CBOR-encoded [`RecordPlain`] as the plaintext:
//!
//! ```text
//! {
//! "epoch": u64, // monotonically increasing record index (0-based)
//! "sender_identity": bytes, // 32-byte Ed25519 public key (or empty)
//! "seq": u64, // message sequence number
//! "timestamp_ms": u64, // wall-clock timestamp
//! "plaintext": text, // UTF-8 message body
//! "prev_hash": bytes, // SHA-256 of the previous ciphertext (all zeros for epoch 0)
//! }
//! ```
//!
//! The AEAD nonce is `epoch` encoded as 12 bytes (big-endian u64 + 4 zero bytes).
//!
//! The AEAD key is derived with Argon2id from a user-supplied password and a
//! random 16-byte salt that is stored unencrypted in the file header:
//!
//! ```text
//! [ b"QPQT" (4) ][ version u8 = 1 ][ salt (16) ][ records... ]
//! ```
//!
//! # Tamper evidence
//!
//! Each record's plaintext contains the SHA-256 hash of the **ciphertext** of
//! the previous record, forming a hash chain. During decryption,
//! [`read_transcript`] checks that every record's stored `prev_hash` matches
//! the SHA-256 of the preceding ciphertext blob; the password-less
//! [`verify_transcript_chain`] confirms only structural integrity, since
//! `prev_hash` is itself encrypted.
//!
//! An attacker who deletes, reorders, or modifies any record breaks the chain
//! the next time the transcript is decrypted.
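//!
//! # Example
//!
//! A minimal write/read round trip, mirroring the unit tests below; the
//! password and message are placeholders:
//!
//! ```
//! use quicproquo_core::transcript::{read_transcript, TranscriptRecord, TranscriptWriter};
//!
//! let mut buf = Vec::new();
//! let mut writer = TranscriptWriter::new("correct horse", &mut buf).unwrap();
//! writer
//!     .write_record(
//!         &TranscriptRecord {
//!             seq: 1,
//!             sender_identity: &[0u8; 32],
//!             timestamp_ms: 1_700_000_000_000,
//!             plaintext: "hello",
//!         },
//!         &mut buf,
//!     )
//!     .unwrap();
//! let (records, _verdict) = read_transcript("correct horse", &buf).unwrap();
//! assert_eq!(records.len(), 1);
//! assert_eq!(records[0].plaintext, "hello");
//! ```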
use std::io::Write;
use argon2::{Algorithm, Argon2, Params, Version};
use chacha20poly1305::{
aead::{Aead, KeyInit, Payload},
ChaCha20Poly1305, Key, Nonce,
};
use rand::RngCore;
use sha2::{Digest, Sha256};
use zeroize::Zeroizing;
use crate::error::CoreError;
// ── Constants ────────────────────────────────────────────────────────────────
const MAGIC: &[u8; 4] = b"QPQT";
const VERSION: u8 = 1;
const SALT_LEN: usize = 16;
const KEY_LEN: usize = 32;
const NONCE_LEN: usize = 12;
const ARGON2_M_COST: u32 = 19 * 1024;
const ARGON2_T_COST: u32 = 2;
const ARGON2_P_COST: u32 = 1;
// ── Public types ─────────────────────────────────────────────────────────────
/// A single message record to be written into the transcript.
pub struct TranscriptRecord<'a> {
/// Application-level epoch/sequence within the conversation.
pub seq: u64,
/// 32-byte Ed25519 sender public key (use `[0u8; 32]` if unknown).
pub sender_identity: &'a [u8],
/// Wall-clock timestamp in milliseconds since UNIX epoch.
pub timestamp_ms: u64,
/// Plaintext message body.
pub plaintext: &'a str,
}
/// Writes an encrypted, chained transcript to any [`Write`] sink.
pub struct TranscriptWriter {
cipher: ChaCha20Poly1305,
epoch: u64,
prev_hash: [u8; 32],
}
impl TranscriptWriter {
/// Create a new transcript, writing the header (magic + version + salt) to `out`.
///
/// `password` is stretched with Argon2id before use; it is never stored.
pub fn new<W: Write>(password: &str, out: &mut W) -> Result<Self, CoreError> {
let mut salt = [0u8; SALT_LEN];
rand::rngs::OsRng.fill_bytes(&mut salt);
out.write_all(MAGIC).map_err(io_err)?;
out.write_all(&[VERSION]).map_err(io_err)?;
out.write_all(&salt).map_err(io_err)?;
let key = derive_key(password, &salt)?;
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
Ok(Self {
cipher,
epoch: 0,
prev_hash: [0u8; 32],
})
}
/// Encrypt and append one record.
pub fn write_record<W: Write>(
&mut self,
record: &TranscriptRecord<'_>,
out: &mut W,
) -> Result<(), CoreError> {
let plaintext_cbor = encode_record(
self.epoch,
record.sender_identity,
record.seq,
record.timestamp_ms,
record.plaintext,
&self.prev_hash,
)?;
let nonce = epoch_nonce(self.epoch);
let ct = self
.cipher
.encrypt(
Nonce::from_slice(&nonce),
Payload {
msg: &plaintext_cbor,
aad: b"",
},
)
.map_err(|_| CoreError::Mls("transcript encrypt failed".into()))?;
// Update chain hash from the ciphertext blob we just produced.
self.prev_hash = Sha256::digest(&ct).into();
self.epoch += 1;
// Write length-prefixed ciphertext.
let len = ct.len() as u32;
out.write_all(&len.to_be_bytes()).map_err(io_err)?;
out.write_all(&ct).map_err(io_err)?;
Ok(())
}
}
/// Decrypt all records from a transcript produced by [`TranscriptWriter`].
///
/// Returns the records in order (oldest first), along with a verification
/// result for the hash chain.
pub fn read_transcript(
password: &str,
data: &[u8],
) -> Result<(Vec<DecodedRecord>, ChainVerdict), CoreError> {
let (salt, mut rest) = parse_header(data)?;
let key = derive_key(password, salt)?;
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
let mut records = Vec::new();
let mut epoch: u64 = 0;
let mut expected_prev: [u8; 32] = [0u8; 32];
let mut chain_ok = true;
while !rest.is_empty() {
if rest.len() < 4 {
return Err(CoreError::Mls("transcript: truncated length prefix".into()));
}
let len = u32::from_be_bytes(rest[..4].try_into().expect("4 bytes")) as usize;
rest = &rest[4..];
if rest.len() < len {
return Err(CoreError::Mls("transcript: truncated record".into()));
}
let ct = &rest[..len];
rest = &rest[len..];
let nonce = epoch_nonce(epoch);
let pt = cipher
.decrypt(
Nonce::from_slice(&nonce),
Payload { msg: ct, aad: b"" },
)
.map_err(|_| CoreError::Mls("transcript: decryption failed (wrong password?)".into()))?;
let rec = decode_record(&pt)?;
// Verify chain linkage.
if rec.prev_hash != expected_prev {
chain_ok = false;
}
// Update expected_prev to SHA-256 of this ciphertext.
expected_prev = Sha256::digest(ct).into();
epoch += 1;
records.push(rec);
}
let verdict = if chain_ok {
ChainVerdict::Ok { records: epoch }
} else {
ChainVerdict::Broken
};
Ok((records, verdict))
}
/// Check a transcript's structural integrity without decrypting record contents.
///
/// Returns `Ok(ChainVerdict)` when the header and record framing are valid;
/// parsing errors return `Err`. Because `prev_hash` lives inside the encrypted
/// payload, the full hash-chain check requires the password (see [`read_transcript`]).
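///
/// # Example
///
/// A minimal doctest sketch: an empty input is shorter than the 21-byte
/// header and is rejected as a parse error.
///
/// ```
/// use quicproquo_core::verify_transcript_chain;
///
/// assert!(verify_transcript_chain(&[]).is_err());
/// ```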
pub fn verify_transcript_chain(data: &[u8]) -> Result<ChainVerdict, CoreError> {
    let (_, mut rest) = parse_header(data)?;
    let mut count: u64 = 0;
    // The inner `prev_hash` field is encrypted, so without the password this
    // only confirms the file is structurally valid: the header parses and every
    // length-prefixed ciphertext blob is fully present. Deleting, reordering,
    // or modifying blobs is caught at decryption time, when sequential nonces
    // and the inner hash chain are checked.
    while !rest.is_empty() {
        if rest.len() < 4 {
            return Err(CoreError::Mls("transcript: truncated length prefix".into()));
        }
        let len = u32::from_be_bytes(rest[..4].try_into().expect("4 bytes")) as usize;
        rest = &rest[4..];
        if rest.len() < len {
            return Err(CoreError::Mls("transcript: truncated record".into()));
        }
        rest = &rest[len..];
        count += 1;
    }
    Ok(ChainVerdict::Ok { records: count })
}
/// Result of hash-chain verification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ChainVerdict {
/// All records are present and the chain is intact.
Ok { records: u64 },
/// At least one hash in the chain did not match.
Broken,
}
/// A decrypted and decoded transcript record.
#[derive(Debug, Clone)]
pub struct DecodedRecord {
pub epoch: u64,
pub sender_identity: Vec<u8>,
pub seq: u64,
pub timestamp_ms: u64,
pub plaintext: String,
pub prev_hash: [u8; 32],
}
// ── Internal helpers ─────────────────────────────────────────────────────────
fn derive_key(password: &str, salt: &[u8]) -> Result<Zeroizing<[u8; KEY_LEN]>, CoreError> {
let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(KEY_LEN))
.map_err(|e| CoreError::Mls(format!("argon2 params: {e}")))?;
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
let mut key = Zeroizing::new([0u8; KEY_LEN]);
argon2
.hash_password_into(password.as_bytes(), salt, &mut *key)
.map_err(|e| CoreError::Mls(format!("transcript key derivation: {e}")))?;
Ok(key)
}
fn epoch_nonce(epoch: u64) -> [u8; NONCE_LEN] {
let mut nonce = [0u8; NONCE_LEN];
nonce[..8].copy_from_slice(&epoch.to_be_bytes());
nonce
}
fn io_err(e: std::io::Error) -> CoreError {
CoreError::Mls(format!("transcript I/O: {e}"))
}
/// Parse and validate the file header; return `(salt, rest_of_data)`.
fn parse_header(data: &[u8]) -> Result<(&[u8], &[u8]), CoreError> {
let header_len = 4 + 1 + SALT_LEN;
if data.len() < header_len {
return Err(CoreError::Mls("transcript: file too short".into()));
}
if &data[..4] != MAGIC {
return Err(CoreError::Mls("transcript: invalid magic bytes".into()));
}
if data[4] != VERSION {
return Err(CoreError::Mls(format!(
"transcript: unsupported version {}",
data[4]
)));
}
let salt = &data[5..5 + SALT_LEN];
let rest = &data[5 + SALT_LEN..];
Ok((salt, rest))
}
/// Encode one record as CBOR using ciborium.
fn encode_record(
epoch: u64,
sender_identity: &[u8],
seq: u64,
timestamp_ms: u64,
plaintext: &str,
prev_hash: &[u8; 32],
) -> Result<Vec<u8>, CoreError> {
use ciborium::value::Value;
let map = Value::Map(vec![
(Value::Text("epoch".into()), Value::Integer(epoch.into())),
(Value::Text("sender_identity".into()), Value::Bytes(sender_identity.to_vec())),
(Value::Text("seq".into()), Value::Integer(seq.into())),
(Value::Text("timestamp_ms".into()), Value::Integer(timestamp_ms.into())),
(Value::Text("plaintext".into()), Value::Text(plaintext.into())),
(Value::Text("prev_hash".into()), Value::Bytes(prev_hash.to_vec())),
]);
let mut buf = Vec::new();
ciborium::into_writer(&map, &mut buf)
.map_err(|e| CoreError::Mls(format!("transcript CBOR encode: {e}")))?;
Ok(buf)
}
/// Decode a CBOR record.
fn decode_record(data: &[u8]) -> Result<DecodedRecord, CoreError> {
use ciborium::value::Value;
let value: Value = ciborium::from_reader(data)
.map_err(|e| CoreError::Mls(format!("transcript CBOR decode: {e}")))?;
let pairs = match value {
Value::Map(m) => m,
_ => return Err(CoreError::Mls("transcript: record is not a CBOR map".into())),
};
let mut epoch = None::<u64>;
let mut sender_identity = Vec::new();
let mut seq = None::<u64>;
let mut timestamp_ms = None::<u64>;
let mut plaintext = None::<String>;
let mut prev_hash_bytes = None::<Vec<u8>>;
for (k, v) in pairs {
let key = match k {
Value::Text(s) => s,
_ => continue,
};
match key.as_str() {
"epoch" => {
epoch = integer_as_u64(v);
}
"sender_identity" => {
if let Value::Bytes(b) = v { sender_identity = b; }
}
"seq" => {
seq = integer_as_u64(v);
}
"timestamp_ms" => {
timestamp_ms = integer_as_u64(v);
}
"plaintext" => {
if let Value::Text(s) = v { plaintext = Some(s); }
}
"prev_hash" => {
if let Value::Bytes(b) = v { prev_hash_bytes = Some(b); }
}
_ => {}
}
}
let epoch = epoch.ok_or_else(|| CoreError::Mls("transcript: missing epoch".into()))?;
let seq = seq.ok_or_else(|| CoreError::Mls("transcript: missing seq".into()))?;
let timestamp_ms = timestamp_ms
.ok_or_else(|| CoreError::Mls("transcript: missing timestamp_ms".into()))?;
let plaintext = plaintext
.ok_or_else(|| CoreError::Mls("transcript: missing plaintext".into()))?;
let prev_hash_bytes = prev_hash_bytes
.ok_or_else(|| CoreError::Mls("transcript: missing prev_hash".into()))?;
let mut prev_hash = [0u8; 32];
if prev_hash_bytes.len() == 32 {
prev_hash.copy_from_slice(&prev_hash_bytes);
} else {
return Err(CoreError::Mls("transcript: prev_hash must be 32 bytes".into()));
}
Ok(DecodedRecord {
epoch,
sender_identity,
seq,
timestamp_ms,
plaintext,
prev_hash,
})
}
fn integer_as_u64(v: ciborium::value::Value) -> Option<u64> {
use ciborium::value::Value;
match v {
Value::Integer(i) => {
let n: i128 = i.into();
if n >= 0 { Some(n as u64) } else { None }
}
_ => None,
}
}
// ── Tests ────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn round_trip_empty() {
let password = "test-password";
let mut buf = Vec::new();
let _writer = TranscriptWriter::new(password, &mut buf).expect("new writer");
let (records, verdict) = read_transcript(password, &buf).expect("read");
assert!(records.is_empty());
assert_eq!(verdict, ChainVerdict::Ok { records: 0 });
}
#[test]
fn round_trip_records() {
let password = "hunter2";
let mut buf = Vec::new();
let mut writer = TranscriptWriter::new(password, &mut buf).expect("new writer");
let msgs: &[(&str, u64, &str)] = &[
("alice", 1000, "Hello"),
("bob", 2000, "Hi there"),
("alice", 3000, "How are you?"),
];
for (_sender, ts, body) in msgs {
let sender_key = [0u8; 32];
writer
.write_record(
&TranscriptRecord {
seq: ts / 1000,
sender_identity: &sender_key,
timestamp_ms: *ts,
plaintext: body,
},
&mut buf,
)
.expect("write record");
}
let (records, verdict) = read_transcript(password, &buf).expect("read");
assert_eq!(verdict, ChainVerdict::Ok { records: 3 });
assert_eq!(records.len(), 3);
assert_eq!(records[0].plaintext, "Hello");
assert_eq!(records[1].plaintext, "Hi there");
assert_eq!(records[2].plaintext, "How are you?");
assert_eq!(records[0].epoch, 0);
assert_eq!(records[1].epoch, 1);
assert_eq!(records[2].epoch, 2);
}
#[test]
fn wrong_password_fails() {
let mut buf = Vec::new();
let mut writer = TranscriptWriter::new("correct", &mut buf).expect("new writer");
writer
.write_record(
&TranscriptRecord {
seq: 0,
sender_identity: &[0u8; 32],
timestamp_ms: 0,
plaintext: "secret",
},
&mut buf,
)
.expect("write");
let result = read_transcript("wrong-password", &buf);
assert!(result.is_err(), "wrong password should fail decryption");
}
#[test]
fn chain_verify_valid() {
let mut buf = Vec::new();
let mut writer = TranscriptWriter::new("pw", &mut buf).expect("new writer");
for i in 0..5u64 {
writer
.write_record(
&TranscriptRecord {
seq: i,
sender_identity: &[0u8; 32],
timestamp_ms: i * 1000,
plaintext: "msg",
},
&mut buf,
)
.expect("write");
}
let verdict = verify_transcript_chain(&buf).expect("verify");
assert_eq!(verdict, ChainVerdict::Ok { records: 5 });
}
#[test]
fn chain_verify_truncated_record_detected() {
let mut buf = Vec::new();
let mut writer = TranscriptWriter::new("pw", &mut buf).expect("new writer");
writer
.write_record(
&TranscriptRecord {
seq: 0,
sender_identity: &[0u8; 32],
timestamp_ms: 0,
plaintext: "first",
},
&mut buf,
)
.expect("write");
// Truncate the last few bytes — should fail parsing.
let truncated = &buf[..buf.len() - 5];
let result = verify_transcript_chain(truncated);
assert!(result.is_err(), "truncated file must be detected");
}
}

View File

@@ -0,0 +1,13 @@
[package]
name = "quicproquo-gen"
version = "0.1.0"
edition = "2021"
description = "Code generators for quicproquo — scaffold plugins, bots, RPC methods, and hooks."
license = "MIT"
[[bin]]
name = "qpq-gen"
path = "src/main.rs"
[dependencies]
clap = { workspace = true }

View File

@@ -0,0 +1,212 @@
use std::fs;
use std::path::Path;
pub fn generate(name: &str, output: &Path) -> Result<(), String> {
let crate_name = sanitize_name(name);
let dir = output.join(&crate_name);
if dir.exists() {
return Err(format!("directory already exists: {}", dir.display()));
}
let src_dir = dir.join("src");
fs::create_dir_all(&src_dir).map_err(|e| format!("create dir: {e}"))?;
// Cargo.toml
let cargo_toml = format!(
r#"[package]
name = "{crate_name}"
version = "0.1.0"
edition = "2021"
description = "quicproquo bot: {name}"
license = "MIT"
[dependencies]
quicproquo-bot = {{ git = "https://github.com/nickvidal/quicproquo" }}
tokio = {{ version = "1", features = ["macros", "rt-multi-thread"] }}
anyhow = "1"
tracing = "0.1"
tracing-subscriber = {{ version = "0.3", features = ["env-filter"] }}
"#,
crate_name = crate_name,
name = name,
);
write_file(&dir.join("Cargo.toml"), &cargo_toml)?;
// src/main.rs
let main_rs = format!(
r#"//! quicproquo bot: {name}
//!
//! A bot that connects to a quicproquo server and responds to messages.
//!
//! Usage:
//! {crate_name} --server 127.0.0.1:7000 --username my-bot --password secret
//!
//! Environment variables (alternative to CLI args):
//! QPQ_SERVER, QPQ_USERNAME, QPQ_PASSWORD, QPQ_CA_CERT, QPQ_STATE_PATH
use quicproquo_bot::{{Bot, BotConfig}};
#[tokio::main]
async fn main() -> anyhow::Result<()> {{
tracing_subscriber::fmt()
.with_env_filter(
tracing_subscriber::EnvFilter::try_from_default_env()
.unwrap_or_else(|_| "info".into()),
)
.init();
// --- Configuration ---
let server = env_or("QPQ_SERVER", "127.0.0.1:7000");
let username = env_or("QPQ_USERNAME", "{crate_name}");
let password = env_or("QPQ_PASSWORD", "changeme");
let ca_cert = env_or("QPQ_CA_CERT", "server-cert.der");
let state_path = env_or("QPQ_STATE_PATH", "{crate_name}-state.bin");
let config = BotConfig::new(&server, &username, &password)
.ca_cert(&ca_cert)
.state_path(&state_path);
// --- Connect and authenticate ---
tracing::info!("connecting to {{server}} as {{username}}...");
let bot = Bot::connect(config).await?;
tracing::info!("authenticated as {{}} (key: {{}})", bot.username(), &bot.identity_key_hex()[..16]);
// --- Main loop: poll for messages and respond ---
tracing::info!("listening for messages (Ctrl+C to stop)...");
loop {{
let messages = bot.receive(5000).await?;
for msg in messages {{
tracing::info!("[{{}}] {{}}", msg.sender, msg.text);
// --- Add your command handlers here ---
if let Some(response) = handle_message(&msg.sender, &msg.text) {{
bot.send_dm(&msg.sender, &response).await?;
}}
}}
}}
}}
/// Process an incoming message and optionally return a response.
///
/// Add your bot's command logic here.
fn handle_message(sender: &str, text: &str) -> Option<String> {{
let text = text.trim();
// !help — list available commands
if text == "!help" {{
return Some(
"Available commands:\n\
!help — show this message\n\
!echo <text> — echo back the text\n\
!whoami — show your username\n\
!ping — pong!"
.to_string(),
);
}}
// !echo <text> — echo back
if let Some(rest) = text.strip_prefix("!echo ") {{
return Some(rest.to_string());
}}
// !whoami — tell the sender their username
if text == "!whoami" {{
return Some(format!("You are {{sender}}"));
}}
// !ping — respond with pong
if text == "!ping" {{
return Some("pong!".to_string());
}}
// Unknown command or regular message — no response
None
}}
fn env_or(key: &str, default: &str) -> String {{
std::env::var(key).unwrap_or_else(|_| default.to_string())
}}
"#,
name = name,
crate_name = crate_name,
);
write_file(&src_dir.join("main.rs"), &main_rs)?;
// README
let readme = format!(
r#"# {name} — quicproquo bot
## Quick start
```bash
# Build
cargo build
# Run (make sure a qpq server is running)
QPQ_SERVER=127.0.0.1:7000 \
QPQ_USERNAME={crate_name} \
QPQ_PASSWORD=changeme \
QPQ_CA_CERT=path/to/server-cert.der \
cargo run
```
## Commands
| Command | Description |
|---------|-------------|
| `!help` | Show available commands |
| `!echo <text>` | Echo back the text |
| `!whoami` | Show your username |
| `!ping` | Respond with "pong!" |
## Adding commands
Edit the `handle_message` function in `src/main.rs`:
```rust
fn handle_message(sender: &str, text: &str) -> Option<String> {{
if text == "!mycommand" {{
return Some("my response".to_string());
}}
None
}}
```
## Pipe mode
For shell integration, use the Bot SDK's JSON pipe mode:
```bash
echo '{{"action":"send","to":"alice","text":"hello"}}' | my-bot
echo '{{"action":"recv","timeout_ms":5000}}' | my-bot
```
## Documentation
- [Bot SDK docs](https://github.com/nickvidal/quicproquo/blob/main/docs/src/getting-started/bot-sdk.md)
- [Server Hooks](https://github.com/nickvidal/quicproquo/blob/main/docs/src/internals/server-hooks.md)
"#,
name = name,
crate_name = crate_name,
);
write_file(&dir.join("README.md"), &readme)?;
println!("Created bot project: {}", dir.display());
println!();
println!(" cd {crate_name}");
println!(" # Edit src/main.rs to add your commands");
println!(" QPQ_SERVER=127.0.0.1:7000 QPQ_PASSWORD=secret cargo run");
println!();
println!("The bot responds to !help, !echo, !whoami, !ping out of the box.");
Ok(())
}
fn sanitize_name(name: &str) -> String {
name.replace(['-', ' '], "_")
}
fn write_file(path: &Path, content: &str) -> Result<(), String> {
fs::write(path, content).map_err(|e| format!("write {}: {e}", path.display()))
}

View File

@@ -0,0 +1,134 @@
pub fn generate(name: &str) -> Result<(), String> {
let snake = name.to_lowercase().replace(['-', ' '], "_");
let pascal = to_pascal_case(&snake);
println!("=== Adding hook event: on_{snake} ===");
println!();
println!("Follow these steps to add a new `on_{snake}` hook event.");
println!();
// Step 1: Event struct
println!("--- Step 1: Event struct ---");
println!("File: crates/quicproquo-server/src/hooks.rs");
println!();
println!(
r#"/// Event data for {snake} operations.
#[derive(Clone, Debug)]
pub struct {pascal}Event {{
// TODO: add your event fields here
// Example:
// pub channel_id: Vec<u8>,
// pub user_key: Vec<u8>,
}}
"#,
);
// Step 2: Trait method
println!("--- Step 2: Trait method ---");
println!("File: crates/quicproquo-server/src/hooks.rs");
println!();
println!("Add to the `ServerHooks` trait:");
println!();
println!(
r#" /// Called when {snake} occurs.
fn on_{snake}(&self, _event: &{pascal}Event) {{
// Default: no-op
}}
"#,
);
// Step 3: TracingHooks implementation
println!("--- Step 3: TracingHooks implementation ---");
println!("File: crates/quicproquo-server/src/hooks.rs");
println!();
println!("Add to `impl ServerHooks for TracingHooks`:");
println!();
println!(
r#" fn on_{snake}(&self, _event: &{pascal}Event) {{
tracing::info!("hook: {snake}");
}}
"#,
);
// Step 4: Plugin API (C-compatible struct)
println!("--- Step 4: Plugin API ---");
println!("File: crates/quicproquo-plugin-api/src/lib.rs");
println!();
println!("Add a C-compatible event struct:");
println!();
println!(
r#"#[repr(C)]
pub struct C{pascal}Event {{
// TODO: mirror the fields from {pascal}Event using C-compatible types
// Use *const u8 + len for byte slices, *const c_char for strings
}}
"#,
);
println!("Add to `HookVTable`:");
println!();
println!(
r#" pub on_{snake}: Option<extern "C" fn(*mut c_void, *const C{pascal}Event)>,
"#,
);
// Step 5: Wire into PluginHooks
println!("--- Step 5: PluginHooks dispatch ---");
println!("File: crates/quicproquo-server/src/plugin_loader.rs");
println!();
println!("Add to `impl ServerHooks for PluginHooks`:");
println!();
println!(
r#" fn on_{snake}(&self, event: &{pascal}Event) {{
if let Some(hook_fn) = self.vtable.on_{snake} {{
let c_event = C{pascal}Event {{
// TODO: convert fields
}};
hook_fn(self.vtable.user_data, &c_event);
}}
}}
"#,
);
// Step 6: Call the hook
println!("--- Step 6: Call the hook in the RPC handler ---");
println!("In the relevant handler file under crates/quicproquo-server/src/node_service/:");
println!();
println!(
r#" use crate::hooks::{pascal}Event;
// At the appropriate point in the handler:
self.hooks.on_{snake}(&{pascal}Event {{
// fill in fields
}});
"#,
);
// Step 7: Verify
println!("--- Step 7: Verify ---");
println!(" cargo build -p quicproquo-plugin-api");
println!(" cargo build -p quicproquo-server");
println!(" cargo test -p quicproquo-server");
println!();
// Summary
println!("=== Files to modify ===");
println!(" [modify] crates/quicproquo-server/src/hooks.rs");
println!(" [modify] crates/quicproquo-plugin-api/src/lib.rs");
println!(" [modify] crates/quicproquo-server/src/plugin_loader.rs");
println!(" [modify] crates/quicproquo-server/src/node_service/<handler>.rs");
Ok(())
}
fn to_pascal_case(snake: &str) -> String {
snake
.split('_')
.map(|word| {
let mut chars = word.chars();
match chars.next() {
None => String::new(),
Some(c) => c.to_uppercase().to_string() + chars.as_str(),
}
})
.collect()
}

View File

@@ -0,0 +1,4 @@
pub mod bot;
pub mod hook;
pub mod plugin;
pub mod rpc;

View File

@@ -0,0 +1,186 @@
use std::fs;
use std::path::Path;
pub fn generate(name: &str, output: &Path) -> Result<(), String> {
let crate_name = sanitize_name(name);
let dir = output.join(&crate_name);
if dir.exists() {
return Err(format!("directory already exists: {}", dir.display()));
}
let src_dir = dir.join("src");
fs::create_dir_all(&src_dir).map_err(|e| format!("create dir: {e}"))?;
// Cargo.toml
let cargo_toml = format!(
r#"[package]
name = "{crate_name}"
version = "0.1.0"
edition = "2021"
description = "quicproquo server plugin: {name}"
license = "MIT"
[lib]
crate-type = ["cdylib"]
# Empty workspace — this plugin builds independently of the qpq workspace.
[workspace]
[dependencies]
quicproquo-plugin-api = {{ git = "https://github.com/nickvidal/quicproquo", default-features = false }}
"#,
crate_name = crate_name,
name = name,
);
write_file(&dir.join("Cargo.toml"), &cargo_toml)?;
// src/lib.rs
let lib_rs = format!(
r#"//! quicproquo server plugin: {name}
//!
//! Build with: cargo build --release
//! Install: cp target/release/lib{crate_name}.so /path/to/plugins/
//! The server loads it automatically when started with --plugin-dir.
use quicproquo_plugin_api::{{HookVTable, CMessageEvent, HOOK_CONTINUE, HOOK_REJECT, PLUGIN_OK}};
use std::ffi::CString;
use std::os::raw::c_int;
/// Plugin state — allocate on the heap in init, free in destroy.
struct PluginState {{
/// Example: maximum allowed payload size in bytes.
max_payload_bytes: usize,
/// Stored rejection message (must outlive the hook call).
reject_msg: Option<CString>,
}}
/// Called by the server on plugin load.
///
/// Fill the vtable with your hook implementations. Return PLUGIN_OK on success.
#[no_mangle]
pub extern "C" fn qpq_plugin_init(vtable: *mut HookVTable) -> c_int {{
let state = Box::new(PluginState {{
max_payload_bytes: 1_000_000, // 1 MB limit
reject_msg: None,
}});
let vt = unsafe {{ &mut *vtable }};
vt.user_data = Box::into_raw(state) as *mut _;
vt.on_message_enqueue = Some(on_message_enqueue);
vt.error_message = Some(error_message);
vt.destroy = Some(destroy);
eprintln!("[{name}] plugin loaded");
PLUGIN_OK
}}
/// Hook: called before each message is stored in the delivery queue.
///
/// Return HOOK_CONTINUE to allow, HOOK_REJECT to block.
extern "C" fn on_message_enqueue(
user_data: *mut std::ffi::c_void,
event: *const CMessageEvent,
) -> c_int {{
let state = unsafe {{ &mut *(user_data as *mut PluginState) }};
let event = unsafe {{ &*event }};
if event.payload_len > state.max_payload_bytes {{
let msg = format!(
"payload too large: {{}} > {{}} bytes",
event.payload_len, state.max_payload_bytes
);
state.reject_msg = CString::new(msg).ok();
return HOOK_REJECT;
}}
HOOK_CONTINUE
}}
/// Return a pointer to the rejection error message (valid until next hook call).
extern "C" fn error_message(
user_data: *mut std::ffi::c_void,
) -> *const std::os::raw::c_char {{
let state = unsafe {{ &*(user_data as *const PluginState) }};
match &state.reject_msg {{
Some(msg) => msg.as_ptr(),
None => std::ptr::null(),
}}
}}
/// Cleanup: free the plugin state.
extern "C" fn destroy(user_data: *mut std::ffi::c_void) {{
if !user_data.is_null() {{
unsafe {{ drop(Box::from_raw(user_data as *mut PluginState)) }};
}}
eprintln!("[{name}] plugin unloaded");
}}
"#,
name = name,
crate_name = crate_name,
);
write_file(&src_dir.join("lib.rs"), &lib_rs)?;
// README
let readme = format!(
r#"# {name} — quicproquo server plugin
## Build
```bash
cargo build --release
```
## Install
Copy the shared library to the server's plugin directory:
```bash
cp target/release/lib{crate_name}.so /path/to/plugins/
```
Start the server with:
```bash
qpq-server --plugin-dir /path/to/plugins/
```
## Hooks
This plugin implements `on_message_enqueue` to reject oversized payloads.
Edit `src/lib.rs` to add your own logic. Available hooks:
| Hook | Purpose |
|------|---------|
| `on_message_enqueue` | Inspect/reject messages before delivery (return `HOOK_REJECT`) |
| `on_batch_enqueue` | Observe batch message delivery |
| `on_auth` | Observe login success/failure |
| `on_channel_created` | Observe channel creation |
| `on_fetch` | Observe message fetch operations |
| `on_user_registered` | Observe new user registration |
See the [Server Hooks documentation](https://github.com/nickvidal/quicproquo/blob/main/docs/src/internals/server-hooks.md) for details.
"#,
name = name,
crate_name = crate_name,
);
write_file(&dir.join("README.md"), &readme)?;
println!("Created plugin project: {}", dir.display());
println!();
println!(" cd {crate_name}");
println!(" cargo build --release");
println!(" cp target/release/lib{crate_name}.so /path/to/plugins/");
println!();
println!("Edit src/lib.rs to implement your hook logic.");
Ok(())
}
fn sanitize_name(name: &str) -> String {
name.replace(['-', ' '], "_")
}
fn write_file(path: &Path, content: &str) -> Result<(), String> {
fs::write(path, content).map_err(|e| format!("write {}: {e}", path.display()))
}

View File

@@ -0,0 +1,129 @@
pub fn generate(name: &str) -> Result<(), String> {
let snake = to_snake_case(name);
let pascal = to_pascal_case(name);
let camel = name.to_string();
println!("=== Adding RPC method: {camel} ===");
println!();
println!("Follow these steps to add a new `{camel}` RPC method.");
println!("Each step shows the file and the code to add.");
println!();
// Step 1: Schema
println!("--- Step 1: Cap'n Proto schema ---");
println!("File: schemas/node.capnp");
println!();
println!("Add to the `interface NodeService` block:");
println!();
println!(
r#" {camel} @N (auth :AuthContext, <your params here>) -> (<your results here>);
"#,
);
println!(" (Replace @N with the next ordinal number in the interface.)");
println!();
println!("Then rebuild the proto crate:");
println!(" cargo build -p quicproquo-proto");
println!();
// Step 2: Handler module
println!("--- Step 2: Handler module ---");
println!("File: crates/quicproquo-server/src/node_service/{snake}.rs");
println!();
println!(
r#"use capnp::capability::Promise;
use quicproquo_proto::node_capnp::node_service;
use crate::auth::{{coded_error, validate_auth_context}};
use crate::error_codes::*;
use super::NodeServiceImpl;
impl NodeServiceImpl {{
pub fn handle_{snake}(
&mut self,
params: node_service::{pascal}Params,
mut results: node_service::{pascal}Results,
) -> Promise<(), capnp::Error> {{
let p = match params.get() {{
Ok(p) => p,
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
}};
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {{
Ok(ctx) => ctx,
Err(e) => return Promise::err(e),
}};
// TODO: implement your logic here
Promise::ok(())
}}
}}
"#,
);
// Step 3: Wire into mod.rs
println!("--- Step 3: Register in mod.rs ---");
println!("File: crates/quicproquo-server/src/node_service/mod.rs");
println!();
println!("Add to the module declarations at the top:");
println!(" mod {snake};");
println!();
println!("Add to the `impl node_service::Server for NodeServiceImpl` block:");
println!();
println!(
r#" fn {snake}(
&mut self,
params: node_service::{pascal}Params,
results: node_service::{pascal}Results,
) -> capnp::capability::Promise<(), capnp::Error> {{
self.handle_{snake}(params, results)
}}
"#,
);
// Step 4: Storage (if needed)
println!("--- Step 4: Storage trait (if needed) ---");
println!("File: crates/quicproquo-server/src/storage.rs");
println!();
println!("If your RPC method needs persistent storage, add a method to the Store trait:");
println!();
println!(
r#" fn {snake}(&self, /* params */) -> Result</* return */, StorageError>;
"#,
);
println!("Then implement it in:");
println!(" - crates/quicproquo-server/src/sql_store.rs (SQLite backend)");
println!(" - crates/quicproquo-server/src/storage.rs (FileBackedStore)");
println!();
// Step 5: Hook (if needed)
println!("--- Step 5: Hook event (optional) ---");
println!("If you want plugins to observe this RPC, run:");
println!(" qpq-gen hook {snake}");
println!();
// Step 6: Verify
println!("--- Step 6: Verify ---");
println!(" cargo build -p quicproquo-server");
println!(" cargo test -p quicproquo-server");
println!();
// Summary
println!("=== Files to create/modify ===");
println!(" [modify] schemas/node.capnp");
println!(" [create] crates/quicproquo-server/src/node_service/{snake}.rs");
println!(" [modify] crates/quicproquo-server/src/node_service/mod.rs");
println!(" [modify] crates/quicproquo-server/src/storage.rs (if needed)");
println!(" [modify] crates/quicproquo-server/src/sql_store.rs (if needed)");
Ok(())
}
/// camelCase / PascalCase → snake_case (e.g. "listChannels" → "list_channels").
fn to_snake_case(s: &str) -> String {
let mut result = String::with_capacity(s.len() + 4);
for (i, ch) in s.chars().enumerate() {
if ch.is_uppercase() && i > 0 {
result.push('_');
}
result.push(ch.to_ascii_lowercase());
}
result
}
/// camelCase → PascalCase (e.g. "listChannels" → "ListChannels"), matching the
/// `Params` / `Results` type aliases capnp codegen derives from schema method names.
fn to_pascal_case(s: &str) -> String {
let mut c = s.chars();
match c.next() {
Some(first) => first.to_ascii_uppercase().to_string() + c.as_str(),
None => String::new(),
}
}

View File

@@ -0,0 +1,55 @@
use clap::{Parser, Subcommand};
use std::path::PathBuf;
mod generators;
#[derive(Parser)]
#[command(name = "qpq-gen", about = "Code generators for quicproquo")]
struct Cli {
#[command(subcommand)]
command: Command,
}
#[derive(Subcommand)]
enum Command {
/// Scaffold a new server plugin (dynamic .so/.dylib)
Plugin {
/// Plugin name (e.g. "rate-limiter", "audit-log")
name: String,
/// Output directory (default: current directory)
#[arg(short, long, default_value = ".")]
output: PathBuf,
},
/// Scaffold a new bot project using the Bot SDK
Bot {
/// Bot name (e.g. "echo-bot", "moderation-bot")
name: String,
/// Output directory (default: current directory)
#[arg(short, long, default_value = ".")]
output: PathBuf,
},
/// Show instructions for adding a new Cap'n Proto RPC method
Rpc {
/// RPC method name in camelCase (e.g. "listChannels")
name: String,
},
/// Show instructions for adding a new server hook event
Hook {
/// Hook event name in snake_case (e.g. "message_deleted")
name: String,
},
}
fn main() {
let cli = Cli::parse();
let result = match cli.command {
Command::Plugin { name, output } => generators::plugin::generate(&name, &output),
Command::Bot { name, output } => generators::bot::generate(&name, &output),
Command::Rpc { name } => generators::rpc::generate(&name),
Command::Hook { name } => generators::hook::generate(&name),
};
if let Err(e) = result {
eprintln!("error: {e}");
std::process::exit(1);
}
}

View File

@@ -0,0 +1,12 @@
[package]
name = "quicproquo-kt"
version = "0.1.0"
edition = "2021"
description = "Key Transparency: append-only SHA-256 Merkle log for (username, identity_key) bindings."
license = "MIT"
[dependencies]
sha2 = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }
bincode = { workspace = true }

View File

@@ -0,0 +1,13 @@
use thiserror::Error;
#[derive(Debug, Error)]
pub enum KtError {
#[error("leaf index {index} is out of range for tree size {tree_size}")]
IndexOutOfRange { index: u64, tree_size: u64 },
#[error("inclusion proof verification failed: root mismatch")]
RootMismatch,
#[error("serialisation error: {0}")]
Serialisation(String),
}

View File

@@ -0,0 +1,62 @@
//! Key Transparency: append-only SHA-256 Merkle log for (username, identity_key) bindings.
//!
//! # Design
//!
//! A lightweight subset of RFC 9162 (Certificate Transparency v2) adapted for identity keys:
//!
//! - Leaf nodes hash as: `SHA-256(0x00 || SHA-256(username || 0x00 || identity_key))`
//! - Internal nodes hash as: `SHA-256(0x01 || left_hash || right_hash)`
//!
//! The 0x00/0x01 domain-separation prefixes prevent second-preimage attacks on
//! the tree structure (RFC 6962 §2.1).
//!
//! ## Inclusion proof
//!
//! An inclusion proof for leaf at index `i` in a tree of `n` leaves is the list of
//! sibling hashes from leaf to root. The verifier recomputes the root from the leaf
//! hash + siblings and compares it to the known root.
//!
//! ## Wire format
//!
//! Inclusion proofs are serialised as `bincode(InclusionProof)` for transport over
//! the Cap'n Proto `inclusionProof :Data` field.
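//!
//! ## Example
//!
//! A minimal end-to-end sketch (append → prove → round-trip → verify) using the
//! types re-exported below; error handling elided:
//!
//! ```rust,ignore
//! use quicproquo_kt::{verify_inclusion, InclusionProof, MerkleLog};
//!
//! let mut log = MerkleLog::new();
//! let idx = log.append("alice", b"IDENTITY_KEY_BYTES");
//! let proof = log.inclusion_proof(idx)?;
//! // Round-trips through bincode, as on the `inclusionProof :Data` wire field.
//! let proof = InclusionProof::from_bytes(&proof.to_bytes()?)?;
//! verify_inclusion(&proof, "alice", b"IDENTITY_KEY_BYTES")?;
//! ```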
use sha2::{Digest, Sha256};
mod error;
mod proof;
mod tree;
pub use error::KtError;
pub use proof::{verify_inclusion, InclusionProof};
pub use tree::MerkleLog;
/// Domain-separation prefix for leaf nodes (RFC 6962 §2.1).
const LEAF_PREFIX: u8 = 0x00;
/// Domain-separation prefix for internal nodes.
const INTERNAL_PREFIX: u8 = 0x01;
/// SHA-256 of a leaf entry: `H(0x00 || H(username || 0x00 || identity_key))`.
pub fn leaf_hash(username: &str, identity_key: &[u8]) -> [u8; 32] {
// Inner hash commits to both fields with a 0x00 separator.
let mut inner = Sha256::new();
inner.update(username.as_bytes());
inner.update([0x00]);
inner.update(identity_key);
let inner_digest: [u8; 32] = inner.finalize().into();
// Outer hash adds the leaf domain-separation prefix.
let mut outer = Sha256::new();
outer.update([LEAF_PREFIX]);
outer.update(inner_digest);
outer.finalize().into()
}
/// SHA-256 of an internal node: `H(0x01 || left || right)`.
pub(crate) fn node_hash(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
let mut h = Sha256::new();
h.update([INTERNAL_PREFIX]);
h.update(left);
h.update(right);
h.finalize().into()
}

View File

@@ -0,0 +1,188 @@
//! Inclusion proof types and verification.
use serde::{Deserialize, Serialize};
use crate::{node_hash, KtError};
/// A single step in an inclusion proof path.
///
/// `hash` is the sibling hash; `sibling_is_left` is `true` when the sibling
/// is the left child (meaning the node being proved is the right child).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PathStep {
pub hash: [u8; 32],
pub sibling_is_left: bool,
}
/// A Merkle inclusion proof for a single leaf.
///
/// ## Wire format
///
/// Serialised with `bincode` and transported as the `inclusionProof :Data` field
/// in the `resolveUser` Cap'n Proto response. Clients call `verify_inclusion` to
/// authenticate the server's response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InclusionProof {
/// 0-based index of this leaf in the log.
pub leaf_index: u64,
/// Number of leaves in the tree at the time the proof was generated.
pub tree_size: u64,
/// The 32-byte leaf hash (pre-computed from `leaf_hash(username, identity_key)`).
pub leaf_hash: [u8; 32],
/// Path steps from leaf level to root level (leaf-to-root order).
pub path: Vec<PathStep>,
/// Merkle root at the time the proof was generated.
pub root: [u8; 32],
}
impl InclusionProof {
/// Serialise to bytes (bincode).
pub fn to_bytes(&self) -> Result<Vec<u8>, KtError> {
bincode::serialize(self)
.map_err(|e| KtError::Serialisation(e.to_string()))
}
/// Deserialise from bytes (bincode).
pub fn from_bytes(bytes: &[u8]) -> Result<Self, KtError> {
bincode::deserialize(bytes)
.map_err(|e| KtError::Serialisation(e.to_string()))
}
}
/// Verify that `(username, identity_key)` appears at `proof.leaf_index` in a
/// Merkle log with root `proof.root` and `proof.tree_size` leaves.
///
/// Returns `Ok(())` on success, `Err(KtError::RootMismatch)` on failure.
///
/// The caller should additionally check that `proof.root` matches a root they
/// obtained from a trusted source (e.g. a previously-pinned root or one returned
/// by a second server for cross-verification).
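///
/// ```rust,ignore
/// // Hedged sketch of that cross-check (client-side policy, not part of this
/// // crate; `fetch_root_from_auditor` is a hypothetical helper):
/// verify_inclusion(&proof, username, identity_key)?;
/// let audited_root = fetch_root_from_auditor(proof.tree_size)?;
/// if audited_root != proof.root {
/// return Err(KtError::RootMismatch);
/// }
/// ```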
pub fn verify_inclusion(
proof: &InclusionProof,
username: &str,
identity_key: &[u8],
) -> Result<(), KtError> {
let expected_leaf = crate::leaf_hash(username, identity_key);
if expected_leaf != proof.leaf_hash {
return Err(KtError::RootMismatch);
}
let computed_root = recompute_root(proof.leaf_hash, &proof.path)?;
if computed_root != proof.root {
return Err(KtError::RootMismatch);
}
Ok(())
}
/// Recompute the Merkle root from a leaf hash + direction-annotated sibling path.
///
/// Each `PathStep` records the sibling hash and whether that sibling is on the
/// left (meaning the current node is on the right). This is leaf-to-root order.
fn recompute_root(leaf: [u8; 32], path: &[PathStep]) -> Result<[u8; 32], KtError> {
let mut current = leaf;
for step in path {
current = if step.sibling_is_left {
// Sibling is left, current is right.
node_hash(&step.hash, &current)
} else {
// Sibling is right, current is left.
node_hash(&current, &step.hash)
};
}
Ok(current)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tree::MerkleLog;
fn log_with(entries: &[(&str, &[u8])]) -> MerkleLog {
let mut log = MerkleLog::new();
for (u, k) in entries {
log.append(u, k);
}
log
}
fn verify_all(log: &MerkleLog, entries: &[(&str, &[u8])]) {
for (i, (u, k)) in entries.iter().enumerate() {
let proof = log.inclusion_proof(i as u64).unwrap();
verify_inclusion(&proof, u, k).unwrap_or_else(|e| {
panic!("proof verification failed for leaf {i}: {e}");
});
}
}
#[test]
fn single_leaf_verifies() {
let log = log_with(&[("alice", b"KEY1")]);
verify_all(&log, &[("alice", b"KEY1")]);
}
#[test]
fn two_leaves_verify() {
let log = log_with(&[("alice", b"K1"), ("bob", b"K2")]);
verify_all(&log, &[("alice", b"K1"), ("bob", b"K2")]);
}
#[test]
fn three_leaves_verify() {
let log = log_with(&[("alice", b"K1"), ("bob", b"K2"), ("charlie", b"K3")]);
verify_all(&log, &[("alice", b"K1"), ("bob", b"K2"), ("charlie", b"K3")]);
}
#[test]
fn power_of_two_leaves_verify() {
let entries: Vec<(String, Vec<u8>)> = (0u8..8)
.map(|i| (format!("user{i}"), vec![i; 32]))
.collect();
let refs: Vec<(&str, &[u8])> = entries.iter().map(|(u, k)| (u.as_str(), k.as_slice())).collect();
let log = log_with(&refs);
verify_all(&log, &refs);
}
#[test]
fn seven_leaves_all_verify() {
let entries: Vec<(String, Vec<u8>)> = (0u8..7)
.map(|i| (format!("u{i}"), vec![i; 32]))
.collect();
let refs: Vec<(&str, &[u8])> = entries.iter().map(|(u, k)| (u.as_str(), k.as_slice())).collect();
let log = log_with(&refs);
verify_all(&log, &refs);
}
#[test]
fn wrong_identity_key_fails() {
let log = log_with(&[("alice", b"REAL_KEY")]);
let proof = log.inclusion_proof(0).unwrap();
assert!(matches!(
verify_inclusion(&proof, "alice", b"WRONG_KEY"),
Err(KtError::RootMismatch)
));
}
#[test]
fn tampered_sibling_fails() {
let log = log_with(&[("alice", b"K1"), ("bob", b"K2"), ("charlie", b"K3")]);
let mut proof = log.inclusion_proof(0).unwrap();
if !proof.path.is_empty() {
proof.path[0].hash[0] ^= 0xff;
}
assert!(matches!(
verify_inclusion(&proof, "alice", b"K1"),
Err(KtError::RootMismatch)
));
}
#[test]
fn proof_serialise_roundtrip() {
let log = log_with(&[("alice", b"K1"), ("bob", b"K2")]);
let proof = log.inclusion_proof(0).unwrap();
let bytes = proof.to_bytes().unwrap();
let proof2 = InclusionProof::from_bytes(&bytes).unwrap();
verify_inclusion(&proof2, "alice", b"K1").unwrap();
}
}

View File

@@ -0,0 +1,262 @@
//! Append-only Merkle log backed by a flat `Vec` of all leaf hashes.
//!
//! The tree structure is virtual — roots and paths are computed on-demand from the
//! leaf array. This keeps the storage footprint to `32 * n` bytes for `n` leaves.
use serde::{Deserialize, Serialize};
use crate::{leaf_hash, node_hash, KtError};
use crate::proof::{InclusionProof, PathStep};
/// An append-only Merkle log of `(username, identity_key)` leaf entries.
///
/// Internally stores only the 32-byte SHA-256 leaf hashes. Roots and inclusion
/// proofs are recomputed from the flat list on demand.
///
/// Persistence: the caller serialises the whole struct with `bincode` and stores
/// the bytes in the DB (`kt_log` table). The log is load-on-startup, append-on-write.
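///
/// ```rust,ignore
/// // A sketch of that load-on-startup, append-on-write cycle (`store` is the
/// // server's storage handle; `load_kt_log` / `save_kt_log` are its accessors):
/// let mut log = match store.load_kt_log()? {
/// Some(bytes) => MerkleLog::from_bytes(&bytes)?,
/// None => MerkleLog::new(),
/// };
/// log.append("alice", b"K1");
/// store.save_kt_log(log.to_bytes()?)?;
/// ```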
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct MerkleLog {
/// All leaf hashes in append order.
leaves: Vec<[u8; 32]>,
}
impl MerkleLog {
/// Create an empty log.
pub fn new() -> Self {
Self::default()
}
/// Number of leaves in the log.
pub fn len(&self) -> u64 {
self.leaves.len() as u64
}
/// Return `true` if the log has no leaves.
pub fn is_empty(&self) -> bool {
self.leaves.is_empty()
}
/// Append a `(username, identity_key)` binding and return the leaf's index.
///
/// The leaf hash is computed using the canonical formula:
/// `SHA-256(0x00 || SHA-256(username || 0x00 || identity_key))`.
pub fn append(&mut self, username: &str, identity_key: &[u8]) -> u64 {
let h = leaf_hash(username, identity_key);
let idx = self.leaves.len() as u64;
self.leaves.push(h);
idx
}
/// Return the current Merkle root hash, or `None` if the log is empty.
pub fn root(&self) -> Option<[u8; 32]> {
if self.leaves.is_empty() {
return None;
}
Some(merkle_root(&self.leaves))
}
/// Generate an inclusion proof for the leaf at `index`.
///
/// Returns `Err` if `index >= self.len()`.
pub fn inclusion_proof(&self, index: u64) -> Result<InclusionProof, KtError> {
let n = self.len();
if index >= n {
return Err(KtError::IndexOutOfRange { index, tree_size: n });
}
let raw_path = compute_path(&self.leaves, index as usize, self.leaves.len());
let path: Vec<PathStep> = raw_path
.into_iter()
.map(|(hash, sibling_is_left)| PathStep { hash, sibling_is_left })
.collect();
let root = merkle_root(&self.leaves);
Ok(InclusionProof {
leaf_index: index,
tree_size: n,
leaf_hash: self.leaves[index as usize],
path,
root,
})
}
/// Find the leaf index for a `(username, identity_key)` pair, if present.
///
/// O(n) scan — suitable for small logs. For large-scale deployments, a separate
/// username→leaf-index map would be maintained alongside the log (sketched below).
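///
/// ```rust,ignore
/// // Hedged sketch of such a side map (hypothetical; not part of this crate):
/// use std::collections::HashMap;
/// let mut log = MerkleLog::new();
/// let mut by_username: HashMap<String, u64> = HashMap::new();
/// let idx = log.append("alice", b"K1");
/// by_username.insert("alice".to_owned(), idx);
/// assert_eq!(by_username.get("alice"), Some(&idx));
/// ```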
pub fn find(&self, username: &str, identity_key: &[u8]) -> Option<u64> {
let target = leaf_hash(username, identity_key);
self.leaves
.iter()
.position(|h| h == &target)
.map(|i| i as u64)
}
/// Serialise the log to bytes (bincode).
pub fn to_bytes(&self) -> Result<Vec<u8>, KtError> {
bincode::serialize(self)
.map_err(|e| KtError::Serialisation(e.to_string()))
}
/// Deserialise a log from bytes (bincode).
pub fn from_bytes(bytes: &[u8]) -> Result<Self, KtError> {
bincode::deserialize(bytes)
.map_err(|e| KtError::Serialisation(e.to_string()))
}
}
/// Compute the Merkle root over a non-empty slice of leaf hashes.
///
/// Uses RFC 9162 §2.1 balanced tree construction: when the number of leaves is
/// odd, the rightmost leaf is promoted (not duplicated — that's vulnerable to
/// second-preimage attacks). Specifically:
///
/// - `MTH({d[0]}) = H(0x00 || d[0])` (already computed as `leaf_hash`)
/// - `MTH(D[n]) = H(0x01 || MTH(D[0..k]) || MTH(D[k..n]))` where `k` is the
/// largest power of two strictly less than `n`.
///
/// This is a standard SHA-256 Merkle tree — the leaves are already hashed
/// so the recursion just applies the internal-node formula.
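///
/// Worked example for `n = 3`: `k = 2`, so
/// `root = H(0x01 || H(0x01 || leaf0 || leaf1) || leaf2)` — the odd rightmost
/// leaf is promoted one level instead of being paired with a copy of itself.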
pub(crate) fn merkle_root(leaves: &[[u8; 32]]) -> [u8; 32] {
match leaves.len() {
0 => unreachable!("merkle_root called on empty slice"),
1 => leaves[0],
n => {
let k = largest_power_of_two_less_than(n);
let left = merkle_root(&leaves[..k]);
let right = merkle_root(&leaves[k..]);
node_hash(&left, &right)
}
}
}
/// Compute the path (list of `(sibling_hash, sibling_is_on_left)`) from
/// `leaf_idx` to the root, in leaf-to-root order.
///
/// `sibling_is_on_left` is `true` when the sibling is the LEFT child of their
/// common parent, i.e., the current node being proved is on the RIGHT.
pub(crate) fn compute_path(
leaves: &[[u8; 32]],
leaf_idx: usize,
n: usize,
) -> Vec<([u8; 32], bool)> {
let mut path = Vec::new();
collect_path(&leaves[..n], leaf_idx, &mut path);
path
}
/// Recurse into the subtree `leaves` (already sub-sliced to the right window).
fn collect_path(
leaves: &[[u8; 32]],
leaf_idx: usize,
path: &mut Vec<([u8; 32], bool)>,
) {
let n = leaves.len();
if n <= 1 {
return;
}
let k = largest_power_of_two_less_than(n);
if leaf_idx < k {
// Leaf is in the left subtree; sibling is the right subtree.
collect_path(&leaves[..k], leaf_idx, path);
let right_root = merkle_root(&leaves[k..]);
path.push((right_root, false)); // sibling is on the RIGHT
} else {
// Leaf is in the right subtree; sibling is the left subtree.
collect_path(&leaves[k..], leaf_idx - k, path);
let left_root = merkle_root(&leaves[..k]);
path.push((left_root, true)); // sibling is on the LEFT
}
}
/// Largest power of two strictly less than `n`.
/// Panics if `n < 2`.
fn largest_power_of_two_less_than(n: usize) -> usize {
assert!(n >= 2, "n must be >= 2");
let mut k = 1usize;
while k * 2 < n {
k *= 2;
}
k
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn empty_log_has_no_root() {
let log = MerkleLog::new();
assert_eq!(log.root(), None);
assert_eq!(log.len(), 0);
}
#[test]
fn single_leaf_root_equals_leaf_hash() {
let mut log = MerkleLog::new();
log.append("alice", b"A" as &[u8]);
let lh = leaf_hash("alice", b"A");
assert_eq!(log.root(), Some(lh));
}
#[test]
fn append_returns_correct_index() {
let mut log = MerkleLog::new();
assert_eq!(log.append("a", b"k1"), 0);
assert_eq!(log.append("b", b"k2"), 1);
assert_eq!(log.append("c", b"k3"), 2);
assert_eq!(log.len(), 3);
}
#[test]
fn root_changes_on_append() {
let mut log = MerkleLog::new();
log.append("alice", b"K1");
let root1 = log.root();
log.append("bob", b"K2");
let root2 = log.root();
assert_ne!(root1, root2);
}
#[test]
fn find_returns_correct_index() {
let mut log = MerkleLog::new();
log.append("alice", b"K1");
log.append("bob", b"K2");
log.append("charlie", b"K3");
assert_eq!(log.find("bob", b"K2"), Some(1));
assert_eq!(log.find("missing", b""), None);
}
#[test]
fn inclusion_proof_out_of_range() {
let mut log = MerkleLog::new();
log.append("alice", b"K");
assert!(matches!(
log.inclusion_proof(1),
Err(KtError::IndexOutOfRange { .. })
));
}
#[test]
fn serialise_roundtrip() {
let mut log = MerkleLog::new();
log.append("alice", b"K1");
log.append("bob", b"K2");
let bytes = log.to_bytes().unwrap();
let log2 = MerkleLog::from_bytes(&bytes).unwrap();
assert_eq!(log2.root(), log.root());
assert_eq!(log2.len(), log.len());
}
#[test]
fn largest_power_of_two_less_than_values() {
assert_eq!(largest_power_of_two_less_than(2), 1);
assert_eq!(largest_power_of_two_less_than(3), 2);
assert_eq!(largest_power_of_two_less_than(4), 2);
assert_eq!(largest_power_of_two_less_than(5), 4);
assert_eq!(largest_power_of_two_less_than(8), 4);
assert_eq!(largest_power_of_two_less_than(9), 8);
}
}

View File

@@ -0,0 +1,9 @@
[package]
name = "quicproquo-plugin-api"
version = "0.1.0"
edition = "2021"
description = "C-ABI vtable for quicproquo server plugins. No std dependency; usable from bare-metal plugin authors."
license = "MIT"
# No dependencies — intentionally minimal so plugin authors have zero forced transitive deps.
[dependencies]

View File

@@ -0,0 +1,190 @@
//! quicproquo server plugin API — C-ABI vtable.
//!
//! # Overview
//!
//! Every plugin is a `cdylib` that exports one symbol:
//!
//! ```c
//! extern "C" int32_t qpq_plugin_init(HookVTable *vtable);
//! ```
//!
//! The server passes a zeroed [`HookVTable`] to `qpq_plugin_init`. The plugin
//! fills in whichever function pointers it cares about and returns `0` on
//! success or a negative error code on failure. Unused slots remain null and
//! the server treats them as no-ops.
//!
//! # Wire types
//!
//! All event structs are passed by const pointer across the FFI boundary. The
//! server owns the memory; plugin code must not store these pointers beyond the
//! duration of the callback.
//!
//! # Thread safety
//!
//! Hook callbacks are called from the Tokio worker thread that handles the RPC.
//! Plugins must be `Send + Sync` in practice (the server wraps them in `Arc`).
//! Global plugin state should be guarded with `Mutex` or `RwLock` if mutable.
//!
//! # Return values
//!
//! Hooks that can reject an operation return an `i32` status: [`HOOK_CONTINUE`]
//! or [`HOOK_REJECT`]. The server maps `HOOK_CONTINUE` to `HookAction::Continue`
//! and any other value to `HookAction::Reject` with the reason string from
//! [`HookVTable::error_message`].
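//!
//! # Minimal example
//!
//! A sketch of the smallest useful plugin, using only the names declared below
//! (the safe-to-unsafe `extern "C" fn` coercion is standard Rust):
//!
//! ```rust,ignore
//! use quicproquo_plugin_api::{CAuthEvent, HookVTable, PLUGIN_OK};
//!
//! extern "C" fn on_auth(_ud: *mut core::ffi::c_void, _event: *const CAuthEvent) {
//! // Observe login attempts; fire-and-forget, no return value.
//! }
//!
//! #[no_mangle]
//! pub extern "C" fn qpq_plugin_init(vtable: *mut HookVTable) -> i32 {
//! let vt = unsafe { &mut *vtable };
//! vt.on_auth = Some(on_auth);
//! PLUGIN_OK
//! }
//! ```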
#![no_std]
/// Plugin init returned success.
pub const PLUGIN_OK: i32 = 0;
/// Hook should allow the operation to proceed.
pub const HOOK_CONTINUE: i32 = 0;
/// Hook wants to reject the operation. Fill [`HookVTable::error_message`] with
/// a null-terminated reason before returning this.
pub const HOOK_REJECT: i32 = 1;
// ── Event structs (C-compatible) ─────────────────────────────────────────────
/// Event data for message enqueue operations.
///
/// Passed by pointer to [`HookVTable::on_message_enqueue`].
/// All pointer fields are valid for the duration of the callback only.
#[repr(C)]
pub struct CMessageEvent {
/// Sender's Ed25519 identity key (32 bytes), or null if sealed sender.
pub sender_identity: *const u8,
/// Length of `sender_identity`; 0 when null.
pub sender_identity_len: usize,
/// Recipient's Ed25519 identity key (32 bytes).
pub recipient_key: *const u8,
pub recipient_key_len: usize,
/// Channel ID (16 bytes).
pub channel_id: *const u8,
pub channel_id_len: usize,
/// Length of the encrypted payload.
pub payload_len: usize,
/// Server-assigned sequence number.
pub seq: u64,
}
/// Event data for authentication operations.
#[repr(C)]
pub struct CAuthEvent {
/// Username bytes (UTF-8); length in `username_len`.
pub username: *const u8,
pub username_len: usize,
/// Non-zero on success.
pub success: i32,
/// Failure reason bytes (UTF-8, empty on success); length in `failure_reason_len`.
pub failure_reason: *const u8,
pub failure_reason_len: usize,
}
/// Event data for channel creation operations.
#[repr(C)]
pub struct CChannelEvent {
pub channel_id: *const u8,
pub channel_id_len: usize,
pub initiator_key: *const u8,
pub initiator_key_len: usize,
pub peer_key: *const u8,
pub peer_key_len: usize,
/// Non-zero if this is a freshly created channel.
pub was_new: i32,
}
/// Event data for message fetch operations.
#[repr(C)]
pub struct CFetchEvent {
pub recipient_key: *const u8,
pub recipient_key_len: usize,
pub channel_id: *const u8,
pub channel_id_len: usize,
pub message_count: usize,
}
// ── HookVTable ────────────────────────────────────────────────────────────────
/// C-ABI function-pointer table filled by [`qpq_plugin_init`].
///
/// All fields default to null (no-op). The server only calls a slot when its
/// pointer is non-null. The `user_data` field is passed as the first argument
/// to every hook; use it to thread plugin-private state without global variables.
#[repr(C)]
pub struct HookVTable {
/// Opaque pointer to plugin-private state. The server passes this as the
/// first argument to every hook callback. May be null.
pub user_data: *mut core::ffi::c_void,
/// Called before a message is stored. Return [`HOOK_CONTINUE`] or
/// [`HOOK_REJECT`]. On reject, populate `error_message`.
pub on_message_enqueue: Option<
unsafe extern "C" fn(
user_data: *mut core::ffi::c_void,
event: *const CMessageEvent,
) -> i32,
>,
/// Called after a batch of messages is enqueued (fire-and-forget, no return value).
pub on_batch_enqueue: Option<
unsafe extern "C" fn(
user_data: *mut core::ffi::c_void,
events: *const CMessageEvent,
count: usize,
),
>,
/// Called after a login attempt (fire-and-forget).
pub on_auth: Option<
unsafe extern "C" fn(
user_data: *mut core::ffi::c_void,
event: *const CAuthEvent,
),
>,
/// Called after a channel is created or looked up (fire-and-forget).
pub on_channel_created: Option<
unsafe extern "C" fn(
user_data: *mut core::ffi::c_void,
event: *const CChannelEvent,
),
>,
/// Called after messages are fetched (fire-and-forget).
pub on_fetch: Option<
unsafe extern "C" fn(
user_data: *mut core::ffi::c_void,
event: *const CFetchEvent,
),
>,
/// Called after a user completes OPAQUE registration (fire-and-forget).
pub on_user_registered: Option<
unsafe extern "C" fn(
user_data: *mut core::ffi::c_void,
username: *const u8,
username_len: usize,
identity_key: *const u8,
identity_key_len: usize,
),
>,
/// When a hook returns [`HOOK_REJECT`], the server calls this to retrieve
/// the null-terminated rejection reason. The returned pointer must remain
/// valid until the next call on the same `user_data`. May be null (server
/// will use a generic message).
pub error_message: Option<
unsafe extern "C" fn(user_data: *mut core::ffi::c_void) -> *const u8,
>,
/// Called by the server when it is done with this plugin (shutdown).
/// Release resources / join threads here. May be null.
pub destroy: Option<unsafe extern "C" fn(user_data: *mut core::ffi::c_void)>,
}
// Safety: user_data is an opaque pointer managed by the plugin. The plugin is
// responsible for its own thread safety. The server only calls hook functions
// one at a time per plugin (wrapped in a single Arc). Plugins that mutate
// user_data through callbacks must use interior mutability.
unsafe impl Send for HookVTable {}
unsafe impl Sync for HookVTable {}

View File

@@ -12,6 +12,11 @@ path = "src/main.rs"
[dependencies]
quicproquo-core = { path = "../quicproquo-core" }
quicproquo-proto = { path = "../quicproquo-proto" }
quicproquo-plugin-api = { path = "../quicproquo-plugin-api" }
quicproquo-kt = { path = "../quicproquo-kt" }
# Dynamic plugin loading
libloading = "0.8"
# Serialisation + RPC
capnp = { workspace = true }

View File

@@ -0,0 +1,4 @@
CREATE TABLE IF NOT EXISTS server_signing_key (
id INTEGER PRIMARY KEY CHECK (id = 1),
seed_data BLOB NOT NULL
);

View File

@@ -0,0 +1,4 @@
CREATE TABLE IF NOT EXISTS kt_log (
id INTEGER PRIMARY KEY CHECK (id = 1),
log_data BLOB NOT NULL
);

View File

@@ -31,6 +31,8 @@ pub struct FileConfig {
#[serde(default)]
pub metrics_enabled: Option<bool>,
pub federation: Option<FederationFileConfig>,
/// Directory containing plugin `.so` / `.dylib` files to load at startup.
pub plugin_dir: Option<PathBuf>,
}
#[derive(Debug)]
@@ -51,6 +53,8 @@ pub struct EffectiveConfig {
/// Start metrics server only when true and metrics_listen is set.
pub metrics_enabled: bool,
pub federation: Option<EffectiveFederationConfig>,
/// Directory to scan for plugin `.so` / `.dylib` files at startup. None = no plugins.
pub plugin_dir: Option<PathBuf>,
}
#[derive(Debug, Default, Deserialize)]
@@ -214,6 +218,8 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
}
};
let plugin_dir = args.plugin_dir.clone().or_else(|| file.plugin_dir.clone());
EffectiveConfig {
listen,
data_dir,
@@ -228,6 +234,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
metrics_listen,
metrics_enabled,
federation,
plugin_dir,
}
}

View File

@@ -0,0 +1,198 @@
//! Server-side plugin hooks for extending quicproquo.
//!
//! Implement the [`ServerHooks`] trait to intercept server events — message delivery,
//! authentication, channel creation, and more. The enqueue hook fires after validation
//! but before storage, so it can inspect, log, or reject a message; the remaining
//! hooks are observational (fire-and-forget).
//!
//! # Built-in implementations
//!
//! - [`NoopHooks`] — does nothing (default when no hooks are configured)
//! - [`TracingHooks`] — logs all events via `tracing` at info/debug level
//!
//! # Writing a custom hook
//!
//! ```rust,ignore
//! use quicproquo_server::hooks::{ServerHooks, HookAction, MessageEvent};
//!
//! struct ModeratorHook {
//! banned_words: Vec<String>,
//! }
//!
//! impl ServerHooks for ModeratorHook {
//! fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
//! // Can't inspect encrypted content (E2E), but can enforce rate limits,
//! // payload size limits, or sender restrictions.
//! if event.payload_len > 1_000_000 {
//! return HookAction::Reject("payload too large".into());
//! }
//! HookAction::Continue
//! }
//! }
//! ```
/// The result of a hook invocation.
#[derive(Clone, Debug)]
pub enum HookAction {
/// Allow the operation to proceed.
Continue,
/// Reject the operation with a reason (returned to the client as an error).
Reject(String),
}
/// Event data for message enqueue operations.
#[derive(Clone, Debug)]
pub struct MessageEvent {
/// Sender's identity key (32 bytes), if known (None in sealed sender mode).
pub sender_identity: Option<Vec<u8>>,
/// Recipient's identity key (32 bytes).
pub recipient_key: Vec<u8>,
/// Channel ID (16 bytes) if this is a DM channel message.
pub channel_id: Vec<u8>,
/// Length of the encrypted payload in bytes.
pub payload_len: usize,
/// Server-assigned sequence number.
pub seq: u64,
}
/// Event data for authentication operations.
#[derive(Clone, Debug)]
pub struct AuthEvent {
/// The username attempting to authenticate.
pub username: String,
/// Whether the authentication succeeded.
pub success: bool,
/// Failure reason (empty on success).
pub failure_reason: String,
}
/// Event data for channel creation operations.
#[derive(Clone, Debug)]
pub struct ChannelEvent {
/// The channel's unique ID (16 bytes).
pub channel_id: Vec<u8>,
/// Identity key of the initiator.
pub initiator_key: Vec<u8>,
/// Identity key of the peer.
pub peer_key: Vec<u8>,
/// True if this is a newly created channel (initiator creates the MLS group).
pub was_new: bool,
}
/// Event data for message fetch operations.
#[derive(Clone, Debug)]
pub struct FetchEvent {
/// Identity key of the fetcher.
pub recipient_key: Vec<u8>,
/// Channel ID being fetched from.
pub channel_id: Vec<u8>,
/// Number of messages returned.
pub message_count: usize,
}
/// Trait for server-side plugin hooks.
///
/// All methods have default implementations that return [`HookAction::Continue`],
/// so you only need to override the events you care about.
///
/// Hooks are called synchronously in the RPC handler path. Keep them fast —
/// offload heavy work (HTTP calls, disk I/O) to background tasks.
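///
/// ```rust,ignore
/// // Hedged sketch: ship events to a background worker over a channel so the
/// // hook returns immediately (`tx` is a std::sync::mpsc::Sender<AuthEvent>
/// // field on the hook struct; a spawned thread drains the receiver).
/// fn on_auth(&self, event: &AuthEvent) {
/// let _ = self.tx.send(event.clone());
/// }
/// ```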
pub trait ServerHooks: Send + Sync {
/// Called after validation, before a message is stored in the delivery queue.
///
/// Return `HookAction::Reject` to prevent delivery.
fn on_message_enqueue(&self, _event: &MessageEvent) -> HookAction {
HookAction::Continue
}
/// Called after a batch of messages is enqueued.
fn on_batch_enqueue(&self, _events: &[MessageEvent]) {
// Default: no-op
}
/// Called after a successful or failed login attempt.
fn on_auth(&self, _event: &AuthEvent) {
// Default: no-op
}
/// Called after a channel is created or looked up.
fn on_channel_created(&self, _event: &ChannelEvent) {
// Default: no-op
}
/// Called after messages are fetched from the delivery queue.
fn on_fetch(&self, _event: &FetchEvent) {
// Default: no-op
}
/// Called when a user registers (OPAQUE registration complete).
fn on_user_registered(&self, _username: &str, _identity_key: &[u8]) {
// Default: no-op
}
}
/// No-op hook implementation (default).
pub struct NoopHooks;
impl ServerHooks for NoopHooks {}
/// Hook implementation that logs all events via `tracing`.
pub struct TracingHooks;
impl ServerHooks for TracingHooks {
fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
tracing::info!(
recipient_prefix = %hex_prefix(&event.recipient_key),
payload_len = event.payload_len,
seq = event.seq,
has_sender = event.sender_identity.is_some(),
"hook: message enqueued"
);
HookAction::Continue
}
fn on_batch_enqueue(&self, events: &[MessageEvent]) {
tracing::info!(
count = events.len(),
"hook: batch enqueue"
);
}
fn on_auth(&self, event: &AuthEvent) {
if event.success {
tracing::info!(username = %event.username, "hook: login success");
} else {
tracing::warn!(
username = %event.username,
reason = %event.failure_reason,
"hook: login failure"
);
}
}
fn on_channel_created(&self, event: &ChannelEvent) {
tracing::info!(
channel_id = %hex_prefix(&event.channel_id),
was_new = event.was_new,
"hook: channel created"
);
}
fn on_fetch(&self, event: &FetchEvent) {
if event.message_count > 0 {
tracing::debug!(
recipient_prefix = %hex_prefix(&event.recipient_key),
count = event.message_count,
"hook: messages fetched"
);
}
}
fn on_user_registered(&self, username: &str, _identity_key: &[u8]) {
tracing::info!(username = %username, "hook: user registered");
}
}
fn hex_prefix(bytes: &[u8]) -> String {
let n = bytes.len().min(4);
hex::encode(&bytes[..n])
}
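// A hedged usage sketch: TracingHooks slots in anywhere an Arc<dyn ServerHooks>
// is expected, the same way NoopHooks and plugin chains are wired in main.rs:
//
//   let hooks: std::sync::Arc<dyn ServerHooks> = std::sync::Arc::new(TracingHooks);
//   hooks.on_user_registered("alice", b"identity-key-bytes");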

View File

@@ -9,6 +9,7 @@ use clap::Parser;
use dashmap::DashMap;
use opaque_ke::ServerSetup;
use quicproquo_core::opaque_auth::OpaqueSuite;
use quicproquo_kt::MerkleLog;
use quinn::Endpoint;
use rand::rngs::OsRng;
use tokio::sync::Notify;
@@ -18,8 +19,10 @@ mod auth;
mod config;
mod error_codes;
mod federation;
pub mod hooks;
mod metrics;
mod node_service;
mod plugin_loader;
mod sql_store;
mod tls;
mod storage;
@@ -106,6 +109,11 @@ struct Args {
/// Federation QUIC listen address (default: 0.0.0.0:7001).
#[arg(long, env = "QPQ_FEDERATION_LISTEN")]
federation_listen: Option<String>,
/// Directory containing plugin `.so` / `.dylib` files to load at startup.
/// Each library must export `extern "C" fn qpq_plugin_init(vtable: *mut HookVTable) -> i32`.
#[arg(long, env = "QPQ_PLUGIN_DIR")]
plugin_dir: Option<PathBuf>,
}
// ── Entry point ───────────────────────────────────────────────────────────────
@@ -237,6 +245,66 @@ async fn main() -> anyhow::Result<()> {
Err(e) => return Err(anyhow::anyhow!("load OPAQUE server setup: {e}")),
};
// Server Ed25519 signing key for delivery proofs: load from storage or generate fresh.
let signing_key: Arc<quicproquo_core::IdentityKeypair> = match store.get_signing_key_seed() {
Ok(Some(seed_bytes)) => {
let seed: [u8; 32] = seed_bytes
.as_slice()
.try_into()
.context("signing key seed must be 32 bytes")?;
tracing::info!("loaded persisted server signing key");
Arc::new(quicproquo_core::IdentityKeypair::from_seed(seed))
}
Ok(None) => {
let kp = quicproquo_core::IdentityKeypair::generate();
store
.store_signing_key_seed(kp.seed_bytes().to_vec())
.context("persist server signing key")?;
tracing::info!("generated and persisted new server signing key");
Arc::new(kp)
}
Err(e) => return Err(anyhow::anyhow!("load server signing key: {e}")),
};
// Key Transparency Merkle log: load from storage or start fresh.
let kt_log: Arc<std::sync::Mutex<MerkleLog>> = match store.load_kt_log() {
Ok(Some(bytes)) => {
match MerkleLog::from_bytes(&bytes) {
Ok(log) => {
tracing::info!(entries = log.len(), "loaded persisted KT Merkle log");
Arc::new(std::sync::Mutex::new(log))
}
Err(e) => {
tracing::warn!(error = %e, "KT log deserialise failed; starting fresh");
Arc::new(std::sync::Mutex::new(MerkleLog::new()))
}
}
}
Ok(None) => {
tracing::info!("no KT log found; starting fresh");
Arc::new(std::sync::Mutex::new(MerkleLog::new()))
}
Err(e) => return Err(anyhow::anyhow!("load KT log: {e}")),
};
// ── Plugin hooks ──────────────────────────────────────────────────────────
let hooks: Arc<dyn hooks::ServerHooks> = if let Some(dir) = &effective.plugin_dir {
let plugins = plugin_loader::load_plugins_from_dir(dir);
if plugins.is_empty() {
tracing::info!(dir = %dir.display(), "plugin_dir set but no plugins loaded");
Arc::new(hooks::NoopHooks)
} else {
tracing::info!(count = plugins.len(), "plugins loaded");
let boxed: Vec<Box<dyn hooks::ServerHooks>> = plugins
.into_iter()
.map(|p| Box::new(p) as Box<dyn hooks::ServerHooks>)
.collect();
Arc::new(plugin_loader::ChainedHooks::new(boxed))
}
} else {
Arc::new(hooks::NoopHooks)
};
let pending_logins: Arc<DashMap<String, PendingLogin>> = Arc::new(DashMap::new());
let sessions: Arc<DashMap<Vec<u8>, SessionInfo>> = Arc::new(DashMap::new());
let rate_limits: Arc<DashMap<Vec<u8>, RateEntry>> = Arc::new(DashMap::new());
@@ -298,7 +366,7 @@ async fn main() -> anyhow::Result<()> {
None
};
let fed_bind: SocketAddr = SocketAddr::from(([0, 0, 0, 0], 0));
let mut fed_endpoint = Endpoint::client(fed_bind)
.context("create federation client endpoint")?;
if let Some(cc) = client_config {
@@ -522,6 +590,9 @@ async fn main() -> anyhow::Result<()> {
let sealed_sender = effective.sealed_sender;
let fed_client = federation_client.clone();
let local_dom = local_domain.clone();
let sk = Arc::clone(&signing_key);
let conn_hooks = Arc::clone(&hooks);
let conn_kt_log = Arc::clone(&kt_log);
tokio::task::spawn_local(async move {
if let Err(e) = handle_node_connection(
@@ -536,6 +607,9 @@ async fn main() -> anyhow::Result<()> {
sealed_sender,
fed_client,
local_dom,
sk,
conn_hooks,
conn_kt_log,
)
.await
{

View File

@@ -11,6 +11,8 @@ use crate::error_codes::*;
use crate::metrics;
use crate::storage::StorageError;
use crate::hooks::AuthEvent;
use super::NodeServiceImpl;
// Audit events in this module must never include secrets (no session tokens, passwords, or raw keys).
@@ -207,6 +209,11 @@ impl NodeServiceImpl {
// Audit: login failure — do not log secrets (no token, no password).
tracing::warn!(user = %username, "audit: auth login failure (no pending login)");
metrics::record_auth_login_failure_total();
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: false,
failure_reason: "no pending login".to_string(),
});
return Promise::err(coded_error(E019_NO_PENDING_LOGIN, "no pending login for this username"))
}
};
@@ -236,6 +243,11 @@ impl NodeServiceImpl {
Err(e) => {
tracing::warn!(user = %username, "audit: auth login failure (OPAQUE finish failed)");
metrics::record_auth_login_failure_total();
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: false,
failure_reason: format!("OPAQUE finish failed: {e}"),
});
return Promise::err(coded_error(
E010_OPAQUE_ERROR,
format!("OPAQUE login finish failed (bad password?): {e}"),
@@ -255,6 +267,11 @@ impl NodeServiceImpl {
if stored_ik != identity_key {
tracing::warn!(user = %username, "audit: auth login failure (identity mismatch)");
metrics::record_auth_login_failure_total();
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: false,
failure_reason: "identity key mismatch".to_string(),
});
return Promise::err(coded_error(
E016_IDENTITY_MISMATCH,
"identity key does not match registered key",
@@ -279,6 +296,13 @@ impl NodeServiceImpl {
results.get().set_session_token(&token_vec);
// Hook: on_auth — fires after successful login.
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: true,
failure_reason: String::new(),
});
// Audit: login success — do not log session token or any secrets.
metrics::record_auth_login_success_total();
tracing::info!(user = %username, "audit: auth login success — session token issued");
@@ -356,14 +380,39 @@ impl NodeServiceImpl {
Err(e) => return Promise::err(storage_err(e)),
}
// Hook: on_user_registered — fires after successful registration.
self.hooks.on_user_registered(&username, &identity_key);
if !identity_key.is_empty() {
if let Err(e) = self
.store
.store_user_identity_key(&username, identity_key.clone())
.map_err(storage_err)
{
return Promise::err(e);
}
// Append (username, identity_key) to the Key Transparency Merkle log.
match self.kt_log.lock() {
Ok(mut log) => {
log.append(&username, &identity_key);
// Persist after each append (small extra cost, but ensures durability).
match log.to_bytes() {
Ok(bytes) => {
if let Err(e) = self.store.save_kt_log(bytes) {
tracing::warn!(user = %username, error = %e, "KT log persist failed");
}
}
Err(e) => {
tracing::warn!(user = %username, error = %e, "KT log serialise failed");
}
}
tracing::info!(user = %username, tree_size = log.len(), "KT: appended identity binding");
}
Err(e) => {
tracing::warn!(user = %username, error = %e, "KT log lock poisoned; skipping append");
}
}
}
results.get().set_success(true);

View File

@@ -7,6 +7,8 @@ use crate::auth::{coded_error, require_identity, validate_auth_context};
use crate::error_codes::*;
use crate::storage::StorageError;
use crate::hooks::ChannelEvent;
use super::NodeServiceImpl;
fn storage_err(err: StorageError) -> capnp::Error {
@@ -56,6 +58,14 @@ impl NodeServiceImpl {
Err(e) => return Promise::err(storage_err(e)),
};
// Hook: on_channel_created — fires after channel is created or looked up.
self.hooks.on_channel_created(&ChannelEvent {
channel_id: channel_id.clone(),
initiator_key: identity.to_vec(),
peer_key: peer_key.clone(),
was_new,
});
let mut r = results.get();
r.set_channel_id(&channel_id);
r.set_was_new(was_new);

View File

@@ -7,6 +7,8 @@ use quicproquo_proto::node_capnp::node_service;
use tokio::sync::Notify;
use tokio::time::timeout;
use sha2::{Digest, Sha256};
use crate::auth::{
check_rate_limit, coded_error, fmt_hex, require_identity_or_request, validate_auth_context,
};
@@ -15,12 +17,38 @@ use crate::metrics;
use crate::storage::{StorageError, Store};
use super::{NodeServiceImpl, CURRENT_WIRE_VERSION};
use crate::hooks::{HookAction, MessageEvent, FetchEvent};
// Audit events here must not include secrets: no payload content, no full recipient/token bytes (prefix only).
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
const MAX_QUEUE_DEPTH: usize = 1000;
/// Build a 96-byte delivery proof: SHA-256(seq || recipient_key || timestamp_ms) || Ed25519 sig.
///
/// Layout:
/// bytes 0..32 — SHA-256 preimage hash
/// bytes 32..96 — Ed25519 signature over those 32 bytes
fn build_delivery_proof(
signing_key: &quicproquo_core::IdentityKeypair,
seq: u64,
recipient_key: &[u8],
timestamp_ms: u64,
) -> [u8; 96] {
let mut hasher = Sha256::new();
hasher.update(seq.to_le_bytes());
hasher.update(recipient_key);
hasher.update(timestamp_ms.to_le_bytes());
let hash: [u8; 32] = hasher.finalize().into();
let sig = signing_key.sign_raw(&hash);
let mut proof = [0u8; 96];
proof[..32].copy_from_slice(&hash);
proof[32..].copy_from_slice(&sig);
proof
}
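// The verifying side recomputes the hash and checks the signature — a hedged
// sketch only (the real check ships as quicproquo-core's delivery proof
// verification utility; names below are illustrative):
//
//   let mut h = Sha256::new();
//   h.update(seq.to_le_bytes());
//   h.update(recipient_key);
//   h.update(timestamp_ms.to_le_bytes());
//   let hash: [u8; 32] = h.finalize().into();
//   proof[..32] == hash && server_pk.verify(&hash, &sig_from(&proof[32..])).is_ok()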
fn storage_err(err: StorageError) -> capnp::Error {
coded_error(E009_STORAGE_ERROR, err)
}
@@ -173,6 +201,24 @@ impl NodeServiceImpl {
}
let payload_len = payload.len();
let sender_identity = if self.sealed_sender {
None
} else {
crate::auth::require_identity(&auth_ctx).ok().map(|v| v.to_vec())
};
// Hook: on_message_enqueue — fires after validation, before storage.
let hook_event = MessageEvent {
sender_identity,
recipient_key: recipient_key.clone(),
channel_id: channel_id.clone(),
payload_len,
seq: 0, // not yet assigned
};
if let HookAction::Reject(reason) = self.hooks.on_message_enqueue(&hook_event) {
return Promise::err(capnp::Error::failed(format!("hook rejected enqueue: {reason}")));
}
let seq = match self
.store
.enqueue(&recipient_key, &channel_id, payload)
@@ -182,7 +228,15 @@ impl NodeServiceImpl {
Err(e) => return Promise::err(e),
};
let timestamp_ms = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as u64;
let proof = build_delivery_proof(&self.signing_key, seq, &recipient_key, timestamp_ms);
let mut r = results.get();
r.set_seq(seq);
r.set_delivery_proof(&proof);
// Metrics and audit. Audit events must not include secrets (no payload, no full keys).
metrics::record_enqueue_total();
@@ -306,6 +360,13 @@ impl NodeServiceImpl {
}
};
// Hook: on_fetch — fires after messages are retrieved.
self.hooks.on_fetch(&FetchEvent {
recipient_key: recipient_key.clone(),
channel_id: channel_id.clone(),
message_count: messages.len(),
});
// Audit: fetch — do not log payload or full keys.
metrics::record_fetch_total();
tracing::info!(
@@ -671,11 +732,33 @@ impl NodeServiceImpl {
recipient_key_vecs.push(rk);
}
// Hook: on_message_enqueue for each recipient — fires before storage.
let sender_identity = if self.sealed_sender {
None
} else {
crate::auth::require_identity(&auth_ctx).ok().map(|v| v.to_vec())
};
let mut hook_events = Vec::with_capacity(recipient_key_vecs.len());
for rk in &recipient_key_vecs {
let event = MessageEvent {
sender_identity: sender_identity.clone(),
recipient_key: rk.clone(),
channel_id: channel_id.clone(),
payload_len: payload.len(),
seq: 0,
};
if let HookAction::Reject(reason) = self.hooks.on_message_enqueue(&event) {
return Promise::err(capnp::Error::failed(format!("hook rejected enqueue: {reason}")));
}
hook_events.push(event);
}
let n = recipient_key_vecs.len();
let store = Arc::clone(&self.store);
let waiters = Arc::clone(&self.waiters);
let fed_client = self.federation_client.clone();
let local_domain = self.local_domain.clone();
let hooks = Arc::clone(&self.hooks);
// Use an async future to support federation relay alongside local enqueue.
// All storage operations are synchronous; only federation relay calls are await-ed.
@@ -734,6 +817,9 @@ impl NodeServiceImpl {
list.set(i as u32, *seq);
}
// Hook: on_batch_enqueue — fires after all messages are stored.
hooks.on_batch_enqueue(&hook_events);
tracing::info!(
recipient_count = n,
payload_len = payload.len(),

View File

@@ -5,6 +5,7 @@ use capnp_rpc::RpcSystem;
use dashmap::DashMap;
use opaque_ke::ServerSetup;
use quicproquo_core::opaque_auth::OpaqueSuite;
use quicproquo_kt::MerkleLog;
use quicproquo_proto::node_capnp::node_service;
use tokio::sync::Notify;
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
@@ -211,6 +212,12 @@ pub struct NodeServiceImpl {
pub federation_client: Option<Arc<crate::federation::FederationClient>>,
/// This server's federation domain (empty if federation disabled).
pub local_domain: Option<String>,
/// Server-side plugin hooks for extensibility.
pub hooks: Arc<dyn crate::hooks::ServerHooks>,
/// Server Ed25519 signing key for delivery proofs.
pub signing_key: Arc<quicproquo_core::IdentityKeypair>,
/// Key Transparency Merkle log (shared across connections).
pub kt_log: Arc<std::sync::Mutex<MerkleLog>>,
}
impl NodeServiceImpl {
@@ -225,6 +232,9 @@ impl NodeServiceImpl {
sealed_sender: bool,
federation_client: Option<Arc<crate::federation::FederationClient>>,
local_domain: Option<String>,
signing_key: Arc<quicproquo_core::IdentityKeypair>,
hooks: Arc<dyn crate::hooks::ServerHooks>,
kt_log: Arc<std::sync::Mutex<MerkleLog>>,
) -> Self {
Self {
store,
@@ -237,6 +247,9 @@ impl NodeServiceImpl {
sealed_sender,
federation_client,
local_domain,
hooks,
signing_key,
kt_log,
}
}
}
@@ -253,6 +266,9 @@ pub async fn handle_node_connection(
sealed_sender: bool,
federation_client: Option<Arc<crate::federation::FederationClient>>,
local_domain: Option<String>,
signing_key: Arc<quicproquo_core::IdentityKeypair>,
hooks: Arc<dyn crate::hooks::ServerHooks>,
kt_log: Arc<std::sync::Mutex<MerkleLog>>,
) -> Result<(), anyhow::Error> {
let connection = connecting.await?;
@@ -284,6 +300,9 @@ pub async fn handle_node_connection(
sealed_sender,
federation_client,
local_domain,
signing_key,
hooks,
kt_log,
));
RpcSystem::new(Box::new(network), Some(service.client))

View File

@@ -78,14 +78,36 @@ impl NodeServiceImpl {
}
// Local resolution.
let identity_key = match self.store.get_user_identity_key(&addr.username) {
Ok(Some(key)) => key,
Ok(None) => {
// Return empty Data — caller checks length to detect "not found".
return Promise::ok(());
}
Err(e) => return Promise::err(storage_err(e)),
};
let mut r = results.get();
r.set_identity_key(&identity_key);
// Attempt to include a KT Merkle inclusion proof.
// Non-fatal: if the log is unavailable or has no entry, return just the key.
if let Ok(log) = self.kt_log.lock() {
if let Some(leaf_idx) = log.find(&addr.username, &identity_key) {
match log.inclusion_proof(leaf_idx) {
Ok(proof) => match proof.to_bytes() {
Ok(bytes) => {
r.set_inclusion_proof(&bytes);
}
Err(e) => {
tracing::warn!(error = %e, "KT proof serialise failed");
}
},
Err(e) => {
tracing::warn!(error = %e, "KT inclusion_proof failed");
}
}
}
}
Promise::ok(())


@@ -0,0 +1,342 @@
//! Dynamic plugin loader for server-side hook extensions.
//!
//! Loads shared libraries (`*.so` / `*.dylib`) from a directory at server
//! startup. Each library must export:
//!
//! ```c
//! extern "C" int32_t qpq_plugin_init(HookVTable *vtable);
//! ```
//!
//! The server creates a zeroed [`HookVTable`], passes it to `qpq_plugin_init`,
//! and wraps the resulting vtable in a [`PluginHooks`] that implements
//! [`ServerHooks`]. Multiple plugins are chained via [`ChainedHooks`].
//!
//! # Safety model
//!
//! Dynamic loading is inherently unsafe. The plugin binary MUST:
//! - be compiled against the same `quicproquo-plugin-api` version
//! - not store the event-struct pointers beyond the callback duration
//! - be `Send + Sync` (the wrapper is put behind an `Arc`)
//!
//! The server operator is responsible for only loading trusted plugin binaries.
use std::path::Path;
use libloading::{Library, Symbol};
use quicproquo_plugin_api::{
CAuthEvent, CChannelEvent, CFetchEvent, CMessageEvent, HookVTable, HOOK_CONTINUE, PLUGIN_OK,
};
use crate::hooks::{AuthEvent, ChannelEvent, FetchEvent, HookAction, MessageEvent, ServerHooks};
// ── PluginHooks ───────────────────────────────────────────────────────────────
/// A [`ServerHooks`] implementation backed by a dynamically loaded plugin vtable.
///
/// Holds the [`Library`] alive alongside the vtable so that the loaded code
/// is not unmapped while the vtable function pointers are still reachable.
pub struct PluginHooks {
/// The vtable filled by `qpq_plugin_init`.
vtable: HookVTable,
/// Keeps the shared library mapped. Must be dropped after `vtable`.
_lib: Library,
/// Name of the plugin file, for diagnostics.
name: String,
}
impl PluginHooks {
/// Load a plugin from `path` and call `qpq_plugin_init`.
///
/// Returns `Err` if the library cannot be opened, the symbol is missing,
/// or `qpq_plugin_init` returns a non-zero error code.
pub fn load(path: &Path) -> anyhow::Result<Self> {
let name = path
.file_name()
.map(|n| n.to_string_lossy().into_owned())
.unwrap_or_else(|| path.display().to_string());
// Safety: loading arbitrary shared libraries is inherently unsafe.
// The server operator is responsible for only loading trusted plugins.
let lib = unsafe { Library::new(path) }
.map_err(|e| anyhow::anyhow!("plugin '{}': load failed: {}", name, e))?;
// Zero-initialise the vtable so unused slots are null.
let mut vtable = HookVTable {
user_data: core::ptr::null_mut(),
on_message_enqueue: None,
on_batch_enqueue: None,
on_auth: None,
on_channel_created: None,
on_fetch: None,
on_user_registered: None,
error_message: None,
destroy: None,
};
// Safety: the symbol must have the exact signature declared in the API crate.
let init: Symbol<unsafe extern "C" fn(*mut HookVTable) -> i32> =
unsafe { lib.get(b"qpq_plugin_init\0") }.map_err(|e| {
anyhow::anyhow!("plugin '{}': missing qpq_plugin_init: {}", name, e)
})?;
let rc = unsafe { init(&mut vtable) };
if rc != PLUGIN_OK {
anyhow::bail!("plugin '{}': qpq_plugin_init returned error {}", name, rc);
}
tracing::info!(plugin = %name, "loaded plugin");
Ok(Self { vtable, _lib: lib, name })
}
/// Human-readable plugin name (filename).
pub fn name(&self) -> &str {
&self.name
}
/// Retrieve the rejection reason from the plugin, falling back to a generic string.
fn rejection_reason(&self) -> String {
if let Some(f) = self.vtable.error_message {
let ptr = unsafe { f(self.vtable.user_data) };
if !ptr.is_null() {
// Safety: plugin must return a valid null-terminated UTF-8 (or ASCII) string.
let cstr = unsafe { std::ffi::CStr::from_ptr(ptr as *const core::ffi::c_char) };
return cstr.to_string_lossy().into_owned();
}
}
"rejected by plugin".to_string()
}
}
impl Drop for PluginHooks {
fn drop(&mut self) {
if let Some(destroy) = self.vtable.destroy {
// Safety: destroy must be safe to call at any time after init.
unsafe { destroy(self.vtable.user_data) };
}
}
}
impl ServerHooks for PluginHooks {
fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
let f = match self.vtable.on_message_enqueue {
Some(f) => f,
None => return HookAction::Continue,
};
let sender_ptr = event
.sender_identity
.as_deref()
.map(|s| s.as_ptr())
.unwrap_or(core::ptr::null());
let sender_len = event.sender_identity.as_deref().map_or(0, |s| s.len());
let c_event = CMessageEvent {
sender_identity: sender_ptr,
sender_identity_len: sender_len,
recipient_key: event.recipient_key.as_ptr(),
recipient_key_len: event.recipient_key.len(),
channel_id: event.channel_id.as_ptr(),
channel_id_len: event.channel_id.len(),
payload_len: event.payload_len,
seq: event.seq,
};
let rc = unsafe { f(self.vtable.user_data, &c_event) };
if rc == HOOK_CONTINUE {
HookAction::Continue
} else {
HookAction::Reject(self.rejection_reason())
}
}
fn on_batch_enqueue(&self, events: &[MessageEvent]) {
let f = match self.vtable.on_batch_enqueue {
Some(f) => f,
None => return,
};
let c_events: Vec<CMessageEvent> = events
.iter()
.map(|e| {
let sender_ptr = e
.sender_identity
.as_deref()
.map(|s| s.as_ptr())
.unwrap_or(core::ptr::null());
let sender_len = e.sender_identity.as_deref().map_or(0, |s| s.len());
CMessageEvent {
sender_identity: sender_ptr,
sender_identity_len: sender_len,
recipient_key: e.recipient_key.as_ptr(),
recipient_key_len: e.recipient_key.len(),
channel_id: e.channel_id.as_ptr(),
channel_id_len: e.channel_id.len(),
payload_len: e.payload_len,
seq: e.seq,
}
})
.collect();
unsafe { f(self.vtable.user_data, c_events.as_ptr(), c_events.len()) };
}
fn on_auth(&self, event: &AuthEvent) {
let f = match self.vtable.on_auth {
Some(f) => f,
None => return,
};
let c_event = CAuthEvent {
username: event.username.as_ptr(),
username_len: event.username.len(),
success: if event.success { 1 } else { 0 },
failure_reason: event.failure_reason.as_ptr(),
failure_reason_len: event.failure_reason.len(),
};
unsafe { f(self.vtable.user_data, &c_event) };
}
fn on_channel_created(&self, event: &ChannelEvent) {
let f = match self.vtable.on_channel_created {
Some(f) => f,
None => return,
};
let c_event = CChannelEvent {
channel_id: event.channel_id.as_ptr(),
channel_id_len: event.channel_id.len(),
initiator_key: event.initiator_key.as_ptr(),
initiator_key_len: event.initiator_key.len(),
peer_key: event.peer_key.as_ptr(),
peer_key_len: event.peer_key.len(),
was_new: if event.was_new { 1 } else { 0 },
};
unsafe { f(self.vtable.user_data, &c_event) };
}
fn on_fetch(&self, event: &FetchEvent) {
let f = match self.vtable.on_fetch {
Some(f) => f,
None => return,
};
let c_event = CFetchEvent {
recipient_key: event.recipient_key.as_ptr(),
recipient_key_len: event.recipient_key.len(),
channel_id: event.channel_id.as_ptr(),
channel_id_len: event.channel_id.len(),
message_count: event.message_count,
};
unsafe { f(self.vtable.user_data, &c_event) };
}
fn on_user_registered(&self, username: &str, identity_key: &[u8]) {
let f = match self.vtable.on_user_registered {
Some(f) => f,
None => return,
};
unsafe {
f(
self.vtable.user_data,
username.as_ptr(),
username.len(),
identity_key.as_ptr(),
identity_key.len(),
)
};
}
}
// ── ChainedHooks ─────────────────────────────────────────────────────────────
/// Composes multiple [`ServerHooks`] implementations into one.
///
/// For filtering hooks (`on_message_enqueue`), the first rejection short-circuits
/// the chain. For fire-and-forget hooks, all plugins are called in order.
pub struct ChainedHooks {
hooks: Vec<Box<dyn ServerHooks>>,
}
impl ChainedHooks {
pub fn new(hooks: Vec<Box<dyn ServerHooks>>) -> Self {
Self { hooks }
}
}
impl ServerHooks for ChainedHooks {
fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
for h in &self.hooks {
match h.on_message_enqueue(event) {
HookAction::Continue => {}
reject => return reject,
}
}
HookAction::Continue
}
fn on_batch_enqueue(&self, events: &[MessageEvent]) {
for h in &self.hooks {
h.on_batch_enqueue(events);
}
}
fn on_auth(&self, event: &AuthEvent) {
for h in &self.hooks {
h.on_auth(event);
}
}
fn on_channel_created(&self, event: &ChannelEvent) {
for h in &self.hooks {
h.on_channel_created(event);
}
}
fn on_fetch(&self, event: &FetchEvent) {
for h in &self.hooks {
h.on_fetch(event);
}
}
fn on_user_registered(&self, username: &str, identity_key: &[u8]) {
for h in &self.hooks {
h.on_user_registered(username, identity_key);
}
}
}
// ── load_plugins_from_dir ─────────────────────────────────────────────────────
/// Load all `*.so` / `*.dylib` files from `dir` as plugins.
///
/// Non-fatal errors (unreadable files, init failures) are logged as warnings
/// and skipped; the server continues with the plugins that did load.
/// Returns the full list of successfully loaded plugins.
pub fn load_plugins_from_dir(dir: &Path) -> Vec<PluginHooks> {
let mut plugins = Vec::new();
let entries = match std::fs::read_dir(dir) {
Ok(e) => e,
Err(e) => {
tracing::warn!(dir = %dir.display(), error = %e, "plugin_dir unreadable; no plugins loaded");
return plugins;
}
};
for entry in entries.flatten() {
let path = entry.path();
let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
if ext != "so" && ext != "dylib" {
continue;
}
match PluginHooks::load(&path) {
Ok(p) => {
tracing::info!(plugin = %p.name(), "plugin loaded successfully");
plugins.push(p);
}
Err(e) => {
tracing::warn!(path = %path.display(), error = %e, "failed to load plugin; skipping");
}
}
}
plugins
}
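
(Aside, not part of this diff: a sketch of how startup code could feed the loader's output into the hook chain. The `build_hooks` helper is hypothetical; `NoopHooks` is the documented default when no plugins are configured, and `ChainedHooks` is the composer defined above.)

```rust
use std::path::Path;
use std::sync::Arc;

// Hypothetical helper: build the single ServerHooks instance handed to
// NodeServiceImpl at startup.
fn build_hooks(plugin_dir: Option<&Path>) -> Arc<dyn crate::hooks::ServerHooks> {
    let plugins = plugin_dir.map(load_plugins_from_dir).unwrap_or_default();
    if plugins.is_empty() {
        // No plugins configured or loaded: fall back to the no-op default.
        return Arc::new(crate::hooks::NoopHooks);
    }
    // Box each loaded plugin and chain them; the first rejection wins.
    let boxed: Vec<Box<dyn crate::hooks::ServerHooks>> = plugins
        .into_iter()
        .map(|p| Box::new(p) as Box<dyn crate::hooks::ServerHooks>)
        .collect();
    Arc::new(ChainedHooks::new(boxed))
}
```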


@@ -9,7 +9,7 @@ use rusqlite::{params, Connection};
 use crate::storage::{StorageError, Store};
 /// Schema version after introducing the migration runner (existing DBs had 1).
-const SCHEMA_VERSION: i32 = 5;
+const SCHEMA_VERSION: i32 = 7;
 /// Migrations: (migration_number, SQL). Files named NNN_name.sql, applied in order when N > user_version.
 const MIGRATIONS: &[(i32, &str)] = &[
@@ -17,6 +17,8 @@ const MIGRATIONS: &[(i32, &str)] = &[
     (3, include_str!("../migrations/002_add_seq.sql")),
     (4, include_str!("../migrations/003_channels.sql")),
     (5, include_str!("../migrations/004_federation.sql")),
+    (6, include_str!("../migrations/005_signing_key.sql")),
+    (7, include_str!("../migrations/006_kt_log.sql")),
 ];
 /// Runs pending migrations on an open connection: applies any migration whose number is greater
@@ -305,6 +307,48 @@ impl Store for SqlStore {
             .map_err(|e| StorageError::Db(e.to_string()))
     }
+    fn store_signing_key_seed(&self, seed: Vec<u8>) -> Result<(), StorageError> {
+        let conn = self.lock_conn()?;
+        conn.execute(
+            "INSERT OR REPLACE INTO server_signing_key (id, seed_data) VALUES (1, ?1)",
+            params![seed],
+        )
+        .map_err(|e| StorageError::Db(e.to_string()))?;
+        Ok(())
+    }
+    fn get_signing_key_seed(&self) -> Result<Option<Vec<u8>>, StorageError> {
+        let conn = self.lock_conn()?;
+        let mut stmt = conn
+            .prepare("SELECT seed_data FROM server_signing_key WHERE id = 1")
+            .map_err(|e| StorageError::Db(e.to_string()))?;
+        stmt.query_row([], |row| row.get(0))
+            .optional()
+            .map_err(|e| StorageError::Db(e.to_string()))
+    }
+    fn save_kt_log(&self, bytes: Vec<u8>) -> Result<(), StorageError> {
+        let conn = self.lock_conn()?;
+        conn.execute(
+            "INSERT OR REPLACE INTO kt_log (id, log_data) VALUES (1, ?1)",
+            params![bytes],
+        )
+        .map_err(|e| StorageError::Db(e.to_string()))?;
+        Ok(())
+    }
+    fn load_kt_log(&self) -> Result<Option<Vec<u8>>, StorageError> {
+        let conn = self.lock_conn()?;
+        let mut stmt = conn
+            .prepare("SELECT log_data FROM kt_log WHERE id = 1")
+            .map_err(|e| StorageError::Db(e.to_string()))?;
+        stmt.query_row([], |row| row.get(0))
+            .optional()
+            .map_err(|e| StorageError::Db(e.to_string()))
+    }
     fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
         let conn = self.lock_conn()?;
         conn.execute(


@@ -81,6 +81,18 @@ pub trait Store: Send + Sync {
     /// Load the persisted `ServerSetup`, if any.
     fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError>;
+    /// Persist the server's Ed25519 signing key seed (32 bytes) for delivery proofs.
+    fn store_signing_key_seed(&self, seed: Vec<u8>) -> Result<(), StorageError>;
+    /// Load the persisted signing key seed, if any.
+    fn get_signing_key_seed(&self) -> Result<Option<Vec<u8>>, StorageError>;
+    /// Persist the Key Transparency Merkle log (bincode-serialised `MerkleLog` bytes).
+    fn save_kt_log(&self, bytes: Vec<u8>) -> Result<(), StorageError>;
+    /// Load the persisted KT Merkle log, if any.
+    fn load_kt_log(&self) -> Result<Option<Vec<u8>>, StorageError>;
     /// Store an OPAQUE user record (serialized `ServerRegistration`).
     fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError>;
@@ -213,6 +225,8 @@ pub struct FileBackedStore {
     ds_path: PathBuf,
     hk_path: PathBuf,
     setup_path: PathBuf,
+    signing_key_path: PathBuf,
+    kt_log_path: PathBuf,
     users_path: PathBuf,
     identity_keys_path: PathBuf,
     channels_path: PathBuf,
@@ -235,6 +249,8 @@ impl FileBackedStore {
         let ds_path = dir.join("deliveries.bin");
         let hk_path = dir.join("hybridkeys.bin");
         let setup_path = dir.join("server_setup.bin");
+        let signing_key_path = dir.join("server_signing_key.bin");
+        let kt_log_path = dir.join("kt_log.bin");
         let users_path = dir.join("users.bin");
         let identity_keys_path = dir.join("identity_keys.bin");
         let channels_path = dir.join("channels.bin");
@@ -251,6 +267,8 @@ impl FileBackedStore {
             ds_path,
             hk_path,
             setup_path,
+            signing_key_path,
+            kt_log_path,
             users_path,
             identity_keys_path,
             channels_path,
@@ -541,6 +559,52 @@ impl Store for FileBackedStore {
         Ok(Some(bytes))
     }
+    fn store_signing_key_seed(&self, seed: Vec<u8>) -> Result<(), StorageError> {
+        if let Some(parent) = self.signing_key_path.parent() {
+            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
+        }
+        fs::write(&self.signing_key_path, &seed).map_err(|e| StorageError::Io(e.to_string()))?;
+        #[cfg(unix)]
+        {
+            use std::os::unix::fs::PermissionsExt;
+            let _ = std::fs::set_permissions(
+                &self.signing_key_path,
+                std::fs::Permissions::from_mode(0o600),
+            );
+        }
+        Ok(())
+    }
+    fn get_signing_key_seed(&self) -> Result<Option<Vec<u8>>, StorageError> {
+        if !self.signing_key_path.exists() {
+            return Ok(None);
+        }
+        let bytes =
+            fs::read(&self.signing_key_path).map_err(|e| StorageError::Io(e.to_string()))?;
+        if bytes.is_empty() {
+            return Ok(None);
+        }
+        Ok(Some(bytes))
+    }
+    fn save_kt_log(&self, bytes: Vec<u8>) -> Result<(), StorageError> {
+        if let Some(parent) = self.kt_log_path.parent() {
+            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
+        }
+        fs::write(&self.kt_log_path, &bytes).map_err(|e| StorageError::Io(e.to_string()))
+    }
+    fn load_kt_log(&self) -> Result<Option<Vec<u8>>, StorageError> {
+        if !self.kt_log_path.exists() {
+            return Ok(None);
+        }
+        let bytes = fs::read(&self.kt_log_path).map_err(|e| StorageError::Io(e.to_string()))?;
+        if bytes.is_empty() {
+            return Ok(None);
+        }
+        Ok(Some(bytes))
+    }
     fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
         let mut map = lock(&self.users)?;
         match map.entry(username.to_string()) {


@@ -64,12 +64,23 @@ RUN apt-get update \
 COPY --from=builder /build/target/release/qpq-server /usr/local/bin/qpq-server
+# Create a dedicated non-root user with a writable data directory.
+RUN groupadd --system qpq \
+    && useradd --system --gid qpq --no-create-home --shell /usr/sbin/nologin qpq \
+    && mkdir -p /var/lib/quicproquo \
+    && chown qpq:qpq /var/lib/quicproquo
 EXPOSE 7000
 ENV RUST_LOG=info \
-    QPQ_LISTEN=0.0.0.0:7000
+    QPQ_LISTEN=0.0.0.0:7000 \
+    QPQ_DATA_DIR=/var/lib/quicproquo \
+    QPQ_TLS_CERT=/var/lib/quicproquo/server-cert.der \
+    QPQ_TLS_KEY=/var/lib/quicproquo/server-key.der
-# Run as a non-root user.
-USER nobody
+HEALTHCHECK --interval=30s --timeout=5s --retries=3 \
+    CMD test -f /var/lib/quicproquo/server-cert.der || exit 1
+USER qpq
 CMD ["qpq-server"]

docs/AGENT-TEAM.md (new file)

@@ -0,0 +1,483 @@
# quicproquo — AI Agent Team Specification
> A structured multi-agent system for bringing quicproquo from working prototype
> to production-grade, audited, documented, deployable software.
---
## Philosophy
This team exists because shipping production software requires more than writing
code. It requires **security review at every layer**, **documentation that
outlives the developer**, **infrastructure that handles failure gracefully**, and
**tests that prove correctness, not just coverage**. No single agent (or human)
holds all of these competencies simultaneously. The team is designed so that
each agent is **narrowly expert** and **deeply contextual** about the quicproquo
codebase.
### Principles
1. **Read before write.** Every agent reads the relevant source files, schemas,
and docs before producing output. No agent guesses at code structure.
2. **Scope discipline.** Agents only touch their assigned crates and concern
areas. A server-dev never edits client code. A security auditor never edits
production code.
3. **Security is not optional.** Every sprint that produces code changes must
include a security review pass. This is not a suggestion — it is a gate.
4. **Docs are a deliverable.** Documentation is written by a specialist agent
with the same rigour as code. API docs, architecture docs, and user guides
are first-class outputs.
5. **Incremental, verifiable progress.** Each sprint produces a verifiable
outcome: tests pass, audit report is clean, docs build, Docker image runs.
---
## Team Roster
### Development Agents
| Agent | Scope | Tools | Edits Code? |
|-------|-------|-------|-------------|
| `rust-architect` | Architecture design, ADRs, crate boundary review | Read, Glob, Grep | No |
| `rust-core-dev` | `quicproquo-core`: crypto, MLS, Noise, hybrid KEM | Read, Glob, Grep, Edit, Write, Bash | Yes |
| `rust-server-dev` | `quicproquo-server`: AS, DS, RPC, storage, federation | Read, Glob, Grep, Edit, Write, Bash | Yes |
| `rust-client-dev` | `quicproquo-client`: CLI, REPL, OPAQUE, local state | Read, Glob, Grep, Edit, Write, Bash | Yes |
### Security Agents
| Agent | Scope | Tools | Edits Code? |
|-------|-------|-------|-------------|
| `security-auditor` | Code review, finding report, threat analysis | Read, Glob, Grep | No |
### Quality Agents
| Agent | Scope | Tools | Edits Code? |
|-------|-------|-------|-------------|
| `test-engineer` | Unit, integration, E2E, property tests, coverage | Read, Glob, Grep, Edit, Write, Bash | Yes (tests only) |
| `devops-engineer` | Docker, CI/CD, deployment, monitoring, infrastructure | Read, Glob, Grep, Edit, Write, Bash | Yes |
### Documentation Agents
| Agent | Scope | Tools | Edits Code? |
|-------|-------|-------|-------------|
| `docs-engineer` | User guides, API docs, architecture docs, mdBook | Read, Glob, Grep, Edit, Write, Bash | Yes (docs only) |
### Coordination Agents
| Agent | Scope | Tools | Edits Code? |
|-------|-------|-------|-------------|
| `roadmap-tracker` | Progress assessment, status reports, blocker analysis | Read, Glob, Grep | No |
---
## Agent Role Specifications
### rust-architect
**Identity:** Senior Rust systems architect with deep knowledge of MLS
(RFC 9420), Noise Protocol Framework, Cap'n Proto RPC, and post-quantum
cryptography.
**Reads:** `master-prompt.md`, `ROADMAP.md`, all `.capnp` schemas, crate
`lib.rs` and `mod.rs` files, `Cargo.toml` dependency lists.
**Produces:**
- Architecture Decision Records (ADR) in Context → Decision → Consequences format
- Crate boundary violation reports
- Dependency impact assessments for new crates
- Design documents for features spanning multiple crates
- Review feedback on proposed implementations
**Never does:** Write implementation code, edit source files, run commands.
**Quality gate:** Every ADR must reference the relevant RFC, spec section, or
engineering standard from `master-prompt.md`.
---
### rust-core-dev
**Identity:** Cryptography-focused Rust developer. Expert in `openmls`, `snow`,
`ml-kem`, `opaque-ke`, `zeroize`, and the `dalek` ecosystem.
**Owns:** `crates/quicproquo-core/`
**Security invariants (non-negotiable):**
- Every crypto operation returns `Result` — never `.unwrap()` or `.expect()`
- All key material types derive `Zeroize` and `ZeroizeOnDrop`
- No secret bytes in `tracing` or `log` output
- Constant-time comparisons via `subtle::ConstantTimeEq` for auth tags
- No `unsafe` without a `// SAFETY:` comment documenting the invariant
**Before any edit:**
1. Read the target file in full
2. Read `ROADMAP.md` to verify the change is in scope
3. Read `master-prompt.md` §Non-Negotiable Engineering Standards
4. Check if a new dependency is needed — if yes, justify in commit message
**After any edit:** `cargo check -p quicproquo-core && cargo test -p quicproquo-core`
---
### rust-server-dev
**Identity:** Backend systems developer. Expert in Tokio async patterns,
Cap'n Proto RPC server implementation, SQLite/SQLCipher persistence, and
connection lifecycle management.
**Owns:** `crates/quicproquo-server/`
**Security invariants:**
- No `.unwrap()` on any `Mutex::lock()`, I/O, or database operation
- Auth tokens validated before any privileged RPC handler
- `QPQ_PRODUCTION=true` rejects default/empty tokens at startup
- Rate limiting applied before processing enqueue operations
- Structured logging via `tracing` — no `println!` or `eprintln!`
**Before any edit:**
1. Read the target file and its corresponding `.capnp` schema
2. Verify the Cap'n Proto interface hasn't changed out from under you
3. Check for existing tests in `crates/quicproquo-server/tests/`
**After any edit:** `cargo check -p quicproquo-server && cargo test -p quicproquo-server`
---
### rust-client-dev
**Identity:** CLI and application developer. Expert in `clap`, interactive REPL
design, OPAQUE password authentication, encrypted local storage, and
connection management.
**Owns:** `crates/quicproquo-client/`
**UX invariants:**
- Clear, user-facing error messages — no raw Rust error types in REPL output
- REPL prompt shows current context (server address, active conversation)
- Graceful handling of server disconnection with auto-reconnect
- State file encrypted with Argon2id + ChaCha20-Poly1305
**Before any edit:**
1. Read the target file and related command handlers in `commands.rs`
2. Understand state management in `state.rs`
3. Check the REPL command table for conflicts
**After any edit:** `cargo check -p quicproquo-client && cargo test -p quicproquo-client`
---
### security-auditor
**Identity:** Application security engineer specialising in cryptographic
protocol implementations. Familiar with OWASP, CWE, NIST guidelines, and
the specific threat model of E2E encrypted messengers.
**Audit checklist (every review):**
1. `.unwrap()` / `.expect()` outside `#[cfg(test)]` on crypto or I/O paths
2. Key material types missing `Zeroize` / `ZeroizeOnDrop`
3. Secrets (keys, passwords, tokens, nonces) reaching `tracing`/`log`/`println`
4. Non-constant-time comparisons on authentication tags, tokens, or MACs
5. `panic!` / `unreachable!` in production paths
6. `unsafe` blocks without documented safety invariants
7. Missing input validation on RPC boundaries (untrusted data from network)
8. Race conditions in shared state (DashMap, Mutex, RwLock patterns)
9. Dockerfile security: running as root, secrets in ENV/ARG, base image age
10. Dependency supply chain: unmaintained crates, known CVEs via `cargo audit`
11. Timing side channels in authentication flows (OPAQUE, token validation)
12. Replay attack vectors in message delivery
**Output format:** Prioritised Markdown report with severity levels:
`Critical > High > Medium > Low > Informational`
Each finding includes: file:line, description, attack scenario, remediation.
**Never does:** Edit source files. Findings only.
---
### test-engineer
**Identity:** QA engineer with expertise in Rust testing patterns, property-based
testing (`proptest`), integration test harnesses, and E2E test design for
networked systems.
**Responsibilities:**
- Write unit tests inside `#[cfg(test)]` modules
- Write integration tests in `crates/<crate>/tests/`
- Write E2E tests that spin up server + client(s)
- Run `cargo test` and diagnose failures
- Verify test coverage against ROADMAP milestone criteria
- Identify untested code paths and edge cases
**Naming convention:** `test_<what>_<expected_outcome>` (snake_case)
**E2E test requirements:**
- Use `AUTH_LOCK` mutex for tests that share auth context
- Run with `--test-threads 1` for E2E tests
- Clean up spawned server processes on test completion
- Assert on specific error types, not just `is_err()`
**After writing tests:** Run them, report pass/fail, diagnose failures.
---
### devops-engineer
**Identity:** Infrastructure and deployment engineer. Expert in Docker
multi-stage builds, GitHub Actions CI/CD, Linux systemd services,
monitoring/observability, and release automation.
**Owns:** `docker/`, `.github/`, `docker-compose.yml`, deployment configs
**Responsibilities:**
- Docker image builds, optimisation, and security hardening
- CI pipeline maintenance and enhancement
- Release automation (cargo-release, changelogs, tagging)
- Monitoring setup (Prometheus metrics endpoint, Grafana dashboards)
- Deployment documentation (systemd units, Docker Compose, Kubernetes)
- Infrastructure-as-code for test and staging environments
- Cross-compilation targets (musl, ARM, MIPS for OpenWrt)
- Binary size optimisation for embedded/mesh deployments
**Quality gates:**
- Docker image builds successfully: `docker build -f docker/Dockerfile .`
- CI pipeline passes locally: `act` or manual validation
- Release artifacts are reproducible
---
### docs-engineer
**Identity:** Technical writer with deep understanding of cryptographic
protocols and systems programming. Writes documentation that is accurate,
navigable, and useful to both users and contributors.
**Owns:** `docs/`, `README.md`, `CONTRIBUTING.md`, `SECURITY.md`, inline
doc comments on public APIs
**Documentation tiers:**
1. **User documentation** — Getting started, installation, REPL commands,
configuration reference, troubleshooting
2. **Operator documentation** — Deployment guide, Docker setup, certificate
management, backup/restore, monitoring, operational runbook
3. **Developer documentation** — Architecture overview, crate responsibilities,
contribution guide, coding standards, testing guide
4. **Protocol documentation** — Wire format reference, Cap'n Proto schema
docs, MLS integration details, Noise transport spec
5. **Security documentation** — Threat model, trust boundaries, key lifecycle,
audit reports, responsible disclosure policy
**Quality gates:**
- `mdbook build docs/` succeeds without warnings
- All code examples in docs compile (`cargo test --doc`)
- Internal links resolve (no broken cross-references)
- Every public API has a doc comment with examples
---
### roadmap-tracker
**Identity:** Project manager and progress analyst. Reads code and docs to
objectively assess completion status.
**Method:**
1. Read `ROADMAP.md` in full
2. For each unchecked `- [ ]` item, search source for implementation evidence
3. Classify: Complete, Partial (what exists vs. what's missing), Not Started
4. Identify blockers (dependency chains between items)
5. Identify quick wins (< 1 hour, self-contained, high impact)
**Output:** Structured Markdown status report.
**Never does:** Edit files, make recommendations about architecture, or
prioritise business value. Pure objective assessment.
---
## Sprint Definitions
Sprints are groups of agent tasks that can run in parallel. Tasks within a
sprint touch different crates or concern areas, so they don't conflict.
### Production Readiness Path
The sprints below form a dependency chain. Run them in order.
```
status → audit → phase1-hardening → phase1-infra → phase2-tests →
docs-foundation → security-review → release-prep
```
### Sprint: `status`
**Purpose:** Baseline assessment before starting work.
| Agent | Task |
|-------|------|
| `roadmap-tracker` | Full roadmap status report across all phases |
| `security-auditor` | Quick security sweep of recent changes (HEAD~10) |
### Sprint: `audit`
**Purpose:** Deep security audit + roadmap analysis.
| Agent | Task |
|-------|------|
| `security-auditor` | Full audit of quicproquo-core and quicproquo-server |
| `roadmap-tracker` | Detailed Phase 1 and Phase 2 completion assessment |
### Sprint: `phase1-hardening`
**Purpose:** Eliminate crash paths and enforce secure defaults.
| Agent | Task |
|-------|------|
| `rust-core-dev` | Remove `.unwrap()`/`.expect()` from non-test code in core |
| `rust-server-dev` | Remove `.unwrap()`/`.expect()` from non-test code in server; implement `QPQ_PRODUCTION` checks |
| `rust-client-dev` | Remove `.unwrap()`/`.expect()` from non-test code in client; fix `AUTH_CONTEXT.read().expect()` |
### Sprint: `phase1-infra`
**Purpose:** Fix deployment infrastructure.
| Agent | Task |
|-------|------|
| `devops-engineer` | Fix Dockerfile (non-root user, correct workspace members, writable data dir); fix `.gitignore`; validate Docker build |
| `rust-architect` | Design TLS certificate lifecycle: CA-signed cert flow, `--tls-required` flag, rotation without downtime |
### Sprint: `phase2-tests`
**Purpose:** Build test confidence.
| Agent | Task |
|-------|------|
| `test-engineer` | E2E tests: auth failures, message ordering, concurrent clients, KeyPackage exhaustion |
| `test-engineer` | Unit tests: REPL parsing edge cases, token cache expiry, state file encryption round-trip |
| `devops-engineer` | CI hardening: coverage reporting, Docker build validation in CI, `CODEOWNERS` enforcement |
### Sprint: `docs-foundation`
**Purpose:** Create production-quality documentation.
| Agent | Task |
|-------|------|
| `docs-engineer` | Create root-level `SECURITY.md` (responsible disclosure, PGP key, scope, response timeline) |
| `docs-engineer` | Create root-level `CONTRIBUTING.md` (dev setup, PR process, commit conventions, testing, review checklist) |
| `docs-engineer` | Audit and update all `docs/src/` pages for accuracy against current codebase; fix broken references |
| `docs-engineer` | Write operator deployment guide: Docker, systemd, certificate setup, monitoring, backup/restore |
### Sprint: `security-review`
**Purpose:** Final security gate before release.
| Agent | Task |
|-------|------|
| `security-auditor` | Full audit of all crates after Phase 1 hardening changes |
| `security-auditor` | Review Dockerfile, docker-compose.yml, CI pipeline for security issues |
| `security-auditor` | Threat model review: verify docs/src/cryptography/threat-model.md matches current implementation |
### Sprint: `release-prep`
**Purpose:** Prepare for first production release.
| Agent | Task |
|-------|------|
| `devops-engineer` | Set up cargo-release workflow, CHANGELOG.md generation, version tagging strategy |
| `docs-engineer` | Final README.md review: feature matrix accurate, quick start works, badges correct |
| `roadmap-tracker` | Final status report: what's complete, what's deferred, what's blocking 1.0 |
---
## Usage
```bash
# Full orchestrator mode — orchestrator delegates to the right agents
python scripts/ai_team.py "Implement Phase 1.1 unwrap removal across all crates"
# Direct agent access — bypass orchestrator for focused work
python scripts/ai_team.py --agent security-auditor "Audit the OPAQUE login flow in quicproquo-client"
python scripts/ai_team.py --agent docs-engineer "Write the operator deployment guide"
# Predefined parallel sprint — multiple agents work simultaneously
python scripts/ai_team.py --sprint audit
python scripts/ai_team.py --sprint phase1-hardening
python scripts/ai_team.py --sprint docs-foundation
# Ad-hoc parallel tasks
python scripts/ai_team.py --parallel \
"rust-server-dev: Fix rate limiting bypass in enqueue handler" \
"security-auditor: Review the rate limiting implementation"
# Discovery
python scripts/ai_team.py --list-agents
python scripts/ai_team.py --list-sprints
```
### Recommended Production Readiness Sequence
```bash
# 1. Assess current state
python scripts/ai_team.py --sprint status
# 2. Deep audit
python scripts/ai_team.py --sprint audit
# 3. Fix critical issues (code changes)
python scripts/ai_team.py --sprint phase1-hardening
# 4. Fix infrastructure
python scripts/ai_team.py --sprint phase1-infra
# 5. Build test confidence
python scripts/ai_team.py --sprint phase2-tests
# 6. Write documentation
python scripts/ai_team.py --sprint docs-foundation
# 7. Final security review (after all code changes)
python scripts/ai_team.py --sprint security-review
# 8. Prepare release
python scripts/ai_team.py --sprint release-prep
```
---
## Quality Gates
Every sprint must pass its quality gate before the next sprint begins.
| Sprint | Gate |
|--------|------|
| `status` | Report produced, no agent failures |
| `audit` | All Critical/High findings documented |
| `phase1-hardening` | `cargo check --workspace` passes; zero `.unwrap()` outside `#[cfg(test)]` |
| `phase1-infra` | `docker build -f docker/Dockerfile .` succeeds; `.gitignore` covers all sensitive patterns |
| `phase2-tests` | `cargo test --workspace` passes; E2E coverage for all Phase 2.1 items |
| `docs-foundation` | `mdbook build docs/` succeeds; `SECURITY.md` and `CONTRIBUTING.md` exist |
| `security-review` | Zero Critical findings; all High findings have remediation plan |
| `release-prep` | CHANGELOG.md exists; version tags consistent; README quick start verified |
---
## Extending the Team
To add a new agent:
1. Define it in `AGENTS` dict in `scripts/ai_team.py`
2. Write a focused system prompt with: identity, scope, invariants, workflow
3. Specify the minimal tool set (prefer read-only when possible)
4. Add it to relevant sprints
5. Document it in this file
To add a new sprint:
1. Define it in `SPRINTS` dict in `scripts/ai_team.py`
2. Ensure all tasks within the sprint touch different files/crates
3. Document the quality gate
4. Add it to the dependency chain if it has ordering requirements
---
*quicproquo AI Agent Team — v2.0 | 2026-03-03*


@@ -19,6 +19,7 @@
 - [Running the Client](getting-started/running-the-client.md)
 - [Certificate Lifecycle and CA-Signed TLS](getting-started/certificate-lifecycle.md)
 - [Docker Deployment](getting-started/docker.md)
+- [Bot SDK](getting-started/bot-sdk.md)
 - [Demo Walkthrough: Alice and Bob](getting-started/demo-walkthrough.md)
 ---
@@ -82,6 +83,7 @@
 - [Delivery Service Internals](internals/delivery-service.md)
 - [Authentication Service Internals](internals/authentication-service.md)
 - [Storage Backend](internals/storage-backend.md)
+- [Server Hooks (Plugin System)](internals/server-hooks.md)
 ---


@@ -200,6 +200,39 @@ group state to disk.
 ---
+## quicproquo-bot
+**Role:** High-level SDK for building automated agents (bots) on the
+quicproquo network. Wraps the client library into a simple polling-based API.
+### Components
+| Component | Description |
+|------------------|-------------|
+| `BotConfig` | Builder-pattern configuration: server address, credentials, TLS, state file path. |
+| `Bot` | Connected bot instance. Methods: `connect()`, `send_dm()`, `receive()`, `receive_raw()`, `resolve_user()`. |
+| `Message` | Received message struct with `sender`, `text`, and `seq` fields. |
+| `run_pipe_mode` | JSON-lines stdin/stdout interface for shell integration (`send`, `recv`, `resolve` actions). |
+### Architecture
+Each `send_dm` and `receive` call opens a fresh QUIC connection (stateless
+reconnect pattern). The bot wraps the client's `cmd_send` and
+`receive_pending_plaintexts` functions, handling MLS group state internally.
+### What this crate does NOT do
+- No server-side logic.
+- No raw MLS operations — delegates to `quicproquo-client` high-level functions.
+- No persistent QUIC connections — each operation reconnects.
+### Key dependencies
+`quicproquo-core`, `quicproquo-client`, `tokio`, `anyhow`, `tracing`,
+`serde`, `serde_json`, `hex`.
+---
 ## Other workspace crates
 | Crate | Role |


@@ -0,0 +1,233 @@
# Bot SDK
The `quicproquo-bot` crate provides a high-level SDK for building automated
agents on the quicproquo network. Bots authenticate with OPAQUE, send and
receive E2E encrypted messages through MLS, and can be driven programmatically
or via a JSON pipe interface for shell integration.
---
## Adding the dependency
```toml
[dependencies]
quicproquo-bot = { path = "../crates/quicproquo-bot" }
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
anyhow = "1"
```
---
## Quick start
```rust,no_run
use quicproquo_bot::{Bot, BotConfig};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let config = BotConfig::new("127.0.0.1:7000", "bot-user", "bot-password")
.ca_cert("server-cert.der")
.state_path("bot-state.bin");
let bot = Bot::connect(config).await?;
// Send a DM
bot.send_dm("alice", "Hello from bot!").await?;
// Poll for messages
loop {
for msg in bot.receive(5000).await? {
println!("{}: {}", msg.sender, msg.text);
if msg.text.starts_with("!echo ") {
bot.send_dm(&msg.sender, &msg.text[6..]).await?;
}
}
}
}
```
---
## Configuration
`BotConfig` uses a builder pattern. The only required arguments are the server
address, username, and password:
```rust,no_run
# use quicproquo_bot::BotConfig;
let config = BotConfig::new("127.0.0.1:7000", "my-bot", "secret123")
.ca_cert("certs/server-cert.der") // TLS CA certificate (DER format)
.server_name("my-server.example") // TLS SNI (default: "localhost")
.state_path("my-bot-state.bin") // Persistent state file
.state_password("encrypt-me") // State file encryption password
.device_id("bot-device-1"); // Device identifier
```
| Method | Default | Description |
|-------------------|-----------------------|-------------|
| `ca_cert()` | `"server-cert.der"` | Path to the server's CA certificate in DER format. |
| `server_name()` | `"localhost"` | TLS server name for certificate validation. |
| `state_path()` | `"bot-state.bin"` | Path to the bot's encrypted state file. |
| `state_password()` | None (unencrypted) | Password for encrypting the state file at rest. |
| `device_id()` | None | Device ID reported to the server in auth tokens. |
---
## Sending messages
```rust,no_run
# use quicproquo_bot::Bot;
# async fn example(bot: &Bot) -> anyhow::Result<()> {
// Send a plaintext DM — encryption is handled internally via MLS
bot.send_dm("alice", "Hello!").await?;
# Ok(())
# }
```
`send_dm` resolves the username, establishes or joins the MLS group for the DM
channel, encrypts the plaintext, and delivers it through the server. Each call
opens a fresh QUIC connection (stateless reconnect pattern).
---
## Receiving messages
```rust,no_run
# use quicproquo_bot::Bot;
# async fn example(bot: &Bot) -> anyhow::Result<()> {
// Wait up to 5 seconds for pending messages
let messages = bot.receive(5000).await?;
for msg in &messages {
println!("[seq={}] {}: {}", msg.seq, msg.sender, msg.text);
}
// For binary/non-UTF-8 content, use receive_raw
let raw_messages = bot.receive_raw(5000).await?;
for payload in &raw_messages {
println!("received {} bytes", payload.len());
}
# Ok(())
# }
```
The `Message` struct contains:
| Field | Type | Description |
|----------|----------|-------------|
| `sender` | `String` | The sender's username. |
| `text` | `String` | Decrypted plaintext content (UTF-8). |
| `seq` | `u64` | Sequence number. |
---
## Resolving users
```rust,no_run
# use quicproquo_bot::Bot;
# async fn example(bot: &Bot) -> anyhow::Result<()> {
let identity_key = bot.resolve_user("alice").await?;
println!("alice's identity key: {} bytes", identity_key.len());
# Ok(())
# }
```
---
## Identity inspection
```rust,no_run
# use quicproquo_bot::Bot;
# fn example(bot: &Bot) {
println!("username: {}", bot.username());
println!("identity key (hex): {}", bot.identity_key_hex());
let raw_key: [u8; 32] = bot.identity_key();
# }
```
---
## Pipe mode (stdin/stdout JSON lines)
For shell integration, the bot SDK supports a JSON-lines pipe interface. Each
line on stdin is a JSON command; results are written to stdout as JSON lines.
### Supported actions
**Send a message:**
```json
{"action": "send", "to": "alice", "text": "hello from pipe"}
```
Response:
```json
{"status": "ok", "action": "send"}
```
**Receive pending messages:**
```json
{"action": "recv", "timeout_ms": 5000}
```
Response:
```json
{"status": "ok", "messages": [{"sender": "peer", "text": "hi", "seq": 0}]}
```
**Resolve a username:**
```json
{"action": "resolve", "username": "alice"}
```
Response:
```json
{"status": "ok", "identity_key": "ab12cd34..."}
```
### Error responses
All actions return an error object on failure:
```json
{"error": "OPAQUE login: connection refused"}
```
### Shell examples
```bash
# Send via pipe
echo '{"action":"send","to":"alice","text":"hello"}' | my-bot-binary
# Receive via pipe
echo '{"action":"recv","timeout_ms":5000}' | my-bot-binary
# Use with jq for pretty output
echo '{"action":"recv","timeout_ms":3000}' | my-bot-binary | jq .
```
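`my-bot-binary` above stands for any program that hands its stdin/stdout to the SDK's pipe mode. A minimal sketch follows; the exact `run_pipe_mode` signature is an assumption here:
```rust,no_run
use quicproquo_bot::{run_pipe_mode, BotConfig};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let config = BotConfig::new("127.0.0.1:7000", "pipe-bot", "secret123")
        .ca_cert("server-cert.der");
    // Reads one JSON command per stdin line, writes one JSON result per stdout line.
    run_pipe_mode(config).await
}
```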
---
## Architecture notes
- **Stateless reconnect**: Each `send_dm` and `receive` call opens a fresh QUIC
connection. There is no persistent connection to manage.
- **MLS encryption**: All messages are end-to-end encrypted via MLS (RFC 9420).
The bot SDK wraps the client library's `cmd_send` and
`receive_pending_plaintexts` functions.
- **State persistence**: The bot's identity seed and MLS group state are stored
in the state file. Losing this file means losing the bot's identity.
- **Cap'n Proto !Send**: RPC calls run on a `tokio::task::LocalSet` because
`capnp-rpc` is `!Send`.
---
## Next steps
- [Running the Client](running-the-client.md) -- CLI subcommands and REPL
- [Server Hooks](../internals/server-hooks.md) -- extend the server with plugins
- [Demo Walkthrough](demo-walkthrough.md) -- step-by-step messaging scenario


@@ -0,0 +1,259 @@
# Server Hooks
The `ServerHooks` trait provides a plugin system for extending the quicproquo
server. Hooks fire at key points in the request lifecycle — message delivery,
authentication, channel creation, and message fetch — allowing you to inspect,
log, rate-limit, or reject operations without modifying server internals.
---
## Overview
```text
Client RPC request
└─ Validation (auth, rate limits, wire format)
└─ Hook fires (on_message_enqueue, on_auth, etc.)
├─ HookAction::Continue → proceed to storage/delivery
└─ HookAction::Reject("reason") → error returned to client
```
Hooks are called **synchronously** in the RPC handler path after validation
but before storage. Keep hook implementations fast — offload heavy work
(HTTP calls, disk I/O, analytics) to background tasks.
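One way to honour that, sketched here with an unbounded Tokio channel (the `MetricsForwarder` name and the fields it forwards are illustrative, not part of the server API):
```rust,ignore
use tokio::sync::mpsc::UnboundedSender;
use quicproquo_server::hooks::{HookAction, MessageEvent, ServerHooks};

/// Forwards enqueue metadata to a background task so the RPC path never blocks.
struct MetricsForwarder {
    tx: UnboundedSender<(Vec<u8>, usize)>, // (recipient_key, payload_len)
}

impl ServerHooks for MetricsForwarder {
    fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
        // send() on an unbounded channel never blocks; an error only means the
        // worker is gone, and telemetry failure must not fail delivery.
        let _ = self.tx.send((event.recipient_key.clone(), event.payload_len));
        HookAction::Continue
    }
}

// At startup, spawn the worker that does the slow part:
// let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
// tokio::spawn(async move { while let Some(ev) = rx.recv().await { /* HTTP, disk, analytics */ } });
```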
---
## The `ServerHooks` trait
```rust,ignore
pub trait ServerHooks: Send + Sync {
/// Called before a message is stored in the delivery queue.
/// Return HookAction::Reject to prevent delivery.
fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
HookAction::Continue
}
/// Called after a batch of messages is enqueued.
fn on_batch_enqueue(&self, events: &[MessageEvent]) {}
/// Called after a successful or failed login attempt.
fn on_auth(&self, event: &AuthEvent) {}
/// Called after a channel is created or looked up.
fn on_channel_created(&self, event: &ChannelEvent) {}
/// Called after messages are fetched from the delivery queue.
fn on_fetch(&self, event: &FetchEvent) {}
/// Called when a user completes OPAQUE registration.
fn on_user_registered(&self, username: &str, identity_key: &[u8]) {}
}
```
All methods have default no-op implementations. Override only the events you
care about.
---
## Hook action
```rust,ignore
pub enum HookAction {
/// Allow the operation to proceed.
Continue,
/// Reject the operation with a reason (returned to the client as an error).
Reject(String),
}
```
Currently only `on_message_enqueue` can reject operations. Other hooks are
observational (fire-and-forget).
---
## Event types
### `MessageEvent`
Fired on `enqueue` and `batch_enqueue` RPC calls.
| Field | Type | Description |
|--------------------|-------------------|-------------|
| `sender_identity` | `Option<Vec<u8>>` | Sender's 32-byte identity key (None in sealed sender mode). |
| `recipient_key` | `Vec<u8>` | Recipient's 32-byte identity key. |
| `channel_id` | `Vec<u8>` | 16-byte channel ID. |
| `payload_len` | `usize` | Length of the encrypted payload in bytes. |
| `seq` | `u64` | Server-assigned sequence number. |
### `AuthEvent`
Fired after OPAQUE login completes (success or failure).
| Field | Type | Description |
|------------------|----------|-------------|
| `username` | `String` | The username that attempted to authenticate. |
| `success` | `bool` | Whether authentication succeeded. |
| `failure_reason` | `String` | Failure reason (empty on success). |
### `ChannelEvent`
Fired after a `createChannel` RPC call.
| Field | Type | Description |
|-----------------|------------|-------------|
| `channel_id` | `Vec<u8>` | 16-byte channel ID. |
| `initiator_key` | `Vec<u8>` | Identity key of the channel initiator. |
| `peer_key` | `Vec<u8>` | Identity key of the peer. |
| `was_new` | `bool` | True if this is a newly created channel. |
### `FetchEvent`
Fired after a `fetch` or `fetchWait` RPC call.
| Field | Type | Description |
|-----------------|------------|-------------|
| `recipient_key` | `Vec<u8>` | Identity key of the fetcher. |
| `channel_id` | `Vec<u8>` | Channel ID being fetched from. |
| `message_count` | `usize` | Number of messages returned. |
---
## Built-in implementations
### `NoopHooks`
Does nothing. This is the default when no hooks are configured.
```rust,ignore
pub struct NoopHooks;
impl ServerHooks for NoopHooks {}
```
### `TracingHooks`
Logs all events via the `tracing` crate at info/debug level.
```rust,ignore
pub struct TracingHooks;
impl ServerHooks for TracingHooks {
fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
tracing::info!(
recipient_prefix = %hex_prefix(&event.recipient_key),
payload_len = event.payload_len,
seq = event.seq,
"hook: message enqueued"
);
HookAction::Continue
}
fn on_auth(&self, event: &AuthEvent) {
if event.success {
tracing::info!(username = %event.username, "hook: login success");
} else {
tracing::warn!(
username = %event.username,
reason = %event.failure_reason,
"hook: login failure"
);
}
}
// ... other methods log similarly
}
```
---
## Writing a custom hook
### Example: payload size limiter
```rust,ignore
use quicproquo_server::hooks::{ServerHooks, HookAction, MessageEvent};
struct PayloadLimiter {
max_bytes: usize,
}
impl ServerHooks for PayloadLimiter {
fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
if event.payload_len > self.max_bytes {
return HookAction::Reject(format!(
"payload too large: {} > {} bytes",
event.payload_len, self.max_bytes
));
}
HookAction::Continue
}
}
```
### Example: login auditor
```rust,ignore
use quicproquo_server::hooks::{ServerHooks, AuthEvent};
struct LoginAuditor;
impl ServerHooks for LoginAuditor {
fn on_auth(&self, event: &AuthEvent) {
if !event.success {
eprintln!(
"AUDIT: failed login for '{}': {}",
event.username, event.failure_reason
);
}
}
}
```
### Example: composing multiple hooks
```rust,ignore
use quicproquo_server::hooks::*;
struct CompositeHooks {
hooks: Vec<Box<dyn ServerHooks>>,
}
impl ServerHooks for CompositeHooks {
fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
for hook in &self.hooks {
if let HookAction::Reject(reason) = hook.on_message_enqueue(event) {
return HookAction::Reject(reason);
}
}
HookAction::Continue
}
fn on_auth(&self, event: &AuthEvent) {
for hook in &self.hooks {
hook.on_auth(event);
}
}
// ... delegate other methods similarly
}
```
---
## Important considerations
- **E2E encryption**: Message payloads are encrypted end-to-end. Hooks cannot
inspect plaintext content — they see only metadata (sender, recipient,
payload size, sequence number).
- **Performance**: Hooks run synchronously in the RPC handler. A slow hook
blocks the RPC response. Use `tokio::spawn` for async work.
- **Thread safety**: `ServerHooks` requires `Send + Sync`. Use `Arc<Mutex<_>>`
  or lock-free structures for shared mutable state (see the sketch after this list).
- **Reject semantics**: Only `on_message_enqueue` supports rejection. Other
hooks are informational — the operation proceeds regardless of what the hook
does.
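A sketch of that shared-state pattern (the counter and its keying are illustrative):
```rust,ignore
use std::collections::HashMap;
use std::sync::Mutex;
use quicproquo_server::hooks::{HookAction, MessageEvent, ServerHooks};

/// Counts enqueued messages per recipient behind a Mutex.
struct EnqueueCounter {
    counts: Mutex<HashMap<Vec<u8>, u64>>,
}

impl ServerHooks for EnqueueCounter {
    fn on_message_enqueue(&self, event: &MessageEvent) -> HookAction {
        // Never panic in a hook: a poisoned lock simply skips the count.
        if let Ok(mut counts) = self.counts.lock() {
            *counts.entry(event.recipient_key.clone()).or_insert(0) += 1;
        }
        HookAction::Continue
    }
}
```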
---
## Further reading
- [Delivery Service Internals](delivery-service.md) -- how messages flow through the server
- [Authentication Service Internals](authentication-service.md) -- OPAQUE auth flow
- [Bot SDK](../getting-started/bot-sdk.md) -- build bots that interact with the server


@@ -0,0 +1 @@
target/


@@ -0,0 +1,15 @@
# This is a standalone cdylib crate outside the main workspace.
[workspace]
[package]
name = "logging_plugin"
version = "0.1.0"
edition = "2021"
description = "Reference quicproquo server plugin: logs all hook events to stderr."
license = "MIT"
[lib]
crate-type = ["cdylib"]
[dependencies]
quicproquo-plugin-api = { path = "../../../crates/quicproquo-plugin-api" }


@@ -0,0 +1,162 @@
//! Reference quicproquo server plugin: logs all hook events to stderr.
//!
//! This plugin demonstrates every hook point in the `HookVTable` API. It
//! writes a single-line human-readable record to stderr for each server event.
//! No state is required, so `user_data` is left null.
//!
//! # Building
//!
//! ```bash
//! cargo build --release -p logging_plugin
//! # Output: target/release/liblogging_plugin.so (Linux)
//! # target/release/liblogging_plugin.dylib (macOS)
//! ```
//!
//! # Deploying
//!
//! ```bash
//! cp target/release/liblogging_plugin.so /etc/qpq/plugins/
//! qpq-server --plugin-dir /etc/qpq/plugins
//! ```
use std::ffi::c_void;
use std::slice;
use quicproquo_plugin_api::{
CAuthEvent, CChannelEvent, CFetchEvent, CMessageEvent, HookVTable, HOOK_CONTINUE, PLUGIN_OK,
};
// ── Helpers ───────────────────────────────────────────────────────────────────
fn hex_prefix(ptr: *const u8, len: usize) -> String {
if ptr.is_null() || len == 0 {
return "(none)".to_string();
}
let bytes = unsafe { slice::from_raw_parts(ptr, len.min(4)) };
bytes.iter().map(|b| format!("{:02x}", b)).collect()
}
fn str_from_raw(ptr: *const u8, len: usize) -> String {
    if ptr.is_null() || len == 0 {
        return String::new();
    }
    // Safety: the server owns the memory and it remains valid for the callback
    // duration; we copy the bytes out rather than claim a longer lifetime.
    let bytes = unsafe { slice::from_raw_parts(ptr, len) };
    std::str::from_utf8(bytes).unwrap_or("<invalid utf8>").to_string()
}
// ── Hook callbacks ────────────────────────────────────────────────────────────
unsafe extern "C" fn on_message_enqueue(
_user_data: *mut c_void,
event: *const CMessageEvent,
) -> i32 {
let e = &*event;
eprintln!(
"[qpq-plugin:logging] enqueue: recipient={} payload_len={} seq={} has_sender={}",
hex_prefix(e.recipient_key, e.recipient_key_len),
e.payload_len,
e.seq,
!e.sender_identity.is_null(),
);
HOOK_CONTINUE
}
unsafe extern "C" fn on_batch_enqueue(
_user_data: *mut c_void,
events: *const CMessageEvent,
count: usize,
) {
eprintln!("[qpq-plugin:logging] batch_enqueue: count={}", count);
let events = slice::from_raw_parts(events, count);
for (i, e) in events.iter().enumerate() {
eprintln!(
"[qpq-plugin:logging] [{}/{}] recipient={} seq={}",
i + 1,
count,
hex_prefix(e.recipient_key, e.recipient_key_len),
e.seq,
);
}
}
unsafe extern "C" fn on_auth(_user_data: *mut c_void, event: *const CAuthEvent) {
let e = &*event;
let username = str_from_raw(e.username, e.username_len);
if e.success != 0 {
eprintln!("[qpq-plugin:logging] auth: user='{}' SUCCESS", username);
} else {
let reason = str_from_raw(e.failure_reason, e.failure_reason_len);
eprintln!(
"[qpq-plugin:logging] auth: user='{}' FAILURE reason='{}'",
username, reason
);
}
}
unsafe extern "C" fn on_channel_created(
_user_data: *mut c_void,
event: *const CChannelEvent,
) {
let e = &*event;
eprintln!(
"[qpq-plugin:logging] channel_created: channel={} was_new={} initiator={}",
hex_prefix(e.channel_id, e.channel_id_len),
e.was_new != 0,
hex_prefix(e.initiator_key, e.initiator_key_len),
);
}
unsafe extern "C" fn on_fetch(_user_data: *mut c_void, event: *const CFetchEvent) {
let e = &*event;
if e.message_count > 0 {
eprintln!(
"[qpq-plugin:logging] fetch: recipient={} count={}",
hex_prefix(e.recipient_key, e.recipient_key_len),
e.message_count,
);
}
}
unsafe extern "C" fn on_user_registered(
_user_data: *mut c_void,
username: *const u8,
username_len: usize,
identity_key: *const u8,
identity_key_len: usize,
) {
let name = str_from_raw(username, username_len);
eprintln!(
"[qpq-plugin:logging] user_registered: user='{}' key={}",
name,
hex_prefix(identity_key, identity_key_len),
);
}
// ── Plugin entry point ────────────────────────────────────────────────────────
/// Called by the server once at startup.
///
/// # Safety
///
/// `vtable` must point to a zeroed `HookVTable` as provided by `qpq-server`.
#[no_mangle]
pub unsafe extern "C" fn qpq_plugin_init(vtable: *mut HookVTable) -> i32 {
if vtable.is_null() {
return -1;
}
let v = &mut *vtable;
// user_data is not needed — all callbacks are stateless.
v.user_data = std::ptr::null_mut();
v.on_message_enqueue = Some(on_message_enqueue);
v.on_batch_enqueue = Some(on_batch_enqueue);
v.on_auth = Some(on_auth);
v.on_channel_created = Some(on_channel_created);
v.on_fetch = Some(on_fetch);
v.on_user_registered = Some(on_user_registered);
// error_message and destroy not needed (no state, never rejects).
eprintln!("[qpq-plugin:logging] initialized");
PLUGIN_OK
}


@@ -0,0 +1 @@
target/


@@ -0,0 +1,17 @@
# This is a standalone cdylib crate outside the main workspace.
# It depends on quicproquo-plugin-api via a relative path.
[workspace]
[package]
name = "rate_limit_plugin"
version = "0.1.0"
edition = "2021"
description = "Reference quicproquo server plugin: per-recipient payload-size rate limiter."
license = "MIT"
# Compile as a shared library (.so / .dylib) for dynamic loading by qpq-server.
[lib]
crate-type = ["cdylib"]
[dependencies]
quicproquo-plugin-api = { path = "../../../crates/quicproquo-plugin-api" }


@@ -0,0 +1,108 @@
//! Reference quicproquo server plugin: payload-size rate limiter.
//!
//! Rejects any single message whose payload exceeds `MAX_PAYLOAD_BYTES`. In a
//! real deployment you would extend this with per-sender token-bucket logic,
//! but this example intentionally stays simple so the plugin API surface is
//! easy to follow.
//!
//! # Building
//!
//! ```bash
//! cargo build --release -p rate_limit_plugin
//! # Output: target/release/librate_limit_plugin.so (Linux)
//! # target/release/librate_limit_plugin.dylib (macOS)
//! ```
//!
//! # Deploying
//!
//! Copy the resulting `.so` / `.dylib` into your plugin directory and start
//! the server with `--plugin-dir /path/to/plugins`.
//!
//! # Config (via TOML)
//!
//! ```toml
//! plugin_dir = "/etc/qpq/plugins"
//! ```
use std::ffi::c_void;
use quicproquo_plugin_api::{CMessageEvent, HookVTable, HOOK_CONTINUE, HOOK_REJECT, PLUGIN_OK};
/// Maximum allowed encrypted payload size in bytes.
const MAX_PAYLOAD_BYTES: usize = 512 * 1024; // 512 KiB
/// Plugin-private state, heap-allocated and passed as `user_data`.
struct PluginState {
/// Last rejection reason as a null-terminated C string owned by us.
last_error: Option<std::ffi::CString>,
}
impl PluginState {
fn new() -> *mut Self {
Box::into_raw(Box::new(Self { last_error: None }))
}
fn set_error(&mut self, msg: &str) {
        // Replace previous error (if any). CString::new fails on interior NUL
        // bytes (mapped to None by `.ok()`), but our format strings never contain them.
self.last_error = std::ffi::CString::new(msg).ok();
}
}
// ── Hook callbacks ────────────────────────────────────────────────────────────
unsafe extern "C" fn on_message_enqueue(
user_data: *mut c_void,
event: *const CMessageEvent,
) -> i32 {
let state = &mut *(user_data as *mut PluginState);
let payload_len = (*event).payload_len;
if payload_len > MAX_PAYLOAD_BYTES {
state.set_error(&format!(
"payload {} bytes exceeds limit {} bytes",
payload_len, MAX_PAYLOAD_BYTES
));
return HOOK_REJECT;
}
HOOK_CONTINUE
}
unsafe extern "C" fn error_message(user_data: *mut c_void) -> *const u8 {
let state = &*(user_data as *mut PluginState);
match &state.last_error {
Some(s) => s.as_ptr() as *const u8,
None => std::ptr::null(),
}
}
unsafe extern "C" fn destroy(user_data: *mut c_void) {
if !user_data.is_null() {
drop(Box::from_raw(user_data as *mut PluginState));
}
}
// ── Plugin entry point ────────────────────────────────────────────────────────
/// Called by the server once at startup. Fill `vtable` with function pointers
/// and return `PLUGIN_OK` (0) on success.
///
/// # Safety
///
/// `vtable` must be a valid pointer to a zeroed `HookVTable` as provided by
/// `qpq-server`. Do not call from any other context.
#[no_mangle]
pub unsafe extern "C" fn qpq_plugin_init(vtable: *mut HookVTable) -> i32 {
if vtable.is_null() {
return -1;
}
let vtable = &mut *vtable;
vtable.user_data = PluginState::new() as *mut c_void;
vtable.on_message_enqueue = Some(on_message_enqueue);
vtable.error_message = Some(error_message);
vtable.destroy = Some(destroy);
PLUGIN_OK
}
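
As the module docs note, a production build would layer per-sender token-bucket logic on top of the fixed size check. A standalone sketch of that bucket follows — illustrative only; how the sender's identity bytes are obtained from the event struct is plugin-API-specific and not shown here:

```rust
use std::collections::HashMap;
use std::time::Instant;

struct Bucket {
    tokens: f64,
    last_refill: Instant,
}

/// Per-sender byte-rate limiter: `capacity` is the burst budget in bytes,
/// `refill_per_sec` the sustained rate in bytes per second.
struct RateLimiter {
    buckets: HashMap<Vec<u8>, Bucket>,
    capacity: f64,
    refill_per_sec: f64,
}

impl RateLimiter {
    /// Returns true if `sender` may enqueue `payload_len` more bytes now.
    fn allow(&mut self, sender: &[u8], payload_len: usize) -> bool {
        let (capacity, rate) = (self.capacity, self.refill_per_sec);
        let now = Instant::now();
        let bucket = self.buckets.entry(sender.to_vec()).or_insert(Bucket {
            tokens: capacity,
            last_refill: now,
        });
        // Refill proportionally to elapsed time, capped at the burst budget.
        let elapsed = now.duration_since(bucket.last_refill).as_secs_f64();
        bucket.tokens = (bucket.tokens + elapsed * rate).min(capacity);
        bucket.last_refill = now;
        if bucket.tokens >= payload_len as f64 {
            bucket.tokens -= payload_len as f64;
            true
        } else {
            false
        }
    }
}
```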

View File

@@ -20,8 +20,10 @@ interface NodeService {
# channelId : Optional channel identifier (empty for default). A 16-byte UUID
# is recommended for 1:1 channels.
# version : Schema/wire version. Must be 1.
# Returns the monotonically increasing per-inbox sequence number assigned to this message,
# plus a cryptographic delivery proof (96 bytes: 32-byte SHA-256 preimage || 64-byte Ed25519
# signature). Old clients that do not read deliveryProof are unaffected (Cap'n Proto optional).
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data, version :UInt16, auth :Auth) -> (seq :UInt64, deliveryProof :Data);
# Fetch and drain all queued payloads for the recipient.
# limit: max number of messages to return (0 = fetch all).
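
On the client side, checking such a proof is mechanical once the 96 bytes are split. A minimal sketch, assuming `ed25519-dalek` 2.x and that the signature covers the 32-byte preimage — the authoritative layout is whatever the core verification utility implements:

```rust
use ed25519_dalek::{Signature, VerifyingKey};

/// Split and check a 96-byte delivery proof: 32-byte preimage || 64-byte signature.
fn check_delivery_proof(proof: &[u8], server_key: &VerifyingKey) -> bool {
    if proof.len() != 96 {
        return false; // legacy server or malformed proof
    }
    let (preimage, sig) = proof.split_at(32);
    let Ok(sig) = <[u8; 64]>::try_from(sig) else {
        return false;
    };
    // verify_strict also rejects small-order and non-canonical points.
    server_key
        .verify_strict(preimage, &Signature::from_bytes(&sig))
        .is_ok()
}
```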
@@ -89,7 +91,11 @@ interface NodeService {
# Resolve a username to its Ed25519 identity key (32 bytes).
# Returns empty Data if the username is not registered.
# inclusionProof : bincode-serialised InclusionProof from quicproquo-kt, proving the
# (username, identityKey) binding is in the server's append-only Merkle log.
# Empty when the log entry is not yet available (e.g. legacy server or new registration
# that has not been committed to the log). Clients should verify when non-empty.
resolveUser @19 (username :Text, auth :Auth) -> (identityKey :Data, inclusionProof :Data);
# Reverse lookup: resolve an Ed25519 identity key to the registered username.
# Returns empty Text if the identity key is not associated with any user.
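
For the inclusion proof, the verification loop is the standard RFC 9162 one. A sketch, assuming the deserialised `InclusionProof` yields a leaf index, the tree size, and an audit path of sibling hashes (SHA-256, with the usual 0x01 interior-node domain separation):

```rust
use sha2::{Digest, Sha256};

/// Interior node hash per RFC 9162: HASH(0x01 || left || right).
fn interior(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut h = Sha256::new();
    h.update([0x01]);
    h.update(left);
    h.update(right);
    h.finalize().into()
}

/// Recompute the root from a leaf hash and its audit path (RFC 9162 §2.1.3.2).
fn verify_inclusion(
    leaf_hash: [u8; 32],
    leaf_index: u64,
    tree_size: u64,
    audit_path: &[[u8; 32]],
    expected_root: &[u8; 32],
) -> bool {
    if leaf_index >= tree_size {
        return false;
    }
    let (mut fnode, mut snode) = (leaf_index, tree_size - 1);
    let mut hash = leaf_hash;
    for sibling in audit_path {
        if snode == 0 {
            return false; // path is longer than the tree is deep
        }
        if fnode & 1 == 1 || fnode == snode {
            hash = interior(sibling, &hash); // current node is a right child
            if fnode & 1 == 0 {
                while fnode & 1 == 0 && fnode != 0 {
                    fnode >>= 1;
                    snode >>= 1;
                }
            }
        } else {
            hash = interior(&hash, sibling); // current node is a left child
        }
        fnode >>= 1;
        snode >>= 1;
    }
    snode == 0 && hash == *expected_root
}
```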

View File

@@ -4,6 +4,7 @@
quicproquo AI Team
==================
A multi-agent Claude team specialised for the quicproquo Rust workspace.
Agents cover development, security, testing, documentation, and infrastructure.
Usage:
python scripts/ai_team.py "<task>" # orchestrator
@@ -12,6 +13,7 @@ Usage:
python scripts/ai_team.py --parallel \\
"rust-server-dev: Fix unwrap() in server" \\
"security-auditor: Audit quicproquo-core" # ad-hoc parallel
python scripts/ai_team.py --pipeline # full production readiness pipeline
python scripts/ai_team.py --list-agents
python scripts/ai_team.py --list-sprints
@@ -19,6 +21,8 @@ Requires:
pip install claude-agent-sdk
The ANTHROPIC_API_KEY environment variable must be set.
Team specification: docs/AGENT-TEAM.md
"""
import argparse
@@ -292,6 +296,86 @@ After writing tests, run them with Bash and report:
tools=["Read", "Glob", "Grep", "Edit", "Write", "Bash"], tools=["Read", "Glob", "Grep", "Edit", "Write", "Bash"],
), ),
"devops-engineer": AgentDefinition(
description=(
"Infrastructure and deployment engineer for quicproquo. Owns Docker, CI/CD "
"(GitHub Actions), deployment configs, cross-compilation, monitoring setup, "
"release automation, and binary size optimisation. Edits docker/, .github/, "
"docker-compose.yml, and infrastructure scripts."
),
prompt=f"""{PROJECT_CONTEXT}
You are the **DevOps Engineer** for quicproquo.
You own: `docker/`, `.github/`, `docker-compose.yml`, deployment configs, CI pipelines.
Responsibilities:
- Docker image builds: multi-stage, minimal final image, non-root user, security hardening.
- GitHub Actions CI: build matrix, test parallelism, caching, artifact publishing.
- Release automation: cargo-release workflow, CHANGELOG generation, version tagging.
- Cross-compilation: musl static builds for x86_64, armv7, aarch64 (OpenWrt targets).
- Monitoring: Prometheus metrics endpoint stub, health check endpoint.
- Infrastructure-as-code: docker-compose for dev/staging, systemd unit files.
Before any edit:
1. Read the target file in full.
2. Check ROADMAP.md Phase 1.3, 1.4, 2.3 for infrastructure items.
3. Test Docker builds with `docker build -f docker/Dockerfile .`
Quality gates:
- Docker image builds successfully.
- CI pipeline syntax is valid (check with `act --dryrun` if available).
- No secrets in Dockerfile ARG/ENV, no running as root in final stage.
- `.gitignore` covers all sensitive file patterns (*.der, *.pem, *.db, *.bin, *.ks).
""",
tools=["Read", "Glob", "Grep", "Edit", "Write", "Bash"],
),
"docs-engineer": AgentDefinition(
description=(
"Technical writer for quicproquo. Writes and maintains user guides, operator "
"documentation, API references, architecture docs, SECURITY.md, CONTRIBUTING.md, "
"and the mdBook site in docs/. Ensures all public APIs have doc comments. "
"Edits docs/, README.md, and inline doc comments only."
),
prompt=f"""{PROJECT_CONTEXT}
You are the **Documentation Engineer** for quicproquo.
You own: `docs/`, `README.md`, `CONTRIBUTING.md`, `SECURITY.md`, and inline `///` doc
comments on public API items.
Documentation tiers (in priority order):
1. **User docs** — Getting started, installation, REPL commands, configuration reference.
2. **Operator docs** — Deployment guide (Docker, systemd), certificate setup, backup/restore,
monitoring, operational runbook, troubleshooting.
3. **Developer docs** — Architecture overview, crate responsibilities, contribution guide,
coding standards, testing guide, PR review checklist.
4. **Protocol docs** — Wire format reference, Cap'n Proto schema docs, MLS integration,
Noise transport spec, federation protocol.
5. **Security docs** — Threat model, trust boundaries, key lifecycle, responsible disclosure
policy, audit report summaries.
Before any edit:
1. Read the target file and any related source code to ensure accuracy.
2. Check the mdBook structure in `docs/book.toml` and `docs/src/SUMMARY.md`.
3. Verify code examples compile (`cargo test --doc` for inline examples).
Quality gates:
- `mdbook build docs/` succeeds without warnings.
- All internal links resolve (no broken cross-references).
- No stale information — verify claims against current source code.
- Spelling and grammar are correct.
Style:
- Write for an audience of experienced developers who may not know Rust.
- Use active voice, present tense.
- Include code examples where they clarify usage.
- Cross-reference related docs sections with relative links.
""",
tools=["Read", "Glob", "Grep", "Edit", "Write", "Bash"],
),
"roadmap-tracker": AgentDefinition( "roadmap-tracker": AgentDefinition(
description=( description=(
"Reads ROADMAP.md and the codebase to determine: which milestones are complete, " "Reads ROADMAP.md and the codebase to determine: which milestones are complete, "
@@ -314,10 +398,10 @@ Steps:
Output format (Markdown):
## Roadmap Status Report
### Completed
- Phase X, item Y: ...
### In Progress
- Phase X, item Y: partial — what exists vs what's missing.
### Next Actionable Tasks (prioritised)
@@ -422,8 +506,193 @@ SPRINTS: dict[str, list[tuple[str, str]]] = {
"key material, any new logging that might leak secrets, and any new external inputs that " "key material, any new logging that might leak secrets, and any new external inputs that "
"lack validation. Produce a concise finding report."), "lack validation. Produce a concise finding report."),
], ],
# ── Documentation sprints ─────────────────────────────────────────────────
"docs-foundation": [
("docs-engineer",
"Create a root-level SECURITY.md file for quicproquo. Include: "
"(1) Responsible disclosure policy — where to report vulnerabilities (email, PGP key if available). "
"(2) Scope — what's covered (server, client, core crypto, protocol). "
"(3) Response timeline — acknowledge within 48h, triage within 7 days, fix within 30 days for critical. "
"(4) Security contact — project maintainer contact info. "
"(5) Out-of-scope — social engineering, DoS against test instances, etc. "
"Read existing docs/SECURITY-AUDIT.md for context on known security posture. "
"Keep it concise and professional. Follow the format used by major open-source crypto projects."),
("docs-engineer",
"Create a root-level CONTRIBUTING.md file for quicproquo. Read the existing guidance in "
"docs/src/contributing/coding-standards.md and docs/src/contributing/testing.md first. "
"Include: (1) Development setup (Rust toolchain, Cap'n Proto compiler, SQLCipher). "
"(2) Building the project (cargo build --workspace, feature flags). "
"(3) Running tests (cargo test --workspace, E2E with --test-threads 1). "
"(4) PR process (branch naming, conventional commits, review checklist). "
"(5) Coding standards summary (link to full docs). "
"(6) Security requirements for contributions (no unwrap on crypto, zeroize, etc). "
"Keep it actionable — a new contributor should be able to submit a PR after reading this."),
("docs-engineer",
"Write a comprehensive operator deployment guide at docs/src/getting-started/deployment.md. "
"Read the existing docs/src/getting-started/ pages and docker/Dockerfile first. "
"Cover: (1) Docker deployment (docker-compose, volume mounts, env vars). "
"(2) Bare-metal deployment (systemd unit file example, user/group setup). "
"(3) TLS certificate setup (self-signed for dev, Let's Encrypt for prod). "
"(4) Configuration reference (all QPQ_* environment variables). "
"(5) Backup and restore (SQLite/SQLCipher database, key material). "
"(6) Monitoring (structured log output, health checks). "
"(7) Troubleshooting common issues. "
"Update docs/src/SUMMARY.md to include the new page if needed."),
("docs-engineer",
"Audit all existing docs/src/ pages for accuracy against the current codebase. "
"Read each .md file in docs/src/ and cross-reference claims against actual source code. "
"Fix: (1) Stale API references (function names, struct names that changed). "
"(2) Broken internal links between docs pages. "
"(3) Outdated architecture descriptions (e.g. references to MessagePack, old ALPN strings). "
"(4) Missing entries in docs/src/SUMMARY.md for pages that exist. "
"Produce a list of all changes made and any issues you couldn't fix."),
],
"docs-api": [
("docs-engineer",
"Ensure every public API item in quicproquo-core has a doc comment (/// or //!). "
"Read crates/quicproquo-core/src/lib.rs to find all pub exports. For each pub fn, "
"pub struct, pub enum, and pub trait: check if it has a doc comment. If missing, "
"read the implementation to understand what it does, then add a concise doc comment "
"with: one-line summary, parameters, return value, errors, and a short example where "
"appropriate. Run `cargo doc -p quicproquo-core --no-deps` to verify."),
("docs-engineer",
"Document all Cap'n Proto schemas in schemas/. For each .capnp file (auth.capnp, "
"delivery.capnp, federation.capnp, node.capnp): read the schema and the Rust "
"implementation that uses it. Write or update docs/src/wire-format/ pages with: "
"(1) Purpose of each interface. (2) Method signatures with parameter semantics. "
"(3) Error conditions. (4) Example message flows (e.g. auth flow, message send flow). "
"Ensure docs/src/wire-format/overview.md links to all sub-pages."),
],
# ── Infrastructure sprints ────────────────────────────────────────────────
"infra-hardening": [
("devops-engineer",
"Fix the Dockerfile at docker/Dockerfile for production readiness. Read it first. "
"Changes needed: (1) Create a dedicated non-root user 'qpq' (not nobody) with a "
"specific UID/GID. (2) Set QPQ_DATA_DIR=/var/lib/qpq with correct ownership. "
"(3) Handle the excluded p2p crate correctly in workspace build. "
"(4) Add HEALTHCHECK instruction. (5) Use specific base image tags (not :latest). "
"(6) Ensure COPY commands don't pull in .git, target/, logs/, or test data. "
"Test with: docker build -f docker/Dockerfile ."),
("devops-engineer",
"Harden .gitignore at project root. Read the current .gitignore first. Add missing "
"patterns: data/, *.der, *.pem, *.db, *.db-shm, *.db-wal, *.bin, *.ks, "
"qpq-state.*, logs/ai_team/, .env, .env.*, *.key. "
"Verify no sensitive files are already tracked: run git ls-files for each pattern. "
"If any are tracked, report them (do NOT remove from git without confirmation)."),
("devops-engineer",
"Enhance CI pipeline at .github/workflows/ci.yml. Read it first. Add or verify: "
"(1) cargo fmt check passes. (2) cargo clippy --workspace -- -D warnings. "
"(3) cargo test --workspace (with --test-threads 1 for E2E). "
"(4) cargo deny check runs on every PR. (5) cargo audit as blocking check. "
"(6) Docker build validation job (docker build -f docker/Dockerfile .). "
"(7) Rust cache action for faster builds. (8) Matrix for stable + nightly Rust. "
"Also check .github/CODEOWNERS is correctly configured for crypto paths."),
],
# ── Security sprints ──────────────────────────────────────────────────────
"security-full": [
("security-auditor",
"Perform a FULL security audit of the entire quicproquo codebase. Read every .rs file "
"in crates/quicproquo-core/src/, crates/quicproquo-server/src/, and "
"crates/quicproquo-client/src/. Check every file for ALL of: "
"(1) .unwrap()/.expect() outside #[cfg(test)] on crypto, I/O, lock, or parse operations. "
"(2) Key material types missing Zeroize/ZeroizeOnDrop. "
"(3) Secret bytes (keys, passwords, tokens, nonces) potentially reaching tracing/log/println. "
"(4) Non-constant-time comparisons on auth tags, tokens, MACs, or passwords. "
"(5) panic!/unreachable! in production paths. "
"(6) unsafe blocks without // SAFETY: documentation. "
"(7) Missing input validation on RPC boundaries (data from network). "
"(8) Race conditions in shared state (DashMap, Mutex, RwLock). "
"(9) Replay attack vectors in message delivery. "
"(10) Timing side channels in OPAQUE or token validation. "
"Produce a prioritised finding report: Critical > High > Medium > Low > Informational. "
"Each finding must include: file:line, description, attack scenario, remediation."),
("security-auditor",
"Audit infrastructure security. Read docker/Dockerfile, docker-compose.yml, "
".github/workflows/ci.yml, and all files in scripts/. Check: "
"(1) Dockerfile: running as root? secrets in ENV/ARG? base image pinned? "
"(2) docker-compose: volumes expose host paths? ports exposed unnecessarily? "
"(3) CI: secrets handled correctly? artifact permissions? supply chain attacks? "
"(4) Scripts: command injection? path traversal? unsafe eval? "
"(5) Dependencies: check deny.toml config, look for unmaintained/yanked crates. "
"Produce a separate infrastructure security report."),
("security-auditor",
"Review the threat model at docs/src/cryptography/threat-model.md against the current "
"implementation. Read the threat model doc, then verify each claim: "
"(1) Are the stated trust boundaries correctly implemented in code? "
"(2) Does the OPAQUE flow match the documented auth model? "
"(3) Is the Noise_XX handshake configured as documented (XX pattern, not IK/KK)? "
"(4) Does the MLS integration follow RFC 9420 as claimed? "
"(5) Is the hybrid KEM combiner implemented as documented (HKDF-SHA256 with correct info string)? "
"(6) Are there attack vectors NOT covered by the threat model? "
"Produce a threat model gap analysis report."),
],
"security-review": [
("security-auditor",
"Post-change security review. Read all modified files from the most recent work. "
"Focus on: any new .unwrap()/.expect() introduced, new code paths handling key material, "
"new logging that might leak secrets, new external inputs lacking validation, and "
"any new unsafe blocks. Compare against the engineering standards in master-prompt.md. "
"Produce a concise pass/fail report with findings."),
("roadmap-tracker",
"Quick progress check after recent changes. Read ROADMAP.md and check which Phase 1 "
"and Phase 2 items have been completed by the recent work. Update the status report "
"with: items newly completed, items still in progress, next priorities."),
],
# ── Release preparation ───────────────────────────────────────────────────
"release-prep": [
("devops-engineer",
"Prepare release infrastructure. Read Cargo.toml (workspace root) and all crate "
"Cargo.toml files. (1) Verify version numbers are consistent across all crates. "
"(2) Create or update CHANGELOG.md at project root — read git log for recent commits "
"and categorise by: Added, Changed, Fixed, Security. Follow keepachangelog.com format. "
"(3) Verify docker/Dockerfile builds successfully with release profile. "
"(4) Check that cargo package -p quicproquo-server would succeed (dry run). "
"(5) Verify .github/workflows/ci.yml has a release/tag-triggered job if applicable."),
("docs-engineer",
"Final documentation review for release readiness. "
"(1) Verify README.md: feature matrix matches actual implementation, quick start "
"instructions work, crate layout is accurate, all badges are correct. "
"(2) Verify docs/src/getting-started/ pages are up to date. "
"(3) Check that SECURITY.md and CONTRIBUTING.md exist and are accurate. "
"(4) Run mdbook build docs/ and verify no warnings. "
"(5) Produce a docs readiness report: pass/fail with specific issues found."),
("roadmap-tracker",
"Final pre-release status report. Read ROADMAP.md completely. Classify every item as: "
"Complete (implemented + tested), Deferred (not blocking release), or Blocking (must fix "
"before release). Focus on Phase 1 (Production Hardening) — all items must be Complete "
"or have documented mitigations. Produce a release readiness assessment."),
],
}
# ── Production readiness pipeline ─────────────────────────────────────────────
# Ordered list of sprints that form the full production readiness path.
# Each sprint must pass its quality gate before the next begins.
# Sprints within a step run in parallel; steps run sequentially.
PIPELINE: list[tuple[str, str]] = [
("status", "Baseline: assess current state and recent security posture"),
("audit", "Deep dive: full security audit + detailed roadmap analysis"),
("phase1-hardening", "Code: eliminate crash paths across all crates (parallel by crate)"),
("phase1-infra", "Infra: fix Dockerfile, .gitignore, design TLS lifecycle"),
("infra-hardening", "Infra: CI hardening, Docker production config, .gitignore completion"),
("phase2-tests", "Tests: E2E coverage, unit tests for untested paths"),
("docs-foundation", "Docs: SECURITY.md, CONTRIBUTING.md, deployment guide, accuracy audit"),
("docs-api", "Docs: public API doc comments, Cap'n Proto schema documentation"),
("security-full", "Security: comprehensive audit of all code + infra + threat model"),
("security-review", "Gate: post-change security review + progress check"),
("release-prep", "Release: changelog, version consistency, final docs review"),
]
# ── Orchestrator system prompt ─────────────────────────────────────────────────
@@ -433,28 +702,40 @@ You are the **Orchestrator** for the quicproquo AI development team.
Your team of specialist subagents:
| Agent | Role | Edits? |
|-------|------|--------|
| rust-architect | Architecture design, ADRs, design reviews | No |
| rust-core-dev | quicproquo-core: crypto, MLS, Noise codec | Yes |
| rust-server-dev | quicproquo-server: AS, DS, RPC server | Yes |
| rust-client-dev | quicproquo-client: CLI, REPL, local state | Yes |
| security-auditor | Security review: code, infra, threat model | No |
| test-engineer | Unit, integration, E2E tests | Yes (tests) |
| devops-engineer | Docker, CI/CD, deployment, monitoring | Yes (infra) |
| docs-engineer | User/operator/developer/protocol docs | Yes (docs) |
| roadmap-tracker | Roadmap progress assessment | No |
Parallelisation rules:
- Agents that own DIFFERENT crates or concern areas can run in parallel.
- rust-core-dev, rust-server-dev, rust-client-dev ALWAYS run in parallel (different crates).
- security-auditor runs AFTER code-changing agents complete (reads their output).
- test-engineer runs AFTER code-changing agents complete (tests their changes).
- docs-engineer and devops-engineer can run in parallel with each other and with dev agents.
- roadmap-tracker can run in parallel with anything (read-only).
Workflow:
1. Read the task carefully.
2. Decide which agent(s) are needed. For multi-step tasks, sequence them logically.
3. Maximise parallelism: launch agents that touch different files simultaneously.
4. Call each required agent with a precise, scoped prompt.
5. Synthesise the agents' outputs into a final report or code deliverable.
6. Always end with: "Next suggested task: ..." based on the ROADMAP.
Rules:
- Read master-prompt.md and ROADMAP.md before delegating significant tasks.
- Do NOT delegate everything to one agent — split by crate/concern.
- If a task touches security, always invoke security-auditor AFTER code changes.
- If a task adds/modifies functionality, always invoke test-engineer LAST.
- docs-engineer and devops-engineer work independently — launch them in parallel.
- Keep your synthesis concise — prefer structured output (headers, bullet lists).
"""
@@ -673,6 +954,17 @@ def build_parser() -> argparse.ArgumentParser:
action="store_true", action="store_true",
help="List predefined sprints and exit", help="List predefined sprints and exit",
) )
parser.add_argument(
"--pipeline",
action="store_true",
help="Run the full production readiness pipeline (all sprints in dependency order)",
)
parser.add_argument(
"--pipeline-from",
metavar="SPRINT",
default=None,
help="Start the pipeline from a specific sprint (skip earlier steps)",
)
parser.add_argument(
"--max-turns",
type=int,
@@ -707,6 +999,12 @@ def list_sprints() -> None:
print(f" [{agent}] {preview}") print(f" [{agent}] {preview}")
print() print()
print("Production readiness pipeline (--pipeline):\n")
for i, (sprint_name, description) in enumerate(PIPELINE, 1):
count = len(SPRINTS[sprint_name])
print(f" {i:2d}. {sprint_name:<20s} {count} agent(s) — {description}")
print()
def parse_parallel_args(args: list[str]) -> list[tuple[str, str]]:
"""
@@ -733,6 +1031,62 @@ def parse_parallel_args(args: list[str]) -> list[tuple[str, str]]:
return pairs
# ── Pipeline runner ────────────────────────────────────────────────────────────
async def run_pipeline(
max_turns: int,
verbose: bool,
start_from: str | None = None,
) -> None:
"""
Run the full production readiness pipeline: all sprints in dependency order.
Each sprint runs its agents in parallel. Sprints run sequentially because
later sprints depend on earlier ones (e.g. security-review after code changes).
If start_from is set, skip all sprints before that one.
"""
pipeline = list(PIPELINE)
if start_from:
names = [name for name, _ in PIPELINE]
if start_from not in names:
print(f"ERROR: unknown sprint {start_from!r} in pipeline.")
print(f" Valid: {', '.join(names)}")
sys.exit(1)
idx = names.index(start_from)
pipeline = pipeline[idx:]
print(f"\n Skipping {idx} sprint(s), starting from: {start_from}\n")
total = len(pipeline)
print(f"\n{'=' * 70}")
print(f" quicproquo AI Team — Production Readiness Pipeline")
print(f" Steps: {total} | Max turns per agent: {max_turns}")
print(f"{'=' * 70}")
for i, (name, desc) in enumerate(pipeline, 1):
count = len(SPRINTS[name])
print(f" {i:2d}. [{name}] {count} agent(s) — {desc}")
print(f"{'=' * 70}\n")
for step, (sprint_name, description) in enumerate(pipeline, 1):
print(f"\n{'#' * 70}")
print(f" PIPELINE STEP {step}/{total}: {sprint_name}")
print(f" {description}")
print(f"{'#' * 70}\n")
agent_tasks = SPRINTS[sprint_name]
await run_parallel(
agent_tasks, max_turns, verbose, sprint_name=sprint_name
)
if step < total:
print(f"\n Step {step}/{total} complete. Proceeding to next step...\n")
print(f"\n{'=' * 70}")
print(f" PIPELINE COMPLETE — {total} steps executed")
print(f" Review outputs in: logs/ai_team/")
print(f"{'=' * 70}\n")
# ── Entry point ────────────────────────────────────────────────────────────────
async def main() -> None:
@@ -752,7 +1106,12 @@ async def main() -> None:
sys.exit(1)
try:
if args.pipeline or args.pipeline_from:
await run_pipeline(
args.max_turns, args.verbose, start_from=args.pipeline_from
)
elif args.sprint:
agent_tasks = SPRINTS[args.sprint]
await run_parallel(
agent_tasks, args.max_turns, args.verbose, sprint_name=args.sprint