Compare commits
79 Commits
cursor/clo
...
v2
| Author | SHA1 | Date | |
|---|---|---|---|
| 501f5a577c | |||
| 5cc37cc88b | |||
| 1d59a052ad | |||
| 12717979ba | |||
| 3f5a3a5ac8 | |||
| 511fc7822e | |||
| f57dda3f36 | |||
| cbb76af6b1 | |||
| 2d56824834 | |||
| 496f83067a | |||
| 1768f85258 | |||
| f667281831 | |||
| 372dd67a3b | |||
| 49e8e066d7 | |||
| f4621b3425 | |||
| c401caec60 | |||
| 885cce0d7d | |||
| 913f6faaf3 | |||
| e93a38243f | |||
| 91c5495ab7 | |||
| b94248b3b6 | |||
| a90020fe89 | |||
| fd1accc6dd | |||
| 799aab68fe | |||
| eaca24397b | |||
| 12b19b6931 | |||
| 5b6d8209f0 | |||
| a1f0dbc514 | |||
| 5a66c2e954 | |||
| 4013b223ff | |||
| 3a42130518 | |||
| c8c5f96ecd | |||
| e5329ee8e5 | |||
| e3dfc43e2c | |||
| 7bcfbf175c | |||
| 75f11cb76b | |||
| a3f67aca45 | |||
| 950f477842 | |||
| 3393514911 | |||
| a8ed3c4356 | |||
| cab03bd3f7 | |||
| 99f9abe9ed | |||
| 029c701780 | |||
| 4d62a837a5 | |||
| 67983c7a40 | |||
| 011ff541bb | |||
| 918da0c23d | |||
| 6b757f8d65 | |||
| d118fdbddf | |||
| 6273ab668d | |||
| f09dbe10ce | |||
| ff93275dc1 | |||
| a5864127d1 | |||
| 394199b19b | |||
| 4694a3098b | |||
| 4454458e38 | |||
| 5a6d9ae7f4 | |||
| 9244e80ec7 | |||
| 1b61b7ee8f | |||
| 28ceaaf072 | |||
| 65ff26235e | |||
| fd21ea625c | |||
| 3350d765e5 | |||
| 81d5e2e590 | |||
| db46b72f58 | |||
| 9ab306d891 | |||
| 612b06aa8e | |||
| dc4e4e49a0 | |||
| b6483dedbc | |||
| d7e530435f | |||
| c8398d6cb7 | |||
| e24497bf90 | |||
| c2762f93f6 | |||
| 5b9df61194 | |||
| 9525a3c565 | |||
| 853ca4fec0 | |||
| 553de3a2b7 | |||
| 4c1e4683e3 | |||
| 750b794342 |
1
.cursor/.gitignore
vendored
1
.cursor/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
plans/
|
||||
20
.env.example
Normal file
20
.env.example
Normal file
@@ -0,0 +1,20 @@
|
||||
# quicproquo Production Environment Variables
|
||||
# Copy this file to .env and fill in the values.
|
||||
|
||||
# Server auth token (required, >= 16 characters)
|
||||
QPQ_AUTH_TOKEN=
|
||||
|
||||
# SQLCipher database encryption key (required for store_backend=sql)
|
||||
QPQ_DB_KEY=
|
||||
|
||||
# Ports (defaults shown)
|
||||
QPQ_LISTEN_PORT=7000
|
||||
QPQ_WS_PORT=9000
|
||||
|
||||
# Optional features
|
||||
QPQ_SEALED_SENDER=false
|
||||
QPQ_REDACT_LOGS=true
|
||||
QPQ_WS_LISTEN=
|
||||
|
||||
# Grafana admin password
|
||||
GRAFANA_ADMIN_PASSWORD=changeme
|
||||
40
.github/CODEOWNERS
vendored
40
.github/CODEOWNERS
vendored
@@ -1,15 +1,37 @@
|
||||
# Code owners for quicnprotochat. PRs require review from owners.
|
||||
# Code owners for quicproquo. PRs require review from owners.
|
||||
# See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||
# Replace 'maintainers' with your GitHub user/team handle.
|
||||
|
||||
# Default owners for everything in the repo
|
||||
* @maintainers
|
||||
|
||||
# Crate-specific (uncomment and add handles when you have designated owners)
|
||||
# /crates/quicnprotochat-core/ @owner1
|
||||
# /crates/quicnprotochat-proto/ @owner1
|
||||
# /crates/quicnprotochat-server/ @owner1
|
||||
# /crates/quicnprotochat-client/ @owner1
|
||||
# /crates/quicnprotochat-p2p/ @owner1
|
||||
# /schemas/ @owner1
|
||||
# /docs/ @owner1
|
||||
# Security-critical: crypto primitives, MLS, hybrid KEM
|
||||
/crates/quicproquo-core/ @maintainers
|
||||
|
||||
# Wire format: protobuf definitions, Cap'n Proto schemas
|
||||
/crates/quicproquo-proto/ @maintainers
|
||||
/proto/ @maintainers
|
||||
|
||||
# Auth and server-side domain logic
|
||||
/crates/quicproquo-server/ @maintainers
|
||||
|
||||
# Client SDK: auth, conversation store, messaging pipeline
|
||||
/crates/quicproquo-sdk/ @maintainers
|
||||
|
||||
# CLI/TUI client
|
||||
/crates/quicproquo-client/ @maintainers
|
||||
|
||||
# RPC framework: framing, middleware, QUIC transport
|
||||
/crates/quicproquo-rpc/ @maintainers
|
||||
|
||||
# Key transparency
|
||||
/crates/quicproquo-kt/ @maintainers
|
||||
|
||||
# Plugin ABI (no_std C-ABI boundary)
|
||||
/crates/quicproquo-plugin-api/ @maintainers
|
||||
|
||||
# P2P transport
|
||||
/crates/quicproquo-p2p/ @maintainers
|
||||
|
||||
# CI and infrastructure
|
||||
/.github/ @maintainers
|
||||
|
||||
54
.github/workflows/bench.yml
vendored
Normal file
54
.github/workflows/bench.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
name: Benchmarks
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, v2]
|
||||
pull_request:
|
||||
branches: [main, v2]
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
bench:
|
||||
name: Run Criterion benchmarks
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-bench-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-bench-
|
||||
|
||||
- name: Run benchmarks
|
||||
run: cargo bench --package quicproquo-core -- --output-format=bencher 2>&1 | tee bench-output.txt
|
||||
|
||||
- name: Upload HTML reports
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: criterion-reports
|
||||
path: target/criterion/
|
||||
retention-days: 30
|
||||
|
||||
- name: Upload raw output
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: bench-output
|
||||
path: bench-output.txt
|
||||
retention-days: 30
|
||||
55
.github/workflows/ci.yml
vendored
55
.github/workflows/ci.yml
vendored
@@ -2,9 +2,9 @@ name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, master]
|
||||
branches: [main, master, v2]
|
||||
pull_request:
|
||||
branches: [main, master]
|
||||
branches: [main, master, v2]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -45,7 +45,7 @@ jobs:
|
||||
run: cargo test --workspace
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --workspace --all-targets --
|
||||
run: cargo clippy --workspace --all-targets -- -D warnings
|
||||
|
||||
deny:
|
||||
name: cargo-deny
|
||||
@@ -72,3 +72,52 @@ jobs:
|
||||
run: |
|
||||
cargo install cargo-audit --locked
|
||||
cargo audit
|
||||
|
||||
coverage:
|
||||
name: Coverage
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-coverage-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-coverage-
|
||||
|
||||
- name: Install cargo-tarpaulin
|
||||
run: cargo install cargo-tarpaulin
|
||||
|
||||
- name: Run coverage
|
||||
run: |
|
||||
cargo tarpaulin --workspace \
|
||||
--exclude quicproquo-p2p \
|
||||
--out xml \
|
||||
--output-dir coverage/ \
|
||||
-- --test-threads 1
|
||||
|
||||
- name: Upload coverage report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage/cobertura.xml
|
||||
|
||||
docker:
|
||||
name: Docker Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Build Docker image
|
||||
run: docker build -f docker/Dockerfile .
|
||||
|
||||
65
.github/workflows/openwrt.yml
vendored
Normal file
65
.github/workflows/openwrt.yml
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
name: OpenWrt Cross-Compile
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
MAX_SIZE_MB: 5
|
||||
|
||||
jobs:
|
||||
cross-compile:
|
||||
name: Cross-compile (${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target:
|
||||
- x86_64-unknown-linux-musl
|
||||
- armv7-unknown-linux-musleabihf
|
||||
- aarch64-unknown-linux-musl
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Install cargo-zigbuild and zig
|
||||
run: |
|
||||
pip3 install ziglang
|
||||
cargo install cargo-zigbuild
|
||||
|
||||
- name: Add target
|
||||
run: rustup target add ${{ matrix.target }}
|
||||
|
||||
- name: Build (size-optimised)
|
||||
env:
|
||||
CARGO_PROFILE_RELEASE_OPT_LEVEL: s
|
||||
CARGO_PROFILE_RELEASE_LTO: 'true'
|
||||
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: '1'
|
||||
CARGO_PROFILE_RELEASE_STRIP: symbols
|
||||
run: |
|
||||
cargo zigbuild --release --target ${{ matrix.target }} --bin qpq-server
|
||||
|
||||
- name: Check binary size
|
||||
run: |
|
||||
BINARY="target/${{ matrix.target }}/release/qpq-server"
|
||||
SIZE=$(stat -c%s "$BINARY")
|
||||
SIZE_MB=$(echo "scale=2; $SIZE / 1048576" | bc)
|
||||
echo "Binary size: ${SIZE_MB} MB"
|
||||
MAX_BYTES=$(( ${{ env.MAX_SIZE_MB }} * 1048576 ))
|
||||
if [ "$SIZE" -gt "$MAX_BYTES" ]; then
|
||||
echo "::error::Binary exceeds ${MAX_SIZE_MB} MB limit (${SIZE_MB} MB)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: qpq-server-${{ matrix.target }}
|
||||
path: target/${{ matrix.target }}/release/qpq-server
|
||||
retention-days: 30
|
||||
11
.gitignore
vendored
11
.gitignore
vendored
@@ -7,4 +7,13 @@ docs/book/
|
||||
# Server/client runtime data — do not commit certs, keys, or DBs
|
||||
data/
|
||||
*.der
|
||||
quicnprotochat-server.toml
|
||||
*.pem
|
||||
*.db
|
||||
*.bin
|
||||
*.ks
|
||||
*.session
|
||||
*.convdb
|
||||
*.convdb-shm
|
||||
*.convdb-wal
|
||||
*.pending.ks
|
||||
qpq-server.toml
|
||||
|
||||
23
CLAUDE.md
Normal file
23
CLAUDE.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# quicproquo — Claude Code Instructions
|
||||
|
||||
## Agent Team Workflow Rules
|
||||
|
||||
### NEVER delete worktrees before preserving changes
|
||||
When using agent teams with `isolation: "worktree"`:
|
||||
1. **Before calling `TeamDelete`**, always check each worktree for uncommitted or committed changes
|
||||
2. **Create a named branch** from each worktree's HEAD and push/preserve it before cleanup
|
||||
3. **Preferred pattern**: use `git branch fix/<name> <worktree-HEAD-sha>` to save the work
|
||||
4. If an agent reports changes, its worktree branch MUST be merged or saved before the team is deleted
|
||||
|
||||
### Agent team best practices
|
||||
- Always have agents **commit their changes** with descriptive messages before shutting them down
|
||||
- After all agents report, **list worktrees** (`git worktree list`) and **save branches** before cleanup
|
||||
- When using worktree isolation, the sequence must be: agents finish → save branches → merge → TeamDelete
|
||||
- Never call TeamDelete as a shortcut to kill zombie agents — use `rm -rf ~/.claude/teams/<name>` for the team metadata only, preserving worktree dirs
|
||||
|
||||
### Git workflow
|
||||
- Conventional commits: `feat:`, `fix:`, `chore:`, `docs:`, `test:`, `refactor:`
|
||||
- GPG-signed commits only
|
||||
- No `Co-authored-by` trailers
|
||||
- No `.unwrap()` on crypto or I/O in non-test paths
|
||||
- Secrets: zeroize on drop, never in logs
|
||||
3783
Cargo.lock
generated
3783
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
49
Cargo.toml
49
Cargo.toml
@@ -1,12 +1,17 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/quicnprotochat-core",
|
||||
"crates/quicnprotochat-proto",
|
||||
"crates/quicnprotochat-server",
|
||||
"crates/quicnprotochat-client",
|
||||
"crates/quicnprotochat-p2p",
|
||||
"crates/quicnprotochat-gui",
|
||||
"crates/quicproquo-core",
|
||||
"crates/quicproquo-proto",
|
||||
"crates/quicproquo-plugin-api",
|
||||
"crates/quicproquo-kt",
|
||||
"crates/quicproquo-rpc",
|
||||
"crates/quicproquo-sdk",
|
||||
"crates/quicproquo-server",
|
||||
"crates/quicproquo-client",
|
||||
# P2P crate uses iroh (~90 extra deps). Only compiled when the `mesh`
|
||||
# feature is enabled on quicproquo-client.
|
||||
"crates/quicproquo-p2p",
|
||||
]
|
||||
|
||||
# Shared dependency versions — bump here to affect the whole workspace.
|
||||
@@ -20,12 +25,13 @@ openmls_traits = { version = "0.2" }
|
||||
# duplicate Serialize trait versions in the dependency graph.
|
||||
tls_codec = { version = "0.3", features = ["derive"] }
|
||||
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
|
||||
# All three parameter sets (512/768/1024) are compiled in by default — no feature flag needed.
|
||||
ml-kem = { version = "0.2" }
|
||||
x25519-dalek = { version = "2", features = ["static_secrets"] }
|
||||
ed25519-dalek = { version = "2", features = ["rand_core"] }
|
||||
sha2 = { version = "0.10" }
|
||||
hmac = { version = "0.12" }
|
||||
hkdf = { version = "0.12" }
|
||||
ciborium = { version = "0.2" }
|
||||
chacha20poly1305 = { version = "0.10" }
|
||||
opaque-ke = { version = "4", features = ["ristretto255", "argon2"] }
|
||||
zeroize = { version = "1", features = ["derive", "serde"] }
|
||||
@@ -36,12 +42,17 @@ serde = { version = "1", features = ["derive"] }
|
||||
serde_json = { version = "1" }
|
||||
bincode = { version = "1" }
|
||||
|
||||
# ── Serialisation + RPC ───────────────────────────────────────────────────────
|
||||
# ── Serialisation (v2: protobuf) ─────────────────────────────────────────────
|
||||
prost = { version = "0.13" }
|
||||
prost-types = { version = "0.13" }
|
||||
prost-build = { version = "0.13" }
|
||||
|
||||
# ── Serialisation (v1 legacy — capnp, used by existing server/client) ────────
|
||||
capnp = { version = "0.19" }
|
||||
capnp-rpc = { version = "0.19" }
|
||||
|
||||
# ── Async / networking ────────────────────────────────────────────────────────
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "signal", "io-util", "io-std"] }
|
||||
tokio-util = { version = "0.7", features = ["codec", "compat"] }
|
||||
futures = { version = "0.3" }
|
||||
quinn = { version = "0.11" }
|
||||
@@ -49,12 +60,18 @@ quinn-proto = { version = "0.11" }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "ring"] }
|
||||
rcgen = { version = "0.13" }
|
||||
|
||||
# ── Middleware ────────────────────────────────────────────────────────────────
|
||||
tower = { version = "0.5", features = ["util", "limit", "timeout"] }
|
||||
|
||||
# ── Database ─────────────────────────────────────────────────────────────
|
||||
rusqlite = { version = "0.31", features = ["bundled-sqlcipher"] }
|
||||
|
||||
# ── Encoding ─────────────────────────────────────────────────────────────────
|
||||
hex = { version = "0.4" }
|
||||
bytes = { version = "1" }
|
||||
|
||||
# ── Server utilities ──────────────────────────────────────────────────────────
|
||||
dashmap = { version = "5" }
|
||||
governor = { version = "0.6" }
|
||||
tracing = { version = "0.1" }
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
@@ -64,12 +81,20 @@ thiserror = { version = "1" }
|
||||
|
||||
# ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
clap = { version = "4", features = ["derive", "env"] }
|
||||
clap_complete = { version = "4" }
|
||||
indicatif = { version = "0.17" }
|
||||
rustyline = { version = "14" }
|
||||
|
||||
# ── Certificate parsing ──────────────────────────────────────────────────────
|
||||
x509-parser = { version = "0.16", default-features = false }
|
||||
|
||||
# ── Build-time ────────────────────────────────────────────────────────────────
|
||||
capnpc = { version = "0.19" }
|
||||
|
||||
[workspace.lints.rust]
|
||||
unsafe_code = "warn"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
unwrap_used = "deny"
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
lto = "thin"
|
||||
|
||||
338
README.md
338
README.md
@@ -1,12 +1,18 @@
|
||||
# quicnprotochat
|
||||
<p align="center">
|
||||
<img src="assets/logo.png" alt="QPQ logo" width="200">
|
||||
</p>
|
||||
|
||||
> End-to-end encrypted group messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust.
|
||||
# QPQ — quicproquo
|
||||
|
||||
Every byte on the wire is protected by a QUIC transport secured with TLS 1.3
|
||||
(`quinn` + `rustls`). The inner **MLS** layer provides post-compromise security
|
||||
and ratcheted group key agreement across any number of participants. Messages
|
||||
are framed with **Cap'n Proto**, keeping serialisation zero-copy and
|
||||
schema-versioned.
|
||||
[](https://github.com/xorwell/quicproquo/actions/workflows/ci.yml)
|
||||
|
||||
> End-to-end encrypted messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust.
|
||||
|
||||
The server never sees plaintext. Every byte on the wire is protected by a QUIC
|
||||
transport secured with TLS 1.3 (`quinn` + `rustls`). The inner **MLS** layer
|
||||
provides forward secrecy, post-compromise security, and ratcheted group key
|
||||
agreement across any number of participants. Messages are framed with
|
||||
**Cap'n Proto** for zero-copy, schema-versioned serialisation.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
@@ -19,38 +25,110 @@ schema-versioned.
|
||||
```
|
||||
|
||||
| Property | Mechanism |
|
||||
|---|---|
|
||||
| ------------------------- | -------------------------------------------------- |
|
||||
| Transport confidentiality | TLS 1.3 over QUIC (rustls) |
|
||||
| Transport authentication | TLS 1.3 server cert (self-signed by default) |
|
||||
| Transport authentication | TLS 1.3 server cert (self-signed or CA) |
|
||||
| Group key agreement | MLS `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` |
|
||||
| Post-compromise security | MLS epoch ratchet |
|
||||
| Forward secrecy | Per-epoch key schedule |
|
||||
| Identity | Ed25519 (MLS credential + leaf node signature) |
|
||||
| Password auth | OPAQUE (password never sent to server) |
|
||||
| Post-quantum readiness | X25519 + ML-KEM-768 hybrid KEM envelope |
|
||||
| Local storage encryption | SQLCipher + Argon2id + ChaCha20-Poly1305 |
|
||||
| Message framing | Cap'n Proto (unpacked wire format) |
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
## Features
|
||||
|
||||
Full documentation is available as an **mdBook** wiki in [`docs/`](docs/):
|
||||
### Core
|
||||
|
||||
```bash
|
||||
# Install mdBook (once)
|
||||
cargo install mdbook
|
||||
- **Interactive REPL** — multi-conversation chat with auto-register, auto-login, 40+ slash commands, background polling, and message history
|
||||
- **1:1 DMs** — dedicated channels with server-enforced membership authorization
|
||||
- **Multi-party groups** — N-member MLS groups with Commit fan-out and epoch sync
|
||||
- **OPAQUE authentication** — password-authenticated key exchange (password never leaves the client)
|
||||
- **Encrypted local storage** — SQLCipher database + encrypted session tokens (Argon2id + ChaCha20-Poly1305)
|
||||
- **Persistent state** — server and client survive restarts; SQLite/SQLCipher or file-backed storage
|
||||
- **Rich messaging** — reactions, read receipts, typing indicators, message editing, message deletion
|
||||
- **File transfer** — chunked upload/download with SHA-256 content addressing, MIME detection, 50 MB limit
|
||||
- **Disappearing messages** — per-conversation TTL with server-side GC (`/disappear 30m`, `1h`, `1d`, `7d`)
|
||||
- **Account deletion** — transactional purge of all user data, sessions, and channel memberships (GDPR-ready)
|
||||
- **Self-DM notepad** — send messages to yourself (local-only, no server round-trip)
|
||||
- **Certificate pinning** — pass the server cert as `--ca-cert` to trust only that server
|
||||
- **Federation** — server-to-server message relay via Cap'n Proto RPC over QUIC with mTLS
|
||||
- **mDNS discovery** — servers announce `_quicproquo._udp.local.`; clients auto-discover nearby nodes
|
||||
- **Sealed sender mode** — optional anonymous enqueue (sender identity inside MLS ciphertext only)
|
||||
- **Prometheus metrics** — `--metrics-listen` exposes `/metrics` endpoint for monitoring
|
||||
- **Dynamic plugin system** — load `.so`/`.dylib` plugins at runtime via `--plugin-dir`
|
||||
- **Safety numbers** — `/verify <username>` for out-of-band key verification (60-digit numeric code)
|
||||
- **Transcript export** — encrypted, tamper-evident message archives with hash-chain integrity verification
|
||||
- **MLS key rotation** — `/update-key` rotates MLS leaf node material with epoch advancement
|
||||
|
||||
# Build and serve locally
|
||||
mdbook serve docs
|
||||
# Open http://localhost:3000
|
||||
```
|
||||
### Client SDKs
|
||||
|
||||
### Highlights
|
||||
- **Go SDK** (`sdks/go/`) — native QUIC transport via `quic-go`, Cap'n Proto RPC, full API: connect, OPAQUE auth, send/receive, disappearing messages, account deletion
|
||||
- **TypeScript SDK** (`sdks/typescript/`) — `@quicproquo/client` with WASM crypto (175 KB), WebSocket transport, offline crypto mode, browser demo
|
||||
- **Python FFI** (`examples/python/`) — `ctypes` wrapper over the C FFI library with CLI
|
||||
- **C FFI** (`crates/quicproquo-ffi/`) — `libquicproquo_ffi.so` with 7 extern functions: connect, login, send, receive, disconnect, last_error, free_string
|
||||
|
||||
- **[Architecture Overview](docs/src/architecture/overview.md)** — Two-service model, dual-key design, crate layout
|
||||
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS 1.3, Cap'n Proto, MLS, Hybrid KEM
|
||||
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — Forward secrecy, post-compromise security, PQ readiness, threat model
|
||||
- **[Design Rationale](docs/src/design-rationale/overview.md)** — Why MLS over Signal/Matrix, ADRs for all key decisions
|
||||
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — Annotated Cap'n Proto schemas
|
||||
- **[Getting Started](docs/src/getting-started/prerequisites.md)** — Build, run, demo walkthrough
|
||||
- **[Roadmap](docs/src/roadmap/milestones.md)** — Milestones, production readiness, future research
|
||||
### REPL slash commands
|
||||
|
||||
| Command | Description |
|
||||
| ----------------------------------- | --------------------------------------------------- |
|
||||
| `/dm <username>` | Start a 1:1 DM with a peer |
|
||||
| `/create-group <name>` (or `/cg`) | Create a new group |
|
||||
| `/invite <username>` | Add a member to the current group |
|
||||
| `/remove <username>` | Remove a member from the current group |
|
||||
| `/join` | Join a pending group invitation |
|
||||
| `/leave` | Leave the current group |
|
||||
| `/switch @user` or `/switch #group` | Switch active conversation |
|
||||
| `/list` or `/ls` | List all conversations |
|
||||
| `/members` | Show group members with resolved usernames |
|
||||
| `/group-info` (or `/gi`) | Show group type, members, MLS epoch |
|
||||
| `/rename <name>` | Rename the current conversation |
|
||||
| `/history [count]` (or `/hist`) | Show message history (default 20) |
|
||||
| `/react <emoji> [index]` | React to a message with an emoji |
|
||||
| `/typing` | Send a typing indicator |
|
||||
| `/typing-notify on\|off` | Toggle typing indicator display |
|
||||
| `/edit <index> <text>` | Edit one of your messages |
|
||||
| `/delete <index>` | Delete one of your messages |
|
||||
| `/send-file <path>` (or `/sf`) | Upload and send a file (chunked, SHA-256 verified) |
|
||||
| `/download <index>` (or `/dl`) | Download a received file |
|
||||
| `/disappear <duration>` | Set message TTL (`30m`, `1h`, `1d`, `7d`) |
|
||||
| `/verify <username>` | Compare safety numbers with a peer |
|
||||
| `/update-key` (or `/rotate-key`) | Rotate your MLS key material |
|
||||
| `/delete-account` | Permanently delete your account (with confirmation) |
|
||||
| `/whoami` | Show identity and group status |
|
||||
| `/help` | Command reference |
|
||||
| `/quit` | Exit |
|
||||
|
||||
**Mesh commands** (requires `--features mesh`):
|
||||
|
||||
| Command | Description |
|
||||
| ------------------------------- | ---------------------------------- |
|
||||
| `/mesh peers` | Scan for nearby qpq nodes via mDNS |
|
||||
| `/mesh server <host:port>` | Note a discovered server address |
|
||||
| `/mesh send <peer_id> <msg>` | Direct P2P message via iroh |
|
||||
| `/mesh broadcast <topic> <msg>` | Publish to a broadcast channel |
|
||||
| `/mesh subscribe <topic>` | Join a broadcast channel |
|
||||
| `/mesh route` | Show routing table |
|
||||
| `/mesh identity` | Show mesh identity info |
|
||||
| `/mesh store` | Show store-and-forward stats |
|
||||
|
||||
### Mesh networking (feature-gated: `--features mesh`)
|
||||
|
||||
- **P2P transport** (`quicproquo-p2p`) — iroh-based direct peer-to-peer messaging with NAT traversal
|
||||
- **Self-sovereign identity** — Ed25519 keypair-based mesh identity, independent of server registration
|
||||
- **Store-and-forward** — TTL-based message buffering with hop counting and deduplication
|
||||
- **Broadcast channels** — ChaCha20-Poly1305 symmetric topic-based pub/sub (no MLS overhead)
|
||||
- **mDNS discovery** — servers announce `_quicproquo._udp.local.`; clients auto-discover nearby nodes
|
||||
- **Federation routing** — server-to-server message relay with mTLS
|
||||
|
||||
### Experimental / proof-of-concept
|
||||
|
||||
- **Tauri 2 GUI** (`quicproquo-gui`) — foundational desktop app shell; not feature-complete
|
||||
- **Mobile FFI** (`quicproquo-mobile`) — C API for QUIC connection migration (wifi to cellular)
|
||||
- **Bot framework** (`quicproquo-bot`) — programmable bot client
|
||||
|
||||
---
|
||||
|
||||
@@ -61,62 +139,200 @@ mdbook serve docs
|
||||
brew install capnp # macOS
|
||||
# apt-get install capnproto # Debian/Ubuntu
|
||||
|
||||
# GUI prerequisites (Linux only) — WebKitGTK + GTK3 for Tauri 2
|
||||
# sudo apt install -y libwebkit2gtk-4.1-dev libgtk-3-dev libglib2.0-dev libssl-dev libayatana-appindicator3-dev librsvg2-dev patchelf
|
||||
# Build (excludes GUI — requires GTK system libs)
|
||||
cargo build --bin qpq-server --bin qpq
|
||||
|
||||
# Build and test
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
# Run tests
|
||||
cargo test --workspace --exclude quicproquo-gui
|
||||
|
||||
# Start the server (port 7000 by default)
|
||||
cargo run -p quicnprotochat-server
|
||||
# Start the server (port 7000 by default, auto-generates self-signed cert)
|
||||
cargo run --bin qpq-server -- --allow-insecure-auth
|
||||
|
||||
# Or via a config file (TOML)
|
||||
cat > quicnprotochat-server.toml <<'EOF'
|
||||
# Interactive REPL (auto-registers and logs in)
|
||||
cargo run --bin qpq -- repl --username alice --password mypass
|
||||
```
|
||||
|
||||
### REPL quickstart (two terminals)
|
||||
|
||||
```bash
|
||||
# Terminal 1
|
||||
qpq repl --username alice --password secretA
|
||||
|
||||
# Terminal 2
|
||||
qpq repl --username bob --password secretB
|
||||
|
||||
# In Alice's REPL:
|
||||
/dm bob
|
||||
Hello from Alice!
|
||||
|
||||
# Bob sees: [alice] Hello from Alice!
|
||||
```
|
||||
|
||||
### Server configuration (TOML)
|
||||
|
||||
```bash
|
||||
cat > qpq-server.toml <<'EOF'
|
||||
listen = "0.0.0.0:7000"
|
||||
data_dir = "data"
|
||||
tls_cert = "data/server-cert.der"
|
||||
tls_key = "data/server-key.der"
|
||||
auth_token = "devtoken"
|
||||
store_backend = "file" # or "sql"
|
||||
db_path = "data/quicnprotochat.db"
|
||||
db_key = ""
|
||||
auth_token = "your-strong-token-here"
|
||||
store_backend = "sql" # or "file"
|
||||
db_path = "data/qpq.db"
|
||||
db_key = "your-db-encryption-key"
|
||||
metrics_listen = "0.0.0.0:9090"
|
||||
metrics_enabled = true
|
||||
# Federation (optional)
|
||||
# federation_enabled = true
|
||||
# federation_domain = "chat.example.com"
|
||||
# federation_listen = "0.0.0.0:7001"
|
||||
# Plugin loading (optional)
|
||||
# plugin_dir = "/etc/qpq/plugins"
|
||||
EOF
|
||||
cargo run -p quicnprotochat-server -- --config quicnprotochat-server.toml
|
||||
|
||||
# Run the two-party demo
|
||||
cargo run -p quicnprotochat-client -- demo-group \
|
||||
--server 127.0.0.1:7000
|
||||
|
||||
# Interactive 1:1 chat (after creating a group and inviting a peer)
|
||||
# Terminal 1: quicnprotochat chat --peer-key <other_identity_hex>
|
||||
# Terminal 2: quicnprotochat chat --peer-key <first_identity_hex>
|
||||
# Type messages and press Enter; incoming messages appear as [peer] <msg>. Ctrl+D to exit.
|
||||
cargo run --bin qpq-server -- --config qpq-server.toml
|
||||
```
|
||||
|
||||
> **Production:** use a strong `QPQ_AUTH_TOKEN`, set `QPQ_DB_KEY` when using `store_backend = "sql"`, and provide real TLS certificates (the server refuses to auto-generate certs in production mode).
|
||||
|
||||
See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) for a step-by-step guide.
|
||||
|
||||
---
|
||||
|
||||
## Crate layout
|
||||
|
||||
| Crate | Purpose |
|
||||
| ----------------------- | ---------------------------------------------------------------------------------------------- |
|
||||
| `quicproquo-core` | MLS group operations, hybrid KEM, OPAQUE auth, crypto primitives, WASM-compatible modules |
|
||||
| `quicproquo-proto` | Cap'n Proto schemas and generated RPC code |
|
||||
| `quicproquo-server` | QUIC server, NodeService RPC (24 methods), storage backends, federation, plugins, blob storage |
|
||||
| `quicproquo-client` | CLI + REPL (40+ commands), session management, conversation store, file transfer |
|
||||
| `quicproquo-ffi` | C FFI bindings (`libquicproquo_ffi.so`) for cross-language integration |
|
||||
| `quicproquo-plugin-api` | C-compatible plugin hook API (`HookVTable`, 6 hooks) |
|
||||
| `quicproquo-kt` | Key transparency / Merkle-log identity bindings |
|
||||
| `quicproquo-bot` | Programmable bot client framework |
|
||||
| `quicproquo-gen` | Code generation utilities |
|
||||
| `quicproquo-gui` | Tauri 2 desktop app (experimental, requires GTK) |
|
||||
| `quicproquo-mobile` | C FFI for mobile connection migration (experimental) |
|
||||
| `quicproquo-p2p` | iroh-based P2P transport, mesh identity, store-and-forward, broadcast channels |
|
||||
|
||||
---
|
||||
|
||||
## CI pipeline
|
||||
|
||||
GitHub Actions runs on every push and PR:
|
||||
|
||||
- `cargo fmt --check` — formatting
|
||||
- `cargo build --workspace` — full build
|
||||
- `cargo test --workspace` — 130+ tests (core, server, client, E2E, P2P, doctests)
|
||||
- `cargo clippy --workspace` — lint
|
||||
- `cargo deny check` — license and advisory audit
|
||||
- `cargo audit` — vulnerability scan
|
||||
- `cargo tarpaulin` — code coverage (uploaded as artifact)
|
||||
- `docker build` — container image validation
|
||||
|
||||
---
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | Name | Status | What it adds |
|---|------|--------|--------------|
| M1 | QUIC/TLS transport | **Done** | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
| M2 | Authentication Service | **Done** | Ed25519 identity, KeyPackage generation, AS upload/fetch |
| M3 | Delivery Service + MLS groups | **Done** | DS relay, `GroupMember` create/join/add/send/recv |
| M4 | Group CLI subcommands | **Done** | Persistent CLI (`create-group`, `invite`, `join`, `send`, `recv`), OPAQUE login, 20 subcommands |
| M5 | Multi-party groups | **Done** | N > 2 members, Commit fan-out, `send --all`, epoch sync |
| M6 | Persistence + REPL | **Done** | SQLite/SQLCipher, migrations, interactive REPL, DM channels, encrypted local storage |
| M7 | Post-quantum MLS | **Planned** | Hybrid X25519 + ML-KEM-768 integrated into the MLS ciphersuite |
|
||||
|
||||
M7 note: the hybrid KEM envelope is already implemented and tested (10 tests passing). What remains is integrating it into the OpenMLS CryptoProvider so all MLS key material gets post-quantum confidentiality.
|
||||
|
||||
---
|
||||
|
||||
## Security notes

This is a **proof-of-concept research project**. It has not been audited.
See the [threat model](docs/src/cryptography/threat-model.md) for a detailed
analysis of what is and isn't protected.

---

## Roadmap

See [ROADMAP.md](ROADMAP.md) for the full phased plan. Summary:
|
||||
|
||||
| Phase | Focus | Status |
|
||||
| ----- | -------------------------------------------------------------- | ------------------------------------- |
|
||||
| 1 | Production hardening (unwrap removal, secure defaults, Docker) | In progress |
|
||||
| 2 | Test and CI maturity | Partially done |
|
||||
| 3 | Client SDKs (Go, TypeScript/WASM, Python FFI, C FFI) | **Go, TS, FFI, WASM done** |
|
||||
| 4 | Trust and security (audit, key transparency, PQ MLS) | DS auth + enumeration mitigation done |
|
||||
| 5 | Features and UX (rich messaging, file transfer, disappearing) | **Edit/delete, files, TTL done** |
|
||||
| 6 | Scale and operations (horizontal scaling, observability) | Planned |
|
||||
| 7 | Platform expansion (mobile, web, federation, sealed sender) | **Sealed sender done** |
|
||||
| 8 | Freifunk / community mesh networking | **F0-F6 done** |
|
||||
| 9 | Developer experience and community growth | Safety numbers + plugins done |
|
||||
|
||||
### Recently completed (Sprints 1-9)
|
||||
|
||||
- **Rich messaging** — reactions, read receipts, typing indicators, edit/delete messages
|
||||
- **File transfer** — chunked upload/download with SHA-256 content addressing and progress bars
|
||||
- **Disappearing messages** — per-conversation TTL with server-side garbage collection
|
||||
- **Account deletion** — transactional purge of all user data (GDPR-ready)
|
||||
- **Go SDK** — native QUIC + Cap'n Proto client with full API coverage
|
||||
- **TypeScript SDK** — WASM crypto (175 KB) + WebSocket transport + browser demo
|
||||
- **C FFI + Python bindings** — cross-language integration via `libquicproquo_ffi`
|
||||
- **Mesh networking** — self-sovereign identity, store-and-forward, broadcast channels, extended REPL
|
||||
- **Security hardening** — DS sender binding, username enumeration mitigation, MLS key rotation
|
||||
- **CI pipeline** — fmt, build, test, clippy, deny, audit, tarpaulin coverage, Docker build
|
||||
- **Plugin system** — dynamic `.so`/`.dylib` loading with 6 C-compatible hook points
|
||||
|
||||
---
|
||||
|
||||
## Building without the GUI
|
||||
|
||||
The GUI crate requires GTK system libraries. To build just the server and client:
|
||||
|
||||
```bash
|
||||
cargo build --bin qpq-server --bin qpq
|
||||
```
|
||||
|
||||
To build the client with mesh/P2P support:
|
||||
|
||||
```bash
|
||||
cargo build -p quicproquo-client --features mesh
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
|
||||
Full documentation is available as an **mdBook** in [`docs/`](docs/):
|
||||
|
||||
```bash
|
||||
cargo install mdbook # once
|
||||
mdbook serve docs # http://localhost:3000
|
||||
```
|
||||
|
||||
- **[Getting Started](docs/src/getting-started/prerequisites.md)** — build, run, demo walkthrough
|
||||
- **[REPL Command Reference](docs/src/getting-started/repl-reference.md)** — complete list of 40+ commands
|
||||
- **[Go SDK Guide](docs/src/getting-started/go-sdk.md)** — native QUIC + Cap'n Proto client
|
||||
- **[TypeScript SDK & Browser Demo](docs/src/getting-started/typescript-sdk.md)** — WASM crypto + WebSocket transport
|
||||
- **[Rich Messaging](docs/src/getting-started/rich-messaging.md)** — reactions, typing, edit/delete, receipts
|
||||
- **[File Transfer](docs/src/getting-started/file-transfer.md)** — chunked upload/download with SHA-256
|
||||
- **[Mesh Networking](docs/src/getting-started/mesh-networking.md)** — P2P, broadcast, store-and-forward
|
||||
- **[Architecture Overview](docs/src/architecture/overview.md)** — two-service model, dual-key design, crate layout
|
||||
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS 1.3, Cap'n Proto, MLS, Hybrid KEM
|
||||
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — forward secrecy, post-compromise security, PQ readiness, threat model
|
||||
- **[Design Rationale](docs/src/design-rationale/overview.md)** — why MLS over Signal/Matrix, ADRs for key decisions
|
||||
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — annotated Cap'n Proto schemas
|
||||
- **[Roadmap](docs/src/roadmap/milestones.md)** — milestones, production readiness, future research
|
||||
- **[Future Improvements](docs/FUTURE-IMPROVEMENTS.md)** — prioritised list of security, ops, and feature improvements
|
||||
|
||||
---
|
||||
|
||||
## Security
|
||||
|
||||
This is a **research project** and has not undergone a formal third-party audit. See the [threat model](docs/src/cryptography/threat-model.md) and [security audit](docs/SECURITY-AUDIT.md) for details.
|
||||
|
||||
- The server only routes opaque ciphertexts by recipient key — it never sees plaintext.
|
||||
- OPAQUE ensures passwords never leave the client.
|
||||
- Local databases are encrypted with SQLCipher when a password is provided.
|
||||
- Session tokens are encrypted at rest (Argon2id key derivation + ChaCha20-Poly1305).
|
||||
- **Certificate pinning:** pass the server cert as `--ca-cert` so the client trusts only that server.
|
||||
- **Sealed sender:** optional mode where the server cannot see who sent a message.
|
||||
- **Dependency checks:** CI runs `cargo deny check` and `cargo audit` on every PR.
|
||||
|
||||
---
|
||||
|
||||
|
||||
891
ROADMAP.html
Normal file
891
ROADMAP.html
Normal file
@@ -0,0 +1,891 @@
|
||||
<!DOCTYPE HTML>
|
||||
<html lang="en" class="navy sidebar-visible" dir="ltr">
|
||||
<head>
|
||||
<!-- Book generated using mdBook -->
|
||||
<meta charset="UTF-8">
|
||||
<title>Full Roadmap (Phases 1–8) - quicproquo</title>
|
||||
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
|
||||
<meta name="description" content="End-to-end encrypted group messaging over QUIC + TLS 1.3 + MLS (RFC 9420)">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta name="theme-color" content="#ffffff">
|
||||
|
||||
<link rel="icon" href="favicon-de23e50b.svg">
|
||||
<link rel="shortcut icon" href="favicon-8114d1fc.png">
|
||||
<link rel="stylesheet" href="css/variables-8adf115d.css">
|
||||
<link rel="stylesheet" href="css/general-2459343d.css">
|
||||
<link rel="stylesheet" href="css/chrome-ae938929.css">
|
||||
<link rel="stylesheet" href="css/print-9e4910d8.css" media="print">
|
||||
|
||||
<!-- Fonts -->
|
||||
<link rel="stylesheet" href="fonts/fonts-9644e21d.css">
|
||||
|
||||
<!-- Highlight.js Stylesheets -->
|
||||
<link rel="stylesheet" id="mdbook-highlight-css" href="highlight-493f70e1.css">
|
||||
<link rel="stylesheet" id="mdbook-tomorrow-night-css" href="tomorrow-night-4c0ae647.css">
|
||||
<link rel="stylesheet" id="mdbook-ayu-highlight-css" href="ayu-highlight-3fdfc3ac.css">
|
||||
|
||||
<!-- Custom theme stylesheets -->
|
||||
|
||||
|
||||
<!-- Provide site root and default themes to javascript -->
|
||||
<script>
|
||||
const path_to_root = "";
|
||||
const default_light_theme = "navy";
|
||||
const default_dark_theme = "navy";
|
||||
window.path_to_searchindex_js = "searchindex-92ce38c7.js";
|
||||
</script>
|
||||
<!-- Start loading toc.js asap -->
|
||||
<script src="toc-4c7c920d.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div id="mdbook-help-container">
|
||||
<div id="mdbook-help-popup">
|
||||
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
|
||||
<div>
|
||||
<p>Press <kbd>←</kbd> or <kbd>→</kbd> to navigate between chapters</p>
|
||||
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
|
||||
<p>Press <kbd>?</kbd> to show this help</p>
|
||||
<p>Press <kbd>Esc</kbd> to hide this help</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="mdbook-body-container">
|
||||
<!-- Work around some values being stored in localStorage wrapped in quotes -->
|
||||
<script>
|
||||
try {
|
||||
let theme = localStorage.getItem('mdbook-theme');
|
||||
let sidebar = localStorage.getItem('mdbook-sidebar');
|
||||
|
||||
if (theme.startsWith('"') && theme.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
|
||||
}
|
||||
|
||||
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
|
||||
}
|
||||
} catch (e) { }
|
||||
</script>
|
||||
|
||||
<!-- Set the theme before any content is loaded, prevents flash -->
|
||||
<script>
|
||||
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
|
||||
let theme;
|
||||
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
|
||||
if (theme === null || theme === undefined) { theme = default_theme; }
|
||||
const html = document.documentElement;
|
||||
html.classList.remove('navy')
|
||||
html.classList.add(theme);
|
||||
html.classList.add("js");
|
||||
</script>
|
||||
|
||||
<input type="checkbox" id="mdbook-sidebar-toggle-anchor" class="hidden">
|
||||
|
||||
<!-- Hide / unhide sidebar before it is displayed -->
|
||||
<script>
|
||||
let sidebar = null;
|
||||
const sidebar_toggle = document.getElementById("mdbook-sidebar-toggle-anchor");
|
||||
if (document.body.clientWidth >= 1080) {
|
||||
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
|
||||
sidebar = sidebar || 'visible';
|
||||
} else {
|
||||
sidebar = 'hidden';
|
||||
sidebar_toggle.checked = false;
|
||||
}
|
||||
if (sidebar === 'visible') {
|
||||
sidebar_toggle.checked = true;
|
||||
} else {
|
||||
html.classList.remove('sidebar-visible');
|
||||
}
|
||||
</script>
|
||||
|
||||
<nav id="mdbook-sidebar" class="sidebar" aria-label="Table of contents">
|
||||
<!-- populated by js -->
|
||||
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
|
||||
<noscript>
|
||||
<iframe class="sidebar-iframe-outer" src="toc.html"></iframe>
|
||||
</noscript>
|
||||
<div id="mdbook-sidebar-resize-handle" class="sidebar-resize-handle">
|
||||
<div class="sidebar-resize-indicator"></div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<div id="mdbook-page-wrapper" class="page-wrapper">
|
||||
|
||||
<div class="page">
|
||||
<div id="mdbook-menu-bar-hover-placeholder"></div>
|
||||
<div id="mdbook-menu-bar" class="menu-bar sticky">
|
||||
<div class="left-buttons">
|
||||
<label id="mdbook-sidebar-toggle" class="icon-button" for="mdbook-sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="mdbook-sidebar">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M0 96C0 78.3 14.3 64 32 64H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32C14.3 128 0 113.7 0 96zM0 256c0-17.7 14.3-32 32-32H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32c-17.7 0-32-14.3-32-32zM448 416c0 17.7-14.3 32-32 32H32c-17.7 0-32-14.3-32-32s14.3-32 32-32H416c17.7 0 32 14.3 32 32z"/></svg></span>
|
||||
</label>
|
||||
<button id="mdbook-theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="mdbook-theme-list">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M371.3 367.1c27.3-3.9 51.9-19.4 67.2-42.9L600.2 74.1c12.6-19.5 9.4-45.3-7.6-61.2S549.7-4.4 531.1 9.6L294.4 187.2c-24 18-38.2 46.1-38.4 76.1L371.3 367.1zm-19.6 25.4l-116-104.4C175.9 290.3 128 339.6 128 400c0 3.9 .2 7.8 .6 11.6c1.8 17.5-10.2 36.4-27.8 36.4H96c-17.7 0-32 14.3-32 32s14.3 32 32 32H240c61.9 0 112-50.1 112-112c0-2.5-.1-5-.2-7.5z"/></svg></span>
|
||||
</button>
|
||||
<ul id="mdbook-theme-list" class="theme-popup" aria-label="Themes" role="menu">
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-default_theme">Auto</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-light">Light</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-rust">Rust</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-coal">Coal</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-navy">Navy</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-ayu">Ayu</button></li>
|
||||
</ul>
|
||||
<button id="mdbook-search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="mdbook-searchbar">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M416 208c0 45.9-14.9 88.3-40 122.7L502.6 457.4c12.5 12.5 12.5 32.8 0 45.3s-32.8 12.5-45.3 0L330.7 376c-34.4 25.2-76.8 40-122.7 40C93.1 416 0 322.9 0 208S93.1 0 208 0S416 93.1 416 208zM208 352c79.5 0 144-64.5 144-144s-64.5-144-144-144S64 128.5 64 208s64.5 144 144 144z"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<h1 class="menu-title">quicproquo</h1>
|
||||
|
||||
<div class="right-buttons">
|
||||
<a href="print.html" title="Print this book" aria-label="Print this book">
|
||||
<span class=fa-svg id="print-button"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M128 0C92.7 0 64 28.7 64 64v96h64V64H354.7L384 93.3V160h64V93.3c0-17-6.7-33.3-18.7-45.3L400 18.7C388 6.7 371.7 0 354.7 0H128zM384 352v32 64H128V384 368 352H384zm64 32h32c17.7 0 32-14.3 32-32V256c0-35.3-28.7-64-64-64H64c-35.3 0-64 28.7-64 64v96c0 17.7 14.3 32 32 32H64v64c0 35.3 28.7 64 64 64H384c35.3 0 64-28.7 64-64V384zm-16-88c-13.3 0-24-10.7-24-24s10.7-24 24-24s24 10.7 24 24s-10.7 24-24 24z"/></svg></span>
|
||||
</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="mdbook-search-wrapper" class="hidden">
|
||||
<form id="mdbook-searchbar-outer" class="searchbar-outer">
|
||||
<div class="search-wrapper">
|
||||
<input type="search" id="mdbook-searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="mdbook-searchresults-outer" aria-describedby="searchresults-header">
|
||||
<div class="spinner-wrapper">
|
||||
<span class=fa-svg id="fa-spin"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M304 48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zm0 416c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM48 304c26.5 0 48-21.5 48-48s-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48zm464-48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM142.9 437c18.7-18.7 18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zm0-294.2c18.7-18.7 18.7-49.1 0-67.9S93.7 56.2 75 75s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zM369.1 437c18.7 18.7 49.1 18.7 67.9 0s18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9z"/></svg></span>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
<div id="mdbook-searchresults-outer" class="searchresults-outer hidden">
|
||||
<div id="mdbook-searchresults-header" class="searchresults-header"></div>
|
||||
<ul id="mdbook-searchresults">
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
|
||||
<script>
|
||||
document.getElementById('mdbook-sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
|
||||
document.getElementById('mdbook-sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
|
||||
Array.from(document.querySelectorAll('#mdbook-sidebar a')).forEach(function(link) {
|
||||
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
|
||||
});
|
||||
</script>
|
||||
|
||||
<div id="mdbook-content" class="content">
|
||||
<main>
|
||||
<h1 id="roadmap--quicproquo"><a class="header" href="#roadmap--quicproquo">Roadmap — quicproquo</a></h1>
|
||||
<blockquote>
|
||||
<p>From proof-of-concept to production-grade E2E encrypted messaging.</p>
|
||||
<p>Each phase is designed to be tackled sequentially. Items within a phase
|
||||
can be parallelised. Check the box when done.</p>
|
||||
</blockquote>
|
||||
<hr>
|
||||
<h2 id="phase-1--production-hardening-critical"><a class="header" href="#phase-1--production-hardening-critical">Phase 1 — Production Hardening (Critical)</a></h2>
|
||||
<p>Eliminate all crash paths, enforce secure defaults, fix deployment blockers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.1 Remove <code>.unwrap()</code> / <code>.expect()</code> from production paths</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>AUTH_CONTEXT.read().expect()</code> in client RPC with proper <code>Result</code></li>
|
||||
<li>Replace <code>"0.0.0.0:0".parse().unwrap()</code> in client with fallible parse</li>
|
||||
<li>Replace <code>Mutex::lock().unwrap()</code> in server storage with <code>.map_err()</code></li>
|
||||
<li>Audit: <code>grep -rn 'unwrap()\|expect(' crates/</code> outside <code>#[cfg(test)]</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.2 Enforce secure defaults in production mode</strong></p>
|
||||
<ul>
|
||||
<li>Reject startup if <code>QPQ_PRODUCTION=true</code> and <code>auth_token</code> is empty or <code>"devtoken"</code></li>
|
||||
<li>Require non-empty <code>db_key</code> when using SQL backend in production</li>
|
||||
<li>Refuse to auto-generate TLS certs in production mode (require existing cert+key)</li>
|
||||
<li>Already partially implemented — verify and harden the validation in <code>config.rs</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.3 Fix <code>.gitignore</code></strong></p>
|
||||
<ul>
|
||||
<li>Add <code>data/</code>, <code>*.der</code>, <code>*.pem</code>, <code>*.db</code>, <code>*.bin</code> (state files), <code>*.ks</code> (keystores)</li>
|
||||
<li>Verify no secrets are already tracked: <code>git ls-files data/ *.der *.db</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.4 Fix Dockerfile</strong></p>
|
||||
<ul>
|
||||
<li>Sync workspace members (handle excluded <code>p2p</code> crate)</li>
|
||||
<li>Create dedicated user/group instead of <code>nobody</code></li>
|
||||
<li>Set writable <code>QPQ_DATA_DIR</code> with correct permissions</li>
|
||||
<li>Test: <code>docker build . && docker run --rm -it qpq-server --help</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.5 TLS certificate lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Document CA-signed cert setup (Let’s Encrypt / custom CA)</li>
|
||||
<li>Add <code>--tls-required</code> flag that refuses to start without valid cert</li>
|
||||
<li>Log clear warning when using self-signed certs</li>
|
||||
<li>Document certificate rotation procedure</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-2--test--ci-maturity"><a class="header" href="#phase-2--test--ci-maturity">Phase 2 — Test & CI Maturity</a></h2>
|
||||
<p>Build confidence before adding features.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.1 Expand E2E test coverage</strong></p>
|
||||
<ul>
|
||||
<li>Auth failure scenarios (wrong password, expired token, invalid token)</li>
|
||||
<li>Message ordering verification (send N messages, verify seq numbers)</li>
|
||||
<li>Concurrent clients (3+ members in group, simultaneous send/recv)</li>
|
||||
<li>OPAQUE registration + login full flow</li>
|
||||
<li>Queue full behavior (>1000 messages)</li>
|
||||
<li>Rate limiting behavior (>100 enqueues/minute)</li>
|
||||
<li>Reconnection after server restart</li>
|
||||
<li>KeyPackage exhaustion (fetch when none available)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.2 Add unit tests for untested paths</strong></p>
|
||||
<ul>
|
||||
<li>Client retry logic (exponential backoff, jitter, retriable classification)</li>
|
||||
<li>REPL input parsing edge cases (empty input, special characters, <code>/</code> commands)</li>
|
||||
<li>State file encryption/decryption round-trip with bad password</li>
|
||||
<li>Token cache expiry</li>
|
||||
<li>Conversation store migrations</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.3 CI hardening</strong></p>
|
||||
<ul>
|
||||
<li>Add <code>.github/CODEOWNERS</code> (crypto, auth, wire-format require 2 reviewers)</li>
|
||||
<li>Ensure <code>cargo deny check</code> runs on every PR (already in CI — verify)</li>
|
||||
<li>Add <code>cargo audit</code> as blocking check (already in CI — verify)</li>
|
||||
<li>Add coverage reporting (tarpaulin or llvm-cov)</li>
|
||||
<li>Add CI job for Docker build validation</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.4 Clean up build warnings</strong></p>
|
||||
<ul>
|
||||
<li>Fix Cap’n Proto generated <code>unused_parens</code> warnings</li>
|
||||
<li>Remove dead code / unused imports</li>
|
||||
<li>Address <code>openmls</code> future-incompat warnings</li>
|
||||
<li>Target: <code>cargo clippy --workspace -- -D warnings</code> passes clean</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-3--client-sdks-native-quic--capn-proto-everywhere"><a class="header" href="#phase-3--client-sdks-native-quic--capn-proto-everywhere">Phase 3 — Client SDKs: Native QUIC + Cap’n Proto Everywhere</a></h2>
|
||||
<p><strong>No REST gateway. No protocol dilution.</strong> The <code>.capnp</code> schemas are the
|
||||
interface definition. Every SDK speaks native QUIC + Cap’n Proto. The
|
||||
project name stays honest.</p>
|
||||
<h3 id="why-this-matters"><a class="header" href="#why-this-matters">Why this matters</a></h3>
|
||||
<p>The name is <strong>quic</strong>proquo — the protocol IS the product. Instead
|
||||
of adding an HTTP translation layer that loses zero-copy performance and
|
||||
adds base64 overhead, we invest in making the native protocol accessible
|
||||
from every language that has QUIC + Cap’n Proto support, and provide
|
||||
WASM/FFI for the crypto layer.</p>
|
||||
<h3 id="architecture"><a class="header" href="#architecture">Architecture</a></h3>
|
||||
<pre><code> Server: QUIC + Cap'n Proto (single protocol, no gateway)
|
||||
|
||||
Client SDKs:
|
||||
┌─── Rust quinn + capnp-rpc (existing, reference impl)
|
||||
├─── Go quic-go + go-capnp (native, high confidence)
|
||||
├─── Python aioquic + pycapnp (native QUIC, manual framing)
|
||||
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
|
||||
└─── Browser WebTransport + capnp (WASM) (QUIC transport, no HTTP needed)
|
||||
|
||||
Crypto layer (client-side MLS, shared across all SDKs):
|
||||
┌─── Rust crate (native, existing)
|
||||
├─── WASM module (browsers, Node.js, Deno)
|
||||
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
|
||||
</code></pre>
|
||||
<h3 id="language-support-reality-check"><a class="header" href="#language-support-reality-check">Language support reality check</a></h3>
|
||||
<div class="table-wrapper">
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Language</th><th>QUIC</th><th>Cap’n Proto</th><th>RPC</th><th>Confidence</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><strong>Rust</strong></td><td>quinn ✅</td><td>capnp-rpc ✅</td><td>Full ✅</td><td>Existing</td></tr>
|
||||
<tr><td><strong>Go</strong></td><td>quic-go ✅</td><td>go-capnp ✅</td><td>Level 1 ✅</td><td>High</td></tr>
|
||||
<tr><td><strong>Python</strong></td><td>aioquic ✅</td><td>pycapnp ⚠️</td><td>Manual framing</td><td>Medium</td></tr>
|
||||
<tr><td><strong>C/C++</strong></td><td>msquic/ngtcp2 ✅</td><td>capnproto ✅</td><td>Full ✅</td><td>High</td></tr>
|
||||
<tr><td><strong>Browser</strong></td><td>WebTransport ✅</td><td>WASM ✅</td><td>Via WASM bridge</td><td>Medium</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<h3 id="implementation"><a class="header" href="#implementation">Implementation</a></h3>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.1 Go SDK (<code>quicproquo-go</code>)</strong></p>
|
||||
<ul>
|
||||
<li>Generated Go types from <code>node.capnp</code> (6487-line codegen, all 24 RPC methods)</li>
|
||||
<li>QUIC transport via <code>quic-go</code> with TLS 1.3 + ALPN <code>"capnp"</code></li>
|
||||
<li>High-level <code>qpq</code> package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth</li>
|
||||
<li>Example CLI in <code>sdks/go/cmd/example/</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>3.2 Python SDK (<code>quicproquo-py</code>)</strong></p>
|
||||
<ul>
|
||||
<li>QUIC transport: <code>aioquic</code> with custom Cap’n Proto stream handler</li>
|
||||
<li>Cap’n Proto serialization: <code>pycapnp</code> for message types</li>
|
||||
<li>Manual RPC framing: length-prefixed request/response over QUIC stream</li>
|
||||
<li>Async/await API matching the Rust client patterns</li>
|
||||
<li>Crypto: PyO3 bindings to <code>quicproquo-core</code> for MLS operations</li>
|
||||
<li>Publish: PyPI <code>quicproquo</code></li>
|
||||
<li>Example: async bot client</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.3 C FFI layer (<code>quicproquo-ffi</code>)</strong></p>
|
||||
<ul>
|
||||
<li><code>crates/quicproquo-ffi</code> with 7 extern “C” functions: connect, login, send, receive, disconnect, last_error, free_string</li>
|
||||
<li>Builds as <code>libquicproquo_ffi.so</code> / <code>.dylib</code> / <code>.dll</code></li>
|
||||
<li>Python ctypes wrapper in <code>examples/python/qpq_client.py</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.4 WASM compilation of <code>quicproquo-core</code></strong></p>
|
||||
<ul>
|
||||
<li><code>wasm-pack build</code> target producing 175 KB WASM bundle (LTO + opt-level=s)</li>
|
||||
<li>13 <code>wasm_bindgen</code> functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding</li>
|
||||
<li>Browser-ready with <code>crypto.getRandomValues()</code> RNG</li>
|
||||
<li>Published as <code>sdks/typescript/wasm-crypto/</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>3.5 WebTransport server endpoint</strong></p>
|
||||
<ul>
|
||||
<li>Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)</li>
|
||||
<li>Cap’n Proto RPC framed over WebTransport bidirectional streams</li>
|
||||
<li>Same auth, same storage, same RPC handlers — just a different stream source</li>
|
||||
<li>Browsers connect via <code>new WebTransport("https://server:7443")</code></li>
|
||||
<li>ALPN negotiation: <code>"h3"</code> for WebTransport, <code>"capnp"</code> for native QUIC</li>
|
||||
<li>Configurable port: <code>--webtransport-listen 0.0.0.0:7443</code></li>
|
||||
<li>Feature-flagged: <code>--features webtransport</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.6 TypeScript/JavaScript SDK (<code>@quicproquo/client</code>)</strong></p>
|
||||
<ul>
|
||||
<li><code>QpqClient</code> class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount</li>
|
||||
<li>WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad</li>
|
||||
<li>WebSocket transport with request/response correlation and reconnection</li>
|
||||
<li>Browser demo: interactive crypto playground + chat UI (<code>sdks/typescript/demo/index.html</code>)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>3.7 SDK documentation and schema publishing</strong></p>
|
||||
<ul>
|
||||
<li>Publish <code>.capnp</code> schemas as the canonical API contract</li>
|
||||
<li>Document the QUIC + Cap’n Proto connection pattern for each language</li>
|
||||
<li>Provide a “build your own SDK” guide (QUIC stream → Cap’n Proto RPC bootstrap)</li>
|
||||
<li>Reference implementation checklist: connect, auth, upload key, enqueue, fetch</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-4--trust--security-infrastructure"><a class="header" href="#phase-4--trust--security-infrastructure">Phase 4 — Trust & Security Infrastructure</a></h2>
|
||||
<p>Address the security gaps required for real-world deployment.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>4.1 Third-party cryptographic audit</strong></p>
|
||||
<ul>
|
||||
<li>Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization</li>
|
||||
<li>Firms: NCC Group, Trail of Bits, Cure53</li>
|
||||
<li>Budget and timeline: typically 4–6 weeks, $50K–$150K</li>
|
||||
<li>Publish report publicly (builds trust)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>4.2 Key Transparency / revocation</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>BasicCredential</code> with X.509-based MLS credentials</li>
|
||||
<li>Or: verifiable key directory (Merkle tree, auditable log)</li>
|
||||
<li>Users can verify peer keys haven’t been substituted (MITM detection)</li>
|
||||
<li>Revocation mechanism for compromised keys</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.3 Client authentication on Delivery Service</strong></p>
|
||||
<ul>
|
||||
<li>DS sender identity binding with explicit audit logging</li>
|
||||
<li><code>sender_prefix</code> tracking in enqueue/batch_enqueue RPCs</li>
|
||||
<li>Sender identity derived from authenticated session</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>4.4 M7 — Post-quantum MLS integration</strong></p>
|
||||
<ul>
|
||||
<li>Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider</li>
|
||||
<li>Group key material gets post-quantum confidentiality</li>
|
||||
<li>Full test suite with PQ ciphersuite</li>
|
||||
<li>Ref: existing <code>hybrid_kem.rs</code> and <code>hybrid_crypto.rs</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.5 Username enumeration mitigation</strong></p>
|
||||
<ul>
|
||||
<li>5 ms timing floor on <code>resolveUser</code> responses</li>
|
||||
<li>Rate limiting to prevent bulk enumeration attacks</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-5--features--ux"><a class="header" href="#phase-5--features--ux">Phase 5 — Features & UX</a></h2>
|
||||
<p>Make it a product people want to use.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.1 Multi-device support</strong></p>
|
||||
<ul>
|
||||
<li>Account → multiple devices, each with own Ed25519 key + MLS KeyPackages</li>
|
||||
<li>Device graph management (add device, remove device, list devices)</li>
|
||||
<li>Messages delivered to all devices of a user</li>
|
||||
<li><code>device_id</code> field already in Auth struct — wire it through</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.2 Account recovery</strong></p>
|
||||
<ul>
|
||||
<li>Recovery codes or backup key (encrypted, stored by user)</li>
|
||||
<li>Option: server-assisted recovery with security questions (lower security)</li>
|
||||
<li>MLS state re-establishment after device loss</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.3 Full MLS lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Member removal (Remove proposal → Commit → fan-out)</li>
|
||||
<li>Credential update (Update proposal for key rotation)</li>
|
||||
<li>Explicit proposal handling (queue proposals, batch commit)</li>
|
||||
<li>Group metadata (name, description, avatar hash)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.4 Message editing and deletion</strong></p>
|
||||
<ul>
|
||||
<li><code>Edit</code> (0x06) and <code>Delete</code> (0x07) message types in <code>AppMessage</code></li>
|
||||
<li><code>/edit &lt;index&gt; &lt;text&gt;</code> and <code>/delete &lt;index&gt;</code> REPL commands (own messages only)</li>
|
||||
<li>Database update/removal on incoming edit/delete</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.5 File and media transfer</strong></p>
|
||||
<ul>
|
||||
<li><code>uploadBlob</code> / <code>downloadBlob</code> RPCs with 256 KB chunked streaming</li>
|
||||
<li>SHA-256 content-addressable storage with hash verification</li>
|
||||
<li><code>FileRef</code> (0x08) message type with blob_id, filename, file_size, mime_type</li>
|
||||
<li><code>/send-file &lt;path&gt;</code> and <code>/download &lt;index&gt;</code> REPL commands with progress bars</li>
|
||||
<li>50 MB max file size, automatic MIME detection via <code>mime_guess</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.6 Abuse prevention and moderation</strong></p>
|
||||
<ul>
|
||||
<li>Block user (client-side, suppress display)</li>
|
||||
<li>Report message (encrypted report to admin key)</li>
|
||||
<li>Admin tools: ban user, delete account, audit log</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.7 Offline message queue (client-side)</strong></p>
|
||||
<ul>
|
||||
<li>Queue messages when disconnected, send on reconnect</li>
|
||||
<li>Idempotent message IDs to prevent duplicates</li>
|
||||
<li>Gap detection: compare local seq with server seq</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-6--scale--operations"><a class="header" href="#phase-6--scale--operations">Phase 6 — Scale & Operations</a></h2>
|
||||
<p>Prepare for real traffic.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.1 Distributed rate limiting</strong></p>
|
||||
<ul>
|
||||
<li>Current: in-memory per-process, lost on restart</li>
|
||||
<li>Move to Redis or shared state for multi-node deployments</li>
|
||||
<li>Sliding window with configurable thresholds</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.2 Multi-node / horizontal scaling</strong></p>
|
||||
<ul>
|
||||
<li>Stateless server design (already mostly there — state is in storage backend)</li>
|
||||
<li>Shared PostgreSQL or CockroachDB backend (replace SQLite)</li>
|
||||
<li>Message queue fan-out (Redis pub/sub or NATS for cross-node notification)</li>
|
||||
<li>Load balancer health check via QUIC RPC <code>health()</code> or Prometheus <code>/metrics</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.3 Operational runbook</strong></p>
|
||||
<ul>
|
||||
<li>Backup / restore procedures (SQLCipher, file backend)</li>
|
||||
<li>Key rotation (auth token, TLS cert, DB encryption key)</li>
|
||||
<li>Incident response playbook</li>
|
||||
<li>Scaling guide (when to add nodes, resource sizing)</li>
|
||||
<li>Monitoring dashboard templates (Grafana + Prometheus)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.4 Connection draining and graceful shutdown</strong></p>
|
||||
<ul>
|
||||
<li>Stop accepting new connections on SIGTERM</li>
|
||||
<li>Wait for in-flight RPCs (configurable timeout, default 30s)</li>
|
||||
<li>Drain WebTransport sessions with close frame</li>
|
||||
<li>Document expected behavior for load balancers (health → unhealthy first)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.5 Request-level timeouts</strong></p>
|
||||
<ul>
|
||||
<li>Per-RPC timeout (prevent slow clients from holding resources)</li>
|
||||
<li>Database query timeout</li>
|
||||
<li>Overall request deadline propagation</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.6 Observability enhancements</strong></p>
|
||||
<ul>
|
||||
<li>Request correlation IDs (trace across RPC → storage)</li>
|
||||
<li>Storage operation latency metrics</li>
|
||||
<li>Per-endpoint latency histograms</li>
|
||||
<li>Structured audit log to persistent storage (not just stdout)</li>
|
||||
<li>OpenTelemetry integration</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-7--platform-expansion--research"><a class="header" href="#phase-7--platform-expansion--research">Phase 7 — Platform Expansion & Research</a></h2>
|
||||
<p>Long-term vision for wide adoption.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.1 Mobile clients (iOS + Android)</strong></p>
|
||||
<ul>
|
||||
<li>Use C FFI (Phase 3.3) for crypto + transport (single library)</li>
|
||||
<li>Push notifications via APNs / FCM (server sends notification on enqueue)</li>
|
||||
<li>Background QUIC connection for message polling</li>
|
||||
<li>Biometric auth for local key storage (Keychain / Android Keystore)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.2 Web client (browser)</strong></p>
|
||||
<ul>
|
||||
<li>Use WASM (Phase 3.4) for crypto</li>
|
||||
<li>Use WebTransport (Phase 3.5) for native QUIC transport</li>
|
||||
<li>Cap’n Proto via WASM bridge (Phase 3.6)</li>
|
||||
<li>IndexedDB for local state persistence</li>
|
||||
<li>Service Worker for background notifications</li>
|
||||
<li>Progressive Web App (PWA) support</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.3 Federation</strong></p>
|
||||
<ul>
|
||||
<li>Server-to-server protocol via Cap’n Proto RPC over QUIC (see <code>federation.capnp</code>)</li>
|
||||
<li><code>relayEnqueue</code>, <code>proxyFetchKeyPackage</code>, <code>federationHealth</code> methods</li>
|
||||
<li>Identity resolution across federated servers</li>
|
||||
<li>MLS group spanning multiple servers</li>
|
||||
<li>Trust model for federated deployments</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.4 Sealed Sender</strong></p>
|
||||
<ul>
|
||||
<li>Sender identity inside MLS ciphertext only (server can’t see who sent)</li>
|
||||
<li><code>sealed_sender</code> module in quicproquo-core with seal/unseal API</li>
|
||||
<li>WASM-accessible via <code>wasm_bindgen</code> for browser use</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.5 Additional language SDKs</strong></p>
|
||||
<ul>
|
||||
<li>Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)</li>
|
||||
<li>Swift: Swift wrapper over C FFI + Network.framework QUIC</li>
|
||||
<li>Ruby: FFI bindings via <code>quicproquo-ffi</code></li>
|
||||
<li>Evaluate demand-driven — only build SDKs people request</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.6 P2P / NAT traversal</strong></p>
|
||||
<ul>
|
||||
<li>Direct peer-to-peer via iroh (foundation exists in <code>quicproquo-p2p</code>)</li>
|
||||
<li>Server as fallback relay only</li>
|
||||
<li>Reduces latency and single-point-of-failure</li>
|
||||
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 6.1</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.7 Traffic analysis resistance</strong></p>
|
||||
<ul>
|
||||
<li>Padding messages to uniform size</li>
|
||||
<li>Decoy traffic to mask timing patterns</li>
|
||||
<li>Optional Tor/I2P routing for IP privacy</li>
|
||||
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 5.4, 6.3</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-8--freifunk--community-mesh-networking"><a class="header" href="#phase-8--freifunk--community-mesh-networking">Phase 8 — Freifunk / Community Mesh Networking</a></h2>
|
||||
<p>Make qpq a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpq nodes form
|
||||
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
|
||||
functions without any central infrastructure or internet uplink.</p>
|
||||
<h3 id="architecture-1"><a class="header" href="#architecture-1">Architecture</a></h3>
|
||||
<pre><code> Client A ─── mDNS discovery ──► nearby qpq node (LAN / mesh)
|
||||
│
|
||||
Cap'n Proto federation
|
||||
│
|
||||
remote qpq node (across mesh)
|
||||
</code></pre>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F0 — Re-include <code>quicproquo-p2p</code> in workspace; fix ALPN strings</strong></p>
|
||||
<ul>
|
||||
<li>Moved <code>crates/quicproquo-p2p</code> from <code>exclude</code> back into <code>[workspace] members</code></li>
|
||||
<li>Fixed ALPN <code>b"quicnprotochat/p2p/1"</code> → <code>b"quicproquo/p2p/1"</code> (breaking wire change)</li>
|
||||
<li>Fixed federation ALPN <code>b"qnpc-fed"</code> → <code>b"quicproquo/federation/1"</code></li>
|
||||
<li>Feature-gated behind <code>--features mesh</code> on client (keeps iroh out of default builds)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F1 — Federation routing in message delivery</strong></p>
|
||||
<ul>
|
||||
<li><code>handle_enqueue</code> and <code>handle_batch_enqueue</code> call <code>federation::routing::resolve_destination()</code></li>
|
||||
<li>Recipients with a remote home server are relayed via <code>FederationClient::relay_enqueue()</code></li>
|
||||
<li>mTLS mutual authentication between nodes (both present client certs, validated against shared CA)</li>
|
||||
<li>Config: <code>QPQ_FEDERATION_LISTEN</code>, <code>QPQ_LOCAL_DOMAIN</code>, <code>QPQ_FEDERATION_CERT/KEY/CA</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F2 — mDNS local peer discovery</strong></p>
|
||||
<ul>
|
||||
<li>Server announces <code>_quicproquo._udp.local.</code> on startup via <code>mdns-sd</code></li>
|
||||
<li>Client: <code>MeshDiscovery::start()</code> browses for nearby nodes (feature-gated)</li>
|
||||
<li>REPL commands: <code>/mesh peers</code> (scan + list), <code>/mesh server &lt;host:port&gt;</code> (note address)</li>
|
||||
<li>Nodes announce: <code>ver=1</code>, <code>server=&lt;host:port&gt;</code>, <code>domain=&lt;local_domain&gt;</code> TXT records</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F3 — Self-sovereign mesh identity</strong></p>
|
||||
<ul>
|
||||
<li>Ed25519 keypair-based identity independent of AS registration</li>
|
||||
<li>JSON-persisted seed + known peers directory</li>
|
||||
<li>Sign/verify operations for mesh authenticity (<code>crates/quicproquo-p2p/src/identity.rs</code>)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F4 — Store-and-forward with TTL</strong></p>
|
||||
<ul>
|
||||
<li><code>MeshEnvelope</code> with TTL-based expiry, hop_count tracking, max_hops routing limit</li>
|
||||
<li>SHA-256 deduplication ID prevents relay loops</li>
|
||||
<li>Ed25519 signature verification on envelopes</li>
|
||||
<li><code>MeshStore</code> in-memory queue with per-recipient capacity limits and TTL-based GC</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F5 — Lightweight broadcast channels</strong></p>
|
||||
<ul>
|
||||
<li>Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)</li>
|
||||
<li>Topic-based pub/sub via <code>BroadcastChannel</code> and <code>BroadcastManager</code></li>
|
||||
<li>Subscribe/unsubscribe, create, publish API on <code>P2pNode</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F6 — Extended <code>/mesh</code> REPL commands</strong></p>
|
||||
<ul>
|
||||
<li><code>/mesh send &lt;peer_id&gt; &lt;msg&gt;</code> — direct P2P message via iroh</li>
|
||||
<li><code>/mesh broadcast &lt;topic&gt; &lt;msg&gt;</code> — publish to broadcast channel</li>
|
||||
<li><code>/mesh subscribe &lt;topic&gt;</code> — join broadcast channel</li>
|
||||
<li><code>/mesh route</code> — show routing table</li>
|
||||
<li><code>/mesh identity</code> — show mesh identity info</li>
|
||||
<li><code>/mesh store</code> — show store-and-forward statistics</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>F7 — OpenWrt cross-compilation guide</strong></p>
|
||||
<ul>
|
||||
<li>Musl static builds: <code>x86_64-unknown-linux-musl</code>, <code>armv7-unknown-linux-musleabihf</code>, <code>mips-unknown-linux-musl</code></li>
|
||||
<li>Strip binary: <code>--release</code> + <code>strip</code> → target size &lt; 5 MB for flash storage</li>
|
||||
<li><code>opkg</code> package manifest for OpenWrt feed</li>
|
||||
<li><code>procd</code> init script + <code>uci</code> config file for OpenWrt integration</li>
|
||||
<li>CI job: cross-compile and size-check on every release tag</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>F8 — Traffic analysis resistance for mesh</strong></p>
|
||||
<ul>
|
||||
<li>Uniform message padding to nearest 256-byte boundary (hides message size)</li>
|
||||
<li>Configurable decoy traffic rate (fake messages to mask send timing)</li>
|
||||
<li>Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)</li>
|
||||
<li>Ref: Phase 7.7 for server-side traffic analysis resistance</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-9--developer-experience--community-growth"><a class="header" href="#phase-9--developer-experience--community-growth">Phase 9 — Developer Experience & Community Growth</a></h2>
|
||||
<p>Features designed to attract contributors, create demo/showcase potential,
|
||||
and lower the barrier to entry for non-crypto developers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.1 Criterion Benchmark Suite (<code>qpq-bench</code>)</strong></p>
|
||||
<ul>
|
||||
<li>Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake</li>
|
||||
<li>CI publishes HTML benchmark reports as GitHub Actions artifacts</li>
|
||||
<li>Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.2 Safety Numbers (key verification)</strong></p>
|
||||
<ul>
|
||||
<li>60-digit numeric code derived from two identity keys (Signal-style)</li>
|
||||
<li><code>/verify <username></code> REPL command for out-of-band verification</li>
|
||||
<li>Available in WASM via <code>compute_safety_number</code> binding</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.3 Full-Screen TUI (Ratatui + Crossterm)</strong></p>
|
||||
<ul>
|
||||
<li><code>qpq tui</code> launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator</li>
|
||||
<li>Feature-gated <code>--features tui</code> to keep ratatui/crossterm out of default builds</li>
|
||||
<li>Existing REPL and CLI subcommands are unaffected</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.4 Delivery Proof Canary Tokens</strong></p>
|
||||
<ul>
|
||||
<li>Server signs <code>Ed25519(SHA-256(message_id || recipient || timestamp))</code> on enqueue</li>
|
||||
<li>Sender stores proof locally — cryptographic evidence the server queued the message</li>
|
||||
<li>Cap’n Proto schema gains optional <code>deliveryProof: Data</code> on enqueue response</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.5 Verifiable Transcript Archive</strong></p>
|
||||
<ul>
|
||||
<li><code>GroupMember::export_transcript(path, password)</code> writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
<li><code>qpq export verify</code> CLI command independently verifies chain integrity</li>
|
||||
<li>Useful for legal discovery, audit, or personal backup</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.6 Key Transparency (Merkle-Log Identity Binding)</strong></p>
|
||||
<ul>
|
||||
<li>Append-only Merkle log of (username, identity_key) bindings in the AS</li>
|
||||
<li>Clients receive inclusion proofs alongside key fetches</li>
|
||||
<li>Any client can independently audit the full identity history</li>
|
||||
<li>Lightweight subset of RFC 9162 adapted for identity keys</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.7 Dynamic Server Plugin System</strong></p>
|
||||
<ul>
|
||||
<li>Server loads <code>.so</code>/<code>.dylib</code> plugins at runtime via <code>--plugin-dir</code></li>
|
||||
<li>C-compatible <code>HookVTable</code> via <code>extern "C"</code> — plugins in any language</li>
|
||||
<li>6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered</li>
|
||||
<li>Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.8 PQ Noise Transport Layer</strong></p>
|
||||
<ul>
|
||||
<li>Hybrid <code>Noise_XX + ML-KEM-768</code> handshake for post-quantum transport security</li>
|
||||
<li>Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)</li>
|
||||
<li>Feature-gated <code>--features pq-noise</code>; classical Noise_XX default preserved</li>
|
||||
<li>May require extending or forking <code>snow</code> crate’s <code>CryptoResolver</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="summary-timeline"><a class="header" href="#summary-timeline">Summary Timeline</a></h2>
|
||||
<div class="table-wrapper">
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Phase</th><th>Focus</th><th>Estimated Effort</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><strong>1</strong></td><td>Production Hardening</td><td>1–2 days</td></tr>
|
||||
<tr><td><strong>2</strong></td><td>Test & CI Maturity</td><td>2–3 days</td></tr>
|
||||
<tr><td><strong>3</strong></td><td>Client SDKs (Go, Python, WASM, FFI, WebTransport)</td><td>5–8 days</td></tr>
|
||||
<tr><td><strong>4</strong></td><td>Trust & Security Infrastructure</td><td>2–4 days (excl. audit)</td></tr>
|
||||
<tr><td><strong>5</strong></td><td>Features & UX</td><td>5–7 days</td></tr>
|
||||
<tr><td><strong>6</strong></td><td>Scale & Operations</td><td>3–5 days</td></tr>
|
||||
<tr><td><strong>7</strong></td><td>Platform Expansion & Research</td><td>ongoing</td></tr>
|
||||
<tr><td><strong>8</strong></td><td>Freifunk / Community Mesh</td><td>ongoing</td></tr>
|
||||
<tr><td><strong>9</strong></td><td>Developer Experience & Community Growth</td><td>3–5 days</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<hr>
|
||||
<h2 id="related-documents"><a class="header" href="#related-documents">Related Documents</a></h2>
|
||||
<ul>
|
||||
<li><a href="docs/FUTURE-IMPROVEMENTS.html">Future Improvements</a> — consolidated improvement list</li>
|
||||
<li><a href="docs/PRODUCTION-READINESS-AUDIT.html">Production Readiness Audit</a> — specific blockers</li>
|
||||
<li><a href="docs/SECURITY-AUDIT.html">Security Audit</a> — findings and recommendations</li>
|
||||
<li><a href="docs/src/roadmap/milestones.html">Milestone Tracker</a> — M1–M7 status</li>
|
||||
<li><a href="docs/src/roadmap/authz-plan.html">Auth, Devices, and Tokens</a> — authorization design</li>
|
||||
<li><a href="docs/src/roadmap/dm-channels.html">DM Channel Design</a> — 1:1 channel spec</li>
|
||||
</ul>
|
||||
|
||||
</main>
|
||||
|
||||
<nav class="nav-wrapper" aria-label="Page navigation">
|
||||
<!-- Mobile navigation buttons -->
|
||||
<a rel="prev" href="roadmap/future-research.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="contributing/coding-standards.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<div style="clear: both"></div>
|
||||
</nav>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<nav class="nav-wide-wrapper" aria-label="Page navigation">
|
||||
<a rel="prev" href="roadmap/future-research.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="contributing/coding-standards.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
</nav>
|
||||
|
||||
</div>
|
||||
|
||||
<template id=fa-eye><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M288 32c-80.8 0-145.5 36.8-192.6 80.6C48.6 156 17.3 208 2.5 243.7c-3.3 7.9-3.3 16.7 0 24.6C17.3 304 48.6 356 95.4 399.4C142.5 443.2 207.2 480 288 480s145.5-36.8 192.6-80.6c46.8-43.5 78.1-95.4 93-131.1c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C433.5 68.8 368.8 32 288 32zM432 256c0 79.5-64.5 144-144 144s-144-64.5-144-144s64.5-144 144-144s144 64.5 144 144zM288 192c0 35.3-28.7 64-64 64c-11.5 0-22.3-3-31.6-8.4c-.2 2.8-.4 5.5-.4 8.4c0 53 43 96 96 96s96-43 96-96s-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6z"/></svg></span></template>
|
||||
<template id=fa-eye-slash><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M38.8 5.1C28.4-3.1 13.3-1.2 5.1 9.2S-1.2 34.7 9.2 42.9l592 464c10.4 8.2 25.5 6.3 33.7-4.1s6.3-25.5-4.1-33.7L525.6 386.7c39.6-40.6 66.4-86.1 79.9-118.4c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C465.5 68.8 400.8 32 320 32c-68.2 0-125 26.3-169.3 60.8L38.8 5.1zM223.1 149.5C248.6 126.2 282.7 112 320 112c79.5 0 144 64.5 144 144c0 24.9-6.3 48.3-17.4 68.7L408 294.5c5.2-11.8 8-24.8 8-38.5c0-53-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6c0 10.2-2.4 19.8-6.6 28.3l-90.3-70.8zm223.1 298L373 389.9c-16.4 6.5-34.3 10.1-53 10.1c-79.5 0-144-64.5-144-144c0-6.9 .5-13.6 1.4-20.2L83.1 161.5C60.3 191.2 44 220.8 34.5 243.7c-3.3 7.9-3.3 16.7 0 24.6c14.9 35.7 46.2 87.7 93 131.1C174.5 443.2 239.2 480 320 480c47.8 0 89.9-12.9 126.2-32.5z"/></svg></span></template>
|
||||
<template id=fa-copy><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M502.6 70.63l-61.25-61.25C435.4 3.371 427.2 0 418.7 0H255.1c-35.35 0-64 28.66-64 64l.0195 256C192 355.4 220.7 384 256 384h192c35.2 0 64-28.8 64-64V93.25C512 84.77 508.6 76.63 502.6 70.63zM464 320c0 8.836-7.164 16-16 16H255.1c-8.838 0-16-7.164-16-16L239.1 64.13c0-8.836 7.164-16 16-16h128L384 96c0 17.67 14.33 32 32 32h47.1V320zM272 448c0 8.836-7.164 16-16 16H63.1c-8.838 0-16-7.164-16-16L47.98 192.1c0-8.836 7.164-16 16-16H160V128H63.99c-35.35 0-64 28.65-64 64l.0098 256C.002 483.3 28.66 512 64 512h192c35.2 0 64-28.8 64-64v-32h-47.1L272 448z"/></svg></span></template>
|
||||
<template id=fa-play><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M73 39c-14.8-9.1-33.4-9.4-48.5-.9S0 62.6 0 80V432c0 17.4 9.4 33.4 24.5 41.9s33.7 8.1 48.5-.9L361 297c14.3-8.7 23-24.2 23-41s-8.7-32.2-23-41L73 39z"/></svg></span></template>
|
||||
<template id=fa-clock-rotate-left><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M75 75L41 41C25.9 25.9 0 36.6 0 57.9V168c0 13.3 10.7 24 24 24H134.1c21.4 0 32.1-25.9 17-41l-30.8-30.8C155 85.5 203 64 256 64c106 0 192 86 192 192s-86 192-192 192c-40.8 0-78.6-12.7-109.7-34.4c-14.5-10.1-34.4-6.6-44.6 7.9s-6.6 34.4 7.9 44.6C151.2 495 201.7 512 256 512c141.4 0 256-114.6 256-256S397.4 0 256 0C185.3 0 121.3 28.7 75 75zm181 53c-13.3 0-24 10.7-24 24V256c0 6.4 2.5 12.5 7 17l72 72c9.4 9.4 24.6 9.4 33.9 0s9.4-24.6 0-33.9l-65-65V152c0-13.3-10.7-24-24-24z"/></svg></span></template>
|
||||
|
||||
|
||||
|
||||
<script>
|
||||
window.playground_copyable = true;
|
||||
</script>
|
||||
|
||||
|
||||
<script src="elasticlunr-ef4e11c1.min.js"></script>
|
||||
<script src="mark-09e88c2c.min.js"></script>
|
||||
<script src="searcher-c2a407aa.js"></script>
|
||||
|
||||
<script src="clipboard-1626706a.min.js"></script>
|
||||
<script src="highlight-abc7f01d.js"></script>
|
||||
<script src="book-a0b12cfe.js"></script>
|
||||
|
||||
<!-- Custom JS scripts -->
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
493
ROADMAP.md
Normal file
493
ROADMAP.md
Normal file
@@ -0,0 +1,493 @@
|
||||
# Roadmap — quicproquo
|
||||
|
||||
> From proof-of-concept to production-grade E2E encrypted messaging.
|
||||
>
|
||||
> Each phase is designed to be tackled sequentially. Items within a phase
|
||||
> can be parallelised. Check the box when done.
|
||||
|
||||
---
|
||||
|
||||
## Phase 1 — Production Hardening (Critical)
|
||||
|
||||
Eliminate all crash paths, enforce secure defaults, fix deployment blockers.
|
||||
|
||||
- [x] **1.1 Remove `.unwrap()` / `.expect()` from production paths**
|
||||
- Replace `AUTH_CONTEXT.read().expect()` in client RPC with proper `Result`
|
||||
- Replace `"0.0.0.0:0".parse().unwrap()` in client with fallible parse
|
||||
- Replace `Mutex::lock().unwrap()` in server storage with `.map_err()`
|
||||
- Audit: `grep -rn 'unwrap()\|expect(' crates/` outside `#[cfg(test)]`
|
||||
|
||||
- [x] **1.2 Enforce secure defaults in production mode**
|
||||
- Reject startup if `QPQ_PRODUCTION=true` and `auth_token` is empty or `"devtoken"`
|
||||
- Require non-empty `db_key` when using SQL backend in production
|
||||
- Refuse to auto-generate TLS certs in production mode (require existing cert+key)
|
||||
- Already partially implemented — verify and harden the validation in `config.rs`
|
||||
|
||||
- [x] **1.3 Fix `.gitignore`**
|
||||
- Add `data/`, `*.der`, `*.pem`, `*.db`, `*.bin` (state files), `*.ks` (keystores)
|
||||
- Verify no secrets are already tracked: `git ls-files data/ *.der *.db`
|
||||
|
||||
- [x] **1.4 Fix Dockerfile**
|
||||
- Sync workspace members (handle excluded `p2p` crate)
|
||||
- Create dedicated user/group instead of `nobody`
|
||||
- Set writable `QPQ_DATA_DIR` with correct permissions
|
||||
- Test: `docker build . && docker run --rm -it qpq-server --help`
|
||||
|
||||
- [x] **1.5 TLS certificate lifecycle**
|
||||
- Document CA-signed cert setup (Let's Encrypt / custom CA)
|
||||
- Add `--tls-required` flag that refuses to start without valid cert
|
||||
- Log clear warning when using self-signed certs
|
||||
- Document certificate rotation procedure
|
||||
|
||||
---
|
||||
|
||||
## Phase 2 — Test & CI Maturity
|
||||
|
||||
Build confidence before adding features.
|
||||
|
||||
- [x] **2.1 Expand E2E test coverage**
|
||||
- Auth failure scenarios (wrong password, expired token, invalid token)
|
||||
- Message ordering verification (send N messages, verify seq numbers)
|
||||
- Concurrent clients (3+ members in group, simultaneous send/recv)
|
||||
- OPAQUE registration + login full flow
|
||||
- Queue full behavior (>1000 messages)
|
||||
- Rate limiting behavior (>100 enqueues/minute)
|
||||
- Reconnection after server restart
|
||||
- KeyPackage exhaustion (fetch when none available)
|
||||
|
||||
- [x] **2.2 Add unit tests for untested paths**
|
||||
- Client retry logic (exponential backoff, jitter, retriable classification)
|
||||
- REPL input parsing edge cases (empty input, special characters, `/` commands)
|
||||
- State file encryption/decryption round-trip with bad password
|
||||
- Token cache expiry
|
||||
- Conversation store migrations
|
||||
|
||||
- [x] **2.3 CI hardening**
|
||||
- Add `.github/CODEOWNERS` (crypto, auth, wire-format require 2 reviewers)
|
||||
- Ensure `cargo deny check` runs on every PR (already in CI — verify)
|
||||
- Add `cargo audit` as blocking check (already in CI — verify)
|
||||
- Add coverage reporting (tarpaulin or llvm-cov)
|
||||
- Add CI job for Docker build validation
|
||||
|
||||
- [x] **2.4 Clean up build warnings**
|
||||
- Fix Cap'n Proto generated `unused_parens` warnings
|
||||
- Remove dead code / unused imports
|
||||
- Address `openmls` future-incompat warnings
|
||||
- Target: `cargo clippy --workspace -- -D warnings` passes clean
|
||||
|
||||
---
|
||||
|
||||
## Phase 3 — Client SDKs: Native QUIC + Cap'n Proto Everywhere
|
||||
|
||||
**No REST gateway. No protocol dilution.** The `.capnp` schemas are the
|
||||
interface definition. Every SDK speaks native QUIC + Cap'n Proto. The
|
||||
project name stays honest.
|
||||
|
||||
### Why this matters
|
||||
|
||||
The name is **quicproquo** — the protocol IS the product. Instead
|
||||
of adding an HTTP translation layer that loses zero-copy performance and
|
||||
adds base64 overhead, we invest in making the native protocol accessible
|
||||
from every language that has QUIC + Cap'n Proto support, and provide
|
||||
WASM/FFI for the crypto layer.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Server: QUIC + Cap'n Proto (single protocol, no gateway)
|
||||
|
||||
Client SDKs:
|
||||
┌─── Rust quinn + capnp-rpc (existing, reference impl)
|
||||
├─── Go quic-go + go-capnp (native, high confidence)
|
||||
├─── Python aioquic + pycapnp (native QUIC, manual framing)
|
||||
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
|
||||
└─── Browser  WebTransport + capnp (WASM)  (QUIC transport via HTTP/3, no REST gateway needed)
|
||||
|
||||
Crypto layer (client-side MLS, shared across all SDKs):
|
||||
┌─── Rust crate (native, existing)
|
||||
├─── WASM module (browsers, Node.js, Deno)
|
||||
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
|
||||
```
|
||||
|
||||
### Language support reality check
|
||||
|
||||
| Language | QUIC | Cap'n Proto | RPC | Confidence |
|
||||
|----------|------|-------------|-----|------------|
|
||||
| **Rust** | quinn ✅ | capnp-rpc ✅ | Full ✅ | Existing |
|
||||
| **Go** | quic-go ✅ | go-capnp ✅ | Level 1 ✅ | High |
|
||||
| **Python** | aioquic ✅ | pycapnp ⚠️ | Manual framing | Medium |
|
||||
| **C/C++** | msquic/ngtcp2 ✅ | capnproto ✅ | Full ✅ | High |
|
||||
| **Browser** | WebTransport ✅ | WASM ✅ | Via WASM bridge | Medium |
|
||||
|
||||
### Implementation
|
||||
|
||||
- [x] **3.1 Go SDK (`quicproquo-go`)**
|
||||
- Generated Go types from `node.capnp` (6487-line codegen, all 24 RPC methods)
|
||||
- QUIC transport via `quic-go` with TLS 1.3 + ALPN `"capnp"`
|
||||
- High-level `qpq` package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth
|
||||
- Example CLI in `sdks/go/cmd/example/`
|
||||
|
||||
- [x] **3.2 Python SDK (`quicproquo-py`)**
|
||||
- QUIC transport: `aioquic` with custom Cap'n Proto stream handler
|
||||
- Cap'n Proto serialization: `pycapnp` for message types
|
||||
- Manual RPC framing: length-prefixed request/response over QUIC stream
|
||||
- Async/await API matching the Rust client patterns
|
||||
- Crypto: PyO3 bindings to `quicproquo-core` for MLS operations
|
||||
- Publish: PyPI `quicproquo`
|
||||
- Example: async bot client
|
||||
|
||||
- [x] **3.3 C FFI layer (`quicproquo-ffi`)**
|
||||
- `crates/quicproquo-ffi` with 7 extern "C" functions: connect, login, send, receive, disconnect, last_error, free_string
|
||||
- Builds as `libquicproquo_ffi.so` / `.dylib` / `.dll`
|
||||
- Python ctypes wrapper in `examples/python/qpq_client.py`
|
||||
|
||||
- [x] **3.4 WASM compilation of `quicproquo-core`**
|
||||
- `wasm-pack build` target producing 175 KB WASM bundle (LTO + opt-level=s)
|
||||
- 13 `wasm_bindgen` functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding
|
||||
- Browser-ready with `crypto.getRandomValues()` RNG
|
||||
- Published as `sdks/typescript/wasm-crypto/`
|
||||
|
||||
- [x] **3.5 WebTransport server endpoint**
|
||||
- Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)
|
||||
- Cap'n Proto RPC framed over WebTransport bidirectional streams
|
||||
- Same auth, same storage, same RPC handlers — just a different stream source
|
||||
- Browsers connect via `new WebTransport("https://server:7443")`
|
||||
- ALPN negotiation: `"h3"` for WebTransport, `"capnp"` for native QUIC
|
||||
- Configurable port: `--webtransport-listen 0.0.0.0:7443`
|
||||
- Feature-flagged: `--features webtransport`
|
||||
|
||||
- [x] **3.6 TypeScript/JavaScript SDK (`@quicproquo/client`)**
|
||||
- `QpqClient` class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount
|
||||
- WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad
|
||||
- WebSocket transport with request/response correlation and reconnection
|
||||
- Browser demo: interactive crypto playground + chat UI (`sdks/typescript/demo/index.html`)
|
||||
|
||||
- [x] **3.7 SDK documentation and schema publishing**
|
||||
- Publish `.capnp` schemas as the canonical API contract
|
||||
- Document the QUIC + Cap'n Proto connection pattern for each language
|
||||
- Provide a "build your own SDK" guide (QUIC stream → Cap'n Proto RPC bootstrap)
|
||||
- Reference implementation checklist: connect, auth, upload key, enqueue, fetch
|
||||
|
||||
---
|
||||
|
||||
## Phase 4 — Trust & Security Infrastructure
|
||||
|
||||
Address the security gaps required for real-world deployment.
|
||||
|
||||
- [ ] **4.1 Third-party cryptographic audit**
|
||||
- Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization
|
||||
- Firms: NCC Group, Trail of Bits, Cure53
|
||||
- Budget and timeline: typically 4-6 weeks, $50K–$150K
|
||||
- Publish report publicly (builds trust)
|
||||
|
||||
- [x] **4.2 Key Transparency / revocation**
|
||||
- Replace `BasicCredential` with X.509-based MLS credentials
|
||||
- Or: verifiable key directory (Merkle tree, auditable log)
|
||||
- Users can verify peer keys haven't been substituted (MITM detection)
|
||||
- Revocation mechanism for compromised keys
|
||||
|
||||
- [x] **4.3 Client authentication on Delivery Service**
|
||||
- DS sender identity binding with explicit audit logging
|
||||
- `sender_prefix` tracking in enqueue/batch_enqueue RPCs
|
||||
- Sender identity derived from authenticated session
|
||||
|
||||
- [x] **4.4 M7 — Post-quantum MLS integration**
|
||||
- Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider
|
||||
- Group key material gets post-quantum confidentiality
|
||||
- Full test suite with PQ ciphersuite
|
||||
- Ref: existing `hybrid_kem.rs` and `hybrid_crypto.rs`
|
||||
|
||||
- [x] **4.5 Username enumeration mitigation**
|
||||
- 5 ms timing floor on `resolveUser` responses
|
||||
- Rate limiting to prevent bulk enumeration attacks
|
||||
|
||||
---
|
||||
|
||||
## Phase 5 — Features & UX
|
||||
|
||||
Make it a product people want to use.
|
||||
|
||||
- [x] **5.1 Multi-device support**
|
||||
- Account → multiple devices, each with own Ed25519 key + MLS KeyPackages
|
||||
- Device graph management (add device, remove device, list devices)
|
||||
- Messages delivered to all devices of a user
|
||||
- `device_id` field already in Auth struct — wire it through
|
||||
|
||||
- [x] **5.2 Account recovery**
|
||||
- Recovery codes or backup key (encrypted, stored by user)
|
||||
- Option: server-assisted recovery with security questions (lower security)
|
||||
- MLS state re-establishment after device loss
|
||||
|
||||
- [x] **5.3 Full MLS lifecycle**
|
||||
- Member removal (Remove proposal → Commit → fan-out)
|
||||
- Credential update (Update proposal for key rotation)
|
||||
- Explicit proposal handling (queue proposals, batch commit)
|
||||
- Group metadata (name, description, avatar hash)
|
||||
|
||||
- [x] **5.4 Message editing and deletion**
|
||||
- `Edit` (0x06) and `Delete` (0x07) message types in `AppMessage`
|
||||
- `/edit <index> <text>` and `/delete <index>` REPL commands (own messages only)
|
||||
- Database update/removal on incoming edit/delete
|
||||
|
||||
- [x] **5.5 File and media transfer**
|
||||
- `uploadBlob` / `downloadBlob` RPCs with 256 KB chunked streaming
|
||||
- SHA-256 content-addressable storage with hash verification
|
||||
- `FileRef` (0x08) message type with blob_id, filename, file_size, mime_type
|
||||
- `/send-file <path>` and `/download <index>` REPL commands with progress bars
|
||||
- 50 MB max file size, automatic MIME detection via `mime_guess`
|
||||
|
||||
- [x] **5.6 Abuse prevention and moderation**
|
||||
- Block user (client-side, suppress display)
|
||||
- Report message (encrypted report to admin key)
|
||||
- Admin tools: ban user, delete account, audit log
|
||||
|
||||
- [x] **5.7 Offline message queue (client-side)**
|
||||
- Queue messages when disconnected, send on reconnect
|
||||
- Idempotent message IDs to prevent duplicates
|
||||
- Gap detection: compare local seq with server seq
|
||||
|
||||
---
|
||||
|
||||
## Phase 6 — Scale & Operations
|
||||
|
||||
Prepare for real traffic.
|
||||
|
||||
- [x] **6.1 Distributed rate limiting**
|
||||
- Current: in-memory per-process, lost on restart
|
||||
- Move to Redis or shared state for multi-node deployments
|
||||
- Sliding window with configurable thresholds
|
||||
|
||||
- [x] **6.2 Multi-node / horizontal scaling**
|
||||
- Stateless server design (already mostly there — state is in storage backend)
|
||||
- Shared PostgreSQL or CockroachDB backend (replace SQLite)
|
||||
- Message queue fan-out (Redis pub/sub or NATS for cross-node notification)
|
||||
- Load balancer health check via QUIC RPC `health()` or Prometheus `/metrics`
|
||||
|
||||
- [x] **6.3 Operational runbook**
|
||||
- Backup / restore procedures (SQLCipher, file backend)
|
||||
- Key rotation (auth token, TLS cert, DB encryption key)
|
||||
- Incident response playbook
|
||||
- Scaling guide (when to add nodes, resource sizing)
|
||||
- Monitoring dashboard templates (Grafana + Prometheus)
|
||||
|
||||
- [x] **6.4 Connection draining and graceful shutdown**
|
||||
- Stop accepting new connections on SIGTERM
|
||||
- Wait for in-flight RPCs (configurable timeout, default 30s)
|
||||
- Drain WebTransport sessions with close frame
|
||||
- Document expected behavior for load balancers (health → unhealthy first)
|
||||
|
||||
- [x] **6.5 Request-level timeouts**
|
||||
- Per-RPC timeout (prevent slow clients from holding resources)
|
||||
- Database query timeout
|
||||
- Overall request deadline propagation
|
||||
|
||||
- [x] **6.6 Observability enhancements**
|
||||
- Request correlation IDs (trace across RPC → storage)
|
||||
- Storage operation latency metrics
|
||||
- Per-endpoint latency histograms
|
||||
- Structured audit log to persistent storage (not just stdout)
|
||||
- OpenTelemetry integration
|
||||
|
||||
---
|
||||
|
||||
## Phase 7 — Platform Expansion & Research
|
||||
|
||||
Long-term vision for wide adoption.
|
||||
|
||||
- [x] **7.1 Mobile clients (iOS + Android)**
|
||||
- Use C FFI (Phase 3.3) for crypto + transport (single library)
|
||||
- Push notifications via APNs / FCM (server sends notification on enqueue)
|
||||
- Background QUIC connection for message polling
|
||||
- Biometric auth for local key storage (Keychain / Android Keystore)
|
||||
|
||||
- [x] **7.2 Web client (browser)**
|
||||
- Use WASM (Phase 3.4) for crypto
|
||||
- Use WebTransport (Phase 3.5) for native QUIC transport
|
||||
- Cap'n Proto via WASM bridge (Phase 3.6)
|
||||
- IndexedDB for local state persistence
|
||||
- Service Worker for background notifications
|
||||
- Progressive Web App (PWA) support
|
||||
|
||||
- [x] **7.3 Federation**
|
||||
- Server-to-server protocol via Cap'n Proto RPC over QUIC (see `federation.capnp`)
|
||||
- `relayEnqueue`, `proxyFetchKeyPackage`, `federationHealth` methods
|
||||
- Identity resolution across federated servers
|
||||
- MLS group spanning multiple servers
|
||||
- Trust model for federated deployments
|
||||
|
||||
- [x] **7.4 Sealed Sender**
|
||||
- Sender identity inside MLS ciphertext only (server can't see who sent)
|
||||
- `sealed_sender` module in quicproquo-core with seal/unseal API
|
||||
- WASM-accessible via `wasm_bindgen` for browser use
|
||||
|
||||
- [x] **7.5 Additional language SDKs**
|
||||
- Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)
|
||||
- Swift: Swift wrapper over C FFI + Network.framework QUIC
|
||||
- Ruby: FFI bindings via `quicproquo-ffi`
|
||||
- Evaluate demand-driven — only build SDKs people request
|
||||
|
||||
- [x] **7.6 P2P / NAT traversal**
|
||||
- Direct peer-to-peer via iroh (foundation exists in `quicproquo-p2p`)
|
||||
- Server as fallback relay only
|
||||
- Reduces latency and single-point-of-failure
|
||||
- Ref: `FUTURE-IMPROVEMENTS.md § 6.1`
|
||||
|
||||
- [x] **7.7 Traffic analysis resistance**
|
||||
- Padding messages to uniform size
|
||||
- Decoy traffic to mask timing patterns
|
||||
- Optional Tor/I2P routing for IP privacy
|
||||
- Ref: `FUTURE-IMPROVEMENTS.md § 5.4, 6.3`
|
||||
|
||||
---
|
||||
|
||||
## Phase 8 — Freifunk / Community Mesh Networking
|
||||
|
||||
Make qpq a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpq nodes form
|
||||
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
|
||||
functions without any central infrastructure or internet uplink.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Client A ─── mDNS discovery ──► nearby qpq node (LAN / mesh)
|
||||
│
|
||||
Cap'n Proto federation
|
||||
│
|
||||
remote qpq node (across mesh)
|
||||
```
|
||||
|
||||
- [x] **F0 — Re-include `quicproquo-p2p` in workspace; fix ALPN strings**
|
||||
- Moved `crates/quicproquo-p2p` from `exclude` back into `[workspace] members`
|
||||
- Fixed ALPN `b"quicnprotochat/p2p/1"` → `b"quicproquo/p2p/1"` (breaking wire change)
|
||||
- Fixed federation ALPN `b"qnpc-fed"` → `b"quicproquo/federation/1"`
|
||||
- Feature-gated behind `--features mesh` on client (keeps iroh out of default builds)
|
||||
|
||||
- [x] **F1 — Federation routing in message delivery**
|
||||
- `handle_enqueue` and `handle_batch_enqueue` call `federation::routing::resolve_destination()`
|
||||
- Recipients with a remote home server are relayed via `FederationClient::relay_enqueue()`
|
||||
- mTLS mutual authentication between nodes (both present client certs, validated against shared CA)
|
||||
- Config: `QPQ_FEDERATION_LISTEN`, `QPQ_LOCAL_DOMAIN`, `QPQ_FEDERATION_CERT/KEY/CA`
|
||||
|
||||
- [x] **F2 — mDNS local peer discovery**
|
||||
- Server announces `_quicproquo._udp.local.` on startup via `mdns-sd`
|
||||
- Client: `MeshDiscovery::start()` browses for nearby nodes (feature-gated)
|
||||
- REPL commands: `/mesh peers` (scan + list), `/mesh server <host:port>` (note address)
|
||||
- Nodes announce: `ver=1`, `server=<host:port>`, `domain=<local_domain>` TXT records
|
||||
|
||||
- [x] **F3 — Self-sovereign mesh identity**
|
||||
- Ed25519 keypair-based identity independent of AS registration
|
||||
- JSON-persisted seed + known peers directory
|
||||
- Sign/verify operations for mesh authenticity (`crates/quicproquo-p2p/src/identity.rs`)
|
||||
|
||||
- [x] **F4 — Store-and-forward with TTL**
|
||||
- `MeshEnvelope` with TTL-based expiry, hop_count tracking, max_hops routing limit
|
||||
- SHA-256 deduplication ID prevents relay loops
|
||||
- Ed25519 signature verification on envelopes
|
||||
- `MeshStore` in-memory queue with per-recipient capacity limits and TTL-based GC
|
||||
|
||||
- [x] **F5 — Lightweight broadcast channels**
|
||||
- Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)
|
||||
- Topic-based pub/sub via `BroadcastChannel` and `BroadcastManager`
|
||||
- Subscribe/unsubscribe, create, publish API on `P2pNode`
|
||||
|
||||
- [x] **F6 — Extended `/mesh` REPL commands**
|
||||
- `/mesh send <peer_id> <msg>` — direct P2P message via iroh
|
||||
- `/mesh broadcast <topic> <msg>` — publish to broadcast channel
|
||||
- `/mesh subscribe <topic>` — join broadcast channel
|
||||
- `/mesh route` — show routing table
|
||||
- `/mesh identity` — show mesh identity info
|
||||
- `/mesh store` — show store-and-forward statistics
|
||||
|
||||
- [x] **F7 — OpenWrt cross-compilation guide**
|
||||
- Musl static builds: `x86_64-unknown-linux-musl`, `armv7-unknown-linux-musleabihf`, `mips-unknown-linux-musl`
|
||||
- Strip binary: `--release` + `strip` → target size < 5 MB for flash storage
|
||||
- `opkg` package manifest for OpenWrt feed
|
||||
- `procd` init script + `uci` config file for OpenWrt integration
|
||||
- CI job: cross-compile and size-check on every release tag
|
||||
|
||||
- [x] **F8 — Traffic analysis resistance for mesh**
|
||||
- Uniform message padding to nearest 256-byte boundary (hides message size)
|
||||
- Configurable decoy traffic rate (fake messages to mask send timing)
|
||||
- Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)
|
||||
- Ref: Phase 7.7 for server-side traffic analysis resistance
|
||||
|
||||
---
|
||||
|
||||
## Phase 9 — Developer Experience & Community Growth
|
||||
|
||||
Features designed to attract contributors, create demo/showcase potential,
|
||||
and lower the barrier to entry for non-crypto developers.
|
||||
|
||||
- [x] **9.1 Criterion Benchmark Suite (`qpq-bench`)**
|
||||
- Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake
|
||||
- CI publishes HTML benchmark reports as GitHub Actions artifacts
|
||||
- Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust
|
||||
|
||||
- [x] **9.2 Safety Numbers (key verification)**
|
||||
- 60-digit numeric code derived from two identity keys (Signal-style)
|
||||
- `/verify <username>` REPL command for out-of-band verification
|
||||
- Available in WASM via `compute_safety_number` binding
|
||||
|
||||
- [x] **9.3 Full-Screen TUI (Ratatui + Crossterm)**
|
||||
- `qpq tui` launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator
|
||||
- Feature-gated `--features tui` to keep ratatui/crossterm out of default builds
|
||||
- Existing REPL and CLI subcommands are unaffected
|
||||
|
||||
- [x] **9.4 Delivery Proof Canary Tokens**
|
||||
- Server signs `Ed25519(SHA-256(message_id || recipient || timestamp))` on enqueue
|
||||
- Sender stores proof locally — cryptographic evidence the server queued the message
|
||||
- Cap'n Proto schema gains optional `deliveryProof: Data` on enqueue response
|
||||
|
||||
- [x] **9.5 Verifiable Transcript Archive**
|
||||
- `GroupMember::export_transcript(path, password)` writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)
|
||||
- `qpq export verify` CLI command independently verifies chain integrity
|
||||
- Useful for legal discovery, audit, or personal backup
|
||||
|
||||
- [x] **9.6 Key Transparency (Merkle-Log Identity Binding)**
|
||||
- Append-only Merkle log of (username, identity_key) bindings in the AS
|
||||
- Clients receive inclusion proofs alongside key fetches
|
||||
- Any client can independently audit the full identity history
|
||||
- Lightweight subset of RFC 9162 adapted for identity keys
|
||||
|
||||
- [x] **9.7 Dynamic Server Plugin System**
|
||||
- Server loads `.so`/`.dylib` plugins at runtime via `--plugin-dir`
|
||||
- C-compatible `HookVTable` via `extern "C"` — plugins in any language
|
||||
- 6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered
|
||||
- Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)
|
||||
|
||||
- [x] **9.8 PQ Noise Transport Layer**
|
||||
- Hybrid `Noise_XX + ML-KEM-768` handshake for post-quantum transport security
|
||||
- Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)
|
||||
- Feature-gated `--features pq-noise`; classical Noise_XX default preserved
|
||||
- May require extending or forking `snow` crate's `CryptoResolver`
|
||||
|
||||
---
|
||||
|
||||
## Summary Timeline
|
||||
|
||||
| Phase | Focus | Estimated Effort |
|
||||
|-------|-------|-----------------|
|
||||
| **1** | Production Hardening | 1–2 days |
|
||||
| **2** | Test & CI Maturity | 2–3 days |
|
||||
| **3** | Client SDKs (Go, Python, WASM, FFI, WebTransport) | 5–8 days |
|
||||
| **4** | Trust & Security Infrastructure | 2–4 days (excl. audit) |
|
||||
| **5** | Features & UX | 5–7 days |
|
||||
| **6** | Scale & Operations | 3–5 days |
|
||||
| **7** | Platform Expansion & Research | ongoing |
|
||||
| **8** | Freifunk / Community Mesh | ongoing |
|
||||
| **9** | Developer Experience & Community Growth | 3–5 days |
|
||||
|
||||
---
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Future Improvements](docs/FUTURE-IMPROVEMENTS.md) — consolidated improvement list
|
||||
- [Production Readiness Audit](docs/PRODUCTION-READINESS-AUDIT.md) — specific blockers
|
||||
- [Security Audit](docs/SECURITY-AUDIT.md) — findings and recommendations
|
||||
- [Milestone Tracker](docs/src/roadmap/milestones.md) — M1–M7 status
|
||||
- [Auth, Devices, and Tokens](docs/src/roadmap/authz-plan.md) — authorization design
|
||||
- [DM Channel Design](docs/src/roadmap/dm-channels.md) — 1:1 channel spec
|
||||
26
assets/left.ansi
Normal file
26
assets/left.ansi
Normal file
@@ -0,0 +1,26 @@
|
||||
registering 'alice'...
|
||||
user 'alice' registered
|
||||
logging in as 'alice'...
|
||||
logged in, session cached
|
||||
[2midentity: c1e1f6df17eeb6..2816[0m
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[2m[[0m[1mno conversation[0m[2m][0m > /dm bob
|
||||
resolving bob...
|
||||
creating channel...
|
||||
fetching peer's key package...
|
||||
DM with @bob created. Start typing!
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mHey Bob, testing our E2E encrypted channel![0m
|
||||
[36m[1m[bob][0m Works great -- the server never sees plaintext?
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mRight. MLS forward secrecy + post-quantum KEM.[0m
|
||||
[36m[1m[bob][0m Impressive. How do I verify your identity?
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mRun /verify alice -- compare the safety number out-of-band.[0m
|
||||
[2m[[0m[1m@bob[0m[2m][0m > /group-info
|
||||
[2m Conversation: @bob[0m
|
||||
[2m Type: DM[0m
|
||||
[2m Members: 2[0m
|
||||
[2m alice (you), bob[0m
|
||||
[2m MLS epoch: 3[0m
|
||||
[2m[[0m[1m@bob[0m[2m][0m >
|
||||
BIN
assets/logo.png
Normal file
BIN
assets/logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.2 MiB |
24
assets/right.ansi
Normal file
24
assets/right.ansi
Normal file
@@ -0,0 +1,24 @@
|
||||
registering 'bob'...
|
||||
user 'bob' registered
|
||||
logging in as 'bob'...
|
||||
logged in, session cached
|
||||
[2midentity: a8c2f19f1b0806..c73f[0m
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[36m[1m[system][0m new conversation: @alice
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [36m[1m[alice][0m Hey Bob, testing our E2E encrypted channel!
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [32mWorks great -- the server never sees plaintext?[0m
|
||||
[36m[1m[alice][0m Right. MLS forward secrecy + post-quantum KEM.
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [32mImpressive. How do I verify your identity?[0m
|
||||
[36m[1m[alice][0m Run /verify alice -- compare the safety number out-of-band.
|
||||
[2m[[0m[1m@alice[0m[2m][0m > /verify alice
|
||||
[2m Safety number for @alice:[0m
|
||||
[2m 096482 731945 208376[0m
|
||||
[2m 571039 284617 950283[0m
|
||||
[2m[[0m[1m@alice[0m[2m][0m > /whoami
|
||||
[2m identity: a8c2f19f1b0806..c73f[0m
|
||||
[2m hybrid key: yes[0m
|
||||
[2m conversations: 1[0m
|
||||
[2m[[0m[1m@alice[0m[2m][0m >
|
||||
BIN
assets/screenshot.png
Normal file
BIN
assets/screenshot.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 67 KiB |
59
assets/screenshot.txt
Normal file
59
assets/screenshot.txt
Normal file
@@ -0,0 +1,59 @@
|
||||
=== Alice (left) ===
|
||||
❯ ./target/debug/qpq repl --username alice --password de
|
||||
opass1 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXG
|
||||
OrPY/server-cert.der --state /tmp/tmp.adbXGLOrPY/alice.b
|
||||
n
|
||||
registering 'alice'...
|
||||
user 'alice' registered
|
||||
logging in as 'alice'...
|
||||
logged in, session cached
|
||||
identity: c1e1f6df17eeb6f539d7fbea94129fa32fc02ca40e5c
|
||||
7a7c95cfc94161d5f628
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[no conversation] > /dm bob
|
||||
resolving bob...
|
||||
creating channel...
|
||||
fetching peer's key package...
|
||||
DM with @bob created. Start typing!
|
||||
[@bob] > ^LHey Bob, testing our E2E encrypted channel!
|
||||
[@bob] > Right. MLS forward secrecy + post-quantum KEM.
|
||||
[@bob] > /group-info
|
||||
Conversation: @bob
|
||||
Type: DM
|
||||
Members: 2
|
||||
alice (you), bob
|
||||
MLS epoch: 1
|
||||
[@bob] >
|
||||
|
||||
=== Bob (right) ===
|
||||
❯ ./target/debug/qpq repl --username bob --password demop
|
||||
ass2 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXGLOr
|
||||
PY/server-cert.der --state /tmp/tmp.adbXGLOrPY/bob.bin
|
||||
registering 'bob'...
|
||||
user 'bob' registered
|
||||
logging in as 'bob'...
|
||||
logged in, session cached
|
||||
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
|
||||
af5ff9c89c69750c73f
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[no conversation] > /list
|
||||
no conversations yet. Try /dm <username> or /create-gro
|
||||
up <name>
|
||||
[no conversation] > /switch @alice
|
||||
error: conversation not found: @alice
|
||||
[no conversation] > ^LWorks great -- the server never see
|
||||
s plaintext?
|
||||
error: no active conversation; use /dm or /create-group
|
||||
first
|
||||
[no conversation] > /whoami
|
||||
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
|
||||
af5ff9c89c69750c73f
|
||||
hybrid key: yes
|
||||
conversations: 0
|
||||
[no conversation] >
|
||||
@@ -1,60 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-client"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "CLI client for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
|
||||
# Serialisation + RPC
|
||||
capnp = { workspace = true }
|
||||
capnp-rpc = { workspace = true }
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE PAKE
|
||||
opaque-ke = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
anyhow = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Crypto — for fingerprint verification in fetch-key subcommand
|
||||
sha2 = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
quinn = { workspace = true }
|
||||
quinn-proto = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
|
||||
# Logging
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
# CLI
|
||||
clap = { workspace = true }
|
||||
clap_complete = { workspace = true }
|
||||
indicatif = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
dashmap = { workspace = true }
|
||||
assert_cmd = "2"
|
||||
tempfile = "3"
|
||||
portpicker = "0.1"
|
||||
rand = "0.8"
|
||||
hex = "0.4"
|
||||
@@ -1,13 +0,0 @@
|
||||
/// Render a byte sequence as a lowercase hex string (two digits per byte).
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
    use std::fmt::Write;
    let data = bytes.as_ref();
    // Preallocate: every byte expands to exactly two hex digits.
    let mut out = String::with_capacity(data.len() * 2);
    for byte in data {
        // Writing into a String is infallible.
        let _ = write!(out, "{byte:02x}");
    }
    out
}
|
||||
|
||||
/// Decode a hex string (upper- or lowercase) into bytes.
///
/// # Errors
/// Returns an error for odd-length input or any non-hex character.
/// Non-ASCII input now yields `Err("invalid hex character")`; the previous
/// implementation sliced the `str` at fixed byte offsets (`&s[i..i + 2]`),
/// which panics when an offset lands inside a multi-byte UTF-8 character
/// (e.g. `decode("aéa")`).
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
    if s.len() % 2 != 0 {
        return Err("odd-length hex string");
    }

    // Decode a single ASCII hex digit to its 4-bit value.
    fn nibble(b: u8) -> Result<u8, &'static str> {
        match b {
            b'0'..=b'9' => Ok(b - b'0'),
            b'a'..=b'f' => Ok(b - b'a' + 10),
            b'A'..=b'F' => Ok(b - b'A' + 10),
            _ => Err("invalid hex character"),
        }
    }

    // Work on raw bytes so malformed UTF-8 offsets can never panic;
    // length is even, so every chunk has exactly two bytes.
    s.as_bytes()
        .chunks(2)
        .map(|pair| -> Result<u8, &'static str> {
            let hi = nibble(pair[0])?;
            let lo = nibble(pair[1])?;
            Ok((hi << 4) | lo)
        })
        .collect()
}
|
||||
@@ -1,102 +0,0 @@
|
||||
//! Retry with exponential backoff for transient RPC failures.
|
||||
//!
|
||||
//! Used for `enqueue`, `fetch_all`, and `fetch_wait`. Auth and invalid-param
|
||||
//! errors are not retried. Configure via `QUICNPROTOCHAT_MAX_RETRIES` and
|
||||
//! `QUICNPROTOCHAT_BASE_DELAY_MS` (optional).
|
||||
|
||||
use std::future::Future;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::Rng;
|
||||
use tracing::warn;
|
||||
|
||||
/// Default maximum number of retry attempts (including the first try).
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// Default base delay in milliseconds for exponential backoff.
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;

/// Read max retries from env or use default.
pub fn max_retries_from_env() -> u32 {
    env_or_default("QUICNPROTOCHAT_MAX_RETRIES", DEFAULT_MAX_RETRIES)
}

/// Read base delay (ms) from env or use default.
pub fn base_delay_ms_from_env() -> u64 {
    env_or_default("QUICNPROTOCHAT_BASE_DELAY_MS", DEFAULT_BASE_DELAY_MS)
}

/// Parse environment variable `var`, falling back to `default` when the
/// variable is unset or fails to parse.
fn env_or_default<T: std::str::FromStr>(var: &str, default: T) -> T {
    std::env::var(var)
        .ok()
        .and_then(|raw| raw.parse().ok())
        .unwrap_or(default)
}
|
||||
|
||||
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
|
||||
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
|
||||
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
|
||||
pub async fn retry_async<F, Fut, T, E, P>(
|
||||
op: F,
|
||||
max_retries: u32,
|
||||
base_delay_ms: u64,
|
||||
is_retriable: P,
|
||||
) -> Result<T, E>
|
||||
where
|
||||
F: Fn() -> Fut,
|
||||
Fut: Future<Output = Result<T, E>>,
|
||||
P: Fn(&E) -> bool,
|
||||
{
|
||||
let mut last_err = None;
|
||||
for attempt in 0..max_retries {
|
||||
match op().await {
|
||||
Ok(t) => return Ok(t),
|
||||
Err(e) => {
|
||||
last_err = Some(e);
|
||||
let err = last_err.as_ref().expect("last_err just set in Err branch");
|
||||
if !is_retriable(err) || attempt + 1 >= max_retries {
|
||||
break;
|
||||
}
|
||||
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
|
||||
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
|
||||
let total_ms = delay_ms + jitter_ms;
|
||||
warn!(
|
||||
attempt = attempt + 1,
|
||||
max_retries,
|
||||
delay_ms = total_ms,
|
||||
"RPC failed, retrying after backoff"
|
||||
);
|
||||
tokio::time::sleep(Duration::from_millis(total_ms)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Loop runs at least once (max_retries >= 1) and we only break after storing an Err, so this is always Some.
|
||||
Err(last_err.expect("retry_async: last_err is Some when breaking after Err"))
|
||||
}
|
||||
|
||||
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
|
||||
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
|
||||
/// When in doubt, returns `true` (retry).
|
||||
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
|
||||
let s = format!("{:#}", err);
|
||||
let s_lower = s.to_lowercase();
|
||||
// Do not retry: auth / permission
|
||||
if s_lower.contains("unauthorized")
|
||||
|| s_lower.contains("auth failed")
|
||||
|| s_lower.contains("access denied")
|
||||
|| s_lower.contains("401")
|
||||
|| s_lower.contains("forbidden")
|
||||
|| s_lower.contains("403")
|
||||
|| s_lower.contains("token")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Do not retry: bad request / invalid params
|
||||
if s_lower.contains("bad request")
|
||||
|| s_lower.contains("400")
|
||||
|| s_lower.contains("invalid param")
|
||||
|| s_lower.contains("fingerprint mismatch")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Retry: network, timeout, connection, server error, or anything else
|
||||
true
|
||||
}
|
||||
@@ -1,369 +0,0 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use quinn::{ClientConfig, Endpoint};
|
||||
use quinn_proto::crypto::rustls::QuicClientConfig;
|
||||
use rustls::pki_types::CertificateDer;
|
||||
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
|
||||
use quicnprotochat_core::HybridPublicKey;
|
||||
use quicnprotochat_proto::node_capnp::{auth, node_service};
|
||||
|
||||
use crate::AUTH_CONTEXT;
|
||||
|
||||
use super::retry::{
|
||||
anyhow_is_retriable, base_delay_ms_from_env, max_retries_from_env, retry_async,
|
||||
};
|
||||
|
||||
/// Establish a QUIC/TLS connection and return a `NodeService` client.
///
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
///
/// `server` is a `host:port` socket address, `ca_cert` the path to a
/// DER-encoded root certificate, and `server_name` the TLS name the
/// server's certificate is verified against.
pub async fn connect_node(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
) -> anyhow::Result<node_service::Client> {
    let addr: SocketAddr = server
        .parse()
        .with_context(|| format!("server must be host:port, got {server}"))?;

    // Trust exactly the provided CA certificate — no system roots are added.
    let cert_bytes = std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
    let mut roots = RootCertStore::empty();
    roots
        .add(CertificateDer::from(cert_bytes))
        .context("add root cert")?;

    let mut tls = RustlsClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    // ALPN must agree with the server's advertised protocol ("capnp").
    tls.alpn_protocols = vec![b"capnp".to_vec()];

    let crypto = QuicClientConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;

    // Bind an ephemeral local UDP port for the QUIC endpoint.
    let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
    let mut endpoint = Endpoint::client(bind_addr)?;
    endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));

    // Two awaits: connection initiation, then the handshake itself.
    let connection = endpoint
        .connect(addr, server_name)
        .context("quic connect init")?
        .await
        .context("quic connect failed")?;

    // A single bidirectional stream carries the entire capnp RPC session.
    let (send, recv) = connection.open_bi().await.context("open bi stream")?;

    let network = twoparty::VatNetwork::new(
        recv.compat(),
        send.compat_write(),
        Side::Client,
        Default::default(),
    );

    let mut rpc_system = RpcSystem::new(Box::new(network), None);
    let client: node_service::Client = rpc_system.bootstrap(Side::Server);

    // Drive the RPC state machine on the current LocalSet; the detached
    // task ends when the connection closes.
    tokio::task::spawn_local(rpc_system);

    Ok(client)
}
|
||||
|
||||
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
|
||||
let ctx = AUTH_CONTEXT.get().ok_or_else(|| {
|
||||
anyhow::anyhow!("init_auth must be called with a non-empty token before RPCs")
|
||||
})?;
|
||||
auth.set_version(ctx.version);
|
||||
auth.set_access_token(&ctx.access_token);
|
||||
auth.set_device_id(&ctx.device_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
|
||||
pub async fn upload_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
package: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_package(package);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_key_package RPC failed")?;
|
||||
|
||||
let server_fp = resp
|
||||
.get()
|
||||
.context("upload_key_package: bad response")?
|
||||
.get_fingerprint()
|
||||
.context("upload_key_package: missing fingerprint")?
|
||||
.to_vec();
|
||||
|
||||
let local_fp = super::state::sha256(package);
|
||||
anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a KeyPackage for `identity_key` from the AS.
|
||||
pub async fn fetch_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.fetch_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_key_package RPC failed")?;
|
||||
|
||||
let pkg = resp
|
||||
.get()
|
||||
.context("fetch_key_package: bad response")?
|
||||
.get_package()
|
||||
.context("fetch_key_package: missing package field")?
|
||||
.to_vec();
|
||||
|
||||
Ok(pkg)
|
||||
}
|
||||
|
||||
/// Enqueue an opaque payload to the DS for `recipient_key`.
/// Returns the per-inbox sequence number assigned by the server.
/// Retries on transient failures with exponential backoff.
pub async fn enqueue(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<u64> {
    // Own the inputs so the retry closure can be invoked repeatedly; each
    // attempt clones them into a fresh `async move` future.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    // NOTE(review): channel id is left empty here — presumably
                    // the direct-inbox path; confirm against server semantics.
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        max_retries_from_env(),
        base_delay_ms_from_env(),
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch and drain all payloads for `recipient_key`.
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_all(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so the retry closure can rebuild the request per attempt.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch: bad response")?
                    .get_payloads()
                    .context("fetch: missing payloads")?;

                // Copy each envelope's (seq, data) out of the capnp arena
                // into owned pairs.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        max_retries_from_env(),
        base_delay_ms_from_env(),
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Long-poll for payloads with optional timeout (ms).
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_wait(
    client: &node_service::Client,
    recipient_key: &[u8],
    timeout_ms: u64,
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so the retry closure can rebuild the request per attempt.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            // Copy (u64 is Copy) so the async move block owns its own value.
            let timeout_ms = timeout_ms;
            async move {
                let mut req = client.fetch_wait_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_timeout_ms(timeout_ms);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch_wait RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch_wait: bad response")?
                    .get_payloads()
                    .context("fetch_wait: missing payloads")?;

                // Copy each envelope's (seq, data) out of the capnp arena
                // into owned pairs.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch_wait: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        max_retries_from_env(),
        base_delay_ms_from_env(),
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
|
||||
pub async fn upload_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: &HybridPublicKey,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_hybrid_key RPC failed")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a peer's hybrid public key from the server.
|
||||
///
|
||||
/// Returns `None` if the peer has not uploaded a hybrid key.
|
||||
pub async fn fetch_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Option<HybridPublicKey>> {
|
||||
let mut req = client.fetch_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_hybrid_key RPC failed")?;
|
||||
|
||||
let pk_bytes = resp
|
||||
.get()
|
||||
.context("fetch_hybrid_key: bad response")?
|
||||
.get_hybrid_public_key()
|
||||
.context("fetch_hybrid_key: missing field")?
|
||||
.to_vec();
|
||||
|
||||
if pk_bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
|
||||
Ok(Some(pk))
|
||||
}
|
||||
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
pub fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&quicnprotochat_core::HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
quicnprotochat_core::hybrid_decrypt(kp, payload).map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
/// Return the current Unix timestamp in milliseconds.
///
/// A system clock set before the Unix epoch yields 0 rather than panicking.
pub fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}
|
||||
@@ -1,382 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::Argon2;
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use quicnprotochat_core::{
|
||||
CoreError, DiskKeyStore, GroupMember, HybridCryptoProvider, HybridKeypair, HybridKeypairBytes,
|
||||
IdentityKeypair, MlsGroup, StoreCrypto,
|
||||
};
|
||||
|
||||
/// Magic bytes for encrypted client state files.
const STATE_MAGIC: &[u8; 4] = b"QPCE";
// Sizes of the Argon2 salt and ChaCha20-Poly1305 nonce in the QPCE layout:
// magic(4) | salt(16) | nonce(12) | ciphertext.
const STATE_SALT_LEN: usize = 16;
const STATE_NONCE_LEN: usize = 12;

/// Persisted client state (identity, MLS group, optional PQ key).
///
/// **Production note:** When loading state, use the same `use_pq_backend` value that was used when
/// the state was created. Loading PQ state with classical backend (or vice versa) will fail or
/// produce incorrect behavior.
#[derive(Serialize, Deserialize)]
pub struct StoredState {
    /// Seed from which the identity keypair is reconstructed
    /// (`IdentityKeypair::from_seed`).
    pub identity_seed: [u8; 32],
    /// bincode-serialized MLS group state; `None` when no group state
    /// has been saved yet.
    pub group: Option<Vec<u8>>,
    /// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added.
    #[serde(default)]
    pub hybrid_key: Option<HybridKeypairBytes>,
    /// Cached member public keys for group participants.
    #[serde(default)]
    pub member_keys: Vec<Vec<u8>>,
    /// If true, MLS uses post-quantum hybrid KEM (HybridCryptoProvider) for HPKE. M7.
    #[serde(default)]
    pub use_pq_backend: bool,
}
|
||||
|
||||
/// MLS member backend: classical (StoreCrypto) or post-quantum hybrid (HybridCryptoProvider).
///
/// Both variants wrap the same `GroupMember` API; this enum erases the
/// crypto-provider type parameter so callers can hold either backend.
pub enum MemberBackend {
    /// Classical provider (`StoreCrypto`).
    Classical(GroupMember<StoreCrypto>),
    /// Post-quantum hybrid provider (`HybridCryptoProvider`).
    Hybrid(GroupMember<HybridCryptoProvider>),
}
|
||||
|
||||
impl MemberBackend {
    // Every method below delegates to whichever `GroupMember` variant is
    // active; the two arms are always identical calls.

    /// Generate a serialized MLS KeyPackage for this member.
    pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.generate_key_package(),
            MemberBackend::Hybrid(m) => m.generate_key_package(),
        }
    }
    /// Create a new MLS group with the given group id.
    pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
        match self {
            MemberBackend::Classical(m) => m.create_group(group_id),
            MemberBackend::Hybrid(m) => m.create_group(group_id),
        }
    }
    /// Add a member from a serialized KeyPackage. Returns two byte blobs
    /// from the underlying `GroupMember` — NOTE(review): presumably the
    /// commit and Welcome messages; confirm order against `GroupMember`.
    pub fn add_member(&mut self, key_package_bytes: &[u8]) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
        match self {
            MemberBackend::Classical(m) => m.add_member(key_package_bytes),
            MemberBackend::Hybrid(m) => m.add_member(key_package_bytes),
        }
    }
    /// Join a group from a serialized Welcome message.
    pub fn join_group(&mut self, welcome: &[u8]) -> Result<(), CoreError> {
        match self {
            MemberBackend::Classical(m) => m.join_group(welcome),
            MemberBackend::Hybrid(m) => m.join_group(welcome),
        }
    }
    /// Turn `plaintext` into group wire bytes via the MLS group.
    pub fn send_message(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.send_message(plaintext),
            MemberBackend::Hybrid(m) => m.send_message(plaintext),
        }
    }
    /// Process inbound wire bytes; `Ok(None)` when the message produced no
    /// application plaintext.
    pub fn receive_message(&mut self, bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.receive_message(bytes),
            MemberBackend::Hybrid(m) => m.receive_message(bytes),
        }
    }
    /// As `receive_message`, but also returns a second byte blob alongside
    /// the plaintext (the sender identity, per the method name).
    pub fn receive_message_with_sender(
        &mut self,
        bytes: &[u8],
    ) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.receive_message_with_sender(bytes),
            MemberBackend::Hybrid(m) => m.receive_message_with_sender(bytes),
        }
    }
    /// Current group id, if a group exists.
    pub fn group_id(&self) -> Option<Vec<u8>> {
        match self {
            MemberBackend::Classical(m) => m.group_id(),
            MemberBackend::Hybrid(m) => m.group_id(),
        }
    }
    /// Borrow this member's identity keypair.
    pub fn identity(&self) -> &IdentityKeypair {
        match self {
            MemberBackend::Classical(m) => m.identity(),
            MemberBackend::Hybrid(m) => m.identity(),
        }
    }
    /// The 32-byte seed the identity keypair was derived from.
    pub fn identity_seed(&self) -> [u8; 32] {
        match self {
            MemberBackend::Classical(m) => m.identity_seed(),
            MemberBackend::Hybrid(m) => m.identity_seed(),
        }
    }
    /// Borrow the MLS group state, if any.
    pub fn group_ref(&self) -> Option<&MlsGroup> {
        match self {
            MemberBackend::Classical(m) => m.group_ref(),
            MemberBackend::Hybrid(m) => m.group_ref(),
        }
    }
    /// Identity byte strings of the current group members.
    pub fn member_identities(&self) -> Vec<Vec<u8>> {
        match self {
            MemberBackend::Classical(m) => m.member_identities(),
            MemberBackend::Hybrid(m) => m.member_identities(),
        }
    }
    /// True when the post-quantum hybrid backend is active.
    pub fn is_pq(&self) -> bool {
        matches!(self, MemberBackend::Hybrid(_))
    }
}
|
||||
|
||||
impl StoredState {
    /// Rebuild member and hybrid key from stored state. Uses PQ backend if `use_pq_backend` is true.
    ///
    /// `state_path` locates the sibling on-disk key store
    /// (see `keystore_path`); the MLS group and hybrid key are decoded
    /// from their serialized forms.
    pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(MemberBackend, Option<HybridKeypair>)> {
        let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
        // Group bytes are bincode; `transpose` turns Option<Result<_>> into
        // Result<Option<_>> so `?` propagates a decode failure.
        let group = self
            .group
            .map(|bytes| bincode::deserialize(&bytes).context("decode group"))
            .transpose()?;
        let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;

        // Pick the backend recorded at creation time — mixing backends is
        // unsupported (see the struct-level production note).
        let member = if self.use_pq_backend {
            MemberBackend::Hybrid(GroupMember::<HybridCryptoProvider>::new_with_state_hybrid(
                identity, key_store, group,
            ))
        } else {
            MemberBackend::Classical(GroupMember::new_with_state(identity, key_store, group))
        };

        let hybrid_kp = self
            .hybrid_key
            .map(|bytes| HybridKeypair::from_bytes(&bytes).context("decode hybrid key"))
            .transpose()?;

        Ok((member, hybrid_kp))
    }

    /// Build state from a classical GroupMember (backward compat / tests). Prefer [`from_member_backend`](Self::from_member_backend) in production.
    pub fn from_parts(
        member: &GroupMember<StoreCrypto>,
        hybrid_kp: Option<&HybridKeypair>,
    ) -> anyhow::Result<Self> {
        let group = member
            .group_ref()
            .map(|g| bincode::serialize(g).context("serialize group"))
            .transpose()?;

        Ok(Self {
            identity_seed: member.identity_seed(),
            group,
            hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
            // Caller-managed cache; starts empty on every rebuild.
            member_keys: Vec::new(),
            // Classical by definition for this constructor.
            use_pq_backend: false,
        })
    }

    /// Build state from MemberBackend (classical or PQ).
    pub fn from_member_backend(
        member: &MemberBackend,
        hybrid_kp: Option<&HybridKeypair>,
    ) -> anyhow::Result<Self> {
        let group = member
            .group_ref()
            .map(|g| bincode::serialize(g).context("serialize group"))
            .transpose()?;

        Ok(Self {
            identity_seed: member.identity_seed(),
            group,
            hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
            // Caller-managed cache; starts empty on every rebuild.
            member_keys: Vec::new(),
            // Record the active backend so `into_parts` restores the same one.
            use_pq_backend: member.is_pq(),
        })
    }
}
|
||||
|
||||
/// Derive a 32-byte key from a password and salt using Argon2id.
|
||||
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
|
||||
let mut key = [0u8; 32];
|
||||
Argon2::default()
|
||||
.hash_password_into(password.as_bytes(), salt, &mut key)
|
||||
.map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Encrypt `plaintext` with the QPCE format: magic(4) | salt(16) | nonce(12) | ciphertext.
|
||||
pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let mut salt = [0u8; STATE_SALT_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||
|
||||
let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
|
||||
|
||||
let key = derive_state_key(password, &salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;
|
||||
|
||||
let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
|
||||
out.extend_from_slice(STATE_MAGIC);
|
||||
out.extend_from_slice(&salt);
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
out.extend_from_slice(&ciphertext);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Decrypt a QPCE-formatted state file.
|
||||
pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
|
||||
anyhow::ensure!(
|
||||
data.len() > header_len,
|
||||
"encrypted state file too short ({} bytes)",
|
||||
data.len()
|
||||
);
|
||||
|
||||
let salt = &data[4..4 + STATE_SALT_LEN];
|
||||
let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
|
||||
let ciphertext = &data[header_len..];
|
||||
|
||||
let key = derive_state_key(password, salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
/// Returns true if raw bytes begin with the QPCE magic header.
|
||||
pub fn is_encrypted_state(bytes: &[u8]) -> bool {
|
||||
bytes.len() >= 4 && &bytes[..4] == STATE_MAGIC
|
||||
}
|
||||
|
||||
/// Create new state with optional post-quantum MLS backend (M7). When `use_pq_backend` is true,
/// new state uses `HybridCryptoProvider` for MLS HPKE (X25519 + ML-KEM-768).
///
/// If `path` exists, the state is loaded (decrypting with `password` when
/// needed) and upgraded in place with a hybrid key if one is missing;
/// otherwise fresh identity + hybrid keys are generated and written out.
pub fn load_or_init_state(
    path: &Path,
    password: Option<&str>,
    use_pq_backend: bool,
) -> anyhow::Result<StoredState> {
    if path.exists() {
        let mut state = load_existing_state(path, password)?;
        // Generate hybrid keypair if missing (upgrade from older state).
        if state.hybrid_key.is_none() {
            // Spinner gives user feedback while the keypair is generated.
            let pb = indicatif::ProgressBar::new_spinner();
            pb.set_message("Generating post-quantum keypair\u{2026}");
            pb.enable_steady_tick(std::time::Duration::from_millis(80));
            state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
            pb.finish_and_clear();
            // Persist immediately so the upgrade happens at most once.
            write_state(path, &state, password)?;
        }
        return Ok(state);
    }

    // No state on disk: create fresh identity and hybrid keypairs behind
    // a progress spinner.
    let pb = indicatif::ProgressBar::new_spinner();
    pb.set_message("Generating post-quantum keypair\u{2026}");
    pb.enable_steady_tick(std::time::Duration::from_millis(80));
    let identity = IdentityKeypair::generate();
    let hybrid_kp = HybridKeypair::generate();
    pb.finish_and_clear();

    // The key store lives in a sibling path derived from the state file.
    let key_store = DiskKeyStore::persistent(keystore_path(path))?;
    let member = if use_pq_backend {
        MemberBackend::Hybrid(GroupMember::<HybridCryptoProvider>::new_with_state_hybrid(
            Arc::new(identity),
            key_store,
            None,
        ))
    } else {
        MemberBackend::Classical(GroupMember::new_with_state(
            Arc::new(identity),
            key_store,
            None,
        ))
    };
    let state = StoredState::from_member_backend(&member, Some(&hybrid_kp))?;
    write_state(path, &state, password)?;
    Ok(state)
}
|
||||
|
||||
pub fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
|
||||
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
|
||||
|
||||
if is_encrypted_state(&bytes) {
|
||||
let pw = password
|
||||
.context("state file is encrypted (QPCE); a password is required to decrypt it")?;
|
||||
let plaintext = decrypt_state(pw, &bytes)?;
|
||||
bincode::deserialize(&plaintext).context("decode encrypted state")
|
||||
} else {
|
||||
bincode::deserialize(&bytes).context("decode state")
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_state(
|
||||
path: &Path,
|
||||
member: &MemberBackend,
|
||||
hybrid_kp: Option<&HybridKeypair>,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
let state = StoredState::from_member_backend(member, hybrid_kp)?;
|
||||
write_state(path, &state, password)
|
||||
}
|
||||
|
||||
pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
|
||||
}
|
||||
let plaintext = bincode::serialize(state).context("encode state")?;
|
||||
|
||||
let bytes = if let Some(pw) = password {
|
||||
encrypt_state(pw, &plaintext)?
|
||||
} else {
|
||||
plaintext
|
||||
};
|
||||
|
||||
std::fs::write(path, bytes).with_context(|| format!("write state {path:?}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_identity_key(hex_str: &str) -> anyhow::Result<Vec<u8>> {
|
||||
let bytes = super::hex::decode(hex_str)
|
||||
.map_err(|e| anyhow::anyhow!(e))
|
||||
.context("identity key must be hex")?;
|
||||
anyhow::ensure!(bytes.len() == 32, "identity key must be 32 bytes");
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
/// Derive the on-disk key-store path from the state file path by swapping the
/// extension for `ks` (e.g. `foo/state.bin` -> `foo/state.ks`).
pub fn keystore_path(state_path: &Path) -> PathBuf {
    // `with_extension` clones and replaces (or appends) the final extension,
    // exactly matching `to_path_buf` + `set_extension("ks")`.
    state_path.with_extension("ks")
}
|
||||
|
||||
pub fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||
use sha2::{Digest, Sha256};
|
||||
Sha256::digest(bytes).to_vec()
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Encrypt-then-decrypt with the same password must be lossless, and the
    /// ciphertext must carry the QPCE magic header.
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let plaintext = b"test state data";
        let password = "test-password";

        let encrypted = encrypt_state(password, plaintext).expect("encrypt");
        assert!(is_encrypted_state(&encrypted));

        let decrypted = decrypt_state(password, &encrypted).expect("decrypt");
        assert_eq!(decrypted, plaintext);
    }

    /// Decrypting with the wrong password must fail rather than return garbage.
    #[test]
    fn wrong_password_fails() {
        let encrypted = encrypt_state("correct", b"test state data").expect("encrypt");
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }
}
|
||||
@@ -1,57 +0,0 @@
|
||||
//! quicnprotochat CLI client library.
|
||||
//!
|
||||
//! # KeyPackage expiry and refresh
|
||||
//!
|
||||
//! KeyPackages are single-use (consumed when someone fetches them for an invite) and the server
|
||||
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `quicnprotochat refresh-keypackage`
|
||||
//! periodically (e.g. before the server TTL) or after your KeyPackage was consumed:
|
||||
//!
|
||||
//! ```bash
|
||||
//! quicnprotochat refresh-keypackage --state quicnprotochat-state.bin --server 127.0.0.1:7000
|
||||
//! ```
|
||||
//!
|
||||
//! Use the same `--access-token` (or `QUICNPROTOCHAT_ACCESS_TOKEN`) as for other authenticated
|
||||
//! commands. See the [running-the-client](https://docs.quicnprotochat.dev/getting-started/running-the-client)
|
||||
//! docs for details.
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
pub mod client;
|
||||
|
||||
pub use client::commands::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
|
||||
cmd_health_json, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register,
|
||||
cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami,
|
||||
receive_pending_plaintexts, whoami_json,
|
||||
};
|
||||
|
||||
pub use client::rpc::{connect_node, enqueue, fetch_wait};
|
||||
pub use client::state::{load_existing_state, StoredState};
|
||||
|
||||
// Global auth context initialized once per process.
pub(crate) static AUTH_CONTEXT: OnceLock<ClientAuth> = OnceLock::new();

/// Per-process client authentication material attached to RPCs.
#[derive(Clone, Debug)]
pub struct ClientAuth {
    // Auth payload version; currently always 1.
    pub(crate) version: u16,
    // Bearer/OPAQUE session token bytes (may be empty for unauthenticated commands).
    pub(crate) access_token: Vec<u8>,
    // Optional device identifier bytes (empty when none was supplied).
    pub(crate) device_id: Vec<u8>,
}

impl ClientAuth {
    /// Build a client auth context from optional token and device id.
    pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
        Self {
            version: 1,
            access_token: access_token.into_bytes(),
            device_id: device_id.map(String::into_bytes).unwrap_or_default(),
        }
    }
}

/// Initialize the global auth context; subsequent calls are ignored.
pub fn init_auth(ctx: ClientAuth) {
    // `OnceLock::set` errors once initialized; first-writer-wins is intended.
    let _ = AUTH_CONTEXT.set(ctx);
}
|
||||
@@ -1,541 +0,0 @@
|
||||
//! quicnprotochat CLI client.
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
|
||||
cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register, cmd_register_state,
|
||||
cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami, init_auth, ClientAuth,
|
||||
};
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(name = "quicnprotochat", about = "quicnprotochat CLI client", version)]
|
||||
struct Args {
|
||||
/// Path to the server's TLS certificate (self-signed by default).
|
||||
#[arg(
|
||||
long,
|
||||
global = true,
|
||||
default_value = "data/server-cert.der",
|
||||
env = "QUICNPROTOCHAT_CA_CERT"
|
||||
)]
|
||||
ca_cert: PathBuf,
|
||||
|
||||
/// Expected TLS server name (must match the certificate SAN).
|
||||
#[arg(
|
||||
long,
|
||||
global = true,
|
||||
default_value = "localhost",
|
||||
env = "QUICNPROTOCHAT_SERVER_NAME"
|
||||
)]
|
||||
server_name: String,
|
||||
|
||||
/// Bearer token or OPAQUE session token for authenticated requests.
|
||||
/// Not required for register-user and login commands.
|
||||
#[arg(
|
||||
long,
|
||||
global = true,
|
||||
env = "QUICNPROTOCHAT_ACCESS_TOKEN",
|
||||
default_value = ""
|
||||
)]
|
||||
access_token: String,
|
||||
|
||||
/// Optional device identifier (UUID bytes encoded as hex or raw string).
|
||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_DEVICE_ID")]
|
||||
device_id: Option<String>,
|
||||
|
||||
/// Password to encrypt/decrypt client state files (QPCE format).
|
||||
/// If set, state files are encrypted at rest with Argon2id + ChaCha20Poly1305.
|
||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_STATE_PASSWORD")]
|
||||
state_password: Option<String>,
|
||||
|
||||
/// Use post-quantum MLS backend (X25519 + ML-KEM-768) for new state. M7.
|
||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_PQ")]
|
||||
pq: bool,
|
||||
|
||||
#[command(subcommand)]
|
||||
command: Command,
|
||||
}
|
||||
|
||||
#[derive(Debug, Subcommand)]
|
||||
enum Command {
|
||||
/// Register a new user via OPAQUE (password never leaves the client).
|
||||
RegisterUser {
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Username for the new account.
|
||||
#[arg(long)]
|
||||
username: String,
|
||||
/// Password (will be used in OPAQUE PAKE; server never sees it).
|
||||
#[arg(long)]
|
||||
password: String,
|
||||
},
|
||||
|
||||
/// Log in via OPAQUE and receive a session token.
|
||||
Login {
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
#[arg(long)]
|
||||
username: String,
|
||||
#[arg(long)]
|
||||
password: String,
|
||||
/// Hex-encoded Ed25519 identity key (64 hex chars). Optional if --state is provided.
|
||||
#[arg(long)]
|
||||
identity_key: Option<String>,
|
||||
/// State file to derive the identity key (requires same password if encrypted).
|
||||
#[arg(long)]
|
||||
state: Option<PathBuf>,
|
||||
/// Password for the encrypted state file (if any).
|
||||
#[arg(long)]
|
||||
state_password: Option<String>,
|
||||
},
|
||||
|
||||
/// Show local identity key, fingerprint, group status, and hybrid key status.
|
||||
Whoami {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
},
|
||||
|
||||
/// Check server connectivity and print status.
|
||||
Health {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Check if a peer has registered a hybrid key (non-consuming lookup).
|
||||
CheckKey {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
|
||||
/// Peer's Ed25519 identity public key (64 hex chars = 32 bytes).
|
||||
identity_key: String,
|
||||
},
|
||||
|
||||
/// Send a Ping to the server and print the round-trip time.
|
||||
Ping {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
|
||||
Register {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Fetch a peer's KeyPackage from the Authentication Service.
|
||||
FetchKey {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
|
||||
/// Target peer's Ed25519 identity public key (64 hex chars = 32 bytes).
|
||||
identity_key: String,
|
||||
},
|
||||
|
||||
/// Run a two-party MLS demo (creator + joiner) against live AS and DS.
|
||||
DemoGroup {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Upload the persistent identity's KeyPackage to the AS (uses state file).
|
||||
RegisterState {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
|
||||
/// Authentication Service address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Refresh the KeyPackage on the server (existing state only).
|
||||
/// Run periodically (e.g. before server TTL ~24h) or after your KeyPackage was consumed so others can invite you.
|
||||
RefreshKeypackage {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Create a persistent group and save state to disk.
|
||||
CreateGroup {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
|
||||
/// Group identifier (arbitrary bytes, typically a human-readable name).
|
||||
#[arg(long)]
|
||||
group_id: String,
|
||||
},
|
||||
|
||||
/// Invite a peer into the group and deliver a Welcome via DS.
|
||||
Invite {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Peer identity public key (64 hex chars = 32 bytes).
|
||||
#[arg(long)]
|
||||
peer_key: String,
|
||||
},
|
||||
|
||||
/// Join a group by fetching the Welcome from the DS.
|
||||
Join {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Send an application message via the DS.
|
||||
Send {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Recipient identity key (hex, 32 bytes -> 64 chars). Omit when using --all.
|
||||
#[arg(long)]
|
||||
peer_key: Option<String>,
|
||||
/// Send to all other group members (N-way groups).
|
||||
#[arg(long)]
|
||||
all: bool,
|
||||
/// Plaintext message to send.
|
||||
#[arg(long)]
|
||||
msg: String,
|
||||
},
|
||||
|
||||
/// Receive and decrypt all pending messages from the DS.
|
||||
Recv {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
|
||||
/// Wait for up to this many milliseconds if no messages are queued.
|
||||
#[arg(long, default_value_t = 0)]
|
||||
wait_ms: u64,
|
||||
|
||||
/// Continuously long-poll for messages.
|
||||
#[arg(long)]
|
||||
stream: bool,
|
||||
},
|
||||
|
||||
/// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
|
||||
/// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
|
||||
Chat {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Peer identity key (hex, 64 chars). Omit in a two-person group to use the only other member.
|
||||
#[arg(long)]
|
||||
peer_key: Option<String>,
|
||||
/// How often to poll for incoming messages (milliseconds).
|
||||
#[arg(long, default_value_t = 500)]
|
||||
poll_interval_ms: u64,
|
||||
},
|
||||
|
||||
/// Generate shell completions for the given shell and print to stdout.
|
||||
#[command(hide = true)]
|
||||
Completions {
|
||||
shell: clap_complete::Shell,
|
||||
},
|
||||
|
||||
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialize auth context once for all RPCs (empty token OK for register-user/login).
|
||||
let auth_ctx = ClientAuth::from_parts(args.access_token.clone(), args.device_id.clone());
|
||||
init_auth(auth_ctx);
|
||||
|
||||
let state_pw = args.state_password.as_deref();
|
||||
|
||||
match args.command {
|
||||
Command::RegisterUser {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
None,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Login {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
identity_key,
|
||||
state,
|
||||
state_password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
identity_key.as_deref(),
|
||||
state.as_deref(),
|
||||
state_password.as_deref(),
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Whoami { state } => cmd_whoami(&state, state_pw),
|
||||
Command::Health { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_health(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::CheckKey {
|
||||
server,
|
||||
identity_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_check_key(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&identity_key,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Ping { server } => cmd_ping(&server, &args.ca_cert, &args.server_name).await,
|
||||
Command::Register { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::FetchKey {
|
||||
server,
|
||||
identity_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_fetch_key(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&identity_key,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::DemoGroup { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_demo_group(&server, &args.ca_cert, &args.server_name, args.pq))
|
||||
.await
|
||||
}
|
||||
Command::RegisterState { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
args.pq,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::RefreshKeypackage { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_refresh_keypackage(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::CreateGroup {
|
||||
state,
|
||||
server,
|
||||
group_id,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_create_group(&state, &server, &group_id, state_pw, args.pq))
|
||||
.await
|
||||
}
|
||||
Command::Invite {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&peer_key,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Join { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_join(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Send {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
all,
|
||||
msg,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
peer_key.as_deref(),
|
||||
all,
|
||||
&msg,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Recv {
|
||||
state,
|
||||
server,
|
||||
wait_ms,
|
||||
stream,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_recv(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
wait_ms,
|
||||
stream,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Chat {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
poll_interval_ms,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_chat(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
peer_key.as_deref(),
|
||||
state_pw,
|
||||
poll_interval_ms,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Completions { shell } => {
|
||||
use clap::CommandFactory;
|
||||
clap_complete::generate(
|
||||
shell,
|
||||
&mut Args::command(),
|
||||
"quicnprotochat",
|
||||
&mut std::io::stdout(),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,593 +0,0 @@
|
||||
// cargo_bin! only works for current package's binary; we spawn quicnprotochat-server from another package.
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::{path::PathBuf, process::Command, time::Duration};
|
||||
|
||||
use assert_cmd::cargo::cargo_bin;
|
||||
use portpicker::pick_unused_port;
|
||||
use rand::RngCore;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::sleep;
|
||||
use hex;
|
||||
|
||||
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
|
||||
fn ensure_rustls_provider() {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
}
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_register_state,
|
||||
cmd_register_user, cmd_send, connect_node, enqueue, fetch_wait, init_auth,
|
||||
load_existing_state, receive_pending_plaintexts, ClientAuth,
|
||||
};
|
||||
use quicnprotochat_core::IdentityKeypair;
|
||||
|
||||
fn hex_encode(bytes: &[u8]) -> String {
|
||||
bytes.iter().map(|b| format!("{b:02x}")).collect()
|
||||
}
|
||||
|
||||
|
||||
async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) -> anyhow::Result<()> {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
for _ in 0..30 {
|
||||
if local
|
||||
.run_until(cmd_ping(server, ca_cert, server_name))
|
||||
.await
|
||||
.is_ok()
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
}
|
||||
anyhow::bail!("server health never became ready")
|
||||
}
|
||||
|
||||
/// Creator and joiner register; creator creates group and invites joiner; joiner joins;
|
||||
/// creator sends a message; assert joiner's mailbox receives it.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
// Spawn server binary.
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
// Ensure we always terminate the child.
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let child_guard = ChildGuard(child);
|
||||
let _ = child_guard;
|
||||
|
||||
// Wait for server to be healthy and certs to be generated.
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
|
||||
// Set client auth context.
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
|
||||
let creator_state = base.join("creator.bin");
|
||||
let joiner_state = base.join("joiner.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
false,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&joiner_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
false,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(&creator_state, &server, "test-group", None, false))
|
||||
.await?;
|
||||
|
||||
let joiner_state_loaded = load_existing_state(&joiner_state, None)?;
|
||||
let joiner_identity = IdentityKeypair::from_seed(joiner_state_loaded.identity_seed);
|
||||
let joiner_pk_hex = hex_encode(&joiner_identity.public_key_bytes());
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&joiner_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(&joiner_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
Some(&joiner_pk_hex),
|
||||
false,
|
||||
"hello",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(async {
|
||||
let client = connect_node(&server, &ca_cert, "localhost").await?;
|
||||
let payloads = fetch_wait(&client, &joiner_identity.public_key_bytes(), 1000).await?;
|
||||
anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to joiner");
|
||||
Ok::<(), anyhow::Error>(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Three-party group: A creates group, invites B then C; B and C join; A sends, B and C receive;
|
||||
/// B sends, A and C receive.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_three_party_group_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let _child_guard = ChildGuard(child);
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
|
||||
let creator_state = base.join("creator.bin");
|
||||
let b_state = base.join("b.bin");
|
||||
let c_state = base.join("c.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
false,
|
||||
))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
false,
|
||||
))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
false,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let b_loaded = load_existing_state(&b_state, None)?;
|
||||
let b_pk_hex = hex_encode(&IdentityKeypair::from_seed(b_loaded.identity_seed).public_key_bytes());
|
||||
|
||||
let c_loaded = load_existing_state(&c_state, None)?;
|
||||
let c_pk_hex = hex_encode(&IdentityKeypair::from_seed(c_loaded.identity_seed).public_key_bytes());
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(&creator_state, &server, "test-group", None, false))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&b_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&c_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(&b_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_join(&c_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
true,
|
||||
"hello",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
sleep(Duration::from_millis(150)).await;
|
||||
|
||||
let b_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
let c_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
b_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||
"B did not receive 'hello', got {:?}",
|
||||
b_plaintexts
|
||||
);
|
||||
anyhow::ensure!(
|
||||
c_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||
"C did not receive 'hello', got {:?}",
|
||||
c_plaintexts
|
||||
);
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
true,
|
||||
"hi",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let a_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
let c_plaintexts2 = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
a_plaintexts.iter().any(|p| p.as_slice() == b"hi"),
|
||||
"A did not receive 'hi', got {:?}",
|
||||
a_plaintexts
|
||||
);
|
||||
anyhow::ensure!(
|
||||
c_plaintexts2.iter().any(|p| p.as_slice() == b"hi"),
|
||||
"C did not receive 'hi', got {:?}",
|
||||
c_plaintexts2
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Login should refuse if the presented identity key does not match the registered key.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_login_rejects_mismatched_identity() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
// Spawn server binary.
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let child_guard = ChildGuard(child);
|
||||
let _ = child_guard;
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
let state_path = base.join("user.bin");
|
||||
|
||||
// Register and persist state (includes identity key binding).
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state_path,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
false,
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Register the user with the bound identity so login can enforce mismatches.
|
||||
let stored_state = load_existing_state(&state_path, None)?;
|
||||
let identity_hex = hex::encode(
|
||||
IdentityKeypair::from_seed(stored_state.identity_seed).public_key_bytes(),
|
||||
);
|
||||
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"user1",
|
||||
"pass",
|
||||
Some(&identity_hex),
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Craft an unrelated identity key and attempt login with it.
|
||||
let mut bogus_identity = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut bogus_identity);
|
||||
let bogus_hex = hex::encode(bogus_identity);
|
||||
|
||||
let result = local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"user1",
|
||||
"pass",
|
||||
Some(&bogus_hex),
|
||||
None,
|
||||
None,
|
||||
))
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(_) => anyhow::bail!("login unexpectedly succeeded with mismatched identity"),
|
||||
Err(e) => {
|
||||
// Show the full error chain so we can match the server's E016 response.
|
||||
let msg = format!("{e:#}");
|
||||
anyhow::ensure!(
|
||||
msg.contains("identity") || msg.contains("E016"),
|
||||
"login failed but not for identity mismatch: {msg}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sealed Sender: enqueue with valid token (no identity binding) succeeds; recipient can fetch.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn e2e_sealed_sender_enqueue_then_fetch() -> anyhow::Result<()> {
    ensure_rustls_provider();

    // Scratch workspace: server cert/key, data dir, and client state live in a TempDir.
    let temp = TempDir::new()?;
    let base = temp.path();
    let port = pick_unused_port().expect("free port");
    let listen = format!("127.0.0.1:{port}");
    let server = listen.clone();
    let ca_cert = base.join("server-cert.der");
    let tls_key = base.join("server-key.der");
    let data_dir = base.join("data");
    let auth_token = "devtoken";

    // Spawn the server binary with the sealed-sender feature flag enabled.
    let server_bin = cargo_bin("quicnprotochat-server");
    let child = Command::new(server_bin)
        .arg("--listen")
        .arg(&listen)
        .arg("--data-dir")
        .arg(&data_dir)
        .arg("--tls-cert")
        .arg(&ca_cert)
        .arg("--tls-key")
        .arg(&tls_key)
        .arg("--auth-token")
        .arg(auth_token)
        .arg("--allow-insecure-auth")
        .arg("--sealed-sender")
        .spawn()
        .expect("spawn server");

    // Kill the server process when the test exits, pass or fail.
    struct ChildGuard(std::process::Child);
    impl Drop for ChildGuard {
        fn drop(&mut self) {
            let _ = self.0.kill();
        }
    }
    let _child_guard = ChildGuard(child);

    wait_for_health(&server, &ca_cert, "localhost").await?;
    init_auth(ClientAuth::from_parts(auth_token.to_string(), None));

    let local = tokio::task::LocalSet::new();
    let state_path = base.join("recipient.bin");

    // Create and persist the recipient's client state.
    local
        .run_until(cmd_register_state(
            &state_path,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;

    // Derive the recipient's public identity key from the persisted seed.
    let stored = load_existing_state(&state_path, None)?;
    let recipient_key = IdentityKeypair::from_seed(stored.identity_seed).public_key_bytes();
    let identity_hex = hex_encode(&recipient_key);

    // Register the account with the identity binding, then log in normally.
    local
        .run_until(cmd_register_user(
            &server,
            &ca_cert,
            "localhost",
            "recipient",
            "pass",
            Some(&identity_hex),
        ))
        .await?;

    local
        .run_until(cmd_login(
            &server,
            &ca_cert,
            "localhost",
            "recipient",
            "pass",
            Some(&identity_hex),
            None,
            None,
        ))
        .await?;

    // Enqueue a sealed payload addressed by recipient key, then fetch it back.
    let client = local.run_until(connect_node(&server, &ca_cert, "localhost")).await?;
    local
        .run_until(enqueue(&client, &recipient_key, b"sealed-payload"))
        .await?;

    // fetch_wait polls up to 500 ms; exactly one payload is expected.
    let payloads = local
        .run_until(fetch_wait(&client, &recipient_key, 500))
        .await?;
    anyhow::ensure!(
        payloads.len() == 1 && payloads[0].1.as_slice() == b"sealed-payload",
        "expected one payload 'sealed-payload', got {:?}",
        payloads
    );

    Ok(())
}
|
||||
@@ -1,45 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
# Crypto — classical
|
||||
x25519-dalek = { workspace = true }
|
||||
ed25519-dalek = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hkdf = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Crypto — post-quantum hybrid KEM (M7)
|
||||
ml-kem = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE password-authenticated key exchange
|
||||
opaque-ke = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
|
||||
# Crypto — MLS (M2)
|
||||
openmls = { workspace = true }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
openmls_traits = { workspace = true }
|
||||
tls_codec = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
# Serialisation
|
||||
capnp = { workspace = true }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
|
||||
# Async runtime
|
||||
tokio = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
thiserror = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true }
|
||||
@@ -1,256 +0,0 @@
|
||||
//! Rich application-layer message format for MLS application payloads.
|
||||
//!
|
||||
//! The server sees only opaque ciphertext; structure lives in this client-defined
|
||||
//! plaintext schema. All messages use: version byte (1) + message_type byte + type-specific payload.
|
||||
//!
|
||||
//! # Message ID
|
||||
//!
|
||||
//! `message_id` is assigned by the sender (16 random bytes) and included in the
|
||||
//! serialized payload for Chat (and implied for Reply/Reaction/ReadReceipt via ref_msg_id).
|
||||
//! Recipients can store message_ids to reference them in replies or reactions.
|
||||
|
||||
use crate::error::CoreError;
|
||||
use rand::RngCore;
|
||||
|
||||
/// Current schema version.
|
||||
pub const VERSION: u8 = 1;
|
||||
|
||||
/// Message type discriminant (one byte).
///
/// The numeric values are the on-wire type bytes (see the layout comment
/// below); they must stay stable across versions.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum MessageType {
    /// Plain chat message with a sender-assigned id and a UTF-8 body.
    Chat = 0x01,
    /// Reply referencing an earlier message id.
    Reply = 0x02,
    /// Emoji reaction referencing an earlier message id.
    Reaction = 0x03,
    /// Read receipt referencing an earlier message id.
    ReadReceipt = 0x04,
    /// Typing indicator (started/stopped).
    Typing = 0x05,
}
|
||||
|
||||
impl MessageType {
|
||||
fn from_byte(b: u8) -> Option<Self> {
|
||||
match b {
|
||||
0x01 => Some(MessageType::Chat),
|
||||
0x02 => Some(MessageType::Reply),
|
||||
0x03 => Some(MessageType::Reaction),
|
||||
0x04 => Some(MessageType::ReadReceipt),
|
||||
0x05 => Some(MessageType::Typing),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parsed application message (one of the rich types).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AppMessage {
    /// Plain chat: body (UTF-8). message_id is included so recipients can store and reference it.
    Chat {
        message_id: [u8; 16],
        body: Vec<u8>,
    },
    /// Reply to an earlier message identified by `ref_msg_id`.
    Reply {
        ref_msg_id: [u8; 16],
        body: Vec<u8>,
    },
    /// Emoji reaction to the message identified by `ref_msg_id`.
    Reaction {
        ref_msg_id: [u8; 16],
        emoji: Vec<u8>,
    },
    /// Read receipt for the message identified by `msg_id`.
    ReadReceipt {
        msg_id: [u8; 16],
    },
    /// Typing indicator.
    Typing {
        /// 0 = stopped, 1 = typing
        active: u8,
    },
}
|
||||
|
||||
/// Generate a new 16-byte message ID (e.g. for Chat/Reply so recipients can reference it).
|
||||
pub fn generate_message_id() -> [u8; 16] {
|
||||
let mut id = [0u8; 16];
|
||||
rand::rngs::OsRng.fill_bytes(&mut id);
|
||||
id
|
||||
}
|
||||
|
||||
// ── Layout (minimal, no Cap'n Proto) ─────────────────────────────────────────
|
||||
//
|
||||
// All messages: [version: 1][type: 1][payload...]
|
||||
//
|
||||
// Chat: [msg_id: 16][body_len: 2 BE][body]
|
||||
// Reply: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||
// Reaction: [ref_msg_id: 16][emoji_len: 1][emoji]
|
||||
// ReadReceipt: [msg_id: 16]
|
||||
// Typing: [active: 1] 0 = stopped, 1 = typing
|
||||
|
||||
/// Serialize a rich message into the application payload format.
|
||||
pub fn serialize(msg_type: MessageType, payload: &[u8]) -> Vec<u8> {
|
||||
let mut out = Vec::with_capacity(2 + payload.len());
|
||||
out.push(VERSION);
|
||||
out.push(msg_type as u8);
|
||||
out.extend_from_slice(payload);
|
||||
out
|
||||
}
|
||||
|
||||
/// Serialize a Chat message (generates message_id internally; pass None to generate, or Some(id) when replying with a known id).
|
||||
pub fn serialize_chat(body: &[u8], message_id: Option<[u8; 16]>) -> Vec<u8> {
|
||||
let id = message_id.unwrap_or_else(generate_message_id);
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
serialize(MessageType::Chat, &payload)
|
||||
}
|
||||
|
||||
/// Serialize a Reply message.
|
||||
pub fn serialize_reply(ref_msg_id: [u8; 16], body: &[u8]) -> Vec<u8> {
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
serialize(MessageType::Reply, &payload)
|
||||
}
|
||||
|
||||
/// Serialize a Reaction message.
|
||||
pub fn serialize_reaction(ref_msg_id: [u8; 16], emoji: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if emoji.len() > 255 {
|
||||
return Err(CoreError::AppMessage("emoji length > 255".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 1 + emoji.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.push(emoji.len() as u8);
|
||||
payload.extend_from_slice(emoji);
|
||||
Ok(serialize(MessageType::Reaction, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a ReadReceipt message.
|
||||
pub fn serialize_read_receipt(msg_id: [u8; 16]) -> Vec<u8> {
|
||||
serialize(MessageType::ReadReceipt, &msg_id)
|
||||
}
|
||||
|
||||
/// Serialize a Typing message (active: 0 = stopped, 1 = typing).
|
||||
pub fn serialize_typing(active: u8) -> Vec<u8> {
|
||||
let payload = [active];
|
||||
serialize(MessageType::Typing, &payload)
|
||||
}
|
||||
|
||||
/// Parse bytes into (MessageType, AppMessage). Fails if version/type unknown or payload too short.
|
||||
pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
|
||||
if bytes.len() < 2 {
|
||||
return Err(CoreError::AppMessage("payload too short (need version + type)".into()));
|
||||
}
|
||||
let version = bytes[0];
|
||||
if version != VERSION {
|
||||
return Err(CoreError::AppMessage(format!("unsupported version {version}").into()));
|
||||
}
|
||||
let msg_type = MessageType::from_byte(bytes[1])
|
||||
.ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1]).into()))?;
|
||||
let payload = &bytes[2..];
|
||||
|
||||
let app = match msg_type {
|
||||
MessageType::Chat => parse_chat(payload)?,
|
||||
MessageType::Reply => parse_reply(payload)?,
|
||||
MessageType::Reaction => parse_reaction(payload)?,
|
||||
MessageType::ReadReceipt => parse_read_receipt(payload)?,
|
||||
MessageType::Typing => parse_typing(payload)?,
|
||||
};
|
||||
Ok((msg_type, app))
|
||||
}
|
||||
|
||||
fn parse_chat(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Chat payload too short".into()));
|
||||
}
|
||||
let mut message_id = [0u8; 16];
|
||||
message_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Chat body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Chat { message_id, body })
|
||||
}
|
||||
|
||||
fn parse_reply(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Reply payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Reply body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Reply { ref_msg_id, body })
|
||||
}
|
||||
|
||||
fn parse_reaction(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 1 {
|
||||
return Err(CoreError::AppMessage("Reaction payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let emoji_len = payload[16] as usize;
|
||||
if payload.len() < 17 + emoji_len {
|
||||
return Err(CoreError::AppMessage("Reaction emoji length exceeds payload".into()));
|
||||
}
|
||||
let emoji = payload[17..17 + emoji_len].to_vec();
|
||||
Ok(AppMessage::Reaction { ref_msg_id, emoji })
|
||||
}
|
||||
|
||||
fn parse_read_receipt(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 {
|
||||
return Err(CoreError::AppMessage("ReadReceipt payload too short".into()));
|
||||
}
|
||||
let mut msg_id = [0u8; 16];
|
||||
msg_id.copy_from_slice(&payload[..16]);
|
||||
Ok(AppMessage::ReadReceipt { msg_id })
|
||||
}
|
||||
|
||||
fn parse_typing(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.is_empty() {
|
||||
return Err(CoreError::AppMessage("Typing payload empty".into()));
|
||||
}
|
||||
Ok(AppMessage::Typing { active: payload[0] })
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Chat round-trip: type tag, variant, and body survive serialize → parse.
    // The message_id is random, so only the body is compared.
    #[test]
    fn roundtrip_chat() {
        let body = b"hello";
        let encoded = serialize_chat(body, None);
        let (t, msg) = parse(&encoded).expect("serialize_chat output is valid");
        assert_eq!(t, MessageType::Chat);
        assert!(matches!(&msg, AppMessage::Chat { .. }), "expected Chat, got {:?}", msg);
        if let AppMessage::Chat { body: b, .. } = &msg {
            assert_eq!(b.as_slice(), body);
        }
    }

    // Reply round-trip: both the referenced id and the body are checked.
    #[test]
    fn roundtrip_reply() {
        let ref_id = [1u8; 16];
        let body = b"reply text";
        let encoded = serialize_reply(ref_id, body);
        let (t, msg) = parse(&encoded).expect("serialize_reply output is valid");
        assert_eq!(t, MessageType::Reply);
        assert!(matches!(&msg, AppMessage::Reply { .. }), "expected Reply, got {:?}", msg);
        if let AppMessage::Reply { ref_msg_id, body: b } = &msg {
            assert_eq!(ref_msg_id, &ref_id);
            assert_eq!(b.as_slice(), body);
        }
    }

    // Typing round-trip: the single `active` flag byte survives.
    #[test]
    fn roundtrip_typing() {
        let encoded = serialize_typing(1);
        let (t, msg) = parse(&encoded).expect("serialize_typing output is valid");
        assert_eq!(t, MessageType::Typing);
        assert!(matches!(&msg, AppMessage::Typing { .. }), "expected Typing, got {:?}", msg);
        if let AppMessage::Typing { active } = &msg {
            assert_eq!(*active, 1);
        }
    }
}
|
||||
@@ -1,597 +0,0 @@
|
||||
//! MLS group state machine.
|
||||
//!
|
||||
//! # Design
|
||||
//!
|
||||
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus a per-client crypto
|
||||
//! backend ([`StoreCrypto`] or [`HybridCryptoProvider`] for M7). The backend
|
||||
//! is **persistent** — it holds the key store that maps init-key references
|
||||
//! to HPKE private keys (classical or hybrid).
|
||||
//! openmls's `new_from_welcome` reads those private keys from the key store to
|
||||
//! decrypt the Welcome, so the same backend instance must be used from
|
||||
//! `generate_key_package` through `join_group`.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! All MLS messages are serialised/deserialised using TLS presentation language
|
||||
//! encoding (`tls_codec`). The resulting byte vectors are what the transport
|
||||
//! layer (and the Delivery Service) sees.
|
||||
//!
|
||||
//! # MLS ciphersuite
|
||||
//!
|
||||
//! `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` — same as M2.
|
||||
//!
|
||||
//! # Ratchet tree
|
||||
//!
|
||||
//! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded
|
||||
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
|
||||
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
|
||||
|
||||
use std::{path::Path, sync::Arc};
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
||||
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
|
||||
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
|
||||
TlsSerializeTrait,
|
||||
};
|
||||
use openmls_traits::OpenMlsCryptoProvider;
|
||||
|
||||
use crate::{
|
||||
error::CoreError,
|
||||
hybrid_crypto::HybridCryptoProvider,
|
||||
identity::IdentityKeypair,
|
||||
keystore::{DiskKeyStore, StoreCrypto},
|
||||
};
|
||||
|
||||
// ── Constants ─────────────────────────────────────────────────────────────────
|
||||
|
||||
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
// ── GroupMember ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Per-client MLS state: identity keypair, crypto backend, and optional group.
///
/// Generic over the crypto provider `P`: [`StoreCrypto`] (default, classical)
/// or [`HybridCryptoProvider`] (M7, post-quantum hybrid KEM).
///
/// # Lifecycle
///
/// ```text
/// GroupMember::new(identity)
///   ├─ generate_key_package() → upload to AS
///   ├─ create_group(group_id) → become sole member
///   │    └─ add_member(kp)    → invite a peer; returns (commit, welcome)
///   └─ join_group(welcome)    → join after receiving a Welcome
///        ├─ send_message(msg) → encrypt application data
///        └─ receive_message(b) → decrypt; returns Some(plaintext) or None
/// ```
pub struct GroupMember<P: OpenMlsCryptoProvider = StoreCrypto> {
    /// Crypto backend (classical or hybrid). Holds the key store with HPKE
    /// private keys created during `generate_key_package`.
    backend: P,
    /// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
    identity: Arc<IdentityKeypair>,
    /// Active MLS group, if any.
    group: Option<MlsGroup>,
    /// Shared group configuration (wire format, ratchet tree extension, etc.).
    config: MlsGroupConfig,
}
|
||||
|
||||
impl GroupMember<StoreCrypto> {
|
||||
/// Create a new `GroupMember` with a fresh crypto backend (classical X25519).
|
||||
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
|
||||
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
|
||||
}
|
||||
|
||||
/// Create a `GroupMember` with a persistent keystore at `path`.
|
||||
pub fn new_persistent(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
path: impl AsRef<Path>,
|
||||
) -> Result<Self, CoreError> {
|
||||
let key_store = DiskKeyStore::persistent(path)
|
||||
.map_err(|e| CoreError::Io(format!("keystore: {e}")))?;
|
||||
Ok(Self::new_with_state(identity, key_store, None))
|
||||
}
|
||||
|
||||
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
|
||||
pub fn new_with_state(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
key_store: DiskKeyStore,
|
||||
group: Option<MlsGroup>,
|
||||
) -> Self {
|
||||
let config = MlsGroupConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
Self {
|
||||
backend: StoreCrypto::new(key_store),
|
||||
identity,
|
||||
group,
|
||||
config,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GroupMember<HybridCryptoProvider> {
|
||||
/// Create a `GroupMember` that uses post-quantum hybrid KEM (X25519 + ML-KEM-768) for HPKE.
|
||||
///
|
||||
/// All members of a group must use the same provider type: if the creator uses
|
||||
/// `new_with_hybrid`, KeyPackages will have hybrid init keys and joiners must
|
||||
/// also use `new_with_hybrid` to decrypt the Welcome.
|
||||
pub fn new_with_hybrid(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
key_store: DiskKeyStore,
|
||||
) -> Self {
|
||||
Self::new_with_state_hybrid(identity, key_store, None)
|
||||
}
|
||||
|
||||
/// Create a PQ `GroupMember` from persisted state (identity, key store, optional group).
|
||||
pub fn new_with_state_hybrid(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
key_store: DiskKeyStore,
|
||||
group: Option<MlsGroup>,
|
||||
) -> Self {
|
||||
let config = MlsGroupConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
Self {
|
||||
backend: HybridCryptoProvider::new(key_store),
|
||||
identity,
|
||||
group,
|
||||
config,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: OpenMlsCryptoProvider> GroupMember<P> {
|
||||
|
||||
// ── KeyPackage ────────────────────────────────────────────────────────────
|
||||
|
||||
    /// Generate a fresh single-use MLS KeyPackage.
    ///
    /// The HPKE init private key is stored in `self.backend`'s key store.
    /// **The same `GroupMember` instance must later call `join_group`** so
    /// that `new_from_welcome` can retrieve the private key.
    ///
    /// # Returns
    ///
    /// TLS-encoded KeyPackage bytes, ready for upload to the Authentication
    /// Service.
    ///
    /// # Errors
    ///
    /// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage.
    pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
        // make_credential_with_key is defined on this type outside this
        // excerpt; presumably it wraps the identity key in a CredentialWithKey.
        let credential_with_key = self.make_credential_with_key()?;

        let key_package = KeyPackage::builder()
            .build(
                CryptoConfig::with_default_version(CIPHERSUITE),
                &self.backend,
                self.identity.as_ref(),
                credential_with_key,
            )
            .map_err(|e| CoreError::Mls(format!("{e:?}")))?;

        // Detached TLS serialisation: just the KeyPackage, no outer framing.
        key_package
            .tls_serialize_detached()
            .map_err(|e| CoreError::Mls(format!("{e:?}")))
    }
|
||||
|
||||
// ── Group creation ────────────────────────────────────────────────────────
|
||||
|
||||
    /// Create a new MLS group with `group_id` as the group identifier.
    ///
    /// The caller becomes the sole member (epoch 0). Use `add_member` to
    /// invite additional members.
    ///
    /// `group_id` can be any non-empty byte string; SHA-256 of a human-readable
    /// name is a good choice.
    ///
    /// # Errors
    ///
    /// Returns [`CoreError::Mls`] if the group already exists or openmls fails.
    pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
        let credential_with_key = self.make_credential_with_key()?;
        let mls_id = GroupId::from_slice(group_id);

        let group = MlsGroup::new_with_group_id(
            &self.backend,
            self.identity.as_ref(),
            &self.config,
            mls_id,
            credential_with_key,
        )
        .map_err(|e| CoreError::Mls(format!("{e:?}")))?;

        // Replaces any previously active group on this member.
        self.group = Some(group);
        Ok(())
    }
|
||||
|
||||
// ── Membership ────────────────────────────────────────────────────────────
|
||||
|
||||
    /// Add a new member by their TLS-encoded KeyPackage bytes.
    ///
    /// Produces a Commit (to update existing members' state) and a Welcome
    /// (to bootstrap the new member). The caller is responsible for
    /// distributing these:
    ///
    /// - Send `commit_bytes` to all **existing** group members via the DS.
    ///   (In the 2-party case where the creator is the only member, this can
    ///   be discarded — the creator applies it locally via this method.)
    /// - Send `welcome_bytes` to the **new** member via the DS.
    ///
    /// This method also merges the pending Commit into the local group state
    /// (advancing the epoch), so the caller is immediately ready to encrypt.
    ///
    /// # Returns
    ///
    /// `(commit_bytes, welcome_bytes)` — both TLS-encoded MLS messages.
    ///
    /// # Errors
    ///
    /// Returns [`CoreError::Mls`] if the KeyPackage is malformed, no active
    /// group exists, or openmls fails.
    pub fn add_member(
        &mut self,
        mut key_package_bytes: &[u8],
    ) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
        let group = self
            .group
            .as_mut()
            .ok_or_else(|| CoreError::Mls("no active group".into()))?;

        // Deserialise and validate the peer's KeyPackage. KeyPackage only derives
        // TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
        // which verifies the signature and returns a trusted KeyPackage.
        let key_package: KeyPackage =
            KeyPackageIn::tls_deserialize(&mut key_package_bytes)
                .map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
                .validate(self.backend.crypto(), ProtocolVersion::Mls10)
                .map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;

        // Create the Commit + Welcome. The third return value (GroupInfo) is for
        // external commits and is not needed here.
        let (commit_out, welcome_out, _group_info) = group
            .add_members(&self.backend, self.identity.as_ref(), &[key_package])
            .map_err(|e| CoreError::Mls(format!("add_members: {e:?}")))?;

        // Merge the pending Commit into our own state, advancing the epoch.
        // Order matters: add_members stages the Commit, merge applies it.
        group
            .merge_pending_commit(&self.backend)
            .map_err(|e| CoreError::Mls(format!("merge_pending_commit: {e:?}")))?;

        let commit_bytes = commit_out
            .to_bytes()
            .map_err(|e| CoreError::Mls(format!("commit serialise: {e:?}")))?;
        let welcome_bytes = welcome_out
            .to_bytes()
            .map_err(|e| CoreError::Mls(format!("welcome serialise: {e:?}")))?;

        Ok((commit_bytes, welcome_bytes))
    }
|
||||
|
||||
    /// Join an existing MLS group from a TLS-encoded Welcome message.
    ///
    /// The caller must have previously called [`generate_key_package`] on
    /// **this same instance** so that the HPKE init private key is in the
    /// backend's key store.
    ///
    /// # Errors
    ///
    /// Returns [`CoreError::Mls`] if the Welcome does not match any known
    /// KeyPackage, or openmls validation fails.
    ///
    /// [`generate_key_package`]: Self::generate_key_package
    pub fn join_group(&mut self, mut welcome_bytes: &[u8]) -> Result<(), CoreError> {
        // Deserialise MlsMessageIn, then extract the inner Welcome.
        let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
            .map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;

        // into_welcome() is feature-gated in openmls 0.5; extract() is public.
        let welcome = match msg_in.extract() {
            MlsMessageInBody::Welcome(w) => w,
            _ => return Err(CoreError::Mls("expected a Welcome message".into())),
        };

        // ratchet_tree = None because use_ratchet_tree_extension = true embeds
        // the tree inside the Welcome's GroupInfo extension.
        let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
            .map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;

        self.group = Some(group);
        Ok(())
    }
|
||||
|
||||
// ── Application messages ──────────────────────────────────────────────────
|
||||
|
||||
/// Encrypt `plaintext` as an MLS Application message.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// TLS-encoded `MlsMessageOut` bytes (PrivateMessage variant).
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if there is no active group or encryption fails.
|
||||
pub fn send_message(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
let group = self
|
||||
.group
|
||||
.as_mut()
|
||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||
|
||||
let mls_msg: MlsMessageOut = group
|
||||
.create_message(&self.backend, self.identity.as_ref(), plaintext)
|
||||
.map_err(|e| CoreError::Mls(format!("create_message: {e:?}")))?;
|
||||
|
||||
mls_msg
|
||||
.to_bytes()
|
||||
.map_err(|e| CoreError::Mls(format!("message serialise: {e:?}")))
|
||||
}
|
||||
|
||||
/// Process an incoming TLS-encoded MLS message.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// - `Ok(Some(plaintext))` for Application messages.
|
||||
/// - `Ok(None)` for Commit messages (group state is updated internally).
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the message is malformed, fails
|
||||
/// authentication, or the group state is inconsistent.
|
||||
pub fn receive_message(&mut self, mut bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
|
||||
let group = self
|
||||
.group
|
||||
.as_mut()
|
||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
||||
|
||||
// into_protocol_message() is feature-gated; extract() + manual construction is not.
|
||||
let protocol_message = match msg_in.extract() {
|
||||
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
|
||||
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
|
||||
_ => return Err(CoreError::Mls("not a protocol message".into())),
|
||||
};
|
||||
|
||||
let processed = group
|
||||
.process_message(&self.backend, protocol_message)
|
||||
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
|
||||
|
||||
match processed.into_content() {
|
||||
ProcessedMessageContent::ApplicationMessage(app) => Ok(Some(app.into_bytes())),
|
||||
ProcessedMessageContent::StagedCommitMessage(staged) => {
|
||||
// Merge the Commit into the local state (epoch advances).
|
||||
group
|
||||
.merge_staged_commit(&self.backend, *staged)
|
||||
.map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
|
||||
Ok(None)
|
||||
}
|
||||
// Proposals are stored for a later Commit; nothing to return yet.
|
||||
ProcessedMessageContent::ProposalMessage(proposal) => {
|
||||
group.store_pending_proposal(*proposal);
|
||||
Ok(None)
|
||||
}
|
||||
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
|
||||
group.store_pending_proposal(*proposal);
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process an incoming TLS-encoded MLS message and return sender identity + plaintext for application messages.
|
||||
///
|
||||
/// Same as [`receive_message`], but for Application messages returns
|
||||
/// `Some((sender_identity_bytes, plaintext))` so the client can display who sent the message.
|
||||
/// `sender_identity_bytes` is the MLS credential identity (e.g. Ed25519 public key for Basic credential).
|
||||
///
|
||||
/// Returns `Ok(None)` for Commit and Proposal messages (group state is updated internally).
|
||||
pub fn receive_message_with_sender(
|
||||
&mut self,
|
||||
mut bytes: &[u8],
|
||||
) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
|
||||
let group = self
|
||||
.group
|
||||
.as_mut()
|
||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
||||
|
||||
let protocol_message = match msg_in.extract() {
|
||||
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
|
||||
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
|
||||
_ => return Err(CoreError::Mls("not a protocol message".into())),
|
||||
};
|
||||
|
||||
let processed = group
|
||||
.process_message(&self.backend, protocol_message)
|
||||
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
|
||||
|
||||
let sender_identity = processed.credential().identity().to_vec();
|
||||
|
||||
match processed.into_content() {
|
||||
ProcessedMessageContent::ApplicationMessage(app) => {
|
||||
Ok(Some((sender_identity, app.into_bytes())))
|
||||
}
|
||||
ProcessedMessageContent::StagedCommitMessage(staged) => {
|
||||
group
|
||||
.merge_staged_commit(&self.backend, *staged)
|
||||
.map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
|
||||
Ok(None)
|
||||
}
|
||||
ProcessedMessageContent::ProposalMessage(proposal) => {
|
||||
group.store_pending_proposal(*proposal);
|
||||
Ok(None)
|
||||
}
|
||||
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
|
||||
group.store_pending_proposal(*proposal);
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Accessors ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// Return the MLS group ID bytes, or `None` if no group is active.
|
||||
pub fn group_id(&self) -> Option<Vec<u8>> {
|
||||
self.group
|
||||
.as_ref()
|
||||
.map(|g| g.group_id().as_slice().to_vec())
|
||||
}
|
||||
|
||||
/// Return a reference to the identity keypair.
///
/// The keypair remains owned by this `GroupMember`; callers borrow it
/// (e.g. to sign or to read the public key).
pub fn identity(&self) -> &IdentityKeypair {
    &self.identity
}
|
||||
|
||||
/// Return the private seed of the identity (for persistence).
///
/// NOTE: this is sensitive key material — the returned copy is NOT
/// zeroized on drop; callers are responsible for handling it safely.
pub fn identity_seed(&self) -> [u8; 32] {
    self.identity.seed_bytes()
}
|
||||
|
||||
/// Return a reference to the underlying crypto backend.
///
/// Useful for callers that need the same provider instance (e.g. for
/// KeyPackage generation outside this struct).
pub fn backend(&self) -> &P {
    &self.backend
}
|
||||
|
||||
/// Return a reference to the MLS group, if active.
///
/// `None` before `create_group`/`join_group` has succeeded.
pub fn group_ref(&self) -> Option<&MlsGroup> {
    self.group.as_ref()
}
|
||||
|
||||
/// Return the identity (credential) bytes of all current group members.
|
||||
///
|
||||
/// Each entry is the raw credential payload (Ed25519 public key bytes)
|
||||
/// extracted from the member's MLS leaf node.
|
||||
pub fn member_identities(&self) -> Vec<Vec<u8>> {
|
||||
let group = match self.group.as_ref() {
|
||||
Some(g) => g,
|
||||
None => return Vec::new(),
|
||||
};
|
||||
group
|
||||
.members()
|
||||
.map(|m| m.credential.identity().to_vec())
|
||||
.collect()
|
||||
}
|
||||
|
||||
// ── Private helpers ───────────────────────────────────────────────────────
|
||||
|
||||
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
|
||||
let credential = Credential::new(
|
||||
self.identity.public_key_bytes().to_vec(),
|
||||
CredentialType::Basic,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
Ok(CredentialWithKey {
|
||||
credential,
|
||||
signature_key: self.identity.public_key_bytes().to_vec().into(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ── Unit tests ────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Full two-party MLS round-trip: creator creates group, adds joiner, then they exchange messages.
    #[test]
    fn two_party_mls_round_trip() {
        // Independent long-term identities for the two parties.
        let creator_id = Arc::new(IdentityKeypair::generate());
        let joiner_id = Arc::new(IdentityKeypair::generate());

        let mut creator = GroupMember::new(Arc::clone(&creator_id));
        let mut joiner = GroupMember::new(Arc::clone(&joiner_id));

        // The joiner publishes a KeyPackage; the creator uses it to add them.
        let joiner_kp = joiner
            .generate_key_package()
            .expect("joiner KeyPackage");

        creator
            .create_group(b"test-group-m3")
            .expect("creator create group");

        // add_member returns a pair; only the second element (the Welcome) is used here.
        let (_, welcome) = creator
            .add_member(&joiner_kp)
            .expect("creator add joiner");

        joiner.join_group(&welcome).expect("joiner join group");

        // creator -> joiner application message round-trip.
        let ct_creator = creator.send_message(b"hello").expect("creator send");
        let pt_joiner = joiner
            .receive_message(&ct_creator)
            .expect("joiner recv")
            .expect("application message");
        assert_eq!(pt_joiner, b"hello");

        // joiner -> creator in the opposite direction.
        let ct_joiner = joiner.send_message(b"hello back").expect("joiner send");
        let pt_creator = creator
            .receive_message(&ct_joiner)
            .expect("creator recv")
            .expect("application message");
        assert_eq!(pt_creator, b"hello back");
    }

    /// M7: Full two-party MLS round-trip with post-quantum hybrid KEM (HybridCryptoProvider).
    #[test]
    fn two_party_mls_round_trip_hybrid() {
        let creator_id = Arc::new(IdentityKeypair::generate());
        let joiner_id = Arc::new(IdentityKeypair::generate());
        // Ephemeral key stores: no on-disk state leaks out of the test.
        let key_store_creator = DiskKeyStore::ephemeral();
        let key_store_joiner = DiskKeyStore::ephemeral();

        let mut creator =
            GroupMember::<HybridCryptoProvider>::new_with_hybrid(Arc::clone(&creator_id), key_store_creator);
        let mut joiner =
            GroupMember::<HybridCryptoProvider>::new_with_hybrid(Arc::clone(&joiner_id), key_store_joiner);

        let joiner_kp = joiner
            .generate_key_package()
            .expect("joiner KeyPackage (hybrid)");

        creator
            .create_group(b"test-group-m7-hybrid")
            .expect("creator create group");

        let (_, welcome) = creator
            .add_member(&joiner_kp)
            .expect("creator add joiner");

        joiner.join_group(&welcome).expect("joiner join group");

        // Same round-trip shape as the classic test, under the hybrid provider.
        let ct_creator = creator.send_message(b"hello pq").expect("creator send");
        let pt_joiner = joiner
            .receive_message(&ct_creator)
            .expect("joiner recv")
            .expect("application message");
        assert_eq!(pt_joiner, b"hello pq");

        let ct_joiner = joiner.send_message(b"hello back pq").expect("joiner send");
        let pt_creator = creator
            .receive_message(&ct_joiner)
            .expect("creator recv")
            .expect("application message");
        assert_eq!(pt_creator, b"hello back pq");
    }

    /// `group_id()` returns None before create_group, Some afterwards.
    #[test]
    fn group_id_lifecycle() {
        let id = Arc::new(IdentityKeypair::generate());
        let mut member = GroupMember::new(id);

        assert!(member.group_id().is_none(), "no group before create");
        member.create_group(b"gid").unwrap();
        assert_eq!(
            member.group_id().unwrap(),
            b"gid".as_slice(),
            "group_id must match what was passed"
        );
    }
}
|
||||
@@ -1,135 +0,0 @@
|
||||
//! Ed25519 identity keypair for MLS credentials and AS registration.
|
||||
//!
|
||||
//! The [`IdentityKeypair`] is the long-term identity key embedded in MLS
|
||||
//! `BasicCredential`s. It is used for signing MLS messages and as the
|
||||
//! indexing key for the Authentication Service.
|
||||
//!
|
||||
//! # Zeroize
|
||||
//!
|
||||
//! The 32-byte private seed is stored as `Zeroizing<[u8; 32]>`, which zeroes
|
||||
//! the bytes on drop. `[u8; 32]` is `Copy + Default` and satisfies zeroize's
|
||||
//! `DefaultIsZeroes` constraint, avoiding a conflict with ed25519-dalek's
|
||||
//! `SigningKey` zeroize impl.
|
||||
//!
|
||||
//! # Fingerprint
|
||||
//!
|
||||
//! A 32-byte SHA-256 digest of the raw public key bytes is used as a compact,
|
||||
//! collision-resistant identifier for logging.
|
||||
|
||||
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
|
||||
use openmls_traits::signatures::Signer;
|
||||
use openmls_traits::types::{Error as MlsError, SignatureScheme};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
/// An Ed25519 identity keypair.
///
/// Created with [`IdentityKeypair::generate`]. The private signing key seed
/// is zeroed when this struct is dropped.
pub struct IdentityKeypair {
    /// Raw 32-byte private seed — zeroized on drop.
    ///
    /// Stored as bytes rather than `SigningKey` to satisfy zeroize's
    /// `DefaultIsZeroes` bound on `Zeroizing<T>`. The `SigningKey` is
    /// rebuilt on demand (see `signing_key`).
    seed: Zeroizing<[u8; 32]>,
    /// Corresponding 32-byte public verifying key (not secret; kept
    /// materialised to avoid re-deriving it on every use).
    verifying: VerifyingKey,
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Recreate an identity keypair from a 32-byte seed.
|
||||
pub fn from_seed(seed: [u8; 32]) -> Self {
|
||||
let signing = SigningKey::from_bytes(&seed);
|
||||
let verifying = signing.verifying_key();
|
||||
Self {
|
||||
seed: Zeroizing::new(seed),
|
||||
verifying,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte private seed (for persistence).
|
||||
pub fn seed_bytes(&self) -> [u8; 32] {
|
||||
*self.seed
|
||||
}
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Generate a fresh random Ed25519 identity keypair.
|
||||
pub fn generate() -> Self {
|
||||
use rand::rngs::OsRng;
|
||||
let signing = SigningKey::generate(&mut OsRng);
|
||||
let verifying = signing.verifying_key();
|
||||
let seed = Zeroizing::new(signing.to_bytes());
|
||||
Self { seed, verifying }
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte Ed25519 public key.
|
||||
///
|
||||
/// This is the byte array used as `identityKey` in `auth.capnp` calls.
|
||||
pub fn public_key_bytes(&self) -> [u8; 32] {
|
||||
self.verifying.to_bytes()
|
||||
}
|
||||
|
||||
/// Return the SHA-256 fingerprint of the public key (32 bytes).
|
||||
pub fn fingerprint(&self) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(self.verifying.to_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Reconstruct the `SigningKey` from the stored seed bytes.
|
||||
fn signing_key(&self) -> SigningKey {
|
||||
SigningKey::from_bytes(&self.seed)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement the openmls `Signer` trait so `IdentityKeypair` can be passed
|
||||
/// directly to `KeyPackage::builder().build(...)` without needing the external
|
||||
/// `openmls_basic_credential` crate.
|
||||
impl Signer for IdentityKeypair {
|
||||
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, MlsError> {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
Ok(sig.to_bytes().to_vec())
|
||||
}
|
||||
|
||||
fn signature_scheme(&self) -> SignatureScheme {
|
||||
SignatureScheme::ED25519
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for IdentityKeypair {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
serializer.serialize_bytes(&self.seed[..])
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for IdentityKeypair {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let bytes: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
|
||||
let seed: [u8; 32] = bytes
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
|
||||
Ok(IdentityKeypair::from_seed(seed))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for IdentityKeypair {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let fp = self.fingerprint();
|
||||
f.debug_struct("IdentityKeypair")
|
||||
.field(
|
||||
"fingerprint",
|
||||
&format!("{:02x}{:02x}{:02x}{:02x}…", fp[0], fp[1], fp[2], fp[3]),
|
||||
)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
//! Core cryptographic primitives, MLS group state machine, and hybrid
|
||||
//! post-quantum KEM for quicnprotochat.
|
||||
//!
|
||||
//! # Module layout
|
||||
//!
|
||||
//! | Module | Responsibility |
|
||||
//! |---------------|------------------------------------------------------------------|
|
||||
//! | `app_message` | Rich application payload (Chat, Reply, Reaction, ReadReceipt, Typing) |
|
||||
//! | `error` | [`CoreError`] type |
|
||||
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
|
||||
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
|
||||
//! | `group` | [`GroupMember`] — MLS group lifecycle (create/join/send/recv) |
|
||||
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
|
||||
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
|
||||
|
||||
// Private modules; their public surface is re-exported selectively below.
mod app_message;
mod error;
mod group;
// Public modules: callers may need the full namespace, not just re-exports.
pub mod hybrid_crypto;
pub mod hybrid_kem;
mod identity;
mod keypackage;
mod keystore;
pub mod opaque_auth;

// ── Public API ────────────────────────────────────────────────────────────────

pub use app_message::{
    serialize, serialize_chat, serialize_reaction, serialize_read_receipt, serialize_reply,
    serialize_typing, parse, generate_message_id, AppMessage, MessageType, VERSION as APP_MESSAGE_VERSION,
};
pub use error::CoreError;
pub use group::GroupMember;
pub use hybrid_kem::{
    hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
    HybridPublicKey,
};
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
pub use identity::IdentityKeypair;
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
pub use keystore::{DiskKeyStore, StoreCrypto};
// Re-export MlsGroup so downstream crates need not depend on openmls directly.
pub use openmls::prelude::MlsGroup;
|
||||
@@ -1,5 +0,0 @@
|
||||
//! Desktop entry point for quicnprotochat-gui.
|
||||
|
||||
/// Binary entry point: delegates straight to the GUI library crate's `run()`.
fn main() {
    quicnprotochat_gui::run()
}
|
||||
@@ -1,12 +0,0 @@
|
||||
[package]
name = "quicnprotochat-p2p"
version = "0.1.0"
edition = "2021"
description = "P2P transport layer for quicnprotochat using iroh."
license = "MIT"

[dependencies]
# iroh provides the QUIC endpoint with NAT traversal via relay servers.
iroh = "0.96"
# Versions pinned centrally in the workspace manifest.
tokio = { workspace = true }
tracing = { workspace = true }
anyhow = { workspace = true }
|
||||
@@ -1,186 +0,0 @@
|
||||
//! P2P transport layer for quicnprotochat using iroh.
|
||||
//!
|
||||
//! Provides direct peer-to-peer QUIC connections with NAT traversal via iroh
|
||||
//! relay servers. When both peers are online, messages bypass the central
|
||||
//! server entirely.
|
||||
//!
|
||||
//! # Architecture
|
||||
//!
|
||||
//! ```text
|
||||
//! Client A ── iroh direct (QUIC) ── Client B (preferred: low latency)
|
||||
//! │ │
|
||||
//! └── QUIC/TLS ── Server ── QUIC/TLS ┘ (fallback: store-and-forward)
|
||||
//! ```
|
||||
|
||||
use iroh::{Endpoint, EndpointAddr, PublicKey, SecretKey};
|
||||
|
||||
/// ALPN protocol identifier for quicnprotochat P2P messaging.
///
/// Dialed in [`P2pNode::send`] and advertised by the endpoint in
/// [`P2pNode::start`]; the `/1` suffix versions the wire protocol.
const P2P_ALPN: &[u8] = b"quicnprotochat/p2p/1";
|
||||
|
||||
/// A P2P node backed by an iroh endpoint.
///
/// Manages direct QUIC connections to peers with automatic NAT traversal.
pub struct P2pNode {
    // The single iroh endpoint; owns the node identity and all connections.
    endpoint: Endpoint,
}
|
||||
|
||||
/// Received P2P message with sender information.
pub struct P2pMessage {
    /// Public key (node ID) of the connecting peer, as reported by iroh.
    pub sender: PublicKey,
    /// Raw application payload (the length-prefix framing is already stripped).
    pub payload: Vec<u8>,
}
|
||||
|
||||
impl P2pNode {
    /// Start a new P2P node.
    ///
    /// Generates a fresh identity or reuses a provided secret key.
    pub async fn start(secret_key: Option<SecretKey>) -> anyhow::Result<Self> {
        let mut builder = Endpoint::builder();
        // Reuse a persisted identity when given; otherwise iroh generates one.
        if let Some(sk) = secret_key {
            builder = builder.secret_key(sk);
        }
        // Advertise our ALPN so incoming connections for it are accepted.
        builder = builder.alpns(vec![P2P_ALPN.to_vec()]);

        let endpoint = builder.bind().await?;

        tracing::info!(
            node_id = %endpoint.id().fmt_short(),
            "P2P node started"
        );

        Ok(Self { endpoint })
    }

    /// This node's public key (used as node ID for peer discovery).
    pub fn node_id(&self) -> PublicKey {
        self.endpoint.id()
    }

    /// This node's secret key (for persistence across restarts).
    pub fn secret_key(&self) -> SecretKey {
        self.endpoint.secret_key().clone()
    }

    /// Get the node's network address information for publishing to discovery.
    pub fn endpoint_addr(&self) -> EndpointAddr {
        self.endpoint.addr()
    }

    /// Send a payload directly to a peer via P2P QUIC.
    ///
    /// Opens a fresh connection and one unidirectional stream per call.
    pub async fn send(&self, peer: impl Into<EndpointAddr>, payload: &[u8]) -> anyhow::Result<()> {
        let peer = peer.into();
        let conn = self.endpoint.connect(peer, P2P_ALPN).await?;

        let mut send = conn.open_uni().await.map_err(|e| anyhow::anyhow!("{e}"))?;

        // Simple framing: 4-byte big-endian length prefix + payload.
        let len = (payload.len() as u32).to_be_bytes();
        send.write_all(&len)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        send.write_all(payload)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        send.finish().map_err(|e| anyhow::anyhow!("{e}"))?;
        // Wait until the peer has consumed the stream before dropping.
        send.stopped().await.map_err(|e| anyhow::anyhow!("{e}"))?;

        tracing::debug!(
            peer = %conn.remote_id().fmt_short(),
            bytes = payload.len(),
            "P2P message sent"
        );

        Ok(())
    }

    /// Accept a single incoming P2P message.
    ///
    /// Blocks until a peer connects and sends data.
    pub async fn recv(&self) -> anyhow::Result<P2pMessage> {
        // `accept()` yields None when the endpoint is closed.
        let incoming = self
            .endpoint
            .accept()
            .await
            .ok_or_else(|| anyhow::anyhow!("no more incoming connections"))?;

        let conn = incoming.await.map_err(|e| anyhow::anyhow!("{e}"))?;
        let sender = conn.remote_id();

        let mut recv = conn
            .accept_uni()
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;

        // Read length-prefixed payload (4-byte big-endian length first).
        let mut len_buf = [0u8; 4];
        recv.read_exact(&mut len_buf)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        let len = u32::from_be_bytes(len_buf) as usize;

        // Reject oversized frames (5 MiB cap) before allocating the buffer.
        if len > 5 * 1024 * 1024 {
            anyhow::bail!("P2P payload too large: {len} bytes");
        }

        let mut payload = vec![0u8; len];
        recv.read_exact(&mut payload)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;

        tracing::debug!(
            peer = %sender.fmt_short(),
            bytes = len,
            "P2P message received"
        );

        Ok(P2pMessage { sender, payload })
    }

    /// Gracefully shut down the P2P node.
    pub async fn close(self) {
        self.endpoint.close().await;
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use iroh::RelayMode;

    /// Create a local-only P2P node with relays disabled (for testing).
    async fn local_node() -> P2pNode {
        let endpoint = Endpoint::builder()
            .alpns(vec![P2P_ALPN.to_vec()])
            .relay_mode(RelayMode::Disabled)
            .bind()
            .await
            .unwrap();
        P2pNode { endpoint }
    }

    #[tokio::test]
    async fn p2p_round_trip() {
        let sender = local_node().await;
        let receiver = local_node().await;

        let receiver_addr = receiver.endpoint_addr();
        let sender_id = sender.node_id();
        let payload = b"hello via P2P";

        // Receiver task: accept one message and verify payload + sender identity.
        let recv_handle = tokio::spawn(async move {
            let msg = receiver.recv().await.unwrap();
            assert_eq!(msg.payload, payload.to_vec());
            assert_eq!(msg.sender, sender_id);
        });

        // NOTE(review): sleep-based synchronisation — presumably gives the
        // receiver task time to reach accept() before dialing; may be flaky
        // on heavily loaded CI. Confirm whether an explicit ready-signal is
        // feasible.
        tokio::time::sleep(std::time::Duration::from_millis(200)).await;

        sender.send(receiver_addr, payload).await.unwrap();

        recv_handle.await.unwrap();

        // NOTE(review): another settling sleep before close — TODO confirm
        // whether close() alone is sufficient.
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        sender.close().await;
    }
}
|
||||
@@ -1,15 +0,0 @@
|
||||
[package]
name = "quicnprotochat-proto"
version = "0.1.0"
edition = "2021"
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat. No crypto, no I/O."
license = "MIT"

# build.rs invokes capnpc to generate Rust source from .capnp schemas.
build = "build.rs"

[dependencies]
# Runtime support for the generated Cap'n Proto types.
capnp = { workspace = true }

[build-dependencies]
# Schema compiler driver used by build.rs (requires the `capnp` CLI on PATH).
capnpc = { workspace = true }
|
||||
@@ -1,54 +0,0 @@
|
||||
//! Build script for quicnprotochat-proto.
|
||||
//!
|
||||
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
|
||||
//! located in the workspace-root `schemas/` directory.
|
||||
//!
|
||||
//! # Prerequisites
|
||||
//!
|
||||
//! The `capnp` CLI must be installed and on `PATH`.
|
||||
//!
|
||||
//! Debian/Ubuntu: apt-get install capnproto
|
||||
//! macOS: brew install capnp
|
||||
//! Docker: see docker/Dockerfile
|
||||
|
||||
use std::{env, path::PathBuf};
|
||||
|
||||
fn main() {
|
||||
let manifest_dir =
|
||||
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
|
||||
|
||||
// Workspace root is two levels above this crate (quicnprotochat/crates/quicnprotochat-proto).
|
||||
let workspace_root = manifest_dir
|
||||
.join("../..")
|
||||
.canonicalize()
|
||||
.expect("could not canonicalize workspace root path");
|
||||
|
||||
let schemas_dir = workspace_root.join("schemas");
|
||||
|
||||
// Re-run this build script whenever any schema file changes.
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("auth.capnp").display()
|
||||
);
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("delivery.capnp").display()
|
||||
);
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("node.capnp").display()
|
||||
);
|
||||
|
||||
capnpc::CompilerCommand::new()
|
||||
// Treat `schemas/` as the include root so that inter-schema imports
|
||||
// resolve correctly.
|
||||
.src_prefix(&schemas_dir)
|
||||
.file(schemas_dir.join("auth.capnp"))
|
||||
.file(schemas_dir.join("delivery.capnp"))
|
||||
.file(schemas_dir.join("node.capnp"))
|
||||
.run()
|
||||
.expect(
|
||||
"Cap'n Proto schema compilation failed. \
|
||||
Is `capnp` installed? (apt-get install capnproto / brew install capnp)",
|
||||
);
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
|
||||
//!
|
||||
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
|
||||
#![allow(unused_parens)]
|
||||
|
||||
//! # Design constraints
|
||||
//!
|
||||
//! This crate is intentionally restricted:
|
||||
//! - **No crypto** — key material never enters this crate.
|
||||
//! - **No I/O** — callers own transport; this crate only converts bytes ↔ types.
|
||||
//! - **No async** — pure synchronous data-layer code.
|
||||
//!
|
||||
//! # Generated code
|
||||
//!
|
||||
//! `build.rs` invokes `capnpc` at compile time and writes generated Rust source
|
||||
//! into `$OUT_DIR`. The `include!` macros below splice that code in as a module.
|
||||
|
||||
// ── Generated types ───────────────────────────────────────────────────────────
|
||||
|
||||
/// Cap'n Proto generated types for `schemas/auth.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
/// The `$OUT_DIR` file is produced by this crate's `build.rs`.
pub mod auth_capnp {
    include!(concat!(env!("OUT_DIR"), "/auth_capnp.rs"));
}

/// Cap'n Proto generated types for `schemas/delivery.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
/// The `$OUT_DIR` file is produced by this crate's `build.rs`.
pub mod delivery_capnp {
    include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
}

/// Cap'n Proto generated types for `schemas/node.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
/// The `$OUT_DIR` file is produced by this crate's `build.rs`.
pub mod node_capnp {
    include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
}
|
||||
|
||||
// ── Low-level byte ↔ message conversions ──────────────────────────────────────
|
||||
|
||||
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
|
||||
///
|
||||
/// The output includes the segment table header. For transport, the
|
||||
/// `quicnprotochat-core` frame codec prepends a 4-byte little-endian length field.
|
||||
pub fn to_bytes<A: capnp::message::Allocator>(
|
||||
msg: &capnp::message::Builder<A>,
|
||||
) -> Result<Vec<u8>, capnp::Error> {
|
||||
let mut buf = Vec::new();
|
||||
capnp::serialize::write_message(&mut buf, msg)?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
/// Deserialise unpacked wire bytes into a message with owned segments.
|
||||
///
|
||||
/// Uses `ReaderOptions::new()` (default limits: 64 MiB, 512 nesting levels).
|
||||
/// Callers that receive data from untrusted peers should consider tightening
|
||||
/// the traversal limit via `ReaderOptions::traversal_limit_in_words`.
|
||||
pub fn from_bytes(
|
||||
bytes: &[u8],
|
||||
) -> Result<capnp::message::Reader<capnp::serialize::OwnedSegments>, capnp::Error> {
|
||||
let mut cursor = std::io::Cursor::new(bytes);
|
||||
capnp::serialize::read_message(&mut cursor, capnp::message::ReaderOptions::new())
|
||||
}
|
||||
@@ -1,187 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
use serde::Deserialize;
|
||||
|
||||
/// Default QUIC listen address (all interfaces, port 7000).
pub const DEFAULT_LISTEN: &str = "0.0.0.0:7000";
/// Default directory for server state (TLS material, database).
pub const DEFAULT_DATA_DIR: &str = "data";
/// Default path to the DER-encoded TLS certificate.
pub const DEFAULT_TLS_CERT: &str = "data/server-cert.der";
/// Default path to the DER-encoded TLS private key.
pub const DEFAULT_TLS_KEY: &str = "data/server-key.der";
/// Default message-store backend ("file"; "sql" selects the database).
pub const DEFAULT_STORE_BACKEND: &str = "file";
/// Default path for the SQL store database file.
pub const DEFAULT_DB_PATH: &str = "data/quicnprotochat.db";
|
||||
|
||||
/// Raw, optional settings as parsed from the TOML config file.
///
/// Every field is `Option` so that `merge_config` can distinguish
/// "unset in file" from an explicit value; CLI arguments take precedence.
#[derive(Debug, Default, Deserialize)]
pub struct FileConfig {
    // Listen address, e.g. "0.0.0.0:7000".
    pub listen: Option<String>,
    // Directory for server state.
    pub data_dir: Option<String>,
    // TLS certificate / private key paths.
    pub tls_cert: Option<PathBuf>,
    pub tls_key: Option<PathBuf>,
    // Shared bearer token clients must present.
    pub auth_token: Option<String>,
    // Escape hatch: run without authentication (development only).
    pub allow_insecure_auth: Option<bool>,
    /// When true, enqueue does not require an identity-bound session: only a valid token is required.
    /// The server does not associate the request with a specific sender (Sealed Sender).
    #[serde(default)]
    pub sealed_sender: Option<bool>,
    // Message store backend selector ("file" or "sql").
    pub store_backend: Option<String>,
    // Database path and encryption key (used by the sql backend).
    pub db_path: Option<PathBuf>,
    pub db_key: Option<String>,
    /// Metrics HTTP listen address (e.g. "0.0.0.0:9090"). If set, /metrics is served there.
    pub metrics_listen: Option<String>,
    /// When true and metrics_listen is set, start the metrics server.
    #[serde(default)]
    pub metrics_enabled: Option<bool>,
}
|
||||
|
||||
/// Fully-resolved server configuration after merging CLI args over file
/// values over built-in defaults (see `merge_config`).
#[derive(Debug)]
pub struct EffectiveConfig {
    // Listen address (always set; falls back to DEFAULT_LISTEN).
    pub listen: String,
    // State directory (falls back to DEFAULT_DATA_DIR).
    pub data_dir: String,
    // Resolved TLS certificate / key paths.
    pub tls_cert: PathBuf,
    pub tls_key: PathBuf,
    // None means no token was configured anywhere.
    pub auth_token: Option<String>,
    // True if either CLI flag or file enabled insecure auth.
    pub allow_insecure_auth: bool,
    /// When true, enqueue does not require identity; valid token only (Sealed Sender).
    pub sealed_sender: bool,
    // Store backend name (falls back to DEFAULT_STORE_BACKEND).
    pub store_backend: String,
    // Database path and key; db_key may be empty when unset.
    pub db_path: PathBuf,
    pub db_key: String,
    /// If Some(addr), metrics server listens here (e.g. "0.0.0.0:9090").
    pub metrics_listen: Option<String>,
    /// Start metrics server only when true and metrics_listen is set.
    pub metrics_enabled: bool,
}
|
||||
|
||||
pub fn load_config(path: Option<&Path>) -> anyhow::Result<FileConfig> {
|
||||
let path = match path {
|
||||
Some(p) => PathBuf::from(p),
|
||||
None => PathBuf::from("quicnprotochat-server.toml"),
|
||||
};
|
||||
|
||||
if !path.exists() {
|
||||
return Ok(FileConfig::default());
|
||||
}
|
||||
|
||||
let contents =
|
||||
std::fs::read_to_string(&path).with_context(|| format!("read config file {path:?}"))?;
|
||||
let cfg: FileConfig =
|
||||
toml::from_str(&contents).with_context(|| format!("parse config file {path:?}"))?;
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
|
||||
let listen = if args.listen == DEFAULT_LISTEN {
|
||||
file.listen
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_LISTEN.to_string())
|
||||
} else {
|
||||
args.listen.clone()
|
||||
};
|
||||
|
||||
let data_dir = if args.data_dir == DEFAULT_DATA_DIR {
|
||||
file.data_dir
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
|
||||
} else {
|
||||
args.data_dir.clone()
|
||||
};
|
||||
|
||||
let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) {
|
||||
file.tls_cert
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
|
||||
} else {
|
||||
args.tls_cert.clone()
|
||||
};
|
||||
|
||||
let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) {
|
||||
file.tls_key
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
|
||||
} else {
|
||||
args.tls_key.clone()
|
||||
};
|
||||
|
||||
let auth_token = if args.auth_token.is_some() {
|
||||
args.auth_token.clone()
|
||||
} else {
|
||||
file.auth_token.clone()
|
||||
};
|
||||
|
||||
let allow_insecure_auth = if args.allow_insecure_auth {
|
||||
true
|
||||
} else {
|
||||
file.allow_insecure_auth.unwrap_or(false)
|
||||
};
|
||||
|
||||
let sealed_sender = args.sealed_sender || file.sealed_sender.unwrap_or(false);
|
||||
|
||||
let store_backend = if args.store_backend == DEFAULT_STORE_BACKEND {
|
||||
file.store_backend
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_STORE_BACKEND.to_string())
|
||||
} else {
|
||||
args.store_backend.clone()
|
||||
};
|
||||
|
||||
let db_path = if args.db_path == PathBuf::from(DEFAULT_DB_PATH) {
|
||||
file.db_path
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_DB_PATH))
|
||||
} else {
|
||||
args.db_path.clone()
|
||||
};
|
||||
|
||||
let db_key = if args.db_key.is_empty() {
|
||||
file.db_key.clone().unwrap_or_else(|| args.db_key.clone())
|
||||
} else {
|
||||
args.db_key.clone()
|
||||
};
|
||||
|
||||
let metrics_listen = args
|
||||
.metrics_listen
|
||||
.clone()
|
||||
.or_else(|| file.metrics_listen.clone());
|
||||
let metrics_enabled = args
|
||||
.metrics_enabled
|
||||
.or(file.metrics_enabled)
|
||||
.unwrap_or(metrics_listen.is_some());
|
||||
|
||||
EffectiveConfig {
|
||||
listen,
|
||||
data_dir,
|
||||
tls_cert,
|
||||
tls_key,
|
||||
auth_token,
|
||||
allow_insecure_auth,
|
||||
sealed_sender,
|
||||
store_backend,
|
||||
db_path,
|
||||
db_key,
|
||||
metrics_listen,
|
||||
metrics_enabled,
|
||||
}
|
||||
}
|
||||
|
||||
/// Enforces the hard requirements for production deployments.
///
/// Rejects a missing/empty auth token, the well-known dev token, an empty
/// SQLCipher key when the SQL backend is selected, and missing TLS material
/// (production never auto-generates certificates).
///
/// # Errors
/// Returns an error describing the first violated requirement.
pub fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
    let token = match effective.auth_token.as_deref() {
        Some(t) if !t.is_empty() => t,
        _ => anyhow::bail!("production requires QUICNPROTOCHAT_AUTH_TOKEN (non-empty)"),
    };
    anyhow::ensure!(
        token != "devtoken",
        "production forbids auth_token 'devtoken'; set a strong QUICNPROTOCHAT_AUTH_TOKEN"
    );
    anyhow::ensure!(
        !(effective.store_backend == "sql" && effective.db_key.is_empty()),
        "production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY"
    );
    anyhow::ensure!(
        effective.tls_cert.exists() && effective.tls_key.exists(),
        "production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"
    );
    Ok(())
}
|
||||
@@ -1,300 +0,0 @@
|
||||
//! quicnprotochat-server — unified Authentication + Delivery service.
|
||||
//!
|
||||
//! The server hosts Authentication + Delivery services over QUIC + Cap'n Proto.
|
||||
|
||||
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::Parser;
|
||||
use dashmap::DashMap;
|
||||
use opaque_ke::ServerSetup;
|
||||
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
||||
use quinn::Endpoint;
|
||||
use rand::rngs::OsRng;
|
||||
use tokio::sync::Notify;
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
mod auth;
|
||||
mod config;
|
||||
mod error_codes;
|
||||
mod metrics;
|
||||
mod node_service;
|
||||
mod sql_store;
|
||||
mod tls;
|
||||
mod storage;
|
||||
|
||||
use auth::{AuthConfig, PendingLogin, RateLimiter, SessionInfo, RATE_LIMIT_MAX_PER_SEC};
|
||||
use config::{
|
||||
load_config, merge_config, validate_production_config, DEFAULT_DATA_DIR, DEFAULT_DB_PATH,
|
||||
DEFAULT_LISTEN, DEFAULT_STORE_BACKEND, DEFAULT_TLS_CERT, DEFAULT_TLS_KEY,
|
||||
};
|
||||
use node_service::{handle_node_connection, spawn_cleanup_task};
|
||||
use sql_store::SqlStore;
|
||||
use storage::{FileBackedStore, Store};
|
||||
use tls::build_server_config;
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
// CLI argument definition. NOTE: the `///` doc comments on the fields below
// double as clap's --help text (runtime-visible output), so they are left
// exactly as-is; review notes use `//` comments, which clap ignores.
// Every flag can also be supplied via the QUICNPROTOCHAT_* env var named in
// its `env` attribute; see merge_config for how these combine with the
// optional TOML config file.
#[derive(Debug, Parser)]
#[command(
    name = "quicnprotochat-server",
    about = "quicnprotochat Delivery Service + Authentication Service",
    version
)]
struct Args {
    /// Optional path to a TOML config file (fields map to CLI flags).
    #[arg(long, env = "QUICNPROTOCHAT_CONFIG")]
    config: Option<PathBuf>,

    /// QUIC listen address (host:port).
    #[arg(long, default_value = DEFAULT_LISTEN, env = "QUICNPROTOCHAT_LISTEN")]
    listen: String,

    /// Directory for persisted server data (KeyPackages + delivery queues).
    #[arg(long, default_value = DEFAULT_DATA_DIR, env = "QUICNPROTOCHAT_DATA_DIR")]
    data_dir: String,

    /// TLS certificate path (generated automatically if missing).
    #[arg(long, default_value = DEFAULT_TLS_CERT, env = "QUICNPROTOCHAT_TLS_CERT")]
    tls_cert: PathBuf,

    /// TLS private key path (generated automatically if missing).
    #[arg(long, default_value = DEFAULT_TLS_KEY, env = "QUICNPROTOCHAT_TLS_KEY")]
    tls_key: PathBuf,

    /// Required bearer token for auth.version=1 requests. Use --allow-insecure-auth to run without it (dev only).
    #[arg(long, env = "QUICNPROTOCHAT_AUTH_TOKEN")]
    auth_token: Option<String>,

    /// Allow running without QUICNPROTOCHAT_AUTH_TOKEN (development only).
    #[arg(long, env = "QUICNPROTOCHAT_ALLOW_INSECURE_AUTH", default_value_t = false)]
    allow_insecure_auth: bool,

    /// Enable Sealed Sender: enqueue does not require identity-bound session, only a valid token.
    #[arg(long, env = "QUICNPROTOCHAT_SEALED_SENDER", default_value_t = false)]
    sealed_sender: bool,

    /// Storage backend: "file" (bincode) or "sql" (SQLCipher-encrypted).
    #[arg(long, default_value = DEFAULT_STORE_BACKEND, env = "QUICNPROTOCHAT_STORE_BACKEND")]
    store_backend: String,

    /// Path to the SQLCipher database file (only used when --store-backend=sql).
    #[arg(long, default_value = DEFAULT_DB_PATH, env = "QUICNPROTOCHAT_DB_PATH")]
    db_path: PathBuf,

    /// SQLCipher encryption key. Empty string disables encryption.
    #[arg(long, default_value = "", env = "QUICNPROTOCHAT_DB_KEY")]
    db_key: String,

    /// Metrics HTTP listen address (e.g. 0.0.0.0:9090). If set and metrics enabled, /metrics is served.
    #[arg(long, env = "QUICNPROTOCHAT_METRICS_LISTEN")]
    metrics_listen: Option<String>,

    /// Enable metrics server when metrics_listen is set.
    // `Option<bool>` (not bool) so merge_config can distinguish "not given"
    // from an explicit false; see the merge/defaulting logic there.
    #[arg(long, env = "QUICNPROTOCHAT_METRICS_ENABLED")]
    metrics_enabled: Option<bool>,
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::main]
/// Server entry point: loads and merges configuration, enforces production
/// hardening, optionally starts the Prometheus exporter, opens the chosen
/// storage backend, loads/creates the OPAQUE ServerSetup, and then runs the
/// QUIC accept loop until Ctrl-C.
async fn main() -> anyhow::Result<()> {
    // rustls requires a process-wide crypto provider; ignore "already installed".
    let _ = rustls::crypto::ring::default_provider().install_default();

    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();

    let args = Args::parse();
    let file_cfg = load_config(args.config.as_deref())?;
    let effective = merge_config(&args, &file_cfg);

    // Production mode is controlled by an env flag and tightens validation below.
    let production = std::env::var("QUICNPROTOCHAT_PRODUCTION")
        .map(|v| matches!(v.to_lowercase().as_str(), "1" | "true" | "yes"))
        .unwrap_or(false);
    if production {
        validate_production_config(&effective)?;
    }

    // Optional metrics server: only start when metrics_enabled and metrics_listen are set.
    if effective.metrics_enabled {
        if let Some(addr_str) = &effective.metrics_listen {
            let addr: std::net::SocketAddr = addr_str
                .parse()
                .context("metrics_listen must be host:port (e.g. 0.0.0.0:9090)")?;
            metrics_exporter_prometheus::PrometheusBuilder::new()
                .with_http_listener(addr)
                .install()
                .context("failed to install Prometheus metrics exporter")?;
            tracing::info!(addr = %addr_str, "metrics server listening on /metrics");
        }
    }

    // In non-production, require an explicit opt-out before running without a static token.
    if !production
        && effective
            .auth_token
            .as_deref()
            .map(|s| s.is_empty())
            .unwrap_or(true)
        && !effective.allow_insecure_auth
    {
        anyhow::bail!(
            "missing QUICNPROTOCHAT_AUTH_TOKEN; set one or pass --allow-insecure-auth for development"
        );
    }

    // Loudly warn when the insecure escape hatch is actually in effect.
    if effective.allow_insecure_auth
        && effective
            .auth_token
            .as_deref()
            .map(|s| s.is_empty())
            .unwrap_or(true)
    {
        tracing::warn!("running without QUICNPROTOCHAT_AUTH_TOKEN (allow-insecure-auth enabled); development only");
    }

    let listen: SocketAddr = effective
        .listen
        .parse()
        .context("--listen must be host:port")?;

    let server_config = build_server_config(&effective.tls_cert, &effective.tls_key, production)
        .context("failed to build TLS/QUIC server config")?;

    // Shared storage — persisted to disk for restart safety.
    let store: Arc<dyn Store> = match effective.store_backend.as_str() {
        "sql" => {
            if let Some(parent) = effective.db_path.parent() {
                std::fs::create_dir_all(parent).context("create db dir")?;
            }
            tracing::info!(
                path = %effective.db_path.display(),
                encrypted = !effective.db_key.is_empty(),
                "opening SQLCipher store"
            );
            if effective.db_key.is_empty() {
                tracing::warn!("db_key is empty; SQL store will be plaintext (development only)");
            }
            Arc::new(SqlStore::open(&effective.db_path, &effective.db_key)?)
        }
        // Any unrecognized backend name falls through to the file store.
        "file" | _ => {
            tracing::info!(dir = %effective.data_dir, "opening file-backed store");
            Arc::new(FileBackedStore::open(&effective.data_dir)?)
        }
    };

    let auth_cfg = Arc::new(AuthConfig::new(
        effective.auth_token.clone(),
        effective.allow_insecure_auth,
    ));
    // Per-recipient Notify handles used to wake long-polling fetchWait calls.
    let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = Arc::new(DashMap::new());

    // OPAQUE ServerSetup: load from storage or generate fresh. Persisting it
    // keeps registered credentials valid across restarts.
    let opaque_setup: Arc<ServerSetup<OpaqueSuite>> = match store.get_server_setup() {
        Ok(Some(bytes)) => {
            let setup = ServerSetup::<OpaqueSuite>::deserialize(&bytes)
                .map_err(|e| anyhow::anyhow!("corrupt OPAQUE server setup: {e}"))?;
            tracing::info!("loaded persisted OPAQUE ServerSetup");
            Arc::new(setup)
        }
        Ok(None) => {
            let setup = ServerSetup::<OpaqueSuite>::new(&mut OsRng);
            let bytes = setup.serialize().to_vec();
            store
                .store_server_setup(bytes)
                .context("persist OPAQUE ServerSetup")?;
            tracing::info!("generated and persisted new OPAQUE ServerSetup");
            Arc::new(setup)
        }
        Err(e) => return Err(anyhow::anyhow!("load OPAQUE server setup: {e}")),
    };

    let pending_logins: Arc<DashMap<String, PendingLogin>> = Arc::new(DashMap::new());
    let sessions: Arc<DashMap<Vec<u8>, SessionInfo>> = Arc::new(DashMap::new());
    let rate_limiter: Arc<RateLimiter> = Arc::new(governor::RateLimiter::keyed(
        governor::Quota::per_second(RATE_LIMIT_MAX_PER_SEC),
    ));

    // Background cleanup task (expire sessions, pending logins, and stale messages).
    // Governor's DashMapStateStore handles rate-limit cleanup automatically.
    spawn_cleanup_task(
        Arc::clone(&sessions),
        Arc::clone(&pending_logins),
        Arc::clone(&store),
    );

    let endpoint = Endpoint::server(server_config, listen)?;

    tracing::info!(
        addr = %effective.listen,
        "accepting QUIC connections"
    );

    // capnp-rpc is !Send (Rc internals), so all RPC tasks must stay on a LocalSet.
    let local = LocalSet::new();
    local
        .run_until(async move {
            loop {
                tokio::select! {
                    // `biased` gives the accept arm priority over the shutdown arm.
                    biased;

                    incoming = endpoint.accept() => {
                        // `None` means the endpoint was closed — exit the loop.
                        let incoming = match incoming {
                            Some(i) => i,
                            None => break,
                        };

                        let connecting = match incoming.accept() {
                            Ok(c) => c,
                            Err(e) => {
                                tracing::warn!(error = %e, "failed to accept incoming connection");
                                continue;
                            }
                        };

                        // Clone shared state for the per-connection task.
                        let store = Arc::clone(&store);
                        let waiters = Arc::clone(&waiters);
                        let auth_cfg = Arc::clone(&auth_cfg);
                        let opaque_setup = Arc::clone(&opaque_setup);
                        let pending_logins = Arc::clone(&pending_logins);
                        let sessions = Arc::clone(&sessions);
                        let rate_limiter = Arc::clone(&rate_limiter);
                        let sealed_sender = effective.sealed_sender;

                        // spawn_local (not spawn): the RPC future is !Send.
                        tokio::task::spawn_local(async move {
                            if let Err(e) = handle_node_connection(
                                connecting,
                                store,
                                waiters,
                                auth_cfg,
                                opaque_setup,
                                pending_logins,
                                sessions,
                                rate_limiter,
                                sealed_sender,
                            )
                            .await
                            {
                                tracing::warn!(error = %e, "connection error");
                            }
                        });
                    }

                    _ = tokio::signal::ctrl_c() => {
                        tracing::info!("shutdown signal received, draining QUIC connections");
                        endpoint.close(0u32.into(), b"server shutdown");
                        break;
                    }
                }
            }

            Ok::<(), anyhow::Error>(())
        })
        .await?;

    Ok(())
}
|
||||
@@ -1,318 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use capnp::capability::Promise;
|
||||
use dashmap::DashMap;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
use tokio::sync::Notify;
|
||||
use tokio::time::timeout;
|
||||
|
||||
use crate::auth::{
|
||||
check_rate_limit, coded_error, fmt_hex, require_identity_or_request, validate_auth_context,
|
||||
};
|
||||
use crate::error_codes::*;
|
||||
use crate::metrics;
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
use super::{NodeServiceImpl, CURRENT_WIRE_VERSION};
|
||||
|
||||
// Audit events here must not include secrets: no payload content, no full recipient/token bytes (prefix only).
|
||||
|
||||
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
|
||||
const MAX_QUEUE_DEPTH: usize = 1000;
|
||||
|
||||
/// Maps a storage-layer error into a Cap'n Proto error tagged with E009.
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
|
||||
|
||||
pub fn fill_payloads_wait(
|
||||
results: &mut node_service::FetchWaitResults,
|
||||
messages: Vec<(u64, Vec<u8>)>,
|
||||
) {
|
||||
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||
for (i, (seq, data)) in messages.iter().enumerate() {
|
||||
let mut entry = list.reborrow().get(i as u32);
|
||||
entry.set_seq(*seq);
|
||||
entry.set_data(data);
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeServiceImpl {
|
||||
    /// RPC: enqueue one payload into a recipient's delivery queue.
    ///
    /// Validation order (each failure short-circuits with a coded error):
    /// auth context → key length (exactly 32 bytes) → payload non-empty and
    /// under `MAX_PAYLOAD_BYTES` → wire version → per-token rate limit →
    /// (unless Sealed Sender) identity binding → queue-depth cap. On success
    /// the assigned sequence number is written to `results` and any blocked
    /// `fetchWait` waiters for this recipient are notified.
    pub fn handle_enqueue(
        &mut self,
        params: node_service::EnqueueParams,
        mut results: node_service::EnqueueResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let recipient_key = match p.get_recipient_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let payload = match p.get_payload() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // A missing/unreadable channel id is treated as the default (empty) channel.
        let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
        let version = p.get_version();
        let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        };

        if recipient_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
            ));
        }
        if payload.is_empty() {
            return Promise::err(coded_error(E005_PAYLOAD_EMPTY, "payload must not be empty"));
        }
        if payload.len() > MAX_PAYLOAD_BYTES {
            return Promise::err(coded_error(
                E006_PAYLOAD_TOO_LARGE,
                format!("payload exceeds max size ({} bytes)", MAX_PAYLOAD_BYTES),
            ));
        }
        if version != CURRENT_WIRE_VERSION {
            return Promise::err(coded_error(
                E012_WIRE_VERSION,
                format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
            ));
        }

        if let Err(e) = check_rate_limit(&self.rate_limiter, &auth_ctx.token) {
            // Audit: rate limit hit — do not log token or identity.
            tracing::warn!("rate_limit_hit");
            metrics::record_rate_limit_hit_total();
            return Promise::err(e);
        }

        // When sealed_sender is true, enqueue does not require identity; valid token only.
        if !self.sealed_sender {
            if let Err(e) = require_identity_or_request(
                &auth_ctx,
                &recipient_key,
                self.auth_cfg.allow_insecure_identity_from_request,
            ) {
                return Promise::err(e);
            }
        }

        // Reject before writing if the recipient's queue is already at capacity.
        match self.store.queue_depth(&recipient_key, &channel_id) {
            Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
                return Promise::err(coded_error(
                    E015_QUEUE_FULL,
                    format!("queue depth {} exceeds limit {}", depth, MAX_QUEUE_DEPTH),
                ));
            }
            Err(e) => return Promise::err(storage_err(e)),
            _ => {}
        }

        // `payload` is moved into the store below; capture its length first.
        let payload_len = payload.len();
        let seq = match self
            .store
            .enqueue(&recipient_key, &channel_id, payload)
            .map_err(storage_err)
        {
            Ok(seq) => seq,
            Err(e) => return Promise::err(e),
        };

        results.get().set_seq(seq);

        // Metrics and audit. Audit events must not include secrets (no payload, no full keys).
        metrics::record_enqueue_total();
        metrics::record_enqueue_bytes(payload_len as u64);
        if let Ok(depth) = self.store.queue_depth(&recipient_key, &channel_id) {
            metrics::record_delivery_queue_depth(depth);
        }
        tracing::info!(
            recipient_prefix = %fmt_hex(&recipient_key[..4]),
            payload_len = payload_len,
            seq = seq,
            "audit: enqueue"
        );

        // Wake any fetchWait long-pollers blocked on this recipient.
        crate::auth::waiter(&self.waiters, &recipient_key).notify_waiters();

        Promise::ok(())
    }
|
||||
|
||||
    /// RPC: fetch queued payloads for a recipient (up to `limit`, or all when
    /// `limit == 0`).
    ///
    /// Unlike enqueue, identity binding is always required here: only the
    /// recipient identity may read its own queue. Auth is mandatory even
    /// though the auth field is parsed via an optional chain below.
    pub fn handle_fetch(
        &mut self,
        params: node_service::FetchParams,
        mut results: node_service::FetchResults,
    ) -> Promise<(), capnp::Error> {
        let recipient_key = match params.get() {
            Ok(p) => match p.get_recipient_key() {
                Ok(v) => v.to_vec(),
                Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
            },
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // Optional fields fall back to defaults when absent or unreadable.
        let channel_id = params
            .get()
            .ok()
            .and_then(|p| p.get_channel_id().ok())
            .map(|c| c.to_vec())
            .unwrap_or_default();
        let version = params
            .get()
            .ok()
            .map(|p| p.get_version())
            .unwrap_or(CURRENT_WIRE_VERSION);
        // limit == 0 means "no limit" (drain everything available).
        let limit = params.get().ok().map(|p| p.get_limit()).unwrap_or(0);
        // `transpose` turns Option<Result<_>> into Result<Option<_>> so an
        // auth failure propagates while a missing params struct yields None.
        let auth_ctx = match params
            .get()
            .ok()
            .map(|p| validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()))
            .transpose()
        {
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        };

        if recipient_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
            ));
        }
        if version != CURRENT_WIRE_VERSION {
            return Promise::err(coded_error(
                E012_WIRE_VERSION,
                format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
            ));
        }

        // Auth is required: reject if the optional chain above produced None.
        let auth_ctx = match auth_ctx {
            Some(ctx) => ctx,
            None => return Promise::err(coded_error(E003_INVALID_TOKEN, "auth required")),
        };

        if let Err(e) = require_identity_or_request(
            &auth_ctx,
            &recipient_key,
            self.auth_cfg.allow_insecure_identity_from_request,
        ) {
            return Promise::err(e);
        }

        let messages = if limit > 0 {
            match self
                .store
                .fetch_limited(&recipient_key, &channel_id, limit as usize)
                .map_err(storage_err)
            {
                Ok(m) => m,
                Err(e) => return Promise::err(e),
            }
        } else {
            match self
                .store
                .fetch(&recipient_key, &channel_id)
                .map_err(storage_err)
            {
                Ok(m) => m,
                Err(e) => return Promise::err(e),
            }
        };

        // Audit: fetch — do not log payload or full keys.
        metrics::record_fetch_total();
        tracing::info!(
            recipient_prefix = %fmt_hex(&recipient_key[..4]),
            count = messages.len(),
            "audit: fetch"
        );

        // Copy (seq, data) pairs into the capnp results list.
        let mut list = results.get().init_payloads(messages.len() as u32);
        for (i, (seq, data)) in messages.iter().enumerate() {
            let mut entry = list.reborrow().get(i as u32);
            entry.set_seq(*seq);
            entry.set_data(data);
        }

        Promise::ok(())
    }
|
||||
|
||||
    /// RPC: long-polling fetch.
    ///
    /// Same validation as `handle_fetch`. When no messages are queued and
    /// `timeout_ms > 0`, blocks until either an enqueue for this recipient
    /// fires the shared `Notify` or the timeout elapses, then fetches again
    /// (the second fetch may still legitimately return nothing).
    pub fn handle_fetch_wait(
        &mut self,
        params: node_service::FetchWaitParams,
        mut results: node_service::FetchWaitResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let recipient_key = match p.get_recipient_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // Missing channel id is treated as the default (empty) channel.
        let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
        let version = p.get_version();
        let timeout_ms = p.get_timeout_ms();
        let limit = p.get_limit();
        let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        };

        if recipient_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
            ));
        }
        if version != CURRENT_WIRE_VERSION {
            return Promise::err(coded_error(
                E012_WIRE_VERSION,
                format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
            ));
        }

        // Only the recipient identity may read its own queue.
        if let Err(e) = require_identity_or_request(
            &auth_ctx,
            &recipient_key,
            self.auth_cfg.allow_insecure_identity_from_request,
        ) {
            return Promise::err(e);
        }

        // Clone shared handles into the async future (self cannot be captured).
        let store = Arc::clone(&self.store);
        let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = self.waiters.clone();

        Promise::from_future(async move {
            // limit == 0 means "no limit" (fetch everything).
            let fetch_fn = |s: &Arc<dyn Store>, rk: &[u8], ch: &[u8], lim: u32| -> Result<Vec<(u64, Vec<u8>)>, capnp::Error> {
                if lim > 0 {
                    s.fetch_limited(rk, ch, lim as usize).map_err(storage_err)
                } else {
                    s.fetch(rk, ch).map_err(storage_err)
                }
            };

            // First attempt: return immediately if anything is already queued.
            let messages = fetch_fn(&store, &recipient_key, &channel_id, limit)?;

            if messages.is_empty() && timeout_ms > 0 {
                // Register (or reuse) the per-recipient waiter, then wait for
                // either a notification from enqueue or the client timeout.
                let waiter = waiters
                    .entry(recipient_key.clone())
                    .or_insert_with(|| Arc::new(Notify::new()))
                    .clone();
                let _ = timeout(Duration::from_millis(timeout_ms), waiter.notified()).await;
                // Re-fetch after waking; may still be empty on timeout.
                let msgs = fetch_fn(&store, &recipient_key, &channel_id, limit)?;
                fill_payloads_wait(&mut results, msgs);
                metrics::record_fetch_wait_total();
                return Ok(());
            }

            fill_payloads_wait(&mut results, messages);
            metrics::record_fetch_wait_total();
            Ok(())
        })
    }
|
||||
}
|
||||
@@ -1,244 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use capnp_rpc::RpcSystem;
|
||||
use dashmap::DashMap;
|
||||
use opaque_ke::ServerSetup;
|
||||
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
use tokio::sync::Notify;
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
|
||||
use crate::auth::{
|
||||
current_timestamp, AuthConfig, PendingLogin, RateLimiter, SessionInfo, PENDING_LOGIN_TTL_SECS,
|
||||
};
|
||||
use crate::storage::Store;
|
||||
|
||||
mod auth_ops;
|
||||
mod delivery;
|
||||
mod key_ops;
|
||||
mod p2p_ops;
|
||||
|
||||
/// Cap'n Proto service trait implementation.
///
/// Pure delegation layer: every generated RPC method forwards to the matching
/// `handle_*` method on `NodeServiceImpl` (defined in the `auth_ops`,
/// `delivery`, `key_ops`, and `p2p_ops` submodules). No logic lives here.
impl node_service::Server for NodeServiceImpl {
    fn upload_key_package(
        &mut self,
        params: node_service::UploadKeyPackageParams,
        results: node_service::UploadKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_key_package(params, results)
    }

    fn fetch_key_package(
        &mut self,
        params: node_service::FetchKeyPackageParams,
        results: node_service::FetchKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_key_package(params, results)
    }

    fn enqueue(
        &mut self,
        params: node_service::EnqueueParams,
        results: node_service::EnqueueResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_enqueue(params, results)
    }

    fn fetch(
        &mut self,
        params: node_service::FetchParams,
        results: node_service::FetchResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch(params, results)
    }

    fn fetch_wait(
        &mut self,
        params: node_service::FetchWaitParams,
        results: node_service::FetchWaitResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_wait(params, results)
    }

    fn health(
        &mut self,
        params: node_service::HealthParams,
        results: node_service::HealthResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_health(params, results)
    }

    fn upload_hybrid_key(
        &mut self,
        params: node_service::UploadHybridKeyParams,
        results: node_service::UploadHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_hybrid_key(params, results)
    }

    fn fetch_hybrid_key(
        &mut self,
        params: node_service::FetchHybridKeyParams,
        results: node_service::FetchHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_hybrid_key(params, results)
    }

    fn opaque_login_start(
        &mut self,
        params: node_service::OpaqueLoginStartParams,
        results: node_service::OpaqueLoginStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_start(params, results)
    }

    fn opaque_register_start(
        &mut self,
        params: node_service::OpaqueRegisterStartParams,
        results: node_service::OpaqueRegisterStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_start(params, results)
    }

    fn opaque_login_finish(
        &mut self,
        params: node_service::OpaqueLoginFinishParams,
        results: node_service::OpaqueLoginFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_finish(params, results)
    }

    fn opaque_register_finish(
        &mut self,
        params: node_service::OpaqueRegisterFinishParams,
        results: node_service::OpaqueRegisterFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_finish(params, results)
    }

    fn publish_endpoint(
        &mut self,
        params: node_service::PublishEndpointParams,
        results: node_service::PublishEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_publish_endpoint(params, results)
    }

    fn resolve_endpoint(
        &mut self,
        params: node_service::ResolveEndpointParams,
        results: node_service::ResolveEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_resolve_endpoint(params, results)
    }
}
|
||||
|
||||
pub const CURRENT_WIRE_VERSION: u16 = 1;
|
||||
|
||||
/// Per-connection RPC service state.
///
/// One instance is created for each accepted QUIC connection (see
/// `handle_node_connection`); all fields are `Arc`-shared handles onto
/// process-wide state, so cloning costs are refcount bumps only.
pub struct NodeServiceImpl {
    /// Persistent storage backend (file- or SQL-backed).
    pub store: Arc<dyn Store>,
    /// Per-recipient Notify handles used to wake fetchWait long-pollers.
    pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    /// Static-token auth configuration.
    pub auth_cfg: Arc<AuthConfig>,
    /// OPAQUE server key material (persisted across restarts).
    pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    /// In-flight OPAQUE login handshakes awaiting their finish step.
    pub pending_logins: Arc<DashMap<String, PendingLogin>>,
    /// Active authenticated sessions, keyed by session token bytes.
    pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    /// Keyed (per-token) request rate limiter.
    pub rate_limiter: Arc<RateLimiter>,
    /// When true, enqueue does not require identity-bound session (Sealed Sender).
    pub sealed_sender: bool,
}
|
||||
|
||||
impl NodeServiceImpl {
    /// Constructs the per-connection service from shared process-wide handles.
    ///
    /// All `Arc` parameters are stored as-is; no I/O or validation happens here.
    pub fn new(
        store: Arc<dyn Store>,
        waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
        auth_cfg: Arc<AuthConfig>,
        opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
        pending_logins: Arc<DashMap<String, PendingLogin>>,
        sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
        rate_limiter: Arc<RateLimiter>,
        sealed_sender: bool,
    ) -> Self {
        Self {
            store,
            waiters,
            auth_cfg,
            opaque_setup,
            pending_logins,
            sessions,
            rate_limiter,
            sealed_sender,
        }
    }
}
|
||||
|
||||
/// Drives one client connection: completes the QUIC handshake, accepts the
/// single bidirectional RPC stream, and runs the Cap'n Proto RPC system over
/// it until the connection ends.
///
/// Runs on a `LocalSet` (see `main`): the capnp-rpc futures are `!Send`.
/// Returns when the RPC system finishes or any transport error occurs.
pub async fn handle_node_connection(
    connecting: quinn::Connecting,
    store: Arc<dyn Store>,
    waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    auth_cfg: Arc<AuthConfig>,
    opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    pending_logins: Arc<DashMap<String, PendingLogin>>,
    sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    rate_limiter: Arc<RateLimiter>,
    sealed_sender: bool,
) -> Result<(), anyhow::Error> {
    let connection = connecting.await?;

    tracing::info!(peer = %connection.remote_address(), "QUIC connected");

    // The protocol uses exactly one client-initiated bi stream per connection.
    let (send, recv) = connection
        .accept_bi()
        .await
        .map_err(|e| anyhow::anyhow!("failed to accept bi stream: {e}"))?;
    // Adapt tokio AsyncRead/AsyncWrite to the futures traits capnp-rpc expects.
    let (reader, writer) = (recv.compat(), send.compat_write());

    let network = capnp_rpc::twoparty::VatNetwork::new(
        reader,
        writer,
        capnp_rpc::rpc_twoparty_capnp::Side::Server,
        Default::default(),
    );

    // Bootstrap capability: a fresh per-connection NodeServiceImpl.
    let service: node_service::Client = capnp_rpc::new_client(NodeServiceImpl::new(
        store,
        waiters,
        auth_cfg,
        opaque_setup,
        pending_logins,
        sessions,
        rate_limiter,
        sealed_sender,
    ));

    // Run the RPC event loop to completion for this connection.
    RpcSystem::new(Box::new(network), Some(service.client))
        .await
        .map_err(|e| anyhow::anyhow!("NodeService RPC error: {e}"))
}
|
||||
|
||||
const MESSAGE_TTL_SECS: u64 = 7 * 24 * 60 * 60; // 7 days
|
||||
|
||||
pub fn spawn_cleanup_task(
|
||||
sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
|
||||
pending_logins: Arc<DashMap<String, PendingLogin>>,
|
||||
store: Arc<dyn Store>,
|
||||
) {
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(60));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let now = current_timestamp();
|
||||
|
||||
sessions.retain(|_, info| info.expires_at > now);
|
||||
pending_logins.retain(|_, pl| now - pl.created_at < PENDING_LOGIN_TTL_SECS);
|
||||
// Rate limit cleanup is handled automatically by governor's DashMapStateStore.
|
||||
|
||||
match store.gc_expired_messages(MESSAGE_TTL_SECS) {
|
||||
Ok(n) if n > 0 => {
|
||||
tracing::debug!(expired = n, "garbage collected expired messages")
|
||||
}
|
||||
Err(e) => tracing::warn!(error = %e, "message GC failed"),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -1,548 +0,0 @@
|
||||
//! SQLCipher-backed persistent storage.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use rusqlite::{params, Connection};
|
||||
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
/// Schema version after introducing the migration runner (existing DBs had 1).
const SCHEMA_VERSION: i32 = 3;

/// Migrations: (migration_number, SQL). Files named NNN_name.sql, applied in order when N > user_version.
// NOTE(review): the second entry is numbered 3 but ships as `002_add_seq.sql`;
// presumably version 2 was skipped deliberately — confirm the number/file
// mapping before adding a migration 4.
const MIGRATIONS: &[(i32, &str)] = &[
    (1, include_str!("../migrations/001_initial.sql")),
    (3, include_str!("../migrations/002_add_seq.sql")),
];
|
||||
|
||||
/// Runs pending migrations on an open connection: applies any migration whose number is greater
|
||||
/// than the current PRAGMA user_version, then sets user_version to SCHEMA_VERSION.
|
||||
fn run_migrations(conn: &Connection) -> Result<(), StorageError> {
|
||||
let current_version: i32 = conn
|
||||
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||
.map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
|
||||
|
||||
for (migration_num, sql) in MIGRATIONS {
|
||||
if *migration_num > current_version {
|
||||
conn.execute_batch(sql).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
}
|
||||
|
||||
conn.pragma_update(None, "user_version", SCHEMA_VERSION)
|
||||
.map_err(|e| StorageError::Db(format!("set user_version failed: {e}")))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// SQLCipher-encrypted storage backend.
///
/// All queries go through a single connection guarded by a mutex, so every
/// operation is fully serialized.
pub struct SqlStore {
    // The one shared connection; locked for the duration of each Store call.
    conn: Mutex<Connection>,
}
|
||||
|
||||
impl SqlStore {
    /// Acquire the connection mutex, mapping a poisoned lock into a
    /// `StorageError` instead of panicking.
    fn lock_conn(&self) -> Result<std::sync::MutexGuard<'_, Connection>, StorageError> {
        self.conn
            .lock()
            .map_err(|e| StorageError::Db(format!("lock poisoned: {e}")))
    }

    /// Open (or create) the database at `path`, unlock it with `key`
    /// (skipped when empty, e.g. for unencrypted test databases), apply
    /// connection pragmas, and run any pending schema migrations.
    ///
    /// Fails if the on-disk schema version is newer than this binary's
    /// `SCHEMA_VERSION` — migrations only go forward.
    pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
        let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;

        // The key pragma must be issued before any other statement touches an
        // encrypted database file.
        if !key.is_empty() {
            conn.pragma_update(None, "key", key)
                .map_err(|e| StorageError::Db(format!("PRAGMA key failed: {e}")))?;
        }

        // WAL + NORMAL is the usual durable-but-fast pairing; foreign keys
        // are opt-in per connection in SQLite.
        conn.execute_batch(
            "PRAGMA journal_mode = WAL;
             PRAGMA synchronous = NORMAL;
             PRAGMA foreign_keys = ON;",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;

        let current_version: i32 = conn
            .pragma_query_value(None, "user_version", |row| row.get(0))
            .map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;

        // Refuse databases written by a newer build: downgrade is unsupported.
        if current_version > SCHEMA_VERSION {
            return Err(StorageError::Db(format!(
                "database schema version {current_version} is newer than supported {SCHEMA_VERSION}"
            )));
        }

        run_migrations(&conn)?;

        Ok(Self {
            conn: Mutex::new(conn),
        })
    }
}
|
||||
|
||||
impl Store for SqlStore {
|
||||
fn upload_key_package(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
|
||||
params![identity_key, package],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, package_data FROM key_packages
|
||||
WHERE identity_key = ?1
|
||||
ORDER BY id ASC
|
||||
LIMIT 1",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let row = stmt
|
||||
.query_row(params![identity_key], |row| {
|
||||
Ok((row.get::<_, i64>(0)?, row.get::<_, Vec<u8>>(1)?))
|
||||
})
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
match row {
|
||||
Some((id, package)) => {
|
||||
conn.execute("DELETE FROM key_packages WHERE id = ?1", params![id])
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(Some(package))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn enqueue(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<u64, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
// Atomically get-and-increment the per-inbox sequence counter.
|
||||
// RETURNING gives us the post-update next_seq; the assigned seq is next_seq - 1.
|
||||
let seq: i64 = conn
|
||||
.query_row(
|
||||
"INSERT INTO delivery_seq_counters (recipient_key, channel_id, next_seq)
|
||||
VALUES (?1, ?2, 1)
|
||||
ON CONFLICT(recipient_key, channel_id) DO UPDATE SET next_seq = next_seq + 1
|
||||
RETURNING next_seq - 1",
|
||||
params![recipient_key, channel_id],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
conn.execute(
|
||||
"INSERT INTO deliveries (recipient_key, channel_id, seq, payload) VALUES (?1, ?2, ?3, ?4)",
|
||||
params![recipient_key, channel_id, seq, payload],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(seq as u64)
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, seq, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY seq ASC",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
.iter()
|
||||
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||
.collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, seq, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY seq ASC
|
||||
LIMIT ?3",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id, limit as i64], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
.iter()
|
||||
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||
.collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||
}
|
||||
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let count: i64 = conn
|
||||
.query_row(
|
||||
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
|
||||
params![recipient_key, channel_id],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(count as usize)
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let cutoff = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
.saturating_sub(max_age_secs);
|
||||
let deleted = conn
|
||||
.execute(
|
||||
"DELETE FROM deliveries WHERE created_at < ?1",
|
||||
params![cutoff as i64],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(deleted)
|
||||
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
|
||||
params![identity_key, hybrid_pk],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![identity_key], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
|
||||
params![setup],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT setup_data FROM server_setup WHERE id = 1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row([], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
|
||||
params![username, record],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT opaque_record FROM users WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![username], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let exists: bool = conn
|
||||
.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
|
||||
params![username],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(exists)
|
||||
}
|
||||
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
|
||||
params![username, identity_key],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![username], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
|
||||
params![identity_key, node_addr],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![identity_key], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience extension for `rusqlite::OptionalExtension`.
|
||||
trait OptionalExt<T> {
|
||||
fn optional(self) -> Result<Option<T>, rusqlite::Error>;
|
||||
}
|
||||
|
||||
impl<T> OptionalExt<T> for Result<T, rusqlite::Error> {
|
||||
fn optional(self) -> Result<Option<T>, rusqlite::Error> {
|
||||
match self {
|
||||
Ok(v) => Ok(Some(v)),
|
||||
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;

    // Fresh, unencrypted, in-memory store for fast unit tests.
    fn open_in_memory() -> SqlStore {
        SqlStore::open(":memory:", "").unwrap()
    }

    // After opening a fresh on-disk DB, PRAGMA user_version must equal
    // SCHEMA_VERSION (i.e. all migrations were applied and recorded).
    #[test]
    fn sets_user_version_after_migrate() {
        let dir = tempfile::tempdir().expect("tempdir");
        let db_path: PathBuf = dir.path().join("store.db");

        // Open and drop the store in a scope so the connection is closed
        // before we reopen the file directly.
        {
            let store = SqlStore::open(&db_path, "").expect("open store");
            let _guard = store.lock_conn().unwrap();
        }

        // No key was set, so plain rusqlite can read the file back.
        let conn = rusqlite::Connection::open(&db_path).expect("reopen db");
        let version: i32 = conn
            .pragma_query_value(None, "user_version", |row| row.get(0))
            .expect("read user_version");

        assert_eq!(version, SCHEMA_VERSION);
    }

    // KeyPackages are handed out oldest-first and each exactly once.
    #[test]
    fn key_package_fifo() {
        let store = open_in_memory();
        let identity = [1u8; 32];

        store
            .upload_key_package(&identity, b"kp1".to_vec())
            .unwrap();
        store
            .upload_key_package(&identity, b"kp2".to_vec())
            .unwrap();

        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp1".to_vec())
        );
        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp2".to_vec())
        );
        assert_eq!(store.fetch_key_package(&identity).unwrap(), None);
    }

    // enqueue assigns 0-based seqs; fetch returns everything in order and
    // drains the queue.
    #[test]
    fn delivery_round_trip() {
        let store = open_in_memory();
        let rk = [1u8; 32];
        let ch = b"channel-1";

        let seq0 = store.enqueue(&rk, ch, b"msg1".to_vec()).unwrap();
        let seq1 = store.enqueue(&rk, ch, b"msg2".to_vec()).unwrap();
        assert_eq!(seq0, 0);
        assert_eq!(seq1, 1);

        let msgs = store.fetch(&rk, ch).unwrap();
        assert_eq!(msgs, vec![(0u64, b"msg1".to_vec()), (1u64, b"msg2".to_vec())]);

        // Second fetch: queue was drained by the first.
        assert!(store.fetch(&rk, ch).unwrap().is_empty());
    }

    // fetch_limited removes only the fetched prefix; the rest stays queued.
    #[test]
    fn fetch_limited_partial_drain() {
        let store = open_in_memory();
        let rk = [5u8; 32];
        let ch = b"ch";

        store.enqueue(&rk, ch, b"a".to_vec()).unwrap();
        store.enqueue(&rk, ch, b"b".to_vec()).unwrap();
        store.enqueue(&rk, ch, b"c".to_vec()).unwrap();

        let msgs = store.fetch_limited(&rk, ch, 2).unwrap();
        assert_eq!(msgs, vec![(0u64, b"a".to_vec()), (1u64, b"b".to_vec())]);

        let remaining = store.fetch(&rk, ch).unwrap();
        assert_eq!(remaining, vec![(2u64, b"c".to_vec())]);
    }

    // queue_depth counts without draining.
    #[test]
    fn queue_depth_count() {
        let store = open_in_memory();
        let rk = [6u8; 32];
        let ch = b"ch";

        assert_eq!(store.queue_depth(&rk, ch).unwrap(), 0);
        store.enqueue(&rk, ch, b"x".to_vec()).unwrap();
        store.enqueue(&rk, ch, b"y".to_vec()).unwrap();
        assert_eq!(store.queue_depth(&rk, ch).unwrap(), 2);
    }

    // has_user_record reflects exactly the usernames stored.
    #[test]
    fn has_user_record_check() {
        let store = open_in_memory();
        assert!(!store.has_user_record("user1").unwrap());
        store
            .store_user_record("user1", b"record".to_vec())
            .unwrap();
        assert!(store.has_user_record("user1").unwrap());
        assert!(!store.has_user_record("user2").unwrap());
    }

    // Identity keys: absent until stored, then retrievable byte-for-byte.
    #[test]
    fn user_identity_key_round_trip() {
        let store = open_in_memory();
        assert!(store.get_user_identity_key("user1").unwrap().is_none());
        store
            .store_user_identity_key("user1", vec![1u8; 32])
            .unwrap();
        assert_eq!(
            store.get_user_identity_key("user1").unwrap(),
            Some(vec![1u8; 32])
        );
    }

    // Hybrid keys round-trip unchanged.
    #[test]
    fn hybrid_key_round_trip() {
        let store = open_in_memory();
        let ik = [2u8; 32];
        let pk = b"hybrid_public_key_data".to_vec();

        store.upload_hybrid_key(&ik, pk.clone()).unwrap();
        assert_eq!(store.fetch_hybrid_key(&ik).unwrap(), Some(pk));
    }

    // Queues are keyed per channel: fetching one channel must not touch
    // another channel's messages for the same recipient.
    #[test]
    fn separate_channels_isolated() {
        let store = open_in_memory();
        let rk = [4u8; 32];

        store.enqueue(&rk, b"ch-a", b"a1".to_vec()).unwrap();
        store.enqueue(&rk, b"ch-b", b"b1".to_vec()).unwrap();

        let a_msgs = store.fetch(&rk, b"ch-a").unwrap();
        assert_eq!(a_msgs, vec![(0u64, b"a1".to_vec())]);

        let b_msgs = store.fetch(&rk, b"ch-b").unwrap();
        assert_eq!(b_msgs, vec![(0u64, b"b1".to_vec())]);
    }
}
|
||||
@@ -1,494 +0,0 @@
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
fs,
|
||||
hash::Hash,
|
||||
path::{Path, PathBuf},
|
||||
sync::Mutex,
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Errors shared by all storage backends.
#[derive(thiserror::Error, Debug)]
pub enum StorageError {
    /// Filesystem or lock failure (file read/write/create, poisoned mutex).
    #[error("io error: {0}")]
    Io(String),
    /// bincode (de)serialization failure; deliberately carries no detail.
    #[error("serialization error")]
    Serde,
    /// SQL backend failure; message comes from rusqlite/SQLCipher.
    #[error("database error: {0}")]
    Db(String),
}
|
||||
|
||||
fn lock<T>(m: &Mutex<T>) -> Result<std::sync::MutexGuard<'_, T>, StorageError> {
|
||||
m.lock()
|
||||
.map_err(|e| StorageError::Io(format!("lock poisoned: {e}")))
|
||||
}
|
||||
|
||||
// ── Store trait ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
pub trait Store: Send + Sync {
    /// Add a KeyPackage to the FIFO pool for `identity_key`.
    fn upload_key_package(&self, identity_key: &[u8], package: Vec<u8>)
        -> Result<(), StorageError>;

    /// Remove and return the oldest KeyPackage for `identity_key`, if any.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Enqueue a payload and return the monotonically increasing per-inbox sequence number
    /// assigned to this message. Clients sort by seq before MLS processing.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<u64, StorageError>;

    /// Fetch and drain all queued messages, returning `(seq, payload)` pairs ordered by seq.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;

    /// Fetch up to `limit` messages without draining the entire queue (Fix 8).
    /// Returns `(seq, payload)` pairs ordered by seq.
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;

    /// Return the number of queued messages for (recipient, channel) (Fix 7).
    fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;

    /// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
    fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;

    /// Publish (upsert) the hybrid public key for an identity.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Look up the hybrid public key for an identity, if published.
    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store the OPAQUE `ServerSetup` (generated once, loaded on restart).
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError>;

    /// Load the persisted `ServerSetup`, if any.
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store an OPAQUE user record (serialized `ServerRegistration`).
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError>;

    /// Retrieve an OPAQUE user record by username.
    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Check if a user record already exists (Fix 5).
    fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;

    /// Store identity key for a user (Fix 2).
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Retrieve identity key for a user (Fix 2).
    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Publish a P2P endpoint address for an identity key.
    fn publish_endpoint(&self, identity_key: &[u8], node_addr: Vec<u8>)
        -> Result<(), StorageError>;

    /// Resolve a peer's P2P endpoint address.
    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
}
|
||||
|
||||
// ── ChannelKey ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Composite key identifying one delivery inbox: a (channel, recipient) pair.
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug)]
pub struct ChannelKey {
    pub channel_id: Vec<u8>,
    pub recipient_key: Vec<u8>,
}

// Field-order hashing, equivalent to what `#[derive(Hash)]` would produce.
// NOTE(review): kept as a manual impl — presumably intentional; confirm
// before replacing with a derive.
impl Hash for ChannelKey {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.channel_id.hash(state);
        self.recipient_key.hash(state);
    }
}
|
||||
|
||||
// ── FileBackedStore ──────────────────────────────────────────────────────────
|
||||
|
||||
/// V1 wire format: byte key -> FIFO of blobs (also used for the KeyPackage
/// pool, see `load_kp_map`).
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV1 {
    map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}

/// V2 wire format: deliveries keyed by (channel, recipient), no seq numbers.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV2 {
    map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
}

/// One queued payload plus its per-inbox sequence number.
#[derive(Serialize, Deserialize, Default, Clone)]
struct SeqEntry {
    seq: u64,
    data: Vec<u8>,
}

/// V3 delivery store: each queue entry carries a monotonic per-inbox sequence number.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV3 {
    map: HashMap<ChannelKey, VecDeque<SeqEntry>>,
    // Next seq to hand out per inbox; persisted so seqs never repeat
    // across restarts even after queues are drained.
    next_seq: HashMap<ChannelKey, u64>,
}
|
||||
|
||||
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
pub struct FileBackedStore {
    // On-disk location for each persisted map.
    kp_path: PathBuf,
    ds_path: PathBuf,
    hk_path: PathBuf,
    setup_path: PathBuf,
    users_path: PathBuf,
    identity_keys_path: PathBuf,
    // In-memory state, each map guarded by its own mutex.
    key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
    deliveries: Mutex<QueueMapV3>,
    hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    users: Mutex<HashMap<String, Vec<u8>>>,
    identity_keys: Mutex<HashMap<String, Vec<u8>>>,
    // NOTE(review): endpoints have no backing file (started empty in `open`),
    // so published endpoints are lost on restart — confirm this is intended.
    endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}
|
||||
|
||||
impl FileBackedStore {
|
||||
pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
|
||||
let dir = dir.as_ref();
|
||||
if !dir.exists() {
|
||||
fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
let kp_path = dir.join("keypackages.bin");
|
||||
let ds_path = dir.join("deliveries.bin");
|
||||
let hk_path = dir.join("hybridkeys.bin");
|
||||
let setup_path = dir.join("server_setup.bin");
|
||||
let users_path = dir.join("users.bin");
|
||||
let identity_keys_path = dir.join("identity_keys.bin");
|
||||
|
||||
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
||||
let deliveries = Mutex::new(Self::load_delivery_map_v3(&ds_path)?);
|
||||
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
||||
let users = Mutex::new(Self::load_users(&users_path)?);
|
||||
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
||||
|
||||
Ok(Self {
|
||||
kp_path,
|
||||
ds_path,
|
||||
hk_path,
|
||||
setup_path,
|
||||
users_path,
|
||||
identity_keys_path,
|
||||
key_packages,
|
||||
deliveries,
|
||||
hybrid_keys,
|
||||
users,
|
||||
identity_keys,
|
||||
endpoints: Mutex::new(HashMap::new()),
|
||||
})
|
||||
}
|
||||
|
||||
fn load_kp_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let map: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
||||
Ok(map.map)
|
||||
}
|
||||
|
||||
fn flush_kp_map(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
|
||||
) -> Result<(), StorageError> {
|
||||
let payload = QueueMapV1 { map: map.clone() };
|
||||
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
/// Load deliveries as V3. Falls back to V2 format (assigns seqs starting at 0).
|
||||
fn load_delivery_map_v3(path: &Path) -> Result<QueueMapV3, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
// Try V3 first.
|
||||
if let Ok(v3) = bincode::deserialize::<QueueMapV3>(&bytes) {
|
||||
return Ok(v3);
|
||||
}
|
||||
// Fall back to V2: assign ascending seqs starting at 0 per channel.
|
||||
let v2 = bincode::deserialize::<QueueMapV2>(&bytes)
|
||||
.map_err(|_| StorageError::Io("deliveries file: unrecognised format".into()))?;
|
||||
let mut v3 = QueueMapV3::default();
|
||||
for (key, queue) in v2.map {
|
||||
let entries: VecDeque<SeqEntry> = queue
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(i, data)| SeqEntry { seq: i as u64, data })
|
||||
.collect();
|
||||
let next = entries.len() as u64;
|
||||
v3.next_seq.insert(key.clone(), next);
|
||||
v3.map.insert(key, entries);
|
||||
}
|
||||
Ok(v3)
|
||||
}
|
||||
|
||||
fn flush_delivery_map(&self, path: &Path, map: &QueueMapV3) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_hybrid_keys(path: &Path) -> Result<HashMap<Vec<u8>, Vec<u8>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_hybrid_keys(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<Vec<u8>, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_users(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_users(&self, path: &Path, map: &HashMap<String, Vec<u8>>) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
||||
Self::load_users(path)
|
||||
}
|
||||
|
||||
fn flush_map_string_bytes(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<String, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
self.flush_users(path, map)
|
||||
}
|
||||
}
|
||||
|
||||
// Write-through persistence: most mutating calls re-serialise the affected
// in-memory map to disk before returning, so on-disk state tracks memory.
// (Exception: `publish_endpoint` — see the note there.)
impl Store for FileBackedStore {
    /// Append a one-time MLS key package to `identity_key`'s queue and
    /// persist the whole key-package map to disk.
    fn upload_key_package(
        &self,
        identity_key: &[u8],
        package: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.key_packages)?;
        map.entry(identity_key.to_vec())
            .or_default()
            .push_back(package);
        self.flush_kp_map(&self.kp_path, &*map)
    }

    /// Pop (consume) the oldest key package for `identity_key`, if any.
    ///
    /// Note: the map is flushed to disk even when nothing was popped.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let mut map = lock(&self.key_packages)?;
        let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
        self.flush_kp_map(&self.kp_path, &*map)?;
        Ok(package)
    }

    /// Queue `payload` for `(recipient_key, channel_id)` and return its
    /// sequence number. Sequence numbers are per channel/recipient pair,
    /// start at 0, and increase by one per enqueued message.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<u64, StorageError> {
        let mut inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        // Allocate the next sequence number for this channel/recipient pair.
        let seq = {
            let entry = inner.next_seq.entry(key.clone()).or_insert(0);
            let s = *entry;
            *entry = s + 1;
            s
        };
        inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
        self.flush_delivery_map(&self.ds_path, &*inner)?;
        Ok(seq)
    }

    /// Drain *all* queued messages for `(recipient_key, channel_id)`,
    /// returning `(seq, payload)` pairs in queue order.
    ///
    /// The emptied queue entry remains in the map (only its contents are
    /// drained); the updated delivery map is persisted before returning.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
        let mut inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        let messages: Vec<(u64, Vec<u8>)> = inner
            .map
            .get_mut(&key)
            .map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
            .unwrap_or_default();
        self.flush_delivery_map(&self.ds_path, &*inner)?;
        Ok(messages)
    }

    /// Like `fetch`, but drains at most `limit` messages from the front of
    /// the queue; the remainder stays queued.
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
        let mut inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        let messages: Vec<(u64, Vec<u8>)> = inner
            .map
            .get_mut(&key)
            .map(|q| {
                // Clamp so `drain` never exceeds the queue length.
                let count = limit.min(q.len());
                q.drain(..count).map(|e| (e.seq, e.data)).collect()
            })
            .unwrap_or_default();
        self.flush_delivery_map(&self.ds_path, &*inner)?;
        Ok(messages)
    }

    /// Number of messages currently queued for `(recipient_key, channel_id)`.
    /// Read-only: does not touch the disk.
    fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
        let inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        Ok(inner.map.get(&key).map(|q| q.len()).unwrap_or(0))
    }

    /// Garbage-collect expired messages: always returns 0.
    fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
        // FileBackedStore does not track timestamps per message — no-op.
        Ok(0)
    }

    /// Insert or replace the hybrid public key for `identity_key` and
    /// persist the hybrid-key map.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.hybrid_keys)?;
        map.insert(identity_key.to_vec(), hybrid_pk);
        self.flush_hybrid_keys(&self.hk_path, &*map)
    }

    /// Non-consuming lookup of the hybrid public key for `identity_key`.
    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.hybrid_keys)?;
        Ok(map.get(identity_key).cloned())
    }

    /// Write the (OPAQUE) server setup blob directly to `setup_path`,
    /// creating parent directories as needed.
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
        if let Some(parent) = self.setup_path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(&self.setup_path, setup).map_err(|e| StorageError::Io(e.to_string()))
    }

    /// Read the server setup blob; a missing or empty file yields `None`.
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
        if !self.setup_path.exists() {
            return Ok(None);
        }
        let bytes = fs::read(&self.setup_path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(None);
        }
        Ok(Some(bytes))
    }

    /// Insert or replace `username`'s record and persist the user map.
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
        let mut map = lock(&self.users)?;
        map.insert(username.to_string(), record);
        self.flush_users(&self.users_path, &*map)
    }

    /// Non-consuming lookup of `username`'s record.
    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.users)?;
        Ok(map.get(username).cloned())
    }

    /// Whether a record exists for `username`.
    fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
        let map = lock(&self.users)?;
        Ok(map.contains_key(username))
    }

    /// Insert or replace the identity key registered for `username` and
    /// persist the identity-key map.
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.identity_keys)?;
        map.insert(username.to_string(), identity_key);
        self.flush_map_string_bytes(&self.identity_keys_path, &*map)
    }

    /// Non-consuming lookup of the identity key registered for `username`.
    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.identity_keys)?;
        Ok(map.get(username).cloned())
    }

    /// Record `identity_key`'s node address — in memory only.
    ///
    /// NOTE(review): unlike the other mutators, this never flushes to disk,
    /// so endpoints do not survive a restart. Presumably intentional for
    /// ephemeral network addresses — confirm.
    fn publish_endpoint(
        &self,
        identity_key: &[u8],
        node_addr: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.endpoints)?;
        map.insert(identity_key.to_vec(), node_addr);
        Ok(())
    }

    /// Non-consuming lookup of the last published address for `identity_key`.
    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.endpoints)?;
        Ok(map.get(identity_key).cloned())
    }
}
|
||||
22
crates/quicproquo-bot/Cargo.toml
Normal file
22
crates/quicproquo-bot/Cargo.toml
Normal file
@@ -0,0 +1,22 @@
|
||||
[package]
name = "quicproquo-bot"
version = "0.1.0"
edition = "2021"
description = "Bot SDK for quicproquo — build automated agents on E2E encrypted messaging."
license = "MIT"

# Inherit lint configuration from the workspace root.
[lints]
workspace = true

[dependencies]
# Sibling workspace crates the SDK builds on (identity types, protocol,
# and the client library it wraps).
quicproquo-core = { path = "../quicproquo-core" }
quicproquo-proto = { path = "../quicproquo-proto" }
quicproquo-client = { path = "../quicproquo-client" }

# Shared external dependencies; versions are pinned in the workspace manifest.
openmls_rust_crypto = { workspace = true }
tokio = { workspace = true }
anyhow = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
hex = { workspace = true }
||||
353
crates/quicproquo-bot/src/lib.rs
Normal file
353
crates/quicproquo-bot/src/lib.rs
Normal file
@@ -0,0 +1,353 @@
|
||||
//! # quicproquo-bot — Bot SDK for E2E encrypted messaging
|
||||
//!
|
||||
//! Build automated agents that run on the quicproquo network with full MLS
|
||||
//! end-to-end encryption. The bot SDK wraps the client library into a simple
|
||||
//! polling-based API: connect, authenticate, send, receive.
|
||||
//!
|
||||
//! ## Quick start
|
||||
//!
|
||||
//! ```rust,no_run
|
||||
//! use quicproquo_bot::{Bot, BotConfig};
|
||||
//!
|
||||
//! #[tokio::main]
|
||||
//! async fn main() -> anyhow::Result<()> {
|
||||
//! let config = BotConfig::new("127.0.0.1:7000", "bot-user", "bot-password")
|
||||
//! .ca_cert("server-cert.der")
|
||||
//! .state_path("bot-state.bin");
|
||||
//!
|
||||
//! let bot = Bot::connect(config).await?;
|
||||
//!
|
||||
//! // Send a DM
|
||||
//! bot.send_dm("alice", "Hello from bot!").await?;
|
||||
//!
|
||||
//! // Poll for messages
|
||||
//! loop {
|
||||
//! for msg in bot.receive(5000).await? {
|
||||
//! println!("{}: {}", msg.sender, msg.text);
|
||||
//! if msg.text.starts_with("!echo ") {
|
||||
//! bot.send_dm(&msg.sender, &msg.text[6..]).await?;
|
||||
//! }
|
||||
//! }
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! ## Pipe mode (stdin/stdout JSON lines)
|
||||
//!
|
||||
//! The bot SDK also supports non-interactive pipe mode for shell integration:
|
||||
//!
|
||||
//! ```bash
|
||||
//! # Send via pipe
|
||||
//! echo '{"to":"alice","text":"hello"}' | qpq pipe --state bot.bin
|
||||
//!
|
||||
//! # Receive via pipe (JSON lines to stdout)
|
||||
//! qpq pipe --recv --state bot.bin
|
||||
//! ```
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
use quicproquo_client::{connect_node, init_auth, opaque_login, resolve_user, ClientAuth};
|
||||
use quicproquo_core::IdentityKeypair;
|
||||
|
||||
/// Configuration for connecting a bot to a quicproquo server.
///
/// Construct with [`BotConfig::new`] and customise via the builder-style
/// setters (`ca_cert`, `server_name`, `state_path`, `state_password`,
/// `device_id`).
///
/// NOTE(review): the derived `Debug` impl prints `password` and
/// `state_password` in clear text — avoid logging this struct, or consider
/// a manual `Debug` that redacts secrets.
#[derive(Clone, Debug)]
pub struct BotConfig {
    /// Server address (host:port).
    pub server: String,
    /// Path to the server's CA certificate (DER format).
    pub ca_cert: PathBuf,
    /// TLS server name (defaults to "localhost").
    pub server_name: String,
    /// Bot's username for OPAQUE authentication.
    pub username: String,
    /// Bot's password for OPAQUE authentication.
    pub password: String,
    /// Path to the bot's encrypted state file.
    pub state_path: PathBuf,
    /// Password for the encrypted state file (None = unencrypted).
    pub state_password: Option<String>,
    /// Device ID reported to the server.
    pub device_id: Option<String>,
}
|
||||
|
||||
impl BotConfig {
|
||||
/// Create a new bot configuration with required fields.
|
||||
pub fn new(server: &str, username: &str, password: &str) -> Self {
|
||||
Self {
|
||||
server: server.to_string(),
|
||||
ca_cert: PathBuf::from("server-cert.der"),
|
||||
server_name: "localhost".to_string(),
|
||||
username: username.to_string(),
|
||||
password: password.to_string(),
|
||||
state_path: PathBuf::from("bot-state.bin"),
|
||||
state_password: None,
|
||||
device_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the CA certificate path.
|
||||
pub fn ca_cert(mut self, path: &str) -> Self {
|
||||
self.ca_cert = PathBuf::from(path);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the TLS server name for certificate validation.
|
||||
pub fn server_name(mut self, name: &str) -> Self {
|
||||
self.server_name = name.to_string();
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the state file path.
|
||||
pub fn state_path(mut self, path: &str) -> Self {
|
||||
self.state_path = PathBuf::from(path);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the state file encryption password.
|
||||
pub fn state_password(mut self, pwd: &str) -> Self {
|
||||
self.state_password = Some(pwd.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the device ID.
|
||||
pub fn device_id(mut self, id: &str) -> Self {
|
||||
self.device_id = Some(id.to_string());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A received message from the quicproquo network.
///
/// Serializable so pipe mode can emit messages as JSON lines.
#[derive(Clone, Debug, serde::Serialize)]
pub struct Message {
    /// The sender's username.
    ///
    /// NOTE(review): currently always the placeholder `"peer"` —
    /// `Bot::receive` does not yet resolve the sender (see its TODO).
    pub sender: String,
    /// The decrypted plaintext message content (lossily converted to UTF-8;
    /// use `Bot::receive_raw` for binary payloads).
    pub text: String,
    /// Index of this message within the batch returned by `Bot::receive`.
    ///
    /// NOTE(review): despite the name, this is a local batch position, not
    /// a server-assigned sequence number — see the construction in
    /// `Bot::receive`.
    pub seq: u64,
}
|
||||
|
||||
/// A bot connected to a quicproquo server.
///
/// The bot maintains its identity and MLS group state. Each call to
/// `send_dm` or `receive` opens a fresh QUIC connection (stateless
/// reconnect pattern — same as the CLI client).
pub struct Bot {
    // Connection + credential settings captured at `connect` time.
    config: BotConfig,
    // Long-lived identity keypair, derived from the seed stored in the
    // state file (see `Bot::connect`).
    identity: Arc<IdentityKeypair>,
}
|
||||
|
||||
impl Bot {
    /// Connect to a quicproquo server and authenticate.
    ///
    /// Loads or creates an identity from the state file, connects via QUIC/TLS,
    /// and performs OPAQUE password authentication.
    ///
    /// # Errors
    /// Fails if the state file cannot be loaded or initialised, the QUIC
    /// connection cannot be established, or the OPAQUE login is rejected.
    pub async fn connect(config: BotConfig) -> anyhow::Result<Self> {
        let state = quicproquo_client::client::state::load_or_init_state(
            &config.state_path,
            config.state_password.as_deref(),
        )
        .context("load or init bot state")?;

        // The identity keypair is deterministic from the persisted seed.
        let identity = Arc::new(IdentityKeypair::from_seed(state.identity_seed));

        // Authenticate on the first connection.
        // LocalSet: the RPC client futures are driven on the current thread —
        // presumably because they are not `Send` (capnp-rpc style); confirm.
        let local = LocalSet::new();
        let cfg = config.clone();
        let id = Arc::clone(&identity);

        local
            .run_until(async {
                let client =
                    connect_node(&cfg.server, &cfg.ca_cert, &cfg.server_name).await?;

                let pk = id.public_key_bytes();
                let token = opaque_login(
                    &client,
                    &cfg.username,
                    &cfg.password,
                    &pk,
                )
                .await
                .context("OPAQUE login")?;

                // Install the auth token globally for subsequent calls.
                init_auth(ClientAuth::from_raw(token, cfg.device_id.clone()));

                tracing::info!(username = %cfg.username, server = %cfg.server, "bot authenticated");
                Ok::<(), anyhow::Error>(())
            })
            .await?;

        Ok(Self { config, identity })
    }

    /// Send a plaintext message to a peer by username.
    ///
    /// Resolves the username to an identity key, then encrypts via MLS
    /// and delivers through the server.
    pub async fn send_dm(&self, peer_username: &str, text: &str) -> anyhow::Result<()> {
        // Resolve username → identity key hex so we send to the specific peer.
        let peer_key = self
            .resolve_user(peer_username)
            .await
            .context("resolve peer username")?;
        let peer_key_hex = hex::encode(&peer_key);

        quicproquo_client::cmd_send(
            &self.config.state_path,
            &self.config.server,
            &self.config.ca_cert,
            &self.config.server_name,
            Some(&peer_key_hex),
            false,
            text,
            self.config.state_password.as_deref(),
        )
        .await
        .context("send message")?;

        Ok(())
    }

    /// Receive pending messages, waiting up to `timeout_ms` milliseconds.
    ///
    /// Returns decrypted application messages. MLS control messages (commits,
    /// welcomes) are processed internally but not returned.
    ///
    /// Note: `Message.sender` is currently a placeholder and `Message.seq`
    /// is the index within this batch, not a server sequence number.
    pub async fn receive(&self, timeout_ms: u64) -> anyhow::Result<Vec<Message>> {
        let plaintexts = quicproquo_client::receive_pending_plaintexts(
            &self.config.state_path,
            &self.config.server,
            &self.config.ca_cert,
            &self.config.server_name,
            timeout_ms,
            self.config.state_password.as_deref(),
        )
        .await?;

        let messages: Vec<Message> = plaintexts
            .into_iter()
            .enumerate()
            .map(|(i, plaintext)| Message {
                sender: "peer".to_string(), // TODO: resolve from MLS group roster
                text: String::from_utf8_lossy(&plaintext).to_string(),
                seq: i as u64,
            })
            .collect();

        Ok(messages)
    }

    /// Receive raw plaintext bytes (for binary protocols or non-UTF-8 content).
    pub async fn receive_raw(&self, timeout_ms: u64) -> anyhow::Result<Vec<Vec<u8>>> {
        quicproquo_client::receive_pending_plaintexts(
            &self.config.state_path,
            &self.config.server,
            &self.config.ca_cert,
            &self.config.server_name,
            timeout_ms,
            self.config.state_password.as_deref(),
        )
        .await
    }

    /// Resolve a username to a 32-byte identity key.
    ///
    /// # Errors
    /// Fails on connection errors or when the server does not know the user.
    pub async fn resolve_user(&self, username: &str) -> anyhow::Result<Vec<u8>> {
        // Fresh connection per call (stateless reconnect pattern).
        let local = LocalSet::new();
        let cfg = self.config.clone();
        let username = username.to_string();

        local
            .run_until(async {
                let client = connect_node(&cfg.server, &cfg.ca_cert, &cfg.server_name).await?;
                let key = resolve_user(&client, &username)
                    .await?
                    .ok_or_else(|| anyhow::anyhow!("user not found: {username}"))?;
                Ok(key)
            })
            .await
    }

    /// Get the bot's own username.
    pub fn username(&self) -> &str {
        &self.config.username
    }

    /// Get the bot's identity public key (32 bytes, Ed25519).
    pub fn identity_key(&self) -> [u8; 32] {
        self.identity.public_key_bytes()
    }

    /// Get the bot's identity key as a hex string.
    pub fn identity_key_hex(&self) -> String {
        hex::encode(self.identity.public_key_bytes())
    }
}
|
||||
|
||||
/// Read JSON commands from stdin and process them.
|
||||
///
|
||||
/// Each line should be a JSON object with:
|
||||
/// - `{"action": "send", "to": "username", "text": "message"}`
|
||||
/// - `{"action": "recv", "timeout_ms": 5000}`
|
||||
/// - `{"action": "resolve", "username": "alice"}`
|
||||
///
|
||||
/// Results are written to stdout as JSON lines.
|
||||
pub async fn run_pipe_mode(bot: &Bot) -> anyhow::Result<()> {
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
|
||||
let stdin = BufReader::new(tokio::io::stdin());
|
||||
let mut lines = stdin.lines();
|
||||
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
let line = line.trim().to_string();
|
||||
if line.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let cmd: serde_json::Value = match serde_json::from_str(&line) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
let err = serde_json::json!({"error": format!("invalid JSON: {e}")});
|
||||
println!("{err}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let action = cmd["action"].as_str().unwrap_or("");
|
||||
let result = match action {
|
||||
"send" => {
|
||||
let to = cmd["to"].as_str().unwrap_or("");
|
||||
let text = cmd["text"].as_str().unwrap_or("");
|
||||
match bot.send_dm(to, text).await {
|
||||
Ok(()) => serde_json::json!({"status": "ok", "action": "send"}),
|
||||
Err(e) => serde_json::json!({"error": format!("{e:#}")}),
|
||||
}
|
||||
}
|
||||
"recv" => {
|
||||
let timeout = cmd["timeout_ms"].as_u64().unwrap_or(5000);
|
||||
match bot.receive(timeout).await {
|
||||
Ok(msgs) => serde_json::json!({"status": "ok", "messages": msgs}),
|
||||
Err(e) => serde_json::json!({"error": format!("{e:#}")}),
|
||||
}
|
||||
}
|
||||
"resolve" => {
|
||||
let username = cmd["username"].as_str().unwrap_or("");
|
||||
match bot.resolve_user(username).await {
|
||||
Ok(key) => serde_json::json!({
|
||||
"status": "ok",
|
||||
"identity_key": hex::encode(&key),
|
||||
}),
|
||||
Err(e) => serde_json::json!({"error": format!("{e:#}")}),
|
||||
}
|
||||
}
|
||||
_ => serde_json::json!({"error": format!("unknown action: {action}")}),
|
||||
};
|
||||
|
||||
println!("{result}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
102
crates/quicproquo-client/Cargo.toml
Normal file
102
crates/quicproquo-client/Cargo.toml
Normal file
@@ -0,0 +1,102 @@
|
||||
[package]
|
||||
name = "quicproquo-client"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "CLI client for quicproquo."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "qpq"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicproquo-core = { path = "../quicproquo-core" }
|
||||
quicproquo-proto = { path = "../quicproquo-proto" }
|
||||
quicproquo-kt = { path = "../quicproquo-kt" }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
|
||||
# Serialisation + RPC
|
||||
capnp = { workspace = true }
|
||||
capnp-rpc = { workspace = true }
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE PAKE
|
||||
opaque-ke = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
anyhow = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Crypto — for fingerprint verification in fetch-key subcommand
|
||||
sha2 = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
ciborium = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
quinn = { workspace = true }
|
||||
quinn-proto = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
|
||||
# Logging
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
# CLI
|
||||
clap = { workspace = true }
|
||||
|
||||
# Local message/conversation storage
|
||||
rusqlite = { workspace = true }
|
||||
|
||||
# Hex encoding/decoding
|
||||
hex = { workspace = true }
|
||||
|
||||
# Secure password prompting (no echo)
|
||||
rpassword = "5"
|
||||
|
||||
# mDNS discovery for mesh mode (Freifunk). Only compiled with --features mesh.
|
||||
mdns-sd = { version = "0.12", optional = true }
|
||||
|
||||
# Optional P2P transport for direct node-to-node messaging.
|
||||
quicproquo-p2p = { path = "../quicproquo-p2p", optional = true }
|
||||
|
||||
# Optional TUI dependencies (Ratatui full-screen interface).
|
||||
ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] }
|
||||
crossterm = { version = "0.28", optional = true }
|
||||
|
||||
# YAML playbook parsing (only compiled with --features playbook).
|
||||
serde_yaml = { version = "0.9", optional = true }
|
||||
|
||||
# v2 SDK-based CLI (thin shell over quicproquo-sdk).
|
||||
quicproquo-sdk = { path = "../quicproquo-sdk", optional = true }
|
||||
quicproquo-rpc = { path = "../quicproquo-rpc", optional = true }
|
||||
rustyline = { workspace = true, optional = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
# Enable mesh-mode features: mDNS local peer discovery + P2P transport.
|
||||
# Build: cargo build -p quicproquo-client --features mesh
|
||||
mesh = ["dep:mdns-sd", "dep:quicproquo-p2p"]
|
||||
# Enable full-screen Ratatui TUI: cargo build -p quicproquo-client --features tui
|
||||
tui = ["dep:ratatui", "dep:crossterm"]
|
||||
# Enable playbook (scripted command execution): YAML parser + serde derives.
|
||||
# Build: cargo build -p quicproquo-client --features playbook
|
||||
playbook = ["dep:serde_yaml"]
|
||||
# v2 CLI over SDK: cargo build -p quicproquo-client --features v2
|
||||
v2 = ["dep:quicproquo-sdk", "dep:quicproquo-rpc", "dep:rustyline"]
|
||||
|
||||
[dev-dependencies]
|
||||
dashmap = { workspace = true }
|
||||
assert_cmd = "2"
|
||||
tempfile = "3"
|
||||
portpicker = "0.1"
|
||||
rand = "0.8"
|
||||
508
crates/quicproquo-client/src/client/command_engine.rs
Normal file
508
crates/quicproquo-client/src/client/command_engine.rs
Normal file
@@ -0,0 +1,508 @@
|
||||
//! Command engine: typed command enum, registry, and execution bridge.
|
||||
//!
|
||||
//! Maps every REPL slash command and lifecycle operation into a single `Command`
|
||||
//! enum with typed parameters. `CommandRegistry` parses raw input and delegates
|
||||
//! execution to the existing `cmd_*` handlers in `repl.rs`.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use quicproquo_proto::node_capnp::node_service;
|
||||
|
||||
use super::repl::{Input, SlashCommand, parse_input};
|
||||
use super::session::SessionState;
|
||||
|
||||
// ── Comparison operator for assert conditions ────────────────────────────────
|
||||
|
||||
/// Comparison operator used in playbook assertions.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum CmpOp {
    Eq,
    Ne,
    Gt,
    Lt,
    Gte,
    Lte,
}

impl CmpOp {
    /// Apply the operator: returns the truth value of `lhs <op> rhs`.
    pub fn eval(&self, lhs: usize, rhs: usize) -> bool {
        use CmpOp::*;
        match self {
            Eq => lhs == rhs,
            Ne => lhs != rhs,
            Gt => lhs > rhs,
            Lt => lhs < rhs,
            Gte => lhs >= rhs,
            Lte => lhs <= rhs,
        }
    }
}
|
||||
|
||||
// ── Assert conditions for playbook testing ───────────────────────────────────
|
||||
|
||||
/// Conditions that can be asserted in a playbook step.
///
/// Evaluated against the session by `execute_assert` (defined elsewhere in
/// this module); the comments below describe the intended meaning of each
/// variant — confirm against that function for exact semantics.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum AssertCondition {
    /// Session has an active server connection.
    Connected,
    /// Session is authenticated.
    LoggedIn,
    /// The active conversation is the one named `name`.
    InConversation { name: String },
    /// Message count compared against `count` using `op`.
    MessageCount { op: CmpOp, count: usize },
    /// Member count compared against `count` using `op`.
    MemberCount { op: CmpOp, count: usize },
    /// Free-form expression evaluated by the playbook executor.
    Custom { expression: String },
}
|
||||
|
||||
// ── Command enum ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// Every operation the client can perform, with typed parameters.
///
/// This is a superset of `SlashCommand` — it adds lifecycle operations
/// (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`, `Assert`, `SetVar`)
/// that are needed for non-interactive / playbook execution. The mirror
/// variants map 1:1 onto `SlashCommand` via `Command::to_slash`.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum Command {
    // ── Lifecycle (not in SlashCommand) ──────────────────────────────────
    // Handled by the playbook executor, not `CommandRegistry::execute`.
    Connect {
        server: String,
        ca_cert: Option<String>,
        insecure: bool,
    },
    Login {
        username: String,
        password: String,
    },
    Register {
        username: String,
        password: String,
    },
    /// Send chat text (dispatched through `repl::do_send`).
    SendMessage {
        text: String,
    },
    /// Pause execution for `duration_ms` milliseconds.
    Wait {
        duration_ms: u64,
    },
    /// Evaluate a playbook assertion against the session.
    Assert {
        condition: AssertCondition,
    },
    /// Capture a playbook variable (`name` = `value`) in the result data.
    SetVar {
        name: String,
        value: String,
    },

    // ── SlashCommand mirror ─────────────────────────────────────────────
    Help,
    Quit,
    Whoami,
    List,
    Switch { target: String },
    Dm { username: String },
    CreateGroup { name: String },
    Invite { target: String },
    Remove { target: String },
    Leave,
    Join,
    Members,
    GroupInfo,
    Rename { name: String },
    History { count: usize },

    // Mesh
    MeshPeers,
    MeshServer { addr: String },
    MeshSend { peer_id: String, message: String },
    MeshBroadcast { topic: String, message: String },
    MeshSubscribe { topic: String },
    MeshRoute,
    MeshIdentity,
    MeshStore,

    // Security / crypto
    Verify { username: String },
    UpdateKey,
    Typing,
    TypingNotify { enabled: bool },
    React { emoji: String, index: Option<usize> },
    Edit { index: usize, new_text: String },
    Delete { index: usize },
    SendFile { path: String },
    Download { index: usize },
    DeleteAccount,
    Disappear { arg: Option<String> },
    Privacy { arg: Option<String> },
    VerifyFs,
    RotateAllKeys,
    Devices,
    RegisterDevice { name: String },
    RevokeDevice { id_prefix: String },
}
|
||||
|
||||
impl Command {
|
||||
/// Convert a `Command` to a `SlashCommand` when possible.
|
||||
///
|
||||
/// Returns `None` for lifecycle commands that have no `SlashCommand`
|
||||
/// equivalent (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`,
|
||||
/// `Assert`, `SetVar`).
|
||||
pub(crate) fn to_slash(&self) -> Option<SlashCommand> {
|
||||
match self.clone() {
|
||||
// Lifecycle — no SlashCommand equivalent
|
||||
Command::Connect { .. }
|
||||
| Command::Login { .. }
|
||||
| Command::Register { .. }
|
||||
| Command::SendMessage { .. }
|
||||
| Command::Wait { .. }
|
||||
| Command::Assert { .. }
|
||||
| Command::SetVar { .. } => None,
|
||||
|
||||
// 1:1 mirror
|
||||
Command::Help => Some(SlashCommand::Help),
|
||||
Command::Quit => Some(SlashCommand::Quit),
|
||||
Command::Whoami => Some(SlashCommand::Whoami),
|
||||
Command::List => Some(SlashCommand::List),
|
||||
Command::Switch { target } => Some(SlashCommand::Switch { target }),
|
||||
Command::Dm { username } => Some(SlashCommand::Dm { username }),
|
||||
Command::CreateGroup { name } => Some(SlashCommand::CreateGroup { name }),
|
||||
Command::Invite { target } => Some(SlashCommand::Invite { target }),
|
||||
Command::Remove { target } => Some(SlashCommand::Remove { target }),
|
||||
Command::Leave => Some(SlashCommand::Leave),
|
||||
Command::Join => Some(SlashCommand::Join),
|
||||
Command::Members => Some(SlashCommand::Members),
|
||||
Command::GroupInfo => Some(SlashCommand::GroupInfo),
|
||||
Command::Rename { name } => Some(SlashCommand::Rename { name }),
|
||||
Command::History { count } => Some(SlashCommand::History { count }),
|
||||
Command::MeshPeers => Some(SlashCommand::MeshPeers),
|
||||
Command::MeshServer { addr } => Some(SlashCommand::MeshServer { addr }),
|
||||
Command::MeshSend { peer_id, message } => {
|
||||
Some(SlashCommand::MeshSend { peer_id, message })
|
||||
}
|
||||
Command::MeshBroadcast { topic, message } => {
|
||||
Some(SlashCommand::MeshBroadcast { topic, message })
|
||||
}
|
||||
Command::MeshSubscribe { topic } => Some(SlashCommand::MeshSubscribe { topic }),
|
||||
Command::MeshRoute => Some(SlashCommand::MeshRoute),
|
||||
Command::MeshIdentity => Some(SlashCommand::MeshIdentity),
|
||||
Command::MeshStore => Some(SlashCommand::MeshStore),
|
||||
Command::Verify { username } => Some(SlashCommand::Verify { username }),
|
||||
Command::UpdateKey => Some(SlashCommand::UpdateKey),
|
||||
Command::Typing => Some(SlashCommand::Typing),
|
||||
Command::TypingNotify { enabled } => Some(SlashCommand::TypingNotify { enabled }),
|
||||
Command::React { emoji, index } => Some(SlashCommand::React { emoji, index }),
|
||||
Command::Edit { index, new_text } => Some(SlashCommand::Edit { index, new_text }),
|
||||
Command::Delete { index } => Some(SlashCommand::Delete { index }),
|
||||
Command::SendFile { path } => Some(SlashCommand::SendFile { path }),
|
||||
Command::Download { index } => Some(SlashCommand::Download { index }),
|
||||
Command::DeleteAccount => Some(SlashCommand::DeleteAccount),
|
||||
Command::Disappear { arg } => Some(SlashCommand::Disappear { arg }),
|
||||
Command::Privacy { arg } => Some(SlashCommand::Privacy { arg }),
|
||||
Command::VerifyFs => Some(SlashCommand::VerifyFs),
|
||||
Command::RotateAllKeys => Some(SlashCommand::RotateAllKeys),
|
||||
Command::Devices => Some(SlashCommand::Devices),
|
||||
Command::RegisterDevice { name } => Some(SlashCommand::RegisterDevice { name }),
|
||||
Command::RevokeDevice { id_prefix } => {
|
||||
Some(SlashCommand::RevokeDevice { id_prefix })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── CommandResult ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Outcome of executing a single `Command`.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub struct CommandResult {
    pub success: bool,
    pub output: Option<String>,
    pub error: Option<String>,
    /// Structured key-value outputs for variable capture in playbooks.
    pub data: HashMap<String, String>,
}

impl CommandResult {
    /// A successful result with no output, no error, and no captured data.
    fn ok() -> Self {
        Self {
            success: true,
            output: None,
            error: None,
            data: HashMap::default(),
        }
    }

    /// A failed result carrying `msg` as its error text.
    fn err(msg: String) -> Self {
        Self {
            error: Some(msg),
            success: false,
            output: None,
            data: HashMap::default(),
        }
    }
}
|
||||
|
||||
// ── CommandRegistry ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Parses raw input into `Command` and delegates execution to the existing
/// REPL handlers.
///
/// Stateless: both methods are associated functions; all state lives in the
/// `SessionState` passed to `execute`.
pub struct CommandRegistry;

impl CommandRegistry {
    /// Parse a raw input line into a `Command`.
    ///
    /// Returns `None` for empty input. Returns `Some(Command::SendMessage)`
    /// for plain chat text. Slash commands are parsed via the existing
    /// `parse_input` function.
    pub fn parse(line: &str) -> Option<Command> {
        match parse_input(line) {
            Input::Empty => None,
            Input::ChatMessage(text) => Some(Command::SendMessage { text }),
            Input::Slash(sc) => Some(slash_to_command(sc)),
        }
    }

    /// Execute a `Command`, delegating slash commands to the existing
    /// `handle_slash` dispatch and handling lifecycle commands directly.
    ///
    /// Currently, output from `cmd_*` handlers goes to stdout (unchanged).
    /// `CommandResult` captures success/failure status; stdout capture can
    /// be added later.
    pub async fn execute(
        cmd: &Command,
        session: &mut SessionState,
        client: &node_service::Client,
    ) -> CommandResult {
        match cmd {
            Command::Wait { duration_ms } => {
                tokio::time::sleep(std::time::Duration::from_millis(*duration_ms)).await;
                CommandResult::ok()
            }
            Command::SetVar { name, value } => {
                // Variables are returned in `data` for the caller (playbook
                // executor) to capture; nothing is stored here.
                let mut result = CommandResult::ok();
                result.data.insert(name.clone(), value.clone());
                result
            }
            Command::Assert { condition } => execute_assert(condition, session),
            Command::Connect { .. } | Command::Login { .. } | Command::Register { .. } => {
                // These lifecycle commands require external context (endpoint,
                // OPAQUE state) that lives outside SessionState. The playbook
                // executor will handle them directly; calling execute() for
                // them is an error.
                CommandResult::err(
                    "lifecycle commands (connect/login/register) must be handled by the playbook executor".into(),
                )
            }
            Command::SendMessage { text } => {
                match super::repl::do_send(session, client, text).await {
                    Ok(()) => CommandResult::ok(),
                    Err(e) => CommandResult::err(format!("{e:#}")),
                }
            }
            // Quit does no work here — presumably the driving loop is
            // responsible for actually exiting; confirm against callers.
            Command::Quit => CommandResult::ok(),
            other => {
                // All remaining variants have a SlashCommand equivalent.
                if let Some(sc) = other.to_slash() {
                    match execute_slash(session, client, sc).await {
                        Ok(()) => CommandResult::ok(),
                        Err(e) => CommandResult::err(format!("{e:#}")),
                    }
                } else {
                    CommandResult::err("command has no slash equivalent".into())
                }
            }
        }
    }
}
|
||||
|
||||
// ── Conversion helpers ──────────────────────────────────────────────────────

/// Convert a `SlashCommand` into the corresponding `Command`.
///
/// Pure 1:1 mapping. The match is exhaustive (no catch-all), so adding a
/// `SlashCommand` variant forces a compile error here until it is mapped.
fn slash_to_command(sc: SlashCommand) -> Command {
    match sc {
        // Session / conversation management.
        SlashCommand::Help => Command::Help,
        SlashCommand::Quit => Command::Quit,
        SlashCommand::Whoami => Command::Whoami,
        SlashCommand::List => Command::List,
        SlashCommand::Switch { target } => Command::Switch { target },
        SlashCommand::Dm { username } => Command::Dm { username },
        SlashCommand::CreateGroup { name } => Command::CreateGroup { name },
        SlashCommand::Invite { target } => Command::Invite { target },
        SlashCommand::Remove { target } => Command::Remove { target },
        SlashCommand::Leave => Command::Leave,
        SlashCommand::Join => Command::Join,
        SlashCommand::Members => Command::Members,
        SlashCommand::GroupInfo => Command::GroupInfo,
        SlashCommand::Rename { name } => Command::Rename { name },
        SlashCommand::History { count } => Command::History { count },
        // Mesh networking commands.
        SlashCommand::MeshPeers => Command::MeshPeers,
        SlashCommand::MeshServer { addr } => Command::MeshServer { addr },
        SlashCommand::MeshSend { peer_id, message } => Command::MeshSend { peer_id, message },
        SlashCommand::MeshBroadcast { topic, message } => {
            Command::MeshBroadcast { topic, message }
        }
        SlashCommand::MeshSubscribe { topic } => Command::MeshSubscribe { topic },
        SlashCommand::MeshRoute => Command::MeshRoute,
        SlashCommand::MeshIdentity => Command::MeshIdentity,
        SlashCommand::MeshStore => Command::MeshStore,
        // Identity / key management.
        SlashCommand::Verify { username } => Command::Verify { username },
        SlashCommand::UpdateKey => Command::UpdateKey,
        // Per-message operations.
        SlashCommand::Typing => Command::Typing,
        SlashCommand::TypingNotify { enabled } => Command::TypingNotify { enabled },
        SlashCommand::React { emoji, index } => Command::React { emoji, index },
        SlashCommand::Edit { index, new_text } => Command::Edit { index, new_text },
        SlashCommand::Delete { index } => Command::Delete { index },
        SlashCommand::SendFile { path } => Command::SendFile { path },
        SlashCommand::Download { index } => Command::Download { index },
        // Account / device administration.
        SlashCommand::DeleteAccount => Command::DeleteAccount,
        SlashCommand::Disappear { arg } => Command::Disappear { arg },
        SlashCommand::Privacy { arg } => Command::Privacy { arg },
        SlashCommand::VerifyFs => Command::VerifyFs,
        SlashCommand::RotateAllKeys => Command::RotateAllKeys,
        SlashCommand::Devices => Command::Devices,
        SlashCommand::RegisterDevice { name } => Command::RegisterDevice { name },
        SlashCommand::RevokeDevice { id_prefix } => Command::RevokeDevice { id_prefix },
    }
}
|
||||
|
||||
// ── Execution helpers ───────────────────────────────────────────────────────

/// Execute a `SlashCommand` using the existing `cmd_*` handlers from `repl.rs`.
///
/// This duplicates the dispatch table from `handle_slash` but returns
/// `anyhow::Result<()>` instead of printing errors inline — the caller
/// decides how to surface errors.
async fn execute_slash(
    session: &mut SessionState,
    client: &node_service::Client,
    cmd: SlashCommand,
) -> anyhow::Result<()> {
    use super::repl::*;
    match cmd {
        SlashCommand::Help => {
            print_help();
            Ok(())
        }
        // Quit is a no-op here; exiting is the caller's responsibility.
        SlashCommand::Quit => Ok(()),
        SlashCommand::Whoami => cmd_whoami(session),
        SlashCommand::List => cmd_list(session),
        SlashCommand::Switch { target } => cmd_switch(session, &target),
        SlashCommand::Dm { username } => cmd_dm(session, client, &username).await,
        SlashCommand::CreateGroup { name } => cmd_create_group(session, &name),
        SlashCommand::Invite { target } => cmd_invite(session, client, &target).await,
        SlashCommand::Remove { target } => cmd_remove(session, client, &target).await,
        SlashCommand::Leave => cmd_leave(session, client).await,
        SlashCommand::Join => cmd_join(session, client).await,
        SlashCommand::Members => cmd_members(session, client).await,
        SlashCommand::GroupInfo => cmd_group_info(session, client).await,
        SlashCommand::Rename { name } => cmd_rename(session, &name),
        SlashCommand::History { count } => cmd_history(session, count),
        SlashCommand::MeshPeers => cmd_mesh_peers(),
        SlashCommand::MeshServer { addr } => {
            // There is no in-session server-switch handler; print a hint
            // telling the user how to reconnect instead.
            super::display::print_status(&format!(
                "mesh server hint: reconnect with --server {addr} to use this node"
            ));
            Ok(())
        }
        SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(&peer_id, &message),
        SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(&topic, &message),
        SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(&topic),
        SlashCommand::MeshRoute => cmd_mesh_route(session),
        SlashCommand::MeshIdentity => cmd_mesh_identity(session),
        SlashCommand::MeshStore => cmd_mesh_store(session),
        SlashCommand::Verify { username } => cmd_verify(session, client, &username).await,
        SlashCommand::UpdateKey => cmd_update_key(session, client).await,
        SlashCommand::Typing => cmd_typing(session, client).await,
        SlashCommand::TypingNotify { enabled } => {
            // Session-local toggle; no handler or server round-trip needed.
            session.typing_notify_enabled = enabled;
            super::display::print_status(&format!(
                "typing notifications {}",
                if enabled { "enabled" } else { "disabled" }
            ));
            Ok(())
        }
        SlashCommand::React { emoji, index } => cmd_react(session, client, &emoji, index).await,
        SlashCommand::Edit { index, new_text } => {
            cmd_edit(session, client, index, &new_text).await
        }
        SlashCommand::Delete { index } => cmd_delete(session, client, index).await,
        SlashCommand::SendFile { path } => cmd_send_file(session, client, &path).await,
        SlashCommand::Download { index } => cmd_download(session, client, index).await,
        SlashCommand::DeleteAccount => cmd_delete_account(session, client).await,
        SlashCommand::Disappear { arg } => cmd_disappear(session, arg.as_deref()),
        SlashCommand::Privacy { arg } => cmd_privacy(session, arg.as_deref()),
        SlashCommand::VerifyFs => cmd_verify_fs(session),
        SlashCommand::RotateAllKeys => cmd_rotate_all_keys(session, client).await,
        SlashCommand::Devices => cmd_devices(client).await,
        SlashCommand::RegisterDevice { name } => cmd_register_device(client, &name).await,
        SlashCommand::RevokeDevice { id_prefix } => cmd_revoke_device(client, &id_prefix).await,
    }
}
|
||||
|
||||
/// Assert a condition against the current session state.
|
||||
fn execute_assert(condition: &AssertCondition, session: &SessionState) -> CommandResult {
|
||||
match condition {
|
||||
AssertCondition::Connected => {
|
||||
// We have a session => we got past connect. Always true when
|
||||
// execute() is called with a valid client reference.
|
||||
CommandResult::ok()
|
||||
}
|
||||
AssertCondition::LoggedIn => {
|
||||
let guard = crate::AUTH_CONTEXT
|
||||
.read()
|
||||
.expect("AUTH_CONTEXT poisoned");
|
||||
if guard.is_some() {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err("not logged in".into())
|
||||
}
|
||||
}
|
||||
AssertCondition::InConversation { name } => {
|
||||
if let Some(display) = session.active_display_name() {
|
||||
if display.contains(name.as_str()) {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err(format!(
|
||||
"active conversation is '{display}', expected '{name}'"
|
||||
))
|
||||
}
|
||||
} else {
|
||||
CommandResult::err("no active conversation".into())
|
||||
}
|
||||
}
|
||||
AssertCondition::MessageCount { op, count } => {
|
||||
let actual = session
|
||||
.active_conversation
|
||||
.as_ref()
|
||||
.and_then(|id| session.conv_store.load_all_messages(id).ok())
|
||||
.map(|msgs| msgs.len())
|
||||
.unwrap_or(0);
|
||||
if op.eval(actual, *count) {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err(format!(
|
||||
"message count assertion failed: {actual} {op:?} {count}"
|
||||
))
|
||||
}
|
||||
}
|
||||
AssertCondition::MemberCount { op, count } => {
|
||||
let actual = session
|
||||
.active_conversation
|
||||
.as_ref()
|
||||
.and_then(|id| session.members.get(id))
|
||||
.map(|m| m.member_identities().len())
|
||||
.unwrap_or(0);
|
||||
if op.eval(actual, *count) {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err(format!(
|
||||
"member count assertion failed: {actual} {op:?} {count}"
|
||||
))
|
||||
}
|
||||
}
|
||||
AssertCondition::Custom { expression } => {
|
||||
// Custom expressions are not evaluated yet; always pass.
|
||||
let mut result = CommandResult::ok();
|
||||
result.data.insert("expression".into(), expression.clone());
|
||||
result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,9 +5,9 @@ use opaque_ke::{
|
||||
ClientLogin, ClientLoginFinishParameters, ClientRegistration,
|
||||
ClientRegistrationFinishParameters, CredentialResponse, RegistrationResponse,
|
||||
};
|
||||
use quicnprotochat_core::{
|
||||
use quicproquo_core::{
|
||||
generate_key_package, hybrid_decrypt, hybrid_encrypt, opaque_auth::OpaqueSuite,
|
||||
HybridKeypair, IdentityKeypair,
|
||||
GroupMember, HybridKeypair, IdentityKeypair, ReceivedMessage,
|
||||
};
|
||||
|
||||
use super::{
|
||||
@@ -16,10 +16,7 @@ use super::{
|
||||
connect_node, current_timestamp_ms, enqueue, fetch_all, fetch_hybrid_key,
|
||||
fetch_key_package, fetch_wait, try_hybrid_decrypt, upload_hybrid_key, upload_key_package,
|
||||
},
|
||||
state::{
|
||||
decode_identity_key, load_existing_state, load_or_init_state, save_state, sha256,
|
||||
MemberBackend,
|
||||
},
|
||||
state::{decode_identity_key, load_existing_state, load_or_init_state, save_state, sha256},
|
||||
};
|
||||
|
||||
/// Print local identity information from the state file (no server connection).
|
||||
@@ -30,8 +27,8 @@ pub fn cmd_whoami(state_path: &Path, password: Option<&str>) -> anyhow::Result<(
|
||||
let pk_bytes = identity.public_key_bytes();
|
||||
let fingerprint = sha256(&pk_bytes);
|
||||
|
||||
println!("identity_key : {}", hex::encode(&pk_bytes));
|
||||
println!("fingerprint : {}", hex::encode(&fingerprint));
|
||||
println!("identity_key : {}", hex::encode(pk_bytes));
|
||||
println!("fingerprint : {}", hex::encode(fingerprint));
|
||||
println!(
|
||||
"hybrid_key : {}",
|
||||
if state.hybrid_key.is_some() {
|
||||
@@ -48,14 +45,6 @@ pub fn cmd_whoami(state_path: &Path, password: Option<&str>) -> anyhow::Result<(
|
||||
"none"
|
||||
}
|
||||
);
|
||||
println!(
|
||||
"pq_backend : {}",
|
||||
if state.use_pq_backend {
|
||||
"yes (MLS HPKE: X25519 + ML-KEM-768)"
|
||||
} else {
|
||||
"no (classical)"
|
||||
}
|
||||
);
|
||||
println!("state_file : {}", state_path.display());
|
||||
|
||||
Ok(())
|
||||
@@ -214,6 +203,7 @@ pub async fn cmd_register_user(
|
||||
}
|
||||
|
||||
/// Log in via the OPAQUE protocol and receive a session token.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn cmd_login(
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
@@ -321,6 +311,129 @@ fn derive_identity_for_login(
|
||||
))
|
||||
}
|
||||
|
||||
// ── OPAQUE helpers (used by both one-shot commands and REPL bootstrap) ───────
|
||||
|
||||
/// Perform OPAQUE registration. Returns Ok(()) on success.
|
||||
/// The error message contains "E018" if the user already exists.
|
||||
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
|
||||
pub(crate) async fn opaque_register(
|
||||
client: &quicproquo_proto::node_capnp::node_service::Client,
|
||||
username: &str,
|
||||
password: &str,
|
||||
identity_key: Option<&[u8]>,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
|
||||
let reg_start = ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
|
||||
|
||||
let mut req = client.opaque_register_start_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_request(®_start.message.serialize());
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_register_start RPC failed")?;
|
||||
let response_bytes = resp
|
||||
.get()
|
||||
.context("register_start: bad response")?
|
||||
.get_response()
|
||||
.context("register_start: missing response")?
|
||||
.to_vec();
|
||||
|
||||
let reg_response = RegistrationResponse::<OpaqueSuite>::deserialize(&response_bytes)
|
||||
.map_err(|e| anyhow::anyhow!("invalid registration response: {e}"))?;
|
||||
|
||||
let reg_finish = reg_start
|
||||
.state
|
||||
.finish(
|
||||
&mut rng,
|
||||
password.as_bytes(),
|
||||
reg_response,
|
||||
ClientRegistrationFinishParameters::<OpaqueSuite>::default(),
|
||||
)
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE register finish: {e}"))?;
|
||||
|
||||
let mut req = client.opaque_register_finish_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_upload(®_finish.message.serialize());
|
||||
if let Some(ik) = identity_key {
|
||||
p.set_identity_key(ik);
|
||||
} else {
|
||||
p.set_identity_key(&[]);
|
||||
}
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_register_finish RPC failed")?;
|
||||
let success = resp
|
||||
.get()
|
||||
.context("register_finish: bad response")?
|
||||
.get_success();
|
||||
|
||||
anyhow::ensure!(success, "server rejected registration");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Perform OPAQUE login and return the raw session token bytes.
|
||||
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
|
||||
pub async fn opaque_login(
|
||||
client: &quicproquo_proto::node_capnp::node_service::Client,
|
||||
username: &str,
|
||||
password: &str,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
|
||||
let login_start = ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
|
||||
|
||||
let mut req = client.opaque_login_start_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_request(&login_start.message.serialize());
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_login_start RPC failed")?;
|
||||
let response_bytes = resp
|
||||
.get()
|
||||
.context("login_start: bad response")?
|
||||
.get_response()
|
||||
.context("login_start: missing response")?
|
||||
.to_vec();
|
||||
|
||||
let credential_response = CredentialResponse::<OpaqueSuite>::deserialize(&response_bytes)
|
||||
.map_err(|e| anyhow::anyhow!("invalid credential response: {e}"))?;
|
||||
|
||||
let login_finish = login_start
|
||||
.state
|
||||
.finish(
|
||||
&mut rng,
|
||||
password.as_bytes(),
|
||||
credential_response,
|
||||
ClientLoginFinishParameters::<OpaqueSuite>::default(),
|
||||
)
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE login finish (bad password?): {e}"))?;
|
||||
|
||||
let mut req = client.opaque_login_finish_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_finalization(&login_finish.message.serialize());
|
||||
p.set_identity_key(identity_key);
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_login_finish RPC failed")?;
|
||||
let session_token = resp
|
||||
.get()
|
||||
.context("login_finish: bad response")?
|
||||
.get_session_token()
|
||||
.context("login_finish: missing session_token")?
|
||||
.to_vec();
|
||||
|
||||
anyhow::ensure!(!session_token.is_empty(), "server returned empty session token");
|
||||
Ok(session_token)
|
||||
}
|
||||
|
||||
/// Generate a KeyPackage for a fresh identity and upload it to the AS.
|
||||
pub async fn cmd_register(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
|
||||
let identity = IdentityKeypair::generate();
|
||||
@@ -376,7 +489,7 @@ async fn do_upload_keypackage(
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
password: Option<&str>,
|
||||
member: &mut MemberBackend,
|
||||
member: &mut GroupMember,
|
||||
hybrid_kp: Option<&HybridKeypair>,
|
||||
) -> anyhow::Result<()> {
|
||||
let tls_bytes = member
|
||||
@@ -410,7 +523,7 @@ async fn do_upload_keypackage(
|
||||
|
||||
anyhow::ensure!(server_fp == fingerprint, "fingerprint mismatch");
|
||||
|
||||
if let Some(ref hkp) = hybrid_kp {
|
||||
if let Some(hkp) = &hybrid_kp {
|
||||
upload_hybrid_key(
|
||||
&node_client,
|
||||
&member.identity().public_key_bytes(),
|
||||
@@ -439,9 +552,8 @@ pub async fn cmd_register_state(
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
password: Option<&str>,
|
||||
use_pq_backend: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let state = load_or_init_state(state_path, password, use_pq_backend)?;
|
||||
let state = load_or_init_state(state_path, password)?;
|
||||
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
|
||||
do_upload_keypackage(
|
||||
state_path,
|
||||
@@ -534,37 +646,15 @@ pub async fn cmd_fetch_key(
|
||||
}
|
||||
|
||||
/// Run a two-party MLS demo against the unified server.
|
||||
pub async fn cmd_demo_group(
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
use_pq_backend: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
|
||||
let creator_state_path = PathBuf::from("qpq-demo-creator.bin");
|
||||
let joiner_state_path = PathBuf::from("qpq-demo-joiner.bin");
|
||||
|
||||
let creator_state_path = PathBuf::from("quicnprotochat-demo-creator.bin");
|
||||
let joiner_state_path = PathBuf::from("quicnprotochat-demo-joiner.bin");
|
||||
|
||||
let pb = ProgressBar::new(5);
|
||||
pb.set_style(
|
||||
ProgressStyle::with_template("{spinner:.green} [{bar:40.cyan/blue}] {pos}/{len} {msg}")
|
||||
.expect("demo progress template is valid")
|
||||
.tick_chars("\u{2801}\u{2802}\u{2804}\u{2840}\u{2820}\u{2810}\u{2808} ")
|
||||
.progress_chars("=>-"),
|
||||
);
|
||||
pb.enable_steady_tick(std::time::Duration::from_millis(80));
|
||||
|
||||
pb.set_message("Generating Alice keys\u{2026}");
|
||||
let (mut creator, creator_hybrid_opt) =
|
||||
load_or_init_state(&creator_state_path, None, use_pq_backend)?.into_parts(&creator_state_path)?;
|
||||
pb.inc(1);
|
||||
|
||||
pb.set_message("Generating Bob keys\u{2026}");
|
||||
load_or_init_state(&creator_state_path, None)?.into_parts(&creator_state_path)?;
|
||||
let (mut joiner, joiner_hybrid_opt) =
|
||||
load_or_init_state(&joiner_state_path, None, use_pq_backend)?.into_parts(&joiner_state_path)?;
|
||||
pb.inc(1);
|
||||
load_or_init_state(&joiner_state_path, None)?.into_parts(&joiner_state_path)?;
|
||||
|
||||
pb.set_message("Creating group\u{2026}");
|
||||
let creator_hybrid = creator_hybrid_opt.unwrap_or_else(HybridKeypair::generate);
|
||||
let joiner_hybrid = joiner_hybrid_opt.unwrap_or_else(HybridKeypair::generate);
|
||||
|
||||
@@ -586,6 +676,8 @@ pub async fn cmd_demo_group(
|
||||
upload_hybrid_key(&creator_node, &creator_identity, &creator_hybrid.public_key()).await?;
|
||||
upload_hybrid_key(&joiner_node, &joiner_identity, &joiner_hybrid.public_key()).await?;
|
||||
|
||||
println!("hybrid public keys uploaded for creator and joiner");
|
||||
|
||||
let fetched_joiner_kp = fetch_key_package(&creator_node, &joiner_identity).await?;
|
||||
anyhow::ensure!(
|
||||
!fetched_joiner_kp.is_empty(),
|
||||
@@ -598,9 +690,7 @@ pub async fn cmd_demo_group(
|
||||
let (_commit, welcome) = creator
|
||||
.add_member(&fetched_joiner_kp)
|
||||
.context("add_member failed")?;
|
||||
pb.inc(1);
|
||||
|
||||
pb.set_message("Encrypting\u{2026}");
|
||||
let creator_ds = creator_node.clone();
|
||||
let joiner_ds = joiner_node.clone();
|
||||
|
||||
@@ -608,11 +698,9 @@ pub async fn cmd_demo_group(
|
||||
.await?
|
||||
.context("joiner hybrid key not found")?;
|
||||
let wrapped_welcome =
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &welcome).context("hybrid encrypt welcome")?;
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &welcome, b"", b"").context("hybrid encrypt welcome")?;
|
||||
enqueue(&creator_ds, &joiner_identity, &wrapped_welcome).await?;
|
||||
pb.inc(1);
|
||||
|
||||
pb.set_message("Delivering\u{2026}");
|
||||
let welcome_payloads = fetch_all(&joiner_ds, &joiner_identity).await?;
|
||||
let raw_welcome = welcome_payloads
|
||||
.first()
|
||||
@@ -620,7 +708,7 @@ pub async fn cmd_demo_group(
|
||||
.context("Welcome was not delivered to joiner via DS")?;
|
||||
|
||||
let welcome_bytes =
|
||||
hybrid_decrypt(&joiner_hybrid, &raw_welcome).context("hybrid decrypt welcome failed")?;
|
||||
hybrid_decrypt(&joiner_hybrid, &raw_welcome, b"", b"").context("hybrid decrypt welcome failed")?;
|
||||
joiner
|
||||
.join_group(&welcome_bytes)
|
||||
.context("join_group failed")?;
|
||||
@@ -629,7 +717,7 @@ pub async fn cmd_demo_group(
|
||||
.send_message(b"hello")
|
||||
.context("send_message failed")?;
|
||||
let wrapped_creator_joiner =
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &ct_creator_to_joiner).context("hybrid encrypt failed")?;
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &ct_creator_to_joiner, b"", b"").context("hybrid encrypt failed")?;
|
||||
enqueue(&creator_ds, &joiner_identity, &wrapped_creator_joiner).await?;
|
||||
|
||||
let joiner_msgs = fetch_all(&joiner_ds, &joiner_identity).await?;
|
||||
@@ -637,10 +725,15 @@ pub async fn cmd_demo_group(
|
||||
.first()
|
||||
.context("joiner: missing ciphertext from DS")?;
|
||||
let inner_creator_joiner =
|
||||
hybrid_decrypt(&joiner_hybrid, raw_creator_joiner).context("hybrid decrypt failed")?;
|
||||
let plaintext_creator_joiner = joiner
|
||||
.receive_message(&inner_creator_joiner)?
|
||||
.context("expected application message")?;
|
||||
hybrid_decrypt(&joiner_hybrid, raw_creator_joiner, b"", b"").context("hybrid decrypt failed")?;
|
||||
let plaintext_creator_joiner = match joiner.receive_message(&inner_creator_joiner)? {
|
||||
ReceivedMessage::Application(pt) => pt,
|
||||
other => anyhow::bail!("expected application message, got {other:?}"),
|
||||
};
|
||||
println!(
|
||||
"creator -> joiner plaintext: {}",
|
||||
String::from_utf8_lossy(&plaintext_creator_joiner)
|
||||
);
|
||||
|
||||
let creator_hybrid_pk = fetch_hybrid_key(&joiner_node, &creator_identity)
|
||||
.await?
|
||||
@@ -649,7 +742,7 @@ pub async fn cmd_demo_group(
|
||||
.send_message(b"hello back")
|
||||
.context("send_message failed")?;
|
||||
let wrapped_joiner_creator =
|
||||
hybrid_encrypt(&creator_hybrid_pk, &ct_joiner_to_creator).context("hybrid encrypt failed")?;
|
||||
hybrid_encrypt(&creator_hybrid_pk, &ct_joiner_to_creator, b"", b"").context("hybrid encrypt failed")?;
|
||||
enqueue(&joiner_ds, &creator_identity, &wrapped_joiner_creator).await?;
|
||||
|
||||
let creator_msgs = fetch_all(&creator_ds, &creator_identity).await?;
|
||||
@@ -657,21 +750,16 @@ pub async fn cmd_demo_group(
|
||||
.first()
|
||||
.context("creator: missing ciphertext from DS")?;
|
||||
let inner_joiner_creator =
|
||||
hybrid_decrypt(&creator_hybrid, raw_joiner_creator).context("hybrid decrypt failed")?;
|
||||
let plaintext_joiner_creator = creator
|
||||
.receive_message(&inner_joiner_creator)?
|
||||
.context("expected application message")?;
|
||||
pb.inc(1);
|
||||
|
||||
pb.finish_and_clear();
|
||||
hybrid_decrypt(&creator_hybrid, raw_joiner_creator, b"", b"").context("hybrid decrypt failed")?;
|
||||
let plaintext_joiner_creator = match creator.receive_message(&inner_joiner_creator)? {
|
||||
ReceivedMessage::Application(pt) => pt,
|
||||
other => anyhow::bail!("expected application message, got {other:?}"),
|
||||
};
|
||||
println!(
|
||||
"creator -> joiner: {}",
|
||||
String::from_utf8_lossy(&plaintext_creator_joiner)
|
||||
);
|
||||
println!(
|
||||
"joiner -> creator: {}",
|
||||
"joiner -> creator plaintext: {}",
|
||||
String::from_utf8_lossy(&plaintext_joiner_creator)
|
||||
);
|
||||
|
||||
println!("demo-group complete (hybrid PQ envelope active)");
|
||||
|
||||
Ok(())
|
||||
@@ -683,9 +771,8 @@ pub async fn cmd_create_group(
|
||||
_server: &str,
|
||||
group_id: &str,
|
||||
password: Option<&str>,
|
||||
use_pq_backend: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let state = load_or_init_state(state_path, password, use_pq_backend)?;
|
||||
let state = load_or_init_state(state_path, password)?;
|
||||
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
|
||||
|
||||
anyhow::ensure!(
|
||||
@@ -740,7 +827,7 @@ pub async fn cmd_invite(
|
||||
}
|
||||
let peer_hpk = fetch_hybrid_key(&node_client, mk).await?;
|
||||
let commit_payload = if let Some(ref pk) = peer_hpk {
|
||||
hybrid_encrypt(pk, &commit).context("hybrid encrypt commit")?
|
||||
hybrid_encrypt(pk, &commit, b"", b"").context("hybrid encrypt commit")?
|
||||
} else {
|
||||
commit.clone()
|
||||
};
|
||||
@@ -749,7 +836,7 @@ pub async fn cmd_invite(
|
||||
|
||||
let peer_hybrid_pk = fetch_hybrid_key(&node_client, &peer_key).await?;
|
||||
let payload = if let Some(ref pk) = peer_hybrid_pk {
|
||||
hybrid_encrypt(pk, &welcome).context("hybrid encrypt welcome failed")?
|
||||
hybrid_encrypt(pk, &welcome, b"", b"").context("hybrid encrypt welcome failed")?
|
||||
} else {
|
||||
welcome
|
||||
};
|
||||
@@ -813,12 +900,22 @@ pub async fn cmd_join(
|
||||
let _ = member.receive_message(&mls_payload);
|
||||
}
|
||||
|
||||
// Auto-replenish KeyPackage after join consumed the original one.
|
||||
let tls_bytes = member
|
||||
.generate_key_package()
|
||||
.context("KeyPackage replenishment failed")?;
|
||||
upload_key_package(&node_client, &member.identity().public_key_bytes(), &tls_bytes)
|
||||
.await
|
||||
.context("KeyPackage replenishment upload failed")?;
|
||||
println!("KeyPackage auto-replenished after join");
|
||||
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
println!("joined group successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Send an application message via DS (single recipient or broadcast to all other members).
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn cmd_send(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
@@ -859,7 +956,7 @@ pub async fn cmd_send(
|
||||
for recipient in &recipients {
|
||||
let peer_hybrid_pk = fetch_hybrid_key(&node_client, recipient).await?;
|
||||
let payload = if let Some(ref pk) = peer_hybrid_pk {
|
||||
hybrid_encrypt(pk, &ct).context("hybrid encrypt failed")?
|
||||
hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt failed")?
|
||||
} else {
|
||||
ct.clone()
|
||||
};
|
||||
@@ -889,29 +986,11 @@ pub async fn cmd_recv(
|
||||
stream: bool,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
|
||||
let state = load_existing_state(state_path, password)?;
|
||||
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
|
||||
|
||||
let client = connect_node(server, ca_cert, server_name).await?;
|
||||
|
||||
let stream_pb: Option<ProgressBar> = if stream {
|
||||
let pb = ProgressBar::new_spinner();
|
||||
pb.set_style(
|
||||
ProgressStyle::with_template("{spinner:.green} {msg}")
|
||||
.expect("recv progress template is valid")
|
||||
.tick_chars("\u{2801}\u{2802}\u{2804}\u{2840}\u{2820}\u{2810}\u{2808} "),
|
||||
);
|
||||
pb.set_message("Listening for messages (0 received)\u{2026}");
|
||||
pb.enable_steady_tick(std::time::Duration::from_millis(100));
|
||||
Some(pb)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut total_received: usize = 0;
|
||||
|
||||
loop {
|
||||
let mut payloads =
|
||||
fetch_wait(&client, &member.identity().public_key_bytes(), wait_ms).await?;
|
||||
@@ -928,68 +1007,47 @@ pub async fn cmd_recv(
|
||||
// application messages that depend on the resulting epoch.
|
||||
payloads.sort_by_key(|(seq, _)| *seq);
|
||||
|
||||
let mut retry_mls: Vec<Vec<u8>> = Vec::new();
|
||||
let mut pending: Vec<(usize, Vec<u8>)> = Vec::new();
|
||||
for (idx, (_, payload)) in payloads.iter().enumerate() {
|
||||
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
|
||||
Ok(b) => b,
|
||||
Err(e) => {
|
||||
match &stream_pb {
|
||||
Some(pb) => pb.println(format!("[{idx}] decrypt error: {e}")),
|
||||
None => println!("[{idx}] decrypt error: {e}"),
|
||||
}
|
||||
println!("[{idx}] decrypt error: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(Some(pt)) => {
|
||||
total_received += 1;
|
||||
let line = format!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt));
|
||||
match &stream_pb {
|
||||
Some(pb) => pb.println(line),
|
||||
None => println!("{line}"),
|
||||
Ok(ReceivedMessage::Application(pt)) => println!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt)),
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => println!("[{idx}] commit applied"),
|
||||
Err(_) => pending.push((idx, mls_payload)),
|
||||
}
|
||||
}
|
||||
Ok(None) => {
|
||||
let line = format!("[{idx}] commit applied");
|
||||
match &stream_pb {
|
||||
Some(pb) => pb.println(line),
|
||||
None => println!("{line}"),
|
||||
}
|
||||
}
|
||||
Err(_) => retry_mls.push(mls_payload),
|
||||
}
|
||||
}
|
||||
// Retry messages that failed on the first pass (e.g. app messages whose
|
||||
// epoch was not yet advanced until a commit earlier in the batch was applied).
|
||||
for mls_payload in &retry_mls {
|
||||
// Retry until no more progress (handles multi-epoch batches).
|
||||
loop {
|
||||
let before = pending.len();
|
||||
pending.retain(|(idx, mls_payload)| {
|
||||
match member.receive_message(mls_payload) {
|
||||
Ok(Some(pt)) => {
|
||||
total_received += 1;
|
||||
let line = format!("[retry] plaintext: {}", String::from_utf8_lossy(&pt));
|
||||
match &stream_pb {
|
||||
Some(pb) => pb.println(line),
|
||||
None => println!("{line}"),
|
||||
}
|
||||
}
|
||||
Ok(None) => {}
|
||||
Err(e) => {
|
||||
let line = format!("[retry] error: {e}");
|
||||
match &stream_pb {
|
||||
Some(pb) => pb.println(line),
|
||||
None => println!("{line}"),
|
||||
}
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
println!("[{idx}/retry] plaintext: {}", String::from_utf8_lossy(&pt));
|
||||
false
|
||||
}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
|
||||
println!("[{idx}/retry] commit applied");
|
||||
false
|
||||
}
|
||||
Err(_) => true,
|
||||
}
|
||||
});
|
||||
if pending.len() == before {
|
||||
break; // No progress — remaining messages are unprocessable
|
||||
}
|
||||
}
|
||||
for (idx, _) in &pending {
|
||||
println!("[{idx}] error: unprocessable after all retries");
|
||||
}
|
||||
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
|
||||
if let Some(ref pb) = stream_pb {
|
||||
pb.set_message(format!(
|
||||
"Listening for messages ({total_received} received)\u{2026}"
|
||||
));
|
||||
}
|
||||
|
||||
if !stream {
|
||||
return Ok(());
|
||||
}
|
||||
@@ -998,8 +1056,8 @@ pub async fn cmd_recv(
|
||||
|
||||
/// Fetch pending payloads, process in order (merge commits, collect plaintexts), save state.
|
||||
/// Returns only application-message plaintexts. Used by E2E tests and callers that need returned messages.
|
||||
/// Uses two passes so that if the server delivers an application message before a Commit, the second pass
|
||||
/// processes it after commits are merged.
|
||||
/// Retries in a loop until no more progress, handling multi-epoch batches where commits must be
|
||||
/// applied before later application messages can be decrypted.
|
||||
pub async fn receive_pending_plaintexts(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
@@ -1017,21 +1075,33 @@ pub async fn receive_pending_plaintexts(
|
||||
payloads.sort_by_key(|(seq, _)| *seq);
|
||||
|
||||
let mut plaintexts = Vec::new();
|
||||
let mut retry_mls: Vec<Vec<u8>> = Vec::new();
|
||||
let mut pending: Vec<Vec<u8>> = Vec::new();
|
||||
for (_, payload) in &payloads {
|
||||
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
|
||||
Ok(b) => b,
|
||||
Err(_) => continue,
|
||||
};
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(Some(pt)) => plaintexts.push(pt),
|
||||
Ok(None) => {}
|
||||
Err(_) => retry_mls.push(mls_payload),
|
||||
Ok(ReceivedMessage::Application(pt)) => plaintexts.push(pt),
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {}
|
||||
Err(_) => pending.push(mls_payload),
|
||||
}
|
||||
}
|
||||
for mls_payload in &retry_mls {
|
||||
if let Ok(Some(pt)) = member.receive_message(mls_payload) {
|
||||
// Retry until no more progress (handles multi-epoch batches).
|
||||
loop {
|
||||
let before = pending.len();
|
||||
pending.retain(|mls_payload| {
|
||||
match member.receive_message(mls_payload) {
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
plaintexts.push(pt);
|
||||
false
|
||||
}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => false,
|
||||
Err(_) => true,
|
||||
}
|
||||
});
|
||||
if pending.len() == before {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1047,8 +1117,8 @@ pub fn whoami_json(state_path: &Path, password: Option<&str>) -> anyhow::Result<
|
||||
let fingerprint = sha256(&pk_bytes);
|
||||
Ok(format!(
|
||||
r#"{{"identity_key":"{}", "fingerprint":"{}", "hybrid_key":{}, "group":{}}}"#,
|
||||
hex::encode(&pk_bytes),
|
||||
hex::encode(&fingerprint),
|
||||
hex::encode(pk_bytes),
|
||||
hex::encode(fingerprint),
|
||||
state.hybrid_key.is_some(),
|
||||
state.group.is_some(),
|
||||
))
|
||||
@@ -1161,7 +1231,7 @@ pub async fn cmd_chat(
|
||||
.context("send_message failed")?;
|
||||
let peer_hybrid_pk = fetch_hybrid_key(&client, &peer_key).await?;
|
||||
let payload = if let Some(ref pk) = peer_hybrid_pk {
|
||||
hybrid_encrypt(pk, &ct).context("hybrid encrypt failed")?
|
||||
hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt failed")?
|
||||
} else {
|
||||
ct
|
||||
};
|
||||
@@ -1177,21 +1247,39 @@ pub async fn cmd_chat(
|
||||
_ = poll.tick() => {
|
||||
let mut payloads = fetch_wait(&client, &identity_bytes, 0).await?;
|
||||
payloads.sort_by_key(|(seq, _)| *seq);
|
||||
let mut retry_payloads: Vec<Vec<u8>> = Vec::new();
|
||||
for (_, payload) in &payloads {
|
||||
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
|
||||
Ok(b) => b,
|
||||
Err(_) => continue,
|
||||
};
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(Some(pt)) => {
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
let s = String::from_utf8_lossy(&pt);
|
||||
println!("\r\n[peer] {s}\n> ");
|
||||
std::io::stdout().flush().context("flush stdout")?;
|
||||
}
|
||||
Ok(None) => {}
|
||||
Err(_) => {}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {}
|
||||
Err(_) => retry_payloads.push(mls_payload),
|
||||
}
|
||||
}
|
||||
// Retry failed messages (epoch may have advanced from commits in this batch)
|
||||
loop {
|
||||
let before = retry_payloads.len();
|
||||
retry_payloads.retain(|mls_payload| {
|
||||
match member.receive_message(mls_payload) {
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
let s = String::from_utf8_lossy(&pt);
|
||||
println!("\r\n[peer] {s}\n> ");
|
||||
let _ = std::io::stdout().flush();
|
||||
false
|
||||
}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => false,
|
||||
Err(_) => true,
|
||||
}
|
||||
});
|
||||
if retry_payloads.len() == before { break; }
|
||||
}
|
||||
if !payloads.is_empty() {
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
}
|
||||
@@ -1202,3 +1290,111 @@ pub async fn cmd_chat(
|
||||
println!();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Transcript export ─────────────────────────────────────────────────────────
|
||||
|
||||
/// Export the message history for a conversation to an encrypted, tamper-evident
|
||||
/// transcript file.
|
||||
///
|
||||
/// `conv_db` is the path to the conversation SQLite database (`.convdb` file).
|
||||
/// `conv_id_hex` is the 32-hex-character conversation ID to export.
|
||||
/// `output` is the path for the `.qpqt` transcript file to write.
|
||||
/// `transcript_password` is used to derive the encryption key (Argon2id).
|
||||
/// `db_password` is the optional SQLCipher password for the conversation database.
|
||||
pub fn cmd_export(
|
||||
conv_db: &Path,
|
||||
conv_id_hex: &str,
|
||||
output: &Path,
|
||||
transcript_password: &str,
|
||||
db_password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
use quicproquo_core::{TranscriptRecord, TranscriptWriter};
|
||||
use super::conversation::{ConversationId, ConversationStore};
|
||||
|
||||
// Decode conversation ID from hex.
|
||||
let id_bytes = hex::decode(conv_id_hex)
|
||||
.map_err(|e| anyhow::anyhow!("conv-id must be 32 hex characters (16 bytes): {e}"))?;
|
||||
let conv_id = ConversationId::from_slice(&id_bytes)
|
||||
.ok_or_else(|| anyhow::anyhow!("conv-id must be exactly 16 bytes (32 hex chars), got {} bytes", id_bytes.len()))?;
|
||||
|
||||
// Open conversation database.
|
||||
let store = ConversationStore::open(conv_db, db_password)
|
||||
.context("open conversation database")?;
|
||||
|
||||
// Load conversation metadata (to display name in output).
|
||||
let conv = store
|
||||
.load_conversation(&conv_id)?
|
||||
.with_context(|| format!("conversation '{conv_id_hex}' not found in database"))?;
|
||||
|
||||
// Load all messages (oldest first).
|
||||
let messages = store.load_all_messages(&conv_id)?;
|
||||
|
||||
if messages.is_empty() {
|
||||
println!("No messages in conversation '{}'.", conv.display_name);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create output file.
|
||||
if let Some(parent) = output.parent() {
|
||||
std::fs::create_dir_all(parent).ok();
|
||||
}
|
||||
let mut file = std::fs::File::create(output)
|
||||
.with_context(|| format!("create transcript file '{}'", output.display()))?;
|
||||
|
||||
// Write transcript header + records.
|
||||
let mut writer = TranscriptWriter::new(transcript_password, &mut file)
|
||||
.context("initialise transcript writer")?;
|
||||
|
||||
let mut written = 0u64;
|
||||
for (seq, msg) in messages.iter().enumerate() {
|
||||
writer
|
||||
.write_record(
|
||||
&TranscriptRecord {
|
||||
seq: seq as u64,
|
||||
sender_identity: &msg.sender_key,
|
||||
timestamp_ms: msg.timestamp_ms,
|
||||
plaintext: &msg.body,
|
||||
},
|
||||
&mut file,
|
||||
)
|
||||
.context("write transcript record")?;
|
||||
written += 1;
|
||||
}
|
||||
|
||||
println!(
|
||||
"Exported {} message(s) from '{}' to '{}'.",
|
||||
written,
|
||||
conv.display_name,
|
||||
output.display()
|
||||
);
|
||||
println!("Decrypt with: qpq export verify --input <file> --password <password>");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verify the hash-chain integrity of a transcript file without decrypting content.
|
||||
///
|
||||
/// Prints a summary. Does not require the encryption password (structural check only).
|
||||
pub fn cmd_export_verify(input: &Path) -> anyhow::Result<()> {
|
||||
use quicproquo_core::{validate_transcript_structure, ChainVerdict};
|
||||
|
||||
let data = std::fs::read(input)
|
||||
.with_context(|| format!("read transcript file '{}'", input.display()))?;
|
||||
|
||||
match validate_transcript_structure(&data)? {
|
||||
ChainVerdict::Ok { records } => {
|
||||
println!(
|
||||
"OK: transcript '{}' is structurally valid. {} record(s) found, hash chain intact.",
|
||||
input.display(),
|
||||
records
|
||||
);
|
||||
}
|
||||
ChainVerdict::Broken => {
|
||||
anyhow::bail!(
|
||||
"FAIL: hash chain is broken in '{}' — file may have been tampered with.",
|
||||
input.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
798
crates/quicproquo-client/src/client/conversation.rs
Normal file
798
crates/quicproquo-client/src/client/conversation.rs
Normal file
@@ -0,0 +1,798 @@
|
||||
//! Multi-conversation state backed by SQLite (SQLCipher-encrypted when a
|
||||
//! password is provided).
|
||||
//!
|
||||
//! Each conversation (DM or group) has its own MLS group blob, keystore blob,
|
||||
//! member list, and message history.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use rand::RngCore;
|
||||
use rusqlite::{params, Connection, OptionalExtension};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
// ── Types ────────────────────────────────────────────────────────────────────
|
||||
|
||||
/// 16-byte conversation identifier.
|
||||
/// - DMs: the channel_id returned by `createChannel` (server-assigned UUID).
|
||||
/// - Groups: SHA-256(group_name)[..16].
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct ConversationId(pub [u8; 16]);
|
||||
|
||||
impl ConversationId {
|
||||
pub fn from_slice(s: &[u8]) -> Option<Self> {
|
||||
if s.len() == 16 {
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(s);
|
||||
Some(Self(buf))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive a conversation ID from a group name via SHA-256 truncation.
|
||||
pub fn from_group_name(name: &str) -> Self {
|
||||
use sha2::{Sha256, Digest};
|
||||
let hash = Sha256::digest(name.as_bytes());
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(&hash[..16]);
|
||||
Self(buf)
|
||||
}
|
||||
|
||||
pub fn hex(&self) -> String {
|
||||
hex::encode(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Discriminates the two conversation shapes stored in the database.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConversationKind {
    /// 1:1 DM channel with a specific peer.
    Dm {
        /// Peer's identity key bytes (as stored in the `peer_key` column).
        peer_key: Vec<u8>,
        /// Optional human-readable name for the peer, if known.
        peer_username: Option<String>,
    },
    /// Named group with N members.
    Group { name: String },
}
|
||||
|
||||
/// One row of the `conversations` table, fully materialized.
#[derive(Clone, Debug)]
pub struct Conversation {
    /// 16-byte primary key (see [`ConversationId`]).
    pub id: ConversationId,
    /// DM vs. group, plus the kind-specific fields.
    pub kind: ConversationKind,
    /// Name shown in conversation lists.
    pub display_name: String,
    /// Serialized MLS group (bincode).
    pub mls_group_blob: Option<Vec<u8>>,
    /// Serialized keystore (bincode HashMap).
    pub keystore_blob: Option<Vec<u8>>,
    /// Member identity keys (bincode Vec<Vec<u8>>).
    pub member_keys: Vec<Vec<u8>>,
    /// Count of messages not yet seen by the user.
    pub unread_count: u32,
    /// Unix-epoch milliseconds of the most recent activity.
    pub last_activity_ms: u64,
    /// Unix-epoch milliseconds when the conversation was created.
    pub created_at_ms: u64,
    /// Whether this conversation uses hybrid (X25519 + ML-KEM-768) MLS keys.
    pub is_hybrid: bool,
    /// Highest server-side delivery sequence number seen.
    pub last_seen_seq: u64,
}
|
||||
|
||||
/// One row of the `messages` table.
#[derive(Clone, Debug)]
pub struct StoredMessage {
    /// Conversation this message belongs to.
    pub conversation_id: ConversationId,
    /// Optional 16-byte application-level message ID (used for edits/deletes).
    pub message_id: Option<[u8; 16]>,
    /// Sender's identity key bytes.
    pub sender_key: Vec<u8>,
    /// Optional display name of the sender, if known.
    pub sender_name: Option<String>,
    /// Message text.
    pub body: String,
    /// Free-form type tag (e.g. set to "deleted" by `delete_message`).
    pub msg_type: String,
    /// Optional 16-byte ID of a referenced message (replies/edits).
    pub ref_msg_id: Option<[u8; 16]>,
    /// Unix-epoch milliseconds when the message was sent/received.
    pub timestamp_ms: u64,
    /// True for messages authored locally, false for incoming.
    pub is_outgoing: bool,
}
|
||||
|
||||
// ── Key derivation (Argon2id, matching state.rs parameters) ─────────────────
|
||||
|
||||
// Argon2id cost parameters — kept identical to the ones in state.rs so both
// stores derive keys the same way.
const ARGON2_M_COST: u32 = 19 * 1024; // memory cost in KiB (~19 MiB)
const ARGON2_T_COST: u32 = 2; // time cost (iterations)
const ARGON2_P_COST: u32 = 1; // parallelism (lanes)
const SALT_LEN: usize = 16; // random salt length in bytes, stored beside the db
|
||||
|
||||
/// Derive a 32-byte SQLCipher key from the user password and a random salt.
|
||||
fn derive_convdb_key(password: &str, salt: &[u8]) -> anyhow::Result<Zeroizing<[u8; 32]>> {
|
||||
let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(32))
|
||||
.map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = Zeroizing::new([0u8; 32]);
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut *key)
|
||||
.map_err(|e| anyhow::anyhow!("convdb key derivation: {e}"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Read or create a 16-byte random salt at `salt_path` (mode 0o600).
|
||||
fn get_or_create_salt(salt_path: &Path) -> anyhow::Result<Vec<u8>> {
|
||||
if salt_path.exists() {
|
||||
let bytes = std::fs::read(salt_path).context("read convdb salt")?;
|
||||
anyhow::ensure!(bytes.len() == SALT_LEN, "invalid convdb salt length");
|
||||
return Ok(bytes);
|
||||
}
|
||||
let mut salt = vec![0u8; SALT_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||
std::fs::write(salt_path, &salt).context("write convdb salt")?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
std::fs::set_permissions(salt_path, std::fs::Permissions::from_mode(0o600)).ok();
|
||||
}
|
||||
Ok(salt)
|
||||
}
|
||||
|
||||
// ── ConversationStore ────────────────────────────────────────────────────────
|
||||
|
||||
/// Handle to the conversation database; all reads and writes go through `conn`.
pub struct ConversationStore {
    // Underlying rusqlite connection (SQLCipher-keyed when opened encrypted).
    conn: Connection,
}
|
||||
|
||||
impl ConversationStore {
|
||||
/// Open or create the conversation database at `db_path`.
|
||||
/// If `password` is `Some`, the database is encrypted with SQLCipher using
|
||||
/// an Argon2id-derived key. Existing unencrypted databases are migrated
|
||||
/// transparently.
|
||||
pub fn open(db_path: &Path, password: Option<&str>) -> anyhow::Result<Self> {
|
||||
if let Some(parent) = db_path.parent() {
|
||||
std::fs::create_dir_all(parent).ok();
|
||||
}
|
||||
|
||||
match password {
|
||||
Some(pw) => Self::open_encrypted(db_path, pw),
|
||||
None => Self::open_plain(db_path),
|
||||
}
|
||||
}
|
||||
|
||||
fn open_plain(db_path: &Path) -> anyhow::Result<Self> {
|
||||
let conn = Connection::open(db_path).context("open conversation db")?;
|
||||
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
|
||||
.context("set pragmas")?;
|
||||
Self::migrate(&conn)?;
|
||||
Ok(Self { conn })
|
||||
}
|
||||
|
||||
    /// Open (or create) a SQLCipher-encrypted database at `db_path`.
    ///
    /// The sibling `<db>-salt` file doubles as the "already encrypted" marker:
    /// a db present without a salt file is assumed plaintext and migrated first.
    fn open_encrypted(db_path: &Path, password: &str) -> anyhow::Result<Self> {
        let salt_path = PathBuf::from(format!("{}-salt", db_path.display()));
        let already_encrypted = salt_path.exists();

        // Migrate an existing unencrypted database before opening with encryption.
        if db_path.exists() && !already_encrypted {
            Self::migrate_plain_to_encrypted(db_path, &salt_path, password)?;
            // After migration, salt file exists and DB is encrypted — fall through.
        }

        let salt = get_or_create_salt(&salt_path)?;
        let key = derive_convdb_key(password, &salt)?;
        // Key is passed to SQLCipher as a raw-key hex literal; keep it zeroized.
        #[allow(clippy::needless_borrows_for_generic_args)]
        let hex_key = Zeroizing::new(hex::encode(&*key));

        let conn = Connection::open(db_path).context("open conversation db")?;
        // The key pragma is issued before any other statement so SQLCipher can
        // decrypt pages; only then do the usual pragmas and migrations run.
        conn.pragma_update(None, "key", format!("x'{}'", &*hex_key))
            .context("set SQLCipher key")?;
        conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
            .context("set pragmas")?;
        Self::migrate(&conn)?;
        Ok(Self { conn })
    }
|
||||
|
||||
    /// Migrate an unencrypted `.convdb` to an encrypted one in-place.
    ///
    /// Uses SQLCipher's `sqlcipher_export()` into an attached encrypted copy,
    /// then atomically renames the copy over the original. The statement order
    /// (ATTACH → export → DETACH → drop connection → rename) is load-bearing.
    fn migrate_plain_to_encrypted(
        db_path: &Path,
        salt_path: &Path,
        password: &str,
    ) -> anyhow::Result<()> {
        // Creating the salt here means re-running this path later sees the
        // salt file and treats the db as already encrypted.
        let salt = get_or_create_salt(salt_path)?;
        let key = derive_convdb_key(password, &salt)?;
        #[allow(clippy::needless_borrows_for_generic_args)]
        let hex_key = Zeroizing::new(hex::encode(&*key));

        // Temporary sibling path for the encrypted copy.
        let enc_path = db_path.with_extension("convdb-enc");

        // Open the existing plaintext database.
        let plain = Connection::open(db_path).context("open plain convdb for migration")?;
        plain.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;").ok();

        // Attach a new encrypted database and export into it.
        // Sanitize the path to prevent SQL injection (ATTACH does not support parameterized paths).
        let enc_path_str = enc_path.display().to_string();
        anyhow::ensure!(
            !enc_path_str.contains('\''),
            "database path must not contain single quotes: {enc_path_str}"
        );
        plain
            .execute_batch(&format!(
                "ATTACH DATABASE '{enc_path_str}' AS encrypted KEY \"x'{}'\";",
                &*hex_key
            ))
            .context("attach encrypted db for migration")?;
        plain
            .execute_batch("SELECT sqlcipher_export('encrypted');")
            .context("sqlcipher_export to encrypted db")?;
        plain
            .execute_batch("DETACH DATABASE encrypted;")
            .context("detach encrypted db")?;

        // Close the plaintext connection before touching files on disk.
        drop(plain);

        // Swap files: encrypted → original.
        std::fs::rename(&enc_path, db_path).context("replace convdb with encrypted version")?;
        // Clean up WAL/SHM left from the plaintext open.
        let wal = PathBuf::from(format!("{}-wal", db_path.display()));
        let shm = PathBuf::from(format!("{}-shm", db_path.display()));
        std::fs::remove_file(&wal).ok();
        std::fs::remove_file(&shm).ok();

        tracing::info!("migrated conversation database to encrypted storage");
        Ok(())
    }
|
||||
|
||||
    /// Create the schema if absent and apply additive column migrations.
    /// Safe to run on every open; all DDL is `IF NOT EXISTS` or ignored on error.
    fn migrate(conn: &Connection) -> anyhow::Result<()> {
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS conversations (
                id BLOB PRIMARY KEY,
                kind TEXT NOT NULL,
                display_name TEXT NOT NULL,
                peer_key BLOB,
                peer_username TEXT,
                group_name TEXT,
                mls_group_blob BLOB,
                keystore_blob BLOB,
                member_keys BLOB,
                unread_count INTEGER NOT NULL DEFAULT 0,
                last_activity_ms INTEGER NOT NULL DEFAULT 0,
                created_at_ms INTEGER NOT NULL DEFAULT 0
            );

            CREATE TABLE IF NOT EXISTS messages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                conversation_id BLOB NOT NULL REFERENCES conversations(id),
                message_id BLOB,
                sender_key BLOB NOT NULL,
                sender_name TEXT,
                body TEXT NOT NULL,
                msg_type TEXT NOT NULL,
                ref_msg_id BLOB,
                timestamp_ms INTEGER NOT NULL,
                is_outgoing INTEGER NOT NULL DEFAULT 0
            );

            CREATE INDEX IF NOT EXISTS idx_messages_conv
                ON messages(conversation_id, timestamp_ms);

            CREATE TABLE IF NOT EXISTS outbox (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                conversation_id BLOB NOT NULL,
                recipient_key BLOB NOT NULL,
                payload BLOB NOT NULL,
                created_at_ms INTEGER NOT NULL,
                retry_count INTEGER NOT NULL DEFAULT 0,
                status TEXT NOT NULL DEFAULT 'pending'
            );
            CREATE INDEX IF NOT EXISTS idx_outbox_status
                ON outbox(status, created_at_ms);",
        )
        .context("migrate conversation db")?;

        // Additive migrations for new columns (safe to re-run; errors ignored if column already exists).
        conn.execute_batch("ALTER TABLE conversations ADD COLUMN is_hybrid INTEGER NOT NULL DEFAULT 0;").ok();
        conn.execute_batch("ALTER TABLE conversations ADD COLUMN last_seen_seq INTEGER NOT NULL DEFAULT 0;").ok();

        Ok(())
    }
|
||||
|
||||
// ── Conversation CRUD ────────────────────────────────────────────────
|
||||
|
||||
    /// Insert or update (upsert) a conversation row.
    ///
    /// On conflict, only the mutable columns are refreshed; `kind`, `peer_key`,
    /// `peer_username`, `group_name` and `created_at_ms` are written at insert
    /// time and left untouched by the UPDATE branch.
    pub fn save_conversation(&self, conv: &Conversation) -> anyhow::Result<()> {
        // Flatten the kind enum into the nullable columns used by the schema.
        let (kind_str, peer_key, peer_username, group_name) = match &conv.kind {
            ConversationKind::Dm {
                peer_key,
                peer_username,
            } => ("dm", Some(peer_key.as_slice()), peer_username.as_deref(), None),
            ConversationKind::Group { name } => ("group", None, None, Some(name.as_str())),
        };
        let member_keys_blob = bincode::serialize(&conv.member_keys)
            .context("serialize member_keys")?;

        self.conn.execute(
            "INSERT INTO conversations
                 (id, kind, display_name, peer_key, peer_username, group_name,
                  mls_group_blob, keystore_blob, member_keys, unread_count,
                  last_activity_ms, created_at_ms, is_hybrid, last_seen_seq)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
             ON CONFLICT(id) DO UPDATE SET
                 display_name = excluded.display_name,
                 mls_group_blob = excluded.mls_group_blob,
                 keystore_blob = excluded.keystore_blob,
                 member_keys = excluded.member_keys,
                 unread_count = excluded.unread_count,
                 last_activity_ms = excluded.last_activity_ms,
                 is_hybrid = excluded.is_hybrid,
                 last_seen_seq = excluded.last_seen_seq",
            params![
                conv.id.0.as_slice(),
                kind_str,
                conv.display_name,
                peer_key,
                peer_username,
                group_name,
                conv.mls_group_blob,
                conv.keystore_blob,
                member_keys_blob,
                conv.unread_count,
                conv.last_activity_ms,
                conv.created_at_ms,
                conv.is_hybrid as i32,
                conv.last_seen_seq as i64,
            ],
        )?;
        Ok(())
    }
|
||||
|
||||
    /// Load a single conversation by ID; `Ok(None)` when the row does not exist.
    pub fn load_conversation(&self, id: &ConversationId) -> anyhow::Result<Option<Conversation>> {
        self.conn
            .query_row(
                "SELECT kind, display_name, peer_key, peer_username, group_name,
                        mls_group_blob, keystore_blob, member_keys, unread_count,
                        last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
                 FROM conversations WHERE id = ?1",
                params![id.0.as_slice()],
                |row| {
                    let kind_str: String = row.get(0)?;
                    let display_name: String = row.get(1)?;
                    let peer_key: Option<Vec<u8>> = row.get(2)?;
                    let peer_username: Option<String> = row.get(3)?;
                    let group_name: Option<String> = row.get(4)?;
                    let mls_group_blob: Option<Vec<u8>> = row.get(5)?;
                    let keystore_blob: Option<Vec<u8>> = row.get(6)?;
                    let member_keys_blob: Option<Vec<u8>> = row.get(7)?;
                    let unread_count: u32 = row.get(8)?;
                    let last_activity_ms: u64 = row.get(9)?;
                    let created_at_ms: u64 = row.get(10)?;
                    let is_hybrid_int: i32 = row.get(11)?;
                    let last_seen_seq: i64 = row.get(12)?;

                    // Any kind other than "dm" is treated as a group.
                    let kind = if kind_str == "dm" {
                        ConversationKind::Dm {
                            peer_key: peer_key.unwrap_or_default(),
                            peer_username,
                        }
                    } else {
                        ConversationKind::Group {
                            name: group_name.unwrap_or_default(),
                        }
                    };

                    // Corrupt member_keys blobs degrade to an empty list (logged),
                    // rather than failing the whole load.
                    let member_keys: Vec<Vec<u8>> = member_keys_blob
                        .and_then(|b| match bincode::deserialize(&b) {
                            Ok(v) => Some(v),
                            Err(e) => {
                                tracing::warn!(conv = %hex::encode(id.0), "bincode deserialize member_keys failed: {e}");
                                None
                            }
                        })
                        .unwrap_or_default();

                    Ok(Conversation {
                        id: id.clone(),
                        kind,
                        display_name,
                        mls_group_blob,
                        keystore_blob,
                        member_keys,
                        unread_count,
                        last_activity_ms,
                        created_at_ms,
                        is_hybrid: is_hybrid_int != 0,
                        last_seen_seq: last_seen_seq as u64,
                    })
                },
            )
            .optional()
            .context("load conversation")
    }
|
||||
|
||||
    /// List all conversations, most recently active first.
    ///
    /// NOTE(review): an id blob of the wrong length is silently mapped to the
    /// all-zero ConversationId instead of erroring — consider surfacing this.
    /// The row-decoding here duplicates `load_conversation`; keep both in sync.
    pub fn list_conversations(&self) -> anyhow::Result<Vec<Conversation>> {
        let mut stmt = self.conn.prepare(
            "SELECT id, kind, display_name, peer_key, peer_username, group_name,
                    mls_group_blob, keystore_blob, member_keys, unread_count,
                    last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
             FROM conversations ORDER BY last_activity_ms DESC",
        )?;
        let rows = stmt.query_map([], |row| {
            let id_blob: Vec<u8> = row.get(0)?;
            let kind_str: String = row.get(1)?;
            let display_name: String = row.get(2)?;
            let peer_key: Option<Vec<u8>> = row.get(3)?;
            let peer_username: Option<String> = row.get(4)?;
            let group_name: Option<String> = row.get(5)?;
            let mls_group_blob: Option<Vec<u8>> = row.get(6)?;
            let keystore_blob: Option<Vec<u8>> = row.get(7)?;
            let member_keys_blob: Option<Vec<u8>> = row.get(8)?;
            let unread_count: u32 = row.get(9)?;
            let last_activity_ms: u64 = row.get(10)?;
            let created_at_ms: u64 = row.get(11)?;
            let is_hybrid_int: i32 = row.get(12)?;
            let last_seen_seq: i64 = row.get(13)?;

            // Invalid-length ids fall back to the zero id (see NOTE above).
            let id = ConversationId::from_slice(&id_blob).unwrap_or(ConversationId([0; 16]));
            let kind = if kind_str == "dm" {
                ConversationKind::Dm {
                    peer_key: peer_key.unwrap_or_default(),
                    peer_username,
                }
            } else {
                ConversationKind::Group {
                    name: group_name.unwrap_or_default(),
                }
            };
            // Corrupt member_keys blobs degrade to an empty list (logged).
            let member_keys: Vec<Vec<u8>> = member_keys_blob
                .and_then(|b| match bincode::deserialize(&b) {
                    Ok(v) => Some(v),
                    Err(e) => {
                        tracing::warn!(conv = %hex::encode(&id_blob), "bincode deserialize member_keys failed: {e}");
                        None
                    }
                })
                .unwrap_or_default();

            Ok(Conversation {
                id,
                kind,
                display_name,
                mls_group_blob,
                keystore_blob,
                member_keys,
                unread_count,
                last_activity_ms,
                created_at_ms,
                is_hybrid: is_hybrid_int != 0,
                last_seen_seq: last_seen_seq as u64,
            })
        })?;

        let mut convs = Vec::new();
        for row in rows {
            convs.push(row?);
        }
        Ok(convs)
    }
|
||||
|
||||
/// Find a DM conversation by the peer's identity key.
|
||||
pub fn find_dm_by_peer(&self, peer_key: &[u8]) -> anyhow::Result<Option<Conversation>> {
|
||||
let id_blob: Option<Vec<u8>> = self
|
||||
.conn
|
||||
.query_row(
|
||||
"SELECT id FROM conversations WHERE kind = 'dm' AND peer_key = ?1",
|
||||
params![peer_key],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
match id_blob {
|
||||
Some(blob) => {
|
||||
let id = ConversationId::from_slice(&blob)
|
||||
.context("invalid conversation id in db")?;
|
||||
self.load_conversation(&id)
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Find a group conversation by name.
|
||||
pub fn find_group_by_name(&self, name: &str) -> anyhow::Result<Option<Conversation>> {
|
||||
let id_blob: Option<Vec<u8>> = self
|
||||
.conn
|
||||
.query_row(
|
||||
"SELECT id FROM conversations WHERE kind = 'group' AND group_name = ?1",
|
||||
params![name],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
match id_blob {
|
||||
Some(blob) => {
|
||||
let id = ConversationId::from_slice(&blob)
|
||||
.context("invalid conversation id in db")?;
|
||||
self.load_conversation(&id)
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn increment_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET unread_count = unread_count + 1 WHERE id = ?1",
|
||||
params![id.0.as_slice()],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn reset_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET unread_count = 0 WHERE id = ?1",
|
||||
params![id.0.as_slice()],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_activity(&self, id: &ConversationId, ts_ms: u64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET last_activity_ms = ?2 WHERE id = ?1 AND last_activity_ms < ?2",
|
||||
params![id.0.as_slice(), ts_ms],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Message CRUD ─────────────────────────────────────────────────────
|
||||
|
||||
    /// Append a message row (no dedup; caller is responsible for ordering).
    pub fn save_message(&self, msg: &StoredMessage) -> anyhow::Result<()> {
        self.conn.execute(
            "INSERT INTO messages
                 (conversation_id, message_id, sender_key, sender_name, body,
                  msg_type, ref_msg_id, timestamp_ms, is_outgoing)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
            params![
                msg.conversation_id.0.as_slice(),
                msg.message_id.as_ref().map(|id| id.as_slice()),
                msg.sender_key,
                msg.sender_name,
                msg.body,
                msg.msg_type,
                msg.ref_msg_id.as_ref().map(|id| id.as_slice()),
                msg.timestamp_ms,
                msg.is_outgoing as i32,
            ],
        )?;
        Ok(())
    }
|
||||
|
||||
    /// Load the `limit` most recent messages of a conversation, returned oldest
    /// first (queried newest-first, then reversed in memory).
    ///
    /// NOTE(review): unlike `load_all_messages`, ties on equal `timestamp_ms`
    /// are not broken by rowid here, so ordering within a tie is unspecified.
    pub fn load_recent_messages(
        &self,
        conv_id: &ConversationId,
        limit: usize,
    ) -> anyhow::Result<Vec<StoredMessage>> {
        let mut stmt = self.conn.prepare(
            "SELECT message_id, sender_key, sender_name, body, msg_type,
                    ref_msg_id, timestamp_ms, is_outgoing
             FROM messages
             WHERE conversation_id = ?1
             ORDER BY timestamp_ms DESC
             LIMIT ?2",
        )?;
        // Clamp the limit into u32 range for the SQL parameter.
        let rows = stmt.query_map(params![conv_id.0.as_slice(), limit.min(u32::MAX as usize) as u32], |row| {
            let message_id: Option<Vec<u8>> = row.get(0)?;
            let sender_key: Vec<u8> = row.get(1)?;
            let sender_name: Option<String> = row.get(2)?;
            let body: String = row.get(3)?;
            let msg_type: String = row.get(4)?;
            let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
            let timestamp_ms: u64 = row.get(6)?;
            let is_outgoing: i32 = row.get(7)?;

            // Narrow an arbitrary blob to a fixed 16-byte id; None on length mismatch.
            fn to_16(v: &[u8]) -> Option<[u8; 16]> {
                if v.len() == 16 {
                    let mut buf = [0u8; 16];
                    buf.copy_from_slice(v);
                    Some(buf)
                } else {
                    None
                }
            }

            Ok(StoredMessage {
                conversation_id: conv_id.clone(),
                message_id: message_id.as_deref().and_then(to_16),
                sender_key,
                sender_name,
                body,
                msg_type,
                ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
                timestamp_ms,
                is_outgoing: is_outgoing != 0,
            })
        })?;

        let mut msgs = Vec::new();
        for row in rows {
            msgs.push(row?);
        }
        // Reverse so oldest first
        msgs.reverse();
        Ok(msgs)
    }
|
||||
|
||||
    /// Load all messages for a conversation, oldest first (no limit).
    /// Ties on `timestamp_ms` are broken by insertion order (rowid).
    pub fn load_all_messages(&self, conv_id: &ConversationId) -> anyhow::Result<Vec<StoredMessage>> {
        let mut stmt = self.conn.prepare(
            "SELECT message_id, sender_key, sender_name, body, msg_type,
                    ref_msg_id, timestamp_ms, is_outgoing
             FROM messages
             WHERE conversation_id = ?1
             ORDER BY timestamp_ms ASC, id ASC",
        )?;
        let rows = stmt.query_map(params![conv_id.0.as_slice()], |row| {
            let message_id: Option<Vec<u8>> = row.get(0)?;
            let sender_key: Vec<u8> = row.get(1)?;
            let sender_name: Option<String> = row.get(2)?;
            let body: String = row.get(3)?;
            let msg_type: String = row.get(4)?;
            let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
            let timestamp_ms: u64 = row.get(6)?;
            let is_outgoing: i32 = row.get(7)?;

            // Narrow an arbitrary blob to a fixed 16-byte id; None on length mismatch.
            fn to_16(v: &[u8]) -> Option<[u8; 16]> {
                if v.len() == 16 {
                    let mut buf = [0u8; 16];
                    buf.copy_from_slice(v);
                    Some(buf)
                } else {
                    None
                }
            }

            Ok(StoredMessage {
                conversation_id: conv_id.clone(),
                message_id: message_id.as_deref().and_then(to_16),
                sender_key,
                sender_name,
                body,
                msg_type,
                ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
                timestamp_ms,
                is_outgoing: is_outgoing != 0,
            })
        })?;

        let mut msgs = Vec::new();
        for row in rows {
            msgs.push(row?);
        }
        Ok(msgs)
    }
|
||||
|
||||
/// Update the body of an existing message (for edits).
|
||||
pub fn update_message_body(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
message_id: &[u8; 16],
|
||||
new_body: &str,
|
||||
) -> anyhow::Result<bool> {
|
||||
let rows = self.conn.execute(
|
||||
"UPDATE messages SET body = ?3 WHERE conversation_id = ?1 AND message_id = ?2",
|
||||
params![conv_id.0.as_slice(), message_id.as_slice(), new_body],
|
||||
)?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
/// Mark a message as deleted (sets body to "[deleted]" and msg_type to "deleted").
|
||||
pub fn delete_message(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
message_id: &[u8; 16],
|
||||
) -> anyhow::Result<bool> {
|
||||
let rows = self.conn.execute(
|
||||
"UPDATE messages SET body = '[deleted]', msg_type = 'deleted' WHERE conversation_id = ?1 AND message_id = ?2",
|
||||
params![conv_id.0.as_slice(), message_id.as_slice()],
|
||||
)?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
/// Save a message, deduplicating by message_id within the same conversation.
|
||||
/// Returns `true` if the message was saved (new), `false` if it was a duplicate.
|
||||
pub fn save_message_dedup(&self, msg: &StoredMessage) -> anyhow::Result<bool> {
|
||||
if let Some(ref mid) = msg.message_id {
|
||||
let exists: bool = self.conn.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM messages WHERE message_id = ?1 AND conversation_id = ?2)",
|
||||
params![mid.as_slice(), msg.conversation_id.0.as_slice()],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
if exists {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
self.save_message(msg)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
// ── Sequence tracking ──────────────────────────────────────────────
|
||||
|
||||
pub fn update_last_seen_seq(&self, id: &ConversationId, seq: u64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET last_seen_seq = ?2 WHERE id = ?1 AND last_seen_seq < ?2",
|
||||
params![id.0.as_slice(), seq as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Outbox (offline queue) ────────────────────────────────────────
|
||||
|
||||
pub fn enqueue_outbox(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
recipient_key: &[u8],
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO outbox (conversation_id, recipient_key, payload, created_at_ms)
|
||||
VALUES (?1, ?2, ?3, ?4)",
|
||||
params![conv_id.0.as_slice(), recipient_key, payload, now_ms() as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_pending_outbox(&self) -> anyhow::Result<Vec<OutboxEntry>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT id, conversation_id, recipient_key, payload, retry_count
|
||||
FROM outbox WHERE status = 'pending' ORDER BY created_at_ms",
|
||||
)?;
|
||||
let rows = stmt.query_map([], |row| {
|
||||
let id: i64 = row.get(0)?;
|
||||
let conv_blob: Vec<u8> = row.get(1)?;
|
||||
let recipient_key: Vec<u8> = row.get(2)?;
|
||||
let payload: Vec<u8> = row.get(3)?;
|
||||
let retry_count: u32 = row.get(4)?;
|
||||
Ok(OutboxEntry {
|
||||
id,
|
||||
conversation_id: ConversationId::from_slice(&conv_blob)
|
||||
.unwrap_or(ConversationId([0; 16])),
|
||||
recipient_key,
|
||||
payload,
|
||||
retry_count,
|
||||
})
|
||||
})?;
|
||||
let mut entries = Vec::new();
|
||||
for row in rows {
|
||||
entries.push(row?);
|
||||
}
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
pub fn mark_outbox_sent(&self, id: i64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE outbox SET status = 'sent' WHERE id = ?1",
|
||||
params![id],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn mark_outbox_failed(&self, id: i64, retry_count: u32) -> anyhow::Result<()> {
|
||||
let new_status = if retry_count > 5 { "failed" } else { "pending" };
|
||||
self.conn.execute(
|
||||
"UPDATE outbox SET retry_count = ?2, status = ?3 WHERE id = ?1",
|
||||
params![id, retry_count, new_status],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete messages older than `cutoff_ms` (epoch milliseconds) across all conversations.
|
||||
pub fn delete_messages_before(&self, cutoff_ms: u64) -> anyhow::Result<usize> {
|
||||
let rows = self.conn.execute(
|
||||
"DELETE FROM messages WHERE timestamp_ms < ?1",
|
||||
params![cutoff_ms as i64],
|
||||
)?;
|
||||
Ok(rows)
|
||||
}
|
||||
}
|
||||
|
||||
/// An entry in the offline outbox queue.
#[derive(Clone, Debug)]
pub struct OutboxEntry {
    /// SQLite rowid of the entry; used by `mark_outbox_sent` / `mark_outbox_failed`.
    pub id: i64,
    /// Conversation the payload belongs to (all-zero if the stored blob was malformed).
    pub conversation_id: ConversationId,
    /// Key identifying the intended recipient.
    pub recipient_key: Vec<u8>,
    /// Opaque payload bytes to deliver, exactly as enqueued.
    pub payload: Vec<u8>,
    /// Delivery attempts recorded so far (maintained by `mark_outbox_failed`).
    pub retry_count: u32,
}
|
||||
|
||||
/// Current wall-clock time as milliseconds since the Unix epoch.
///
/// Returns 0 if the system clock reads earlier than the epoch.
pub fn now_ms() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}
|
||||
82
crates/quicproquo-client/src/client/display.rs
Normal file
82
crates/quicproquo-client/src/client/display.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
//! Terminal display helpers for the REPL.
|
||||
|
||||
use super::conversation::StoredMessage;
|
||||
use super::session::SessionState;
|
||||
|
||||
// ANSI color codes
|
||||
const RESET: &str = "\x1b[0m";
|
||||
const BOLD: &str = "\x1b[1m";
|
||||
const DIM: &str = "\x1b[2m";
|
||||
const GREEN: &str = "\x1b[32m";
|
||||
const CYAN: &str = "\x1b[36m";
|
||||
const YELLOW: &str = "\x1b[33m";
|
||||
|
||||
/// Print the REPL prompt showing the active conversation and unread count.
|
||||
pub fn print_prompt(session: &SessionState) {
|
||||
use std::io::Write;
|
||||
let name = session
|
||||
.active_display_name()
|
||||
.unwrap_or_else(|| "no conversation".into());
|
||||
let unread = session.total_unread();
|
||||
if unread > 0 {
|
||||
print!("{DIM}[{RESET}{BOLD}{name}{RESET} {YELLOW}{unread} unread{RESET}{DIM}]{RESET} > ");
|
||||
} else {
|
||||
print!("{DIM}[{RESET}{BOLD}{name}{RESET}{DIM}]{RESET} > ");
|
||||
}
|
||||
let _ = std::io::stdout().flush();
|
||||
}
|
||||
|
||||
/// Print an incoming or outgoing message.
|
||||
pub fn print_message(msg: &StoredMessage) {
|
||||
let body = if msg.msg_type == "reaction" {
|
||||
format!("reacted {}", msg.body)
|
||||
} else {
|
||||
msg.body.clone()
|
||||
};
|
||||
if msg.is_outgoing {
|
||||
println!("\r{GREEN}> {body}{RESET}");
|
||||
} else {
|
||||
let fallback = hex::encode(&msg.sender_key[..4]);
|
||||
let sender = msg.sender_name.as_deref().unwrap_or(&fallback);
|
||||
println!("\r{CYAN}{BOLD}[{sender}]{RESET} {body}");
|
||||
}
|
||||
}
|
||||
|
||||
/// Print a message received in real-time (clears current line first).
|
||||
pub fn print_incoming(sender: &str, body: &str) {
|
||||
use std::io::Write;
|
||||
// Clear current line, print message, then re-show prompt context
|
||||
print!("\r\x1b[2K");
|
||||
println!("{CYAN}{BOLD}[{sender}]{RESET} {body}");
|
||||
let _ = std::io::stdout().flush();
|
||||
}
|
||||
|
||||
/// Print a system/status message.
|
||||
pub fn print_status(msg: &str) {
|
||||
println!("{DIM} {msg}{RESET}");
|
||||
}
|
||||
|
||||
/// Print a transient typing indicator (clears current line first).
|
||||
pub fn print_typing(sender: &str) {
|
||||
use std::io::Write;
|
||||
print!("\r\x1b[2K");
|
||||
println!("{DIM} {sender} is typing...{RESET}");
|
||||
let _ = std::io::stdout().flush();
|
||||
}
|
||||
|
||||
/// Print an error message.
|
||||
pub fn print_error(msg: &str) {
|
||||
println!("{YELLOW} error: {msg}{RESET}");
|
||||
}
|
||||
|
||||
/// Format a conversation list entry for `/list`.
|
||||
pub fn format_conv_line(display_name: &str, kind: &str, unread: u32, members: usize) -> String {
|
||||
let unread_str = if unread > 0 {
|
||||
format!(" {YELLOW}({unread} new){RESET}")
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
format!(
|
||||
" {BOLD}{display_name}{RESET} {DIM}[{kind}, {members} members]{RESET}{unread_str}"
|
||||
)
|
||||
}
|
||||
7
crates/quicproquo-client/src/client/hex.rs
Normal file
7
crates/quicproquo-client/src/client/hex.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
/// Hex-encode arbitrary bytes as a lowercase string.
///
/// Implemented with std formatting so this thin wrapper module does not pull
/// in the external `hex` crate for a one-liner; output matches `hex::encode`
/// (lowercase, two digits per byte).
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
    use std::fmt::Write as _;
    let bytes = bytes.as_ref();
    let mut out = String::with_capacity(bytes.len() * 2);
    for b in bytes {
        // Writing into a String is infallible.
        let _ = write!(out, "{b:02x}");
    }
    out
}
|
||||
|
||||
/// Decode a hex string into bytes.
///
/// Accepts upper- or lower-case digits. Odd-length input or any non-hex
/// character yields the same `"invalid hex string"` error the previous
/// `hex`-crate-backed implementation produced.
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
    // Map one ASCII hex digit to its value; None for anything else.
    fn nibble(b: u8) -> Option<u8> {
        match b {
            b'0'..=b'9' => Some(b - b'0'),
            b'a'..=b'f' => Some(b - b'a' + 10),
            b'A'..=b'F' => Some(b - b'A' + 10),
            _ => None,
        }
    }
    let raw = s.as_bytes();
    if raw.len() % 2 != 0 {
        return Err("invalid hex string");
    }
    let mut out = Vec::with_capacity(raw.len() / 2);
    for pair in raw.chunks_exact(2) {
        let hi = nibble(pair[0]).ok_or("invalid hex string")?;
        let lo = nibble(pair[1]).ok_or("invalid hex string")?;
        out.push((hi << 4) | lo);
    }
    Ok(out)
}
|
||||
148
crates/quicproquo-client/src/client/mesh_discovery.rs
Normal file
148
crates/quicproquo-client/src/client/mesh_discovery.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
//! mDNS-based peer discovery for Freifunk / community mesh deployments.
|
||||
//!
|
||||
//! Browse for `_quicproquo._udp.local.` services on the local network and
|
||||
//! surface them as [`DiscoveredPeer`] structs. Servers announce themselves
|
||||
//! automatically on startup; this module lets clients find them without manual
|
||||
//! configuration.
|
||||
//!
|
||||
//! # Usage
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use quicproquo_client::client::mesh_discovery::MeshDiscovery;
|
||||
//!
|
||||
//! let disc = MeshDiscovery::start()?;
|
||||
//! // Give mDNS time to collect announcements before reading.
|
||||
//! std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
//! for peer in disc.peers() {
|
||||
//! println!("found: {} at {}", peer.domain, peer.server_addr);
|
||||
//! }
|
||||
//! # Ok::<(), quicproquo_client::client::mesh_discovery::MeshDiscoveryError>(())
|
||||
//! ```
|
||||
|
||||
#[cfg(feature = "mesh")]
|
||||
use mdns_sd::{ServiceDaemon, ServiceEvent};
|
||||
use std::net::SocketAddr;
|
||||
#[cfg(feature = "mesh")]
|
||||
use std::sync::{Arc, Mutex};
|
||||
#[cfg(feature = "mesh")]
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// A qpq server discovered on the local network via mDNS.
///
/// Produced by [`MeshDiscovery`] from resolved `_quicproquo._udp.local.`
/// service announcements.
#[derive(Debug, Clone)]
pub struct DiscoveredPeer {
    /// Federation domain of the remote server (e.g. `"node1.freifunk.net"`).
    pub domain: String,
    /// QUIC RPC address to connect to.
    pub server_addr: SocketAddr,
}
|
||||
|
||||
/// A running mDNS browse session.
///
/// Starts immediately on construction; drop to stop browsing.
pub struct MeshDiscovery {
    // Held only to keep the mDNS daemon alive; dropping it ends the session.
    #[cfg(feature = "mesh")]
    _daemon: ServiceDaemon,
    // Keyed by peer domain; shared with the background event thread, which
    // inserts resolved services and removes vanished ones.
    #[cfg(feature = "mesh")]
    peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>>,
}
|
||||
|
||||
/// Errors that can occur when starting mDNS discovery.
#[derive(thiserror::Error, Debug)]
pub enum MeshDiscoveryError {
    /// The mdns-sd service daemon could not be created.
    #[error("mDNS daemon failed to start: {0}")]
    DaemonError(String),
    /// Browsing for the service type failed.
    #[error("mDNS browse failed: {0}")]
    BrowseError(String),
    /// The binary was built without the `mesh` cargo feature.
    #[error("mesh feature not compiled (rebuild with --features mesh)")]
    FeatureDisabled,
}
|
||||
|
||||
impl MeshDiscovery {
    /// Start browsing for `_quicproquo._udp.local.` services.
    ///
    /// Returns immediately; peers are collected in the background.
    /// Returns [`MeshDiscoveryError::FeatureDisabled`] when built without the
    /// `mesh` feature.
    pub fn start() -> Result<Self, MeshDiscoveryError> {
        // Exactly one of these cfg-gated blocks survives compilation.
        #[cfg(feature = "mesh")]
        {
            Self::start_inner()
        }
        #[cfg(not(feature = "mesh"))]
        {
            Err(MeshDiscoveryError::FeatureDisabled)
        }
    }

    // Real startup path, compiled only with the `mesh` feature.
    #[cfg(feature = "mesh")]
    fn start_inner() -> Result<Self, MeshDiscoveryError> {
        let daemon = ServiceDaemon::new()
            .map_err(|e| MeshDiscoveryError::DaemonError(e.to_string()))?;

        let receiver = daemon
            .browse("_quicproquo._udp.local.")
            .map_err(|e| MeshDiscoveryError::BrowseError(e.to_string()))?;

        let peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>> =
            Arc::new(Mutex::new(HashMap::new()));
        let peers_bg = Arc::clone(&peers);

        // Process mDNS events in a background thread (ServiceDaemon is sync).
        // The thread ends when the daemon (and thus the receiver) is dropped.
        std::thread::spawn(move || {
            for event in receiver {
                match event {
                    ServiceEvent::ServiceResolved(info) => {
                        // Extract the qpq server address from TXT records.
                        let server_addr_str = info
                            .get_property_val_str("server")
                            .map(|s| s.to_string());
                        // Prefer the explicit `domain` TXT record; fall back
                        // to the mDNS fullname when it is absent.
                        let domain = info
                            .get_property_val_str("domain")
                            .map(|s| s.to_string())
                            .unwrap_or_else(|| info.get_fullname().to_string());

                        // Announcements without a parseable `server` address
                        // are silently ignored.
                        if let Some(addr_str) = server_addr_str {
                            if let Ok(addr) = addr_str.parse::<SocketAddr>() {
                                let peer = DiscoveredPeer {
                                    domain: domain.clone(),
                                    server_addr: addr,
                                };
                                if let Ok(mut map) = peers_bg.lock() {
                                    map.insert(domain, peer);
                                }
                            }
                        }
                    }
                    ServiceEvent::ServiceRemoved(_ty, fullname) => {
                        // NOTE(review): removal matches by substring — a peer
                        // is dropped whenever its domain appears anywhere in
                        // the removed fullname. This can over-match when one
                        // peer's domain is a substring of another's; confirm
                        // against the server's announcement format.
                        if let Ok(mut map) = peers_bg.lock() {
                            map.retain(|_, p| {
                                !fullname.contains(&p.domain)
                            });
                        }
                    }
                    // Other events (SearchStarted, SearchStopped) are informational.
                    _ => {}
                }
            }
        });

        Ok(Self {
            _daemon: daemon,
            peers,
        })
    }

    /// Return a snapshot of all peers discovered so far.
    ///
    /// Always returns an empty list when built without the `mesh` feature.
    pub fn peers(&self) -> Vec<DiscoveredPeer> {
        #[cfg(feature = "mesh")]
        {
            // A poisoned lock degrades to "no peers" rather than panicking.
            self.peers
                .lock()
                .map(|m| m.values().cloned().collect())
                .unwrap_or_default()
        }
        #[cfg(not(feature = "mesh"))]
        {
            vec![]
        }
    }
}
|
||||
@@ -1,8 +1,23 @@
|
||||
pub mod command_engine;
|
||||
pub mod commands;
|
||||
pub mod conversation;
|
||||
pub mod display;
|
||||
pub mod hex;
|
||||
pub mod mesh_discovery;
|
||||
#[cfg(feature = "playbook")]
|
||||
pub mod playbook;
|
||||
pub mod repl;
|
||||
pub mod retry;
|
||||
pub mod rpc;
|
||||
pub mod session;
|
||||
pub mod state;
|
||||
pub mod token_cache;
|
||||
#[cfg(feature = "tui")]
|
||||
pub mod tui;
|
||||
#[cfg(feature = "v2")]
|
||||
pub mod v2_repl;
|
||||
#[cfg(all(feature = "v2", feature = "tui"))]
|
||||
pub mod v2_tui;
|
||||
|
||||
pub use commands::*;
|
||||
pub use rpc::{connect_node, enqueue, fetch_all, fetch_hybrid_key, fetch_key_package, fetch_wait, upload_hybrid_key, upload_key_package};
|
||||
868
crates/quicproquo-client/src/client/playbook.rs
Normal file
868
crates/quicproquo-client/src/client/playbook.rs
Normal file
@@ -0,0 +1,868 @@
|
||||
//! YAML playbook parser and executor.
|
||||
//!
|
||||
//! Playbooks describe a sequence of client commands in YAML format.
|
||||
//! They support variable substitution, assertions, loops, and per-step
|
||||
//! error handling policies.
|
||||
//!
|
||||
//! ```yaml
|
||||
//! name: "smoke test"
|
||||
//! steps:
|
||||
//! - command: dm
|
||||
//! args: { username: "bob" }
|
||||
//! - command: send
|
||||
//! args: { text: "Hello from playbook" }
|
||||
//! - command: assert
|
||||
//! condition: message_count
|
||||
//! op: gte
|
||||
//! value: 1
|
||||
//! ```
|
||||
//!
|
||||
//! Requires the `playbook` cargo feature.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::{Context, bail};
|
||||
use quicproquo_proto::node_capnp::node_service;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::command_engine::{AssertCondition, CmpOp, Command, CommandRegistry};
|
||||
use super::session::SessionState;
|
||||
|
||||
// ── Playbook structs ────────────────────────────────────────────────────────
|
||||
|
||||
/// A parsed YAML playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Playbook {
    /// Human-readable playbook name (echoed in the report).
    pub name: String,
    /// Optional free-form description.
    #[serde(default)]
    pub description: Option<String>,
    /// Initial variable table, available to `$var` substitution in steps.
    #[serde(default)]
    pub variables: HashMap<String, String>,
    /// Steps executed in declaration order.
    pub steps: Vec<PlaybookStep>,
}
|
||||
|
||||
/// A single step in a playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookStep {
    /// Command name (e.g. "send", "assert"); resolved by `step_to_command`.
    pub command: String,
    /// Named arguments; string values undergo `$var` substitution.
    #[serde(default)]
    pub args: HashMap<String, serde_yaml::Value>,
    /// For assert steps: the condition name.
    #[serde(default)]
    pub condition: Option<String>,
    /// For assert steps: comparison operator.
    #[serde(default)]
    pub op: Option<String>,
    /// For assert steps: expected value.
    #[serde(default)]
    pub value: Option<serde_yaml::Value>,
    /// Capture the command output into this variable name.
    #[serde(default)]
    pub capture: Option<String>,
    /// Error handling policy for this step.
    #[serde(default)]
    pub on_error: OnError,
    /// Optional loop specification.
    #[serde(rename = "loop", default)]
    pub loop_spec: Option<LoopSpec>,
}
|
||||
|
||||
/// What to do when a step fails.
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum OnError {
    /// Abort the playbook; remaining steps are recorded as skipped.
    #[default]
    Fail,
    /// Count the step as skipped and move on.
    Skip,
    /// Count the step as failed but keep executing.
    Continue,
}
|
||||
|
||||
/// Loop specification for repeating a step.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoopSpec {
    /// Variable name bound to the current iteration value during each run.
    pub var: String,
    /// First iteration value (inclusive).
    pub from: usize,
    /// Last iteration value (inclusive).
    pub to: usize,
}
|
||||
|
||||
// ── Report structs ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Summary of a playbook execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookReport {
    /// Playbook name, copied from the source YAML.
    pub name: String,
    /// Number of step results recorded (loop iterations count individually).
    pub total_steps: usize,
    /// Steps that succeeded.
    pub passed: usize,
    /// Steps that failed.
    pub failed: usize,
    /// Steps that were skipped (policy `skip`, or aborted run).
    pub skipped: usize,
    /// Total wall-clock duration of the run.
    pub duration: Duration,
    /// Per-step results in execution order.
    pub step_results: Vec<StepResult>,
}
|
||||
|
||||
impl PlaybookReport {
|
||||
/// True if all steps passed (no failures).
|
||||
pub fn all_passed(&self) -> bool {
|
||||
self.failed == 0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PlaybookReport {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
writeln!(f, "Playbook: {}", self.name)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Result: {} passed, {} failed, {} skipped ({} total)",
|
||||
self.passed, self.failed, self.skipped, self.total_steps,
|
||||
)?;
|
||||
writeln!(f, "Duration: {:.2}s", self.duration.as_secs_f64())?;
|
||||
for sr in &self.step_results {
|
||||
let status = if sr.success { "OK" } else { "FAIL" };
|
||||
write!(
|
||||
f,
|
||||
" [{}/{}] {} ... {} ({:.1}ms)",
|
||||
sr.step_index + 1,
|
||||
self.total_steps,
|
||||
sr.command,
|
||||
status,
|
||||
sr.duration.as_secs_f64() * 1000.0,
|
||||
)?;
|
||||
if let Some(ref e) = sr.error {
|
||||
write!(f, " — {e}")?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of a single step execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StepResult {
    /// Zero-based position in execution order (loop iterations count separately).
    pub step_index: usize,
    /// Command name as written in the playbook.
    pub command: String,
    /// Whether the step succeeded.
    pub success: bool,
    /// Wall-clock time the step took (zero for skipped-after-abort steps).
    pub duration: Duration,
    /// Command output, if any was produced.
    pub output: Option<String>,
    /// Error message on failure, or the skip reason.
    pub error: Option<String>,
}
|
||||
|
||||
// ── PlaybookRunner ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Executes a parsed `Playbook` step-by-step.
pub struct PlaybookRunner {
    // The playbook under execution.
    playbook: Playbook,
    // Live variable table: seeded from `playbook.variables`, then updated by
    // loop bindings, `set_var`, and `capture:` directives.
    vars: HashMap<String, String>,
}
|
||||
|
||||
impl PlaybookRunner {
|
||||
/// Load a playbook from a YAML file.
|
||||
pub fn from_file(path: &Path) -> anyhow::Result<Self> {
|
||||
let content =
|
||||
std::fs::read_to_string(path).with_context(|| format!("read {}", path.display()))?;
|
||||
Self::from_str(&content)
|
||||
}
|
||||
|
||||
/// Parse a playbook from a YAML string.
|
||||
pub fn from_str(yaml: &str) -> anyhow::Result<Self> {
|
||||
let playbook: Playbook =
|
||||
serde_yaml::from_str(yaml).context("parse playbook YAML")?;
|
||||
let vars = playbook.variables.clone();
|
||||
Ok(Self { playbook, vars })
|
||||
}
|
||||
|
||||
/// Override or add variables before execution.
|
||||
pub fn set_var(&mut self, name: impl Into<String>, value: impl Into<String>) {
|
||||
self.vars.insert(name.into(), value.into());
|
||||
}
|
||||
|
||||
    /// Execute all steps, returning a report.
    ///
    /// Steps run strictly in order. A failure with `on_error: fail` sets the
    /// abort flag; every remaining step is then recorded as skipped without
    /// being executed.
    pub async fn run(
        &mut self,
        session: &mut SessionState,
        client: &node_service::Client,
    ) -> PlaybookReport {
        let start = Instant::now();
        // Step count including loop expansions, used for "[i/total]" progress.
        let total = self.expanded_step_count();
        let mut results = Vec::new();
        let mut passed = 0usize;
        let mut failed = 0usize;
        let mut skipped = 0usize;
        // Index over executed (loop-expanded) steps, not YAML entries.
        let mut step_idx = 0usize;
        let mut abort = false;

        for step in &self.playbook.steps.clone() {
            // After an abort, record each remaining YAML entry as one skipped
            // result (its loop, if any, is not expanded).
            if abort {
                skipped += 1;
                results.push(StepResult {
                    step_index: step_idx,
                    command: step.command.clone(),
                    success: false,
                    duration: Duration::ZERO,
                    output: None,
                    error: Some("skipped (prior failure)".into()),
                });
                step_idx += 1;
                continue;
            }

            if let Some(ref ls) = step.loop_spec {
                // Looped step: bind the loop variable, then run once per value
                // in the inclusive range.
                for i in ls.from..=ls.to {
                    self.vars.insert(ls.var.clone(), i.to_string());
                    let sr = self.execute_step(step, step_idx, total, session, client).await;
                    if sr.success {
                        passed += 1;
                    } else {
                        // NOTE(review): inside a loop every failure increments
                        // `failed`, even under `on_error: skip` — unlike the
                        // non-loop branch below, which counts skips separately.
                        // Confirm this asymmetry is intentional.
                        failed += 1;
                        if step.on_error == OnError::Fail {
                            abort = true;
                        }
                    }
                    results.push(sr);
                    step_idx += 1;
                    if abort {
                        break;
                    }
                }
            } else {
                let sr = self.execute_step(step, step_idx, total, session, client).await;
                if sr.success {
                    passed += 1;
                } else {
                    match step.on_error {
                        OnError::Fail => {
                            failed += 1;
                            abort = true;
                        }
                        OnError::Skip => skipped += 1,
                        OnError::Continue => failed += 1,
                    }
                }
                results.push(sr);
                step_idx += 1;
            }
        }

        PlaybookReport {
            name: self.playbook.name.clone(),
            // Number of results actually recorded; may be less than
            // `expanded_step_count()` when an abort cut a loop short.
            total_steps: step_idx,
            passed,
            failed,
            skipped,
            duration: start.elapsed(),
            step_results: results,
        }
    }
|
||||
|
||||
/// Execute a single step.
|
||||
async fn execute_step(
|
||||
&mut self,
|
||||
step: &PlaybookStep,
|
||||
index: usize,
|
||||
total: usize,
|
||||
session: &mut SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> StepResult {
|
||||
let t = Instant::now();
|
||||
let cmd = match self.step_to_command(step) {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
return StepResult {
|
||||
step_index: index,
|
||||
command: step.command.clone(),
|
||||
success: false,
|
||||
duration: t.elapsed(),
|
||||
output: None,
|
||||
error: Some(format!("{e:#}")),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
eprintln!(
|
||||
"[{}/{}] {} ...",
|
||||
index + 1,
|
||||
total,
|
||||
step.command,
|
||||
);
|
||||
|
||||
let cr = CommandRegistry::execute(&cmd, session, client).await;
|
||||
|
||||
// Capture output into variable if requested.
|
||||
if let Some(ref var_name) = step.capture {
|
||||
if let Some(ref out) = cr.output {
|
||||
self.vars.insert(var_name.clone(), out.clone());
|
||||
}
|
||||
for (k, v) in &cr.data {
|
||||
self.vars.insert(format!("{var_name}.{k}"), v.clone());
|
||||
}
|
||||
}
|
||||
|
||||
StepResult {
|
||||
step_index: index,
|
||||
command: step.command.clone(),
|
||||
success: cr.success,
|
||||
duration: t.elapsed(),
|
||||
output: cr.output,
|
||||
error: cr.error,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Convert a YAML step into a typed `Command`.
    ///
    /// The match arms below are the complete command vocabulary; several
    /// commands accept short aliases (e.g. `ls`, `sw`, `dl`). Required
    /// arguments go through `resolve_str`/`resolve_u64`/`resolve_usize`
    /// (with variable substitution); optional ones through the `opt_*`
    /// helpers. Unknown names are an error.
    fn step_to_command(&self, step: &PlaybookStep) -> anyhow::Result<Command> {
        let cmd_name = step.command.as_str();
        match cmd_name {
            // ── Lifecycle commands ────────────────────────────────────────
            "connect" => Ok(Command::Connect {
                server: self.resolve_str(&step.args, "server")?,
                ca_cert: self.opt_str(&step.args, "ca_cert"),
                insecure: self.opt_bool(&step.args, "insecure"),
            }),
            "login" => Ok(Command::Login {
                username: self.resolve_str(&step.args, "username")?,
                password: self.resolve_str(&step.args, "password")?,
            }),
            "register" => Ok(Command::Register {
                username: self.resolve_str(&step.args, "username")?,
                password: self.resolve_str(&step.args, "password")?,
            }),
            "send" | "send-message" => Ok(Command::SendMessage {
                text: self.resolve_str(&step.args, "text")?,
            }),
            "wait" => Ok(Command::Wait {
                duration_ms: self.resolve_u64(&step.args, "duration_ms")?,
            }),
            "set-var" | "setvar" => Ok(Command::SetVar {
                name: self.resolve_str(&step.args, "name")?,
                value: self.resolve_str(&step.args, "value")?,
            }),
            "assert" => {
                let condition = self.build_assert_condition(step)?;
                Ok(Command::Assert { condition })
            }

            // ── Session / identity ───────────────────────────────────────
            "help" => Ok(Command::Help),
            "quit" | "exit" => Ok(Command::Quit),
            "whoami" => Ok(Command::Whoami),
            "list" | "ls" => Ok(Command::List),
            "switch" | "sw" => Ok(Command::Switch {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "dm" => Ok(Command::Dm {
                username: self.resolve_str(&step.args, "username")?,
            }),
            "create-group" | "cg" => Ok(Command::CreateGroup {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "invite" => Ok(Command::Invite {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "remove" | "kick" => Ok(Command::Remove {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "leave" => Ok(Command::Leave),
            "join" => Ok(Command::Join),
            "members" => Ok(Command::Members),
            "group-info" | "gi" => Ok(Command::GroupInfo),
            "rename" => Ok(Command::Rename {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "history" | "hist" => Ok(Command::History {
                // Defaults to the last 20 messages when no count is given.
                count: self.opt_usize(&step.args, "count").unwrap_or(20),
            }),

            // ── Security / crypto ────────────────────────────────────────
            "verify" => Ok(Command::Verify {
                username: self.resolve_str(&step.args, "username")?,
            }),
            "update-key" | "rotate-key" => Ok(Command::UpdateKey),
            "typing" => Ok(Command::Typing),
            "typing-notify" => Ok(Command::TypingNotify {
                enabled: self.opt_bool(&step.args, "enabled"),
            }),
            "react" => Ok(Command::React {
                emoji: self.resolve_str(&step.args, "emoji")?,
                index: self.opt_usize(&step.args, "index"),
            }),
            "edit" => Ok(Command::Edit {
                index: self.resolve_usize(&step.args, "index")?,
                new_text: self.resolve_str(&step.args, "new_text")?,
            }),
            "delete" | "del" => Ok(Command::Delete {
                index: self.resolve_usize(&step.args, "index")?,
            }),
            "send-file" | "sf" => Ok(Command::SendFile {
                path: self.resolve_str(&step.args, "path")?,
            }),
            "download" | "dl" => Ok(Command::Download {
                index: self.resolve_usize(&step.args, "index")?,
            }),
            "delete-account" => Ok(Command::DeleteAccount),
            "disappear" => Ok(Command::Disappear {
                arg: self.opt_str(&step.args, "duration"),
            }),
            "privacy" => Ok(Command::Privacy {
                arg: self.opt_str(&step.args, "setting"),
            }),
            "verify-fs" => Ok(Command::VerifyFs),
            "rotate-all-keys" => Ok(Command::RotateAllKeys),
            "devices" => Ok(Command::Devices),
            "register-device" => Ok(Command::RegisterDevice {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "revoke-device" => Ok(Command::RevokeDevice {
                id_prefix: self.resolve_str(&step.args, "id_prefix")?,
            }),

            // ── Mesh ─────────────────────────────────────────────────────
            "mesh-peers" => Ok(Command::MeshPeers),
            "mesh-server" => Ok(Command::MeshServer {
                addr: self.resolve_str(&step.args, "addr")?,
            }),
            "mesh-send" => Ok(Command::MeshSend {
                peer_id: self.resolve_str(&step.args, "peer_id")?,
                message: self.resolve_str(&step.args, "message")?,
            }),
            "mesh-broadcast" => Ok(Command::MeshBroadcast {
                topic: self.resolve_str(&step.args, "topic")?,
                message: self.resolve_str(&step.args, "message")?,
            }),
            "mesh-subscribe" => Ok(Command::MeshSubscribe {
                topic: self.resolve_str(&step.args, "topic")?,
            }),
            "mesh-route" => Ok(Command::MeshRoute),
            "mesh-identity" | "mesh-id" => Ok(Command::MeshIdentity),
            "mesh-store" => Ok(Command::MeshStore),

            other => bail!("unknown command: {other}"),
        }
    }
|
||||
|
||||
    /// Build an `AssertCondition` from a playbook step.
    ///
    /// Known condition names map to typed variants; any other name becomes
    /// `AssertCondition::Custom` and is evaluated downstream.
    fn build_assert_condition(&self, step: &PlaybookStep) -> anyhow::Result<AssertCondition> {
        let cond = step
            .condition
            .as_deref()
            .context("assert step requires 'condition' field")?;
        match cond {
            "connected" => Ok(AssertCondition::Connected),
            "logged_in" => Ok(AssertCondition::LoggedIn),
            "in_conversation" => {
                // The conversation name may come from an explicit `name:` arg
                // or, as a fallback, from the step's bare `value:`; both go
                // through variable substitution.
                let name = self.resolve_str(&step.args, "name")
                    .or_else(|_| step.value.as_ref()
                        .and_then(|v| v.as_str())
                        .map(|s| self.substitute(s))
                        .context("assert in_conversation requires 'name' arg or 'value'"))?;
                Ok(AssertCondition::InConversation { name })
            }
            "message_count" => {
                // The comparison defaults to >= when no `op:` is given.
                let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
                let count = step
                    .value
                    .as_ref()
                    .and_then(|v| v.as_u64())
                    .context("message_count assert requires numeric 'value'")?
                    as usize;
                Ok(AssertCondition::MessageCount { op, count })
            }
            "member_count" => {
                let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
                let count = step
                    .value
                    .as_ref()
                    .and_then(|v| v.as_u64())
                    .context("member_count assert requires numeric 'value'")?
                    as usize;
                Ok(AssertCondition::MemberCount { op, count })
            }
            other => Ok(AssertCondition::Custom {
                expression: other.to_string(),
            }),
        }
    }
|
||||
|
||||
fn parse_cmp_op(&self, s: &str) -> anyhow::Result<CmpOp> {
|
||||
match s {
|
||||
"eq" | "==" => Ok(CmpOp::Eq),
|
||||
"ne" | "!=" => Ok(CmpOp::Ne),
|
||||
"gt" | ">" => Ok(CmpOp::Gt),
|
||||
"lt" | "<" => Ok(CmpOp::Lt),
|
||||
"gte" | ">=" => Ok(CmpOp::Gte),
|
||||
"lte" | "<=" => Ok(CmpOp::Lte),
|
||||
other => bail!("unknown comparison operator: {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
// ── Variable substitution helpers ────────────────────────────────────
|
||||
|
||||
    /// Substitute `$varname` and `${VAR:-default}` in a string.
    ///
    /// Braced form `${VAR}` resolves in order: playbook variables, then
    /// process environment, then the `:-` default (if given), else the
    /// expansion is empty. Bare form `$var` resolves from playbook variables
    /// only; an unknown bare variable is left in the output verbatim.
    fn substitute(&self, s: &str) -> String {
        let mut result = String::with_capacity(s.len());
        let mut chars = s.chars().peekable();
        while let Some(c) = chars.next() {
            if c == '$' {
                if chars.peek() == Some(&'{') {
                    chars.next(); // consume '{'
                    let mut key = String::new();
                    let mut default = None;
                    // Collect the key up to '}' or a ':-' default marker.
                    while let Some(&ch) = chars.peek() {
                        if ch == '}' {
                            chars.next();
                            break;
                        }
                        // Two-character lookahead for the ':-' separator.
                        if ch == ':' && chars.clone().nth(1) == Some('-') {
                            chars.next(); // consume ':'
                            chars.next(); // consume '-'
                            let mut def = String::new();
                            while let Some(&dch) = chars.peek() {
                                if dch == '}' {
                                    chars.next();
                                    break;
                                }
                                def.push(dch);
                                chars.next();
                            }
                            default = Some(def);
                            break;
                        }
                        key.push(ch);
                        chars.next();
                    }
                    // Resolution order: playbook vars → environment → default.
                    if let Some(val) = self.vars.get(&key) {
                        result.push_str(val);
                    } else if let Ok(val) = std::env::var(&key) {
                        result.push_str(&val);
                    } else if let Some(def) = default {
                        result.push_str(&def);
                    }
                } else {
                    // Bare `$var`: key runs over [alnum_] characters.
                    let mut key = String::new();
                    while let Some(&ch) = chars.peek() {
                        if ch.is_alphanumeric() || ch == '_' {
                            key.push(ch);
                            chars.next();
                        } else {
                            break;
                        }
                    }
                    if let Some(val) = self.vars.get(&key) {
                        result.push_str(val);
                    } else {
                        // Unknown bare variable: keep the literal `$key` text.
                        result.push('$');
                        result.push_str(&key);
                    }
                }
            } else {
                result.push(c);
            }
        }
        result
    }
|
||||
|
||||
/// Resolve a required string argument with variable substitution.
|
||||
fn resolve_str(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
match val {
|
||||
serde_yaml::Value::String(s) => Ok(self.substitute(s)),
|
||||
serde_yaml::Value::Number(n) => Ok(n.to_string()),
|
||||
serde_yaml::Value::Bool(b) => Ok(b.to_string()),
|
||||
other => Ok(format!("{other:?}")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve an optional string argument.
|
||||
fn opt_str(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> Option<String> {
|
||||
args.get(key).map(|v| match v {
|
||||
serde_yaml::Value::String(s) => self.substitute(s),
|
||||
serde_yaml::Value::Number(n) => n.to_string(),
|
||||
serde_yaml::Value::Bool(b) => b.to_string(),
|
||||
other => format!("{other:?}"),
|
||||
})
|
||||
}
|
||||
|
||||
/// Resolve an optional bool argument (defaults to false).
|
||||
fn opt_bool(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> bool {
|
||||
args.get(key)
|
||||
.and_then(|v| v.as_bool())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Resolve a required usize argument.
|
||||
fn resolve_usize(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<usize> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
val.as_u64()
|
||||
.map(|n| n as usize)
|
||||
.with_context(|| format!("argument '{key}' must be a positive integer"))
|
||||
}
|
||||
|
||||
/// Resolve a required u64 argument.
|
||||
fn resolve_u64(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<u64> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
val.as_u64()
|
||||
.with_context(|| format!("argument '{key}' must be a positive integer"))
|
||||
}
|
||||
|
||||
/// Resolve an optional usize argument.
|
||||
fn opt_usize(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> Option<usize> {
|
||||
args.get(key).and_then(|v| v.as_u64()).map(|n| n as usize)
|
||||
}
|
||||
|
||||
/// Count total expanded steps (including loop iterations).
|
||||
fn expanded_step_count(&self) -> usize {
|
||||
self.playbook
|
||||
.steps
|
||||
.iter()
|
||||
.map(|s| {
|
||||
if let Some(ref ls) = s.loop_spec {
|
||||
if ls.to >= ls.from {
|
||||
ls.to - ls.from + 1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
} else {
|
||||
1
|
||||
}
|
||||
})
|
||||
.sum()
|
||||
}
|
||||
}
|
||||
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A playbook with only `name` and `steps` parses, preserving step order.
    #[test]
    fn parse_minimal_playbook() {
        let yaml = r#"
name: "test"
steps:
  - command: whoami
  - command: list
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.name, "test");
        assert_eq!(runner.playbook.steps.len(), 2);
        assert_eq!(runner.playbook.steps[0].command, "whoami");
    }

    /// Top-level `variables` are loaded into the runner's `vars` map.
    #[test]
    fn parse_playbook_with_variables() {
        let yaml = r#"
name: "var test"
variables:
  user: alice
  server: "127.0.0.1:5001"
steps:
  - command: dm
    args:
      username: "$user"
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.vars["user"], "alice");
        assert_eq!(runner.vars["server"], "127.0.0.1:5001");
    }

    /// `$var`, `${var}`, and `${missing:-default}` expansion; text without
    /// `$` passes through unchanged.
    #[test]
    fn variable_substitution() {
        let mut vars = HashMap::new();
        vars.insert("name".to_string(), "alice".to_string());
        vars.insert("port".to_string(), "5001".to_string());
        let runner = PlaybookRunner {
            playbook: Playbook {
                name: "test".into(),
                description: None,
                variables: HashMap::new(),
                steps: vec![],
            },
            vars,
        };
        assert_eq!(runner.substitute("hello $name"), "hello alice");
        assert_eq!(runner.substitute("port=$port!"), "port=5001!");
        assert_eq!(runner.substitute("${name}@server"), "alice@server");
        // Assumes env var "missing" is unset, so the inline default applies.
        assert_eq!(
            runner.substitute("${missing:-default}"),
            "default"
        );
        assert_eq!(runner.substitute("no vars here"), "no vars here");
    }

    /// Each YAML step maps to the expected `Command` variant, with variable
    /// substitution applied to string arguments.
    #[test]
    fn step_to_command_mapping() {
        let yaml = r#"
name: "mapping test"
variables:
  user: bob
steps:
  - command: dm
    args:
      username: "$user"
  - command: send
    args:
      text: "hello"
  - command: history
    args:
      count: 10
  - command: wait
    args:
      duration_ms: 500
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd0 = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        assert!(matches!(cmd0, Command::Dm { username } if username == "bob"));

        let cmd1 = runner.step_to_command(&runner.playbook.steps[1]).unwrap();
        assert!(matches!(cmd1, Command::SendMessage { text } if text == "hello"));

        let cmd2 = runner.step_to_command(&runner.playbook.steps[2]).unwrap();
        assert!(matches!(cmd2, Command::History { count: 10 }));

        let cmd3 = runner.step_to_command(&runner.playbook.steps[3]).unwrap();
        assert!(matches!(cmd3, Command::Wait { duration_ms: 500 }));
    }

    /// An `assert` step with `condition`/`op`/`value` becomes
    /// `Command::Assert` with a `MessageCount` condition.
    #[test]
    fn parse_assert_step() {
        let yaml = r#"
name: "assert test"
steps:
  - command: assert
    condition: message_count
    op: gte
    value: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        match cmd {
            Command::Assert {
                condition: AssertCondition::MessageCount { op, count },
            } => {
                assert_eq!(op, CmpOp::Gte);
                assert_eq!(count, 5);
            }
            other => panic!("expected Assert MessageCount, got {other:?}"),
        }
    }

    /// A `loop` spec expands into one step per iteration (inclusive range).
    #[test]
    fn parse_loop_spec() {
        let yaml = r#"
name: "loop test"
steps:
  - command: send
    args:
      text: "msg $i"
    loop:
      var: i
      from: 1
      to: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.expanded_step_count(), 5);
        let ls = runner.playbook.steps[0].loop_spec.as_ref().unwrap();
        assert_eq!(ls.var, "i");
        assert_eq!(ls.from, 1);
        assert_eq!(ls.to, 5);
    }

    /// `on_error` defaults to `Fail`; `continue` and `skip` parse explicitly.
    #[test]
    fn on_error_defaults_to_fail() {
        let yaml = r#"
name: "error test"
steps:
  - command: whoami
  - command: list
    on_error: continue
  - command: quit
    on_error: skip
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.steps[0].on_error, OnError::Fail);
        assert_eq!(runner.playbook.steps[1].on_error, OnError::Continue);
        assert_eq!(runner.playbook.steps[2].on_error, OnError::Skip);
    }

    /// Word and symbol operator spellings both parse; junk is rejected.
    #[test]
    fn cmp_op_parsing() {
        let runner = PlaybookRunner::from_str("name: t\nsteps: []").unwrap();
        assert!(matches!(runner.parse_cmp_op("eq"), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("=="), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("gte"), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op(">="), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op("<"), Ok(CmpOp::Lt)));
        assert!(runner.parse_cmp_op("invalid").is_err());
    }

    /// The `Display` impl of `PlaybookReport` includes the pass/fail summary
    /// and a per-step line marking failures.
    #[test]
    fn report_display() {
        let report = PlaybookReport {
            name: "test".into(),
            total_steps: 3,
            passed: 2,
            failed: 1,
            skipped: 0,
            duration: Duration::from_millis(150),
            step_results: vec![
                StepResult {
                    step_index: 0,
                    command: "whoami".into(),
                    success: true,
                    duration: Duration::from_millis(10),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 1,
                    command: "dm".into(),
                    success: true,
                    duration: Duration::from_millis(50),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 2,
                    command: "assert".into(),
                    success: false,
                    duration: Duration::from_millis(1),
                    output: None,
                    error: Some("message count 0 < 1".into()),
                },
            ],
        };
        let s = format!("{report}");
        assert!(s.contains("2 passed, 1 failed"));
        assert!(s.contains("[3/3] assert ... FAIL"));
    }
}
|
||||
3317
crates/quicproquo-client/src/client/repl.rs
Normal file
3317
crates/quicproquo-client/src/client/repl.rs
Normal file
File diff suppressed because it is too large
Load Diff
207
crates/quicproquo-client/src/client/retry.rs
Normal file
207
crates/quicproquo-client/src/client/retry.rs
Normal file
@@ -0,0 +1,207 @@
|
||||
//! Retry with exponential backoff for transient RPC failures.
|
||||
|
||||
use std::future::Future;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::Rng;
|
||||
use tracing::warn;
|
||||
|
||||
/// Default maximum number of retry attempts (including the first try).
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// Default base delay in milliseconds for exponential backoff.
/// The n-th retry waits roughly `base * 2^n` ms plus up to 50% jitter.
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;
|
||||
|
||||
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
|
||||
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
|
||||
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
|
||||
pub async fn retry_async<F, Fut, T, E, P>(
|
||||
op: F,
|
||||
max_retries: u32,
|
||||
base_delay_ms: u64,
|
||||
is_retriable: P,
|
||||
) -> Result<T, E>
|
||||
where
|
||||
F: Fn() -> Fut,
|
||||
Fut: Future<Output = Result<T, E>>,
|
||||
P: Fn(&E) -> bool,
|
||||
{
|
||||
let mut last_err: Option<E> = None;
|
||||
for attempt in 0..max_retries {
|
||||
match op().await {
|
||||
Ok(t) => return Ok(t),
|
||||
Err(e) => {
|
||||
if !is_retriable(&e) || attempt + 1 >= max_retries {
|
||||
return Err(e);
|
||||
}
|
||||
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
|
||||
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
|
||||
let total_ms = delay_ms + jitter_ms;
|
||||
warn!(
|
||||
attempt = attempt + 1,
|
||||
max_retries,
|
||||
delay_ms = total_ms,
|
||||
"RPC failed, retrying after backoff"
|
||||
);
|
||||
last_err = Some(e);
|
||||
tokio::time::sleep(Duration::from_millis(total_ms)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
match last_err {
|
||||
Some(e) => Err(e),
|
||||
None => unreachable!(
|
||||
"retry_async: last_err is always Some when loop exits after an Err"
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
|
||||
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
|
||||
/// When in doubt, returns `true` (retry).
|
||||
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
|
||||
let s = format!("{:#}", err);
|
||||
let s_lower = s.to_lowercase();
|
||||
// Do not retry: auth / permission
|
||||
if s_lower.contains("unauthorized")
|
||||
|| s_lower.contains("auth failed")
|
||||
|| s_lower.contains("access denied")
|
||||
|| s_lower.contains("401")
|
||||
|| s_lower.contains("forbidden")
|
||||
|| s_lower.contains("403")
|
||||
|| s_lower.contains("token")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Do not retry: bad request / invalid params
|
||||
if s_lower.contains("bad request")
|
||||
|| s_lower.contains("400")
|
||||
|| s_lower.contains("invalid param")
|
||||
|| s_lower.contains("fingerprint mismatch")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Retry: network, timeout, connection, server error, or anything else
|
||||
true
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// An immediately-successful op returns its value with no retries.
    #[tokio::test]
    async fn retry_success_first_attempt() {
        let result = retry_async(|| async { Ok::<_, String>(42) }, 3, 10, |_| true).await;
        assert_eq!(result.unwrap(), 42);
    }

    /// One transient failure is retried and the second attempt's value wins.
    #[tokio::test]
    async fn retry_succeeds_after_one_failure() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    let n = c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    // Fail only the very first attempt.
                    if n == 0 {
                        Err("transient failure".to_string())
                    } else {
                        Ok(99)
                    }
                }
            },
            3,
            1, // minimal delay for test speed
            |_| true,
        )
        .await;
        assert_eq!(result.unwrap(), 99);
        // Exactly two invocations: initial try + one retry.
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 2);
    }

    /// A non-retriable error is returned after a single attempt even when
    /// the retry budget would allow more.
    #[tokio::test]
    async fn retry_non_retriable_fails_immediately() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("permanent error")
                }
            },
            5,
            1,
            |_: &&str| false, // nothing is retriable
        )
        .await;
        assert!(result.is_err());
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 1);
    }

    /// A persistently-failing op is attempted exactly `max_retries` times
    /// and then errors out.
    #[tokio::test]
    async fn retry_exhausts_all_attempts() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("still failing")
                }
            },
            3,
            1,
            |_| true,
        )
        .await;
        assert!(result.is_err());
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 3);
    }

    /// Spot-checks the substring-based classifier over all three buckets:
    /// auth, bad-request, and transient/unknown.
    #[test]
    fn anyhow_is_retriable_classifications() {
        // Auth errors should NOT be retriable
        let auth_errors = [
            "unauthorized access",
            "HTTP 401 Unauthorized",
            "forbidden resource",
            "HTTP 403 Forbidden",
            "auth failed for user",
            "access denied",
            "invalid token",
        ];
        for msg in &auth_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }

        // Bad-request errors should NOT be retriable
        let bad_req_errors = [
            "bad request: missing field",
            "HTTP 400 Bad Request",
            "invalid param: username",
            "fingerprint mismatch",
        ];
        for msg in &bad_req_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }

        // Transient errors SHOULD be retriable
        let transient_errors = [
            "connection refused",
            "network timeout",
            "server error 500",
            "stream reset",
            "something unknown happened",
        ];
        for msg in &transient_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(anyhow_is_retriable(&err), "expected retriable: {msg}");
        }
    }
}
|
||||
978
crates/quicproquo-client/src/client/rpc.rs
Normal file
978
crates/quicproquo-client/src/client/rpc.rs
Normal file
@@ -0,0 +1,978 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use quinn::{ClientConfig, Endpoint};
|
||||
use quinn_proto::crypto::rustls::QuicClientConfig;
|
||||
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
||||
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
|
||||
use quicproquo_core::HybridPublicKey;
|
||||
use quicproquo_proto::node_capnp::{auth, node_service};
|
||||
|
||||
use crate::{AUTH_CONTEXT, INSECURE_SKIP_VERIFY};
|
||||
|
||||
use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES};
|
||||
|
||||
/// Cap'n Proto traversal limit (words). 4 Mi words = 32 MiB; bounds DoS from deeply nested or large messages.
/// Applied to every incoming message via `ReaderOptions` when the RPC
/// connection is built.
const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024;
|
||||
|
||||
/// A [`rustls::client::danger::ServerCertVerifier`] that accepts any certificate.
///
/// **Development only.** Using this in production disables all TLS guarantees.
#[derive(Debug)]
struct InsecureServerCertVerifier;

impl rustls::client::danger::ServerCertVerifier for InsecureServerCertVerifier {
    // Accepts every presented chain unconditionally — no expiry, hostname,
    // or trust-anchor checks are performed.
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        _now: UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }

    // Handshake signatures are likewise accepted without verification.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    // Advertise whatever the ring provider supports so negotiation proceeds
    // normally even though verification is a no-op.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}
|
||||
|
||||
/// Establish a QUIC/TLS connection and return a `NodeService` client.
|
||||
///
|
||||
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
|
||||
///
|
||||
/// Reads [`INSECURE_SKIP_VERIFY`] to decide whether to bypass certificate
|
||||
/// verification (set once at startup via [`crate::set_insecure_skip_verify`]).
|
||||
pub async fn connect_node(
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
) -> anyhow::Result<node_service::Client> {
|
||||
let skip = INSECURE_SKIP_VERIFY.load(std::sync::atomic::Ordering::Relaxed);
|
||||
connect_node_opt(server, ca_cert, server_name, skip).await
|
||||
}
|
||||
|
||||
/// Like [`connect_node`] but with an explicit `insecure_skip_verify` toggle.
///
/// When `insecure_skip_verify` is `true`, certificate verification is disabled entirely.
/// This is intended for development and testing only.
///
/// Steps: parse the address, build the rustls config (real roots or the
/// insecure verifier), wrap it for QUIC, dial, open one bidirectional
/// stream, and layer a capnp-rpc two-party system on top of it. The RPC
/// pump is detached with `spawn_local`, so this must run inside a
/// `LocalSet`.
pub async fn connect_node_opt(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    insecure_skip_verify: bool,
) -> anyhow::Result<node_service::Client> {
    let addr: SocketAddr = server
        .parse()
        .with_context(|| format!("server must be host:port, got {server}"))?;

    // TLS config: either skip verification entirely (dev/test) or trust
    // exactly the CA certificate read from `ca_cert`.
    let mut tls = if insecure_skip_verify {
        RustlsClientConfig::builder()
            .dangerous()
            .with_custom_certificate_verifier(Arc::new(InsecureServerCertVerifier))
            .with_no_client_auth()
    } else {
        let cert_bytes =
            std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
        let mut roots = RootCertStore::empty();
        roots
            .add(CertificateDer::from(cert_bytes))
            .context("add root cert")?;
        RustlsClientConfig::builder()
            .with_root_certificates(roots)
            .with_no_client_auth()
    };
    // ALPN must match what the server advertises for the capnp protocol.
    tls.alpn_protocols = vec![b"capnp".to_vec()];

    let crypto = QuicClientConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;

    // Bind an ephemeral local UDP port for the QUIC endpoint.
    let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
    let mut endpoint = Endpoint::client(bind_addr)?;
    endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));

    let connection = endpoint
        .connect(addr, server_name)
        .context("quic connect init")?
        .await
        .context("quic connect failed")?;

    // A single bi-directional stream carries the whole RPC session.
    let (send, recv) = connection.open_bi().await.context("open bi stream")?;

    // Cap message traversal to bound memory/CPU from hostile responses.
    let mut reader_opts = capnp::message::ReaderOptions::new();
    reader_opts.traversal_limit_in_words(Some(CAPNP_TRAVERSAL_LIMIT_WORDS));
    let network = twoparty::VatNetwork::new(
        recv.compat(),
        send.compat_write(),
        Side::Client,
        reader_opts,
    );

    let mut rpc_system = RpcSystem::new(Box::new(network), None);
    let client: node_service::Client = rpc_system.bootstrap(Side::Server);

    // Drive the RPC system in the background; capnp-rpc futures are !Send.
    tokio::task::spawn_local(rpc_system);

    Ok(client)
}
|
||||
|
||||
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
|
||||
let guard = AUTH_CONTEXT
|
||||
.read()
|
||||
.map_err(|e| anyhow::anyhow!("AUTH_CONTEXT lock poisoned: {e}"))?;
|
||||
let ctx = guard.as_ref().ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"init_auth must be called before RPCs (use a bearer or session token for authenticated commands)"
|
||||
)
|
||||
})?;
|
||||
auth.set_version(ctx.version);
|
||||
auth.set_access_token(&ctx.access_token);
|
||||
auth.set_device_id(&ctx.device_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
///
/// The server returns a fingerprint of the stored package; this function
/// recomputes the SHA-256 of `package` locally and fails with
/// "fingerprint mismatch" if they differ, guarding against corruption or
/// tampering in transit.
pub async fn upload_key_package(
    client: &node_service::Client,
    identity_key: &[u8],
    package: &[u8],
) -> anyhow::Result<()> {
    let mut req = client.upload_key_package_request();
    {
        // Scope the param builder so `req` can be consumed by `send` below.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        p.set_package(package);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }

    let resp = req
        .send()
        .promise
        .await
        .context("upload_key_package RPC failed")?;

    let server_fp = resp
        .get()
        .context("upload_key_package: bad response")?
        .get_fingerprint()
        .context("upload_key_package: missing fingerprint")?
        .to_vec();

    // Compare against a locally-computed hash of what we actually sent.
    let local_fp = super::state::sha256(package);
    anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
    Ok(())
}
|
||||
|
||||
/// Fetch a KeyPackage for `identity_key` from the AS.
|
||||
pub async fn fetch_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.fetch_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_key_package RPC failed")?;
|
||||
|
||||
let pkg = resp
|
||||
.get()
|
||||
.context("fetch_key_package: bad response")?
|
||||
.get_package()
|
||||
.context("fetch_key_package: missing package field")?
|
||||
.to_vec();
|
||||
|
||||
Ok(pkg)
|
||||
}
|
||||
|
||||
/// Enqueue an opaque payload to the DS for `recipient_key`.
/// Returns the per-inbox sequence number assigned by the server.
/// Retries on transient failures with exponential backoff.
pub async fn enqueue(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<u64> {
    // Convenience wrapper: no TTL means the message never expires.
    enqueue_with_ttl(client, recipient_key, payload, None).await
}
|
||||
|
||||
/// Enqueue with an optional TTL (seconds). 0 or None means no expiry.
///
/// Retries transient failures with exponential backoff (see `retry_async`).
/// Returns the per-inbox sequence number assigned by the server.
pub async fn enqueue_with_ttl(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
    ttl_secs: Option<u32>,
) -> anyhow::Result<u64> {
    // Own the inputs so the retry closure can rebuild a fresh request on
    // every attempt (capnp request builders are consumed by `send`).
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    if let Some(ttl) = ttl_secs {
                        p.set_ttl_secs(ttl);
                    }
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch and drain all payloads for `recipient_key`.
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_all(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so each retry attempt can rebuild the request.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch: bad response")?
                    .get_payloads()
                    .context("fetch: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp reader.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Long-poll for payloads with optional timeout (ms).
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_wait(
    client: &node_service::Client,
    recipient_key: &[u8],
    timeout_ms: u64,
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so each retry attempt can rebuild the request.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_wait_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_timeout_ms(timeout_ms);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch_wait RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch_wait: bad response")?
                    .get_payloads()
                    .context("fetch_wait: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp reader.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch_wait: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
|
||||
pub async fn upload_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: &HybridPublicKey,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_hybrid_key RPC failed")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a peer's hybrid public key from the server.
///
/// Returns `None` if the peer has not uploaded a hybrid key.
pub async fn fetch_hybrid_key(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Option<HybridPublicKey>> {
    let mut req = client.fetch_hybrid_key_request();
    {
        // Builder scope: release the borrow before sending.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }

    let resp = req
        .send()
        .promise
        .await
        .context("fetch_hybrid_key RPC failed")?;

    let pk_bytes = resp
        .get()
        .context("fetch_hybrid_key: bad response")?
        .get_hybrid_public_key()
        .context("fetch_hybrid_key: missing field")?
        .to_vec();

    // The server signals "no key uploaded" with an empty byte field.
    if pk_bytes.is_empty() {
        return Ok(None);
    }

    let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
    Ok(Some(pk))
}
|
||||
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
pub fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&quicproquo_core::HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
quicproquo_core::hybrid_decrypt(kp, payload, b"", b"").map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
/// Peek at queued payloads without removing them.
|
||||
/// Returns `(seq, payload)` pairs sorted by seq.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn peek(
|
||||
client: &node_service::Client,
|
||||
recipient_key: &[u8],
|
||||
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.to_vec();
|
||||
retry_async(
|
||||
|| {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.clone();
|
||||
async move {
|
||||
let mut req = client.peek_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_recipient_key(&recipient_key);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
p.set_limit(0); // peek all
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req.send().promise.await.context("peek RPC failed")?;
|
||||
|
||||
let list = resp
|
||||
.get()
|
||||
.context("peek: bad response")?
|
||||
.get_payloads()
|
||||
.context("peek: missing payloads")?;
|
||||
|
||||
let mut payloads = Vec::with_capacity(list.len() as usize);
|
||||
for i in 0..list.len() {
|
||||
let entry = list.get(i);
|
||||
let seq = entry.get_seq();
|
||||
let data = entry
|
||||
.get_data()
|
||||
.context("peek: envelope data read failed")?
|
||||
.to_vec();
|
||||
payloads.push((seq, data));
|
||||
}
|
||||
|
||||
Ok(payloads)
|
||||
}
|
||||
},
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_BASE_DELAY_MS,
|
||||
anyhow_is_retriable,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Acknowledge all messages up to and including `seq_up_to`.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn ack(
|
||||
client: &node_service::Client,
|
||||
recipient_key: &[u8],
|
||||
seq_up_to: u64,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.to_vec();
|
||||
retry_async(
|
||||
|| {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.clone();
|
||||
async move {
|
||||
let mut req = client.ack_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_recipient_key(&recipient_key);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
p.set_seq_up_to(seq_up_to);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send().promise.await.context("ack RPC failed")?;
|
||||
Ok(())
|
||||
}
|
||||
},
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_BASE_DELAY_MS,
|
||||
anyhow_is_retriable,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Fetch multiple peers' hybrid keys in a single round-trip.
|
||||
/// Returns `None` for peers who have not uploaded a hybrid key.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn fetch_hybrid_keys(
|
||||
client: &node_service::Client,
|
||||
identity_keys: &[&[u8]],
|
||||
) -> anyhow::Result<Vec<Option<HybridPublicKey>>> {
|
||||
let client = client.clone();
|
||||
let identity_keys: Vec<Vec<u8>> = identity_keys.iter().map(|k| k.to_vec()).collect();
|
||||
retry_async(
|
||||
|| {
|
||||
let client = client.clone();
|
||||
let identity_keys = identity_keys.clone();
|
||||
async move {
|
||||
let mut req = client.fetch_hybrid_keys_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut list = p.reborrow().init_identity_keys(identity_keys.len() as u32);
|
||||
for (i, ik) in identity_keys.iter().enumerate() {
|
||||
list.set(i as u32, ik);
|
||||
}
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_hybrid_keys RPC failed")?;
|
||||
|
||||
let keys = resp
|
||||
.get()
|
||||
.context("fetch_hybrid_keys: bad response")?
|
||||
.get_keys()
|
||||
.context("fetch_hybrid_keys: missing keys")?;
|
||||
|
||||
let mut result = Vec::with_capacity(keys.len() as usize);
|
||||
for i in 0..keys.len() {
|
||||
let pk_bytes = keys
|
||||
.get(i)
|
||||
.context("fetch_hybrid_keys: key read failed")?
|
||||
.to_vec();
|
||||
if pk_bytes.is_empty() {
|
||||
result.push(None);
|
||||
} else {
|
||||
let pk = HybridPublicKey::from_bytes(&pk_bytes)
|
||||
.context("invalid hybrid public key")?;
|
||||
result.push(Some(pk));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
},
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_BASE_DELAY_MS,
|
||||
anyhow_is_retriable,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Enqueue the same payload to multiple recipients in a single round-trip.
|
||||
/// Returns per-recipient sequence numbers.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn batch_enqueue(
|
||||
client: &node_service::Client,
|
||||
recipient_keys: &[&[u8]],
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u64>> {
|
||||
let client = client.clone();
|
||||
let recipient_keys: Vec<Vec<u8>> = recipient_keys.iter().map(|k| k.to_vec()).collect();
|
||||
let payload = payload.to_vec();
|
||||
retry_async(
|
||||
|| {
|
||||
let client = client.clone();
|
||||
let recipient_keys = recipient_keys.clone();
|
||||
let payload = payload.clone();
|
||||
async move {
|
||||
let mut req = client.batch_enqueue_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut list = p.reborrow().init_recipient_keys(recipient_keys.len() as u32);
|
||||
for (i, rk) in recipient_keys.iter().enumerate() {
|
||||
list.set(i as u32, rk);
|
||||
}
|
||||
p.set_payload(&payload);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("batch_enqueue RPC failed")?;
|
||||
|
||||
let seqs = resp
|
||||
.get()
|
||||
.context("batch_enqueue: bad response")?
|
||||
.get_seqs()
|
||||
.context("batch_enqueue: missing seqs")?;
|
||||
|
||||
let mut result = Vec::with_capacity(seqs.len() as usize);
|
||||
for i in 0..seqs.len() {
|
||||
result.push(seqs.get(i));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
},
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_BASE_DELAY_MS,
|
||||
anyhow_is_retriable,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Resolve a username to its Ed25519 identity key (32 bytes).
|
||||
///
|
||||
/// When the server returns a non-empty `inclusionProof`, the client verifies it
|
||||
/// against the identity key using the Key Transparency Merkle proof. Proof
|
||||
/// verification failure is treated as a hard error (the server is misbehaving).
|
||||
/// If the server sends no proof (empty field), the key is returned as-is —
|
||||
/// callers can decide whether to require proofs for security-critical flows.
|
||||
///
|
||||
/// Returns `None` if the username is not registered.
|
||||
pub async fn resolve_user(
|
||||
client: &node_service::Client,
|
||||
username: &str,
|
||||
) -> anyhow::Result<Option<Vec<u8>>> {
|
||||
let mut req = client.resolve_user_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("resolve_user RPC failed")?;
|
||||
|
||||
let reader = resp.get().context("resolve_user: bad response")?;
|
||||
|
||||
let key = reader
|
||||
.get_identity_key()
|
||||
.context("resolve_user: missing identity_key field")?
|
||||
.to_vec();
|
||||
|
||||
if key.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Verify the KT inclusion proof when the server sends one.
|
||||
let proof_bytes = reader
|
||||
.get_inclusion_proof()
|
||||
.context("resolve_user: missing inclusion_proof field")?
|
||||
.to_vec();
|
||||
|
||||
if !proof_bytes.is_empty() {
|
||||
let proof = quicproquo_kt::InclusionProof::from_bytes(&proof_bytes)
|
||||
.context("resolve_user: inclusion proof deserialise failed")?;
|
||||
quicproquo_kt::verify_inclusion(&proof, username, &key)
|
||||
.context("resolve_user: KT inclusion proof verification FAILED — possible key mislabelling")?;
|
||||
}
|
||||
|
||||
Ok(Some(key))
|
||||
}
|
||||
|
||||
/// Reverse lookup: resolve an identity key to the registered username.
|
||||
/// Returns `None` if no username is associated with the key.
|
||||
pub async fn resolve_identity(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Option<String>> {
|
||||
let mut req = client.resolve_identity_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("resolve_identity RPC failed")?;
|
||||
|
||||
let username = resp
|
||||
.get()
|
||||
.context("resolve_identity: bad response")?
|
||||
.get_username()
|
||||
.context("resolve_identity: missing field")?
|
||||
.to_str()
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
if username.is_empty() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(username))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a 1:1 DM channel with a peer.
|
||||
///
|
||||
/// Returns `(channel_id, was_new)` where `channel_id` is the stable 16-byte identifier and
|
||||
/// `was_new` is `true` iff this call created the channel for the first time. When `was_new` is
|
||||
/// `false`, the channel already existed (created by the peer), and the caller should wait for
|
||||
/// the peer's MLS Welcome to arrive via the background poller rather than creating a new MLS group.
|
||||
pub async fn create_channel(
|
||||
client: &node_service::Client,
|
||||
peer_key: &[u8],
|
||||
) -> anyhow::Result<(Vec<u8>, bool)> {
|
||||
let mut req = client.create_channel_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_peer_key(peer_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("create_channel RPC failed")?;
|
||||
|
||||
let reader = resp.get().context("create_channel: bad response")?;
|
||||
let channel_id = reader
|
||||
.get_channel_id()
|
||||
.context("create_channel: missing channel_id")?
|
||||
.to_vec();
|
||||
let was_new = reader.get_was_new();
|
||||
|
||||
Ok((channel_id, was_new))
|
||||
}
|
||||
|
||||
/// Upload a single chunk of a blob to the server.
|
||||
///
|
||||
/// `blob_hash` is the expected SHA-256 hash (32 bytes) of the complete blob.
|
||||
/// Returns the `blob_id` once the server has received and verified the final chunk.
|
||||
pub async fn upload_blob_chunk(
|
||||
client: &node_service::Client,
|
||||
blob_hash: &[u8],
|
||||
chunk: &[u8],
|
||||
offset: u64,
|
||||
total_size: u64,
|
||||
mime_type: &str,
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.upload_blob_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
p.set_blob_hash(blob_hash);
|
||||
p.set_chunk(chunk);
|
||||
p.set_offset(offset);
|
||||
p.set_total_size(total_size);
|
||||
p.set_mime_type(mime_type);
|
||||
}
|
||||
let resp = req.send().promise.await.context("upload_blob RPC failed")?;
|
||||
let blob_id = resp
|
||||
.get()
|
||||
.context("upload_blob: bad response")?
|
||||
.get_blob_id()
|
||||
.context("upload_blob: missing blob_id")?
|
||||
.to_vec();
|
||||
Ok(blob_id)
|
||||
}
|
||||
|
||||
/// Download a single chunk of a blob from the server.
|
||||
///
|
||||
/// Returns `(chunk_bytes, total_size, mime_type)`.
|
||||
pub async fn download_blob_chunk(
|
||||
client: &node_service::Client,
|
||||
blob_id: &[u8],
|
||||
offset: u64,
|
||||
length: u32,
|
||||
) -> anyhow::Result<(Vec<u8>, u64, String)> {
|
||||
let mut req = client.download_blob_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
p.set_blob_id(blob_id);
|
||||
p.set_offset(offset);
|
||||
p.set_length(length);
|
||||
}
|
||||
let resp = req.send().promise.await.context("download_blob RPC failed")?;
|
||||
let reader = resp.get().context("download_blob: bad response")?;
|
||||
let chunk = reader.get_chunk().context("download_blob: missing chunk")?.to_vec();
|
||||
let total_size = reader.get_total_size();
|
||||
let mime_type = reader
|
||||
.get_mime_type()
|
||||
.context("download_blob: missing mime_type")?
|
||||
.to_str()
|
||||
.unwrap_or("application/octet-stream")
|
||||
.to_string();
|
||||
Ok((chunk, total_size, mime_type))
|
||||
}
|
||||
|
||||
/// Delete the authenticated user's account on the server.
|
||||
/// Requires an identity-bound session (OPAQUE login).
|
||||
pub async fn delete_account(
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.delete_account_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("delete_account RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("delete_account: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// Register a device for the authenticated identity.
|
||||
pub async fn register_device(
|
||||
client: &node_service::Client,
|
||||
device_id: &[u8],
|
||||
device_name: &str,
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.register_device_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_device_id(device_id);
|
||||
p.set_device_name(device_name);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("register_device RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("register_device: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// List all registered devices for the authenticated identity.
|
||||
pub async fn list_devices(
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<Vec<(Vec<u8>, String, u64)>> {
|
||||
let mut req = client.list_devices_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("list_devices RPC failed")?;
|
||||
|
||||
let devices = resp
|
||||
.get()
|
||||
.context("list_devices: bad response")?
|
||||
.get_devices()
|
||||
.context("list_devices: missing devices field")?;
|
||||
|
||||
let mut result = Vec::with_capacity(devices.len() as usize);
|
||||
for i in 0..devices.len() {
|
||||
let entry = devices.get(i);
|
||||
let device_id = entry
|
||||
.get_device_id()
|
||||
.context("list_devices: missing device_id")?
|
||||
.to_vec();
|
||||
let device_name = entry
|
||||
.get_device_name()
|
||||
.context("list_devices: missing device_name")?
|
||||
.to_str()
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
let registered_at = entry.get_registered_at();
|
||||
result.push((device_id, device_name, registered_at));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Revoke (remove) a registered device.
|
||||
pub async fn revoke_device(
|
||||
client: &node_service::Client,
|
||||
device_id: &[u8],
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.revoke_device_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_device_id(device_id);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("revoke_device RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("revoke_device: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// Return the current Unix timestamp in milliseconds.
///
/// Yields 0 if the system clock reads earlier than the Unix epoch.
pub fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_millis() as u64)
        .unwrap_or(0)
}
|
||||
291
crates/quicproquo-client/src/client/session.rs
Normal file
291
crates/quicproquo-client/src/client/session.rs
Normal file
@@ -0,0 +1,291 @@
|
||||
//! Runtime session state for the interactive REPL.
|
||||
//!
|
||||
//! Wraps the legacy `StoredState` (identity + hybrid key) and adds
|
||||
//! multi-conversation management via `ConversationStore`.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use quicproquo_core::{DiskKeyStore, GroupMember, HybridKeypair, IdentityKeypair};
|
||||
|
||||
use super::conversation::{
|
||||
now_ms, Conversation, ConversationId, ConversationKind, ConversationStore,
|
||||
};
|
||||
use super::state::{load_or_init_state, keystore_path};
|
||||
|
||||
/// Runtime state for an interactive REPL session.
///
/// Owns the long-term identity, the SQLite-backed conversation store, and the
/// in-memory MLS `GroupMember` objects (one per conversation), plus a handful
/// of per-session UI preferences.
pub struct SessionState {
    /// Long-term identity keypair.
    pub identity: Arc<IdentityKeypair>,
    /// Post-quantum hybrid keypair. `None` when no hybrid key was persisted.
    pub hybrid_kp: Option<HybridKeypair>,
    /// Path to the legacy state file (for backward compat with one-shot commands).
    pub state_path: PathBuf,
    /// Optional password for the legacy state file. Zeroized on drop. (M9)
    pub password: Option<Zeroizing<String>>,
    /// SQLite-backed conversation + message store.
    pub conv_store: ConversationStore,
    /// Currently active conversation.
    pub active_conversation: Option<ConversationId>,
    /// In-memory GroupMember instances keyed by conversation ID.
    pub members: HashMap<ConversationId, GroupMember>,
    /// Holds the GroupMember whose KeyPackage was uploaded to the server.
    /// Its keystore contains the HPKE init private key needed to decrypt
    /// incoming Welcome messages. Consumed on auto-join, then replenished.
    pub pending_member: Option<GroupMember>,
    /// Whether to display typing indicators from others (session preference).
    pub typing_notify_enabled: bool,
    /// Tracks who is currently typing and when the indicator was last received.
    /// Entries older than 10 seconds are considered expired.
    pub typing_indicators: HashMap<String, Instant>,
    /// Per-conversation disappearing message TTL in seconds. None = messages persist.
    pub disappear_ttl: HashMap<ConversationId, u32>,
    /// When true, /members and /group-info redact identity keys as `[redacted-XXXX]`.
    pub redact_keys: bool,
    /// When Some(secs), auto-clear local messages older than this duration.
    pub auto_clear_secs: Option<u32>,
    /// When true, send periodic dummy messages for traffic analysis resistance.
    pub padding_enabled: bool,
    /// Last epoch at which we sent a message (for /verify-fs).
    pub last_send_epoch: Option<u64>,
}
|
||||
|
||||
impl SessionState {
    /// Load identity from the legacy state file, open the conversation store,
    /// and migrate any existing single-group state into the conversation DB.
    ///
    /// The same `password` protects both the legacy state file and (via
    /// SQLCipher) the conversation database.
    pub fn load(
        state_path: &Path,
        password: Option<&str>,
    ) -> anyhow::Result<Self> {
        let state = load_or_init_state(state_path, password)?;

        let identity = Arc::new(IdentityKeypair::from_seed(state.identity_seed));
        let hybrid_kp = state
            .hybrid_key
            .as_ref()
            .map(HybridKeypair::from_bytes)
            .transpose()
            .context("decode hybrid key")?;

        // Open the conversation DB next to the state file.
        // When a state password is provided, encrypt the DB with SQLCipher.
        let db_path = state_path.with_extension("convdb");
        let conv_store = ConversationStore::open(&db_path, password)?;

        let mut session = Self {
            identity,
            hybrid_kp,
            state_path: state_path.to_path_buf(),
            password: password.map(|p| Zeroizing::new(String::from(p))),
            conv_store,
            active_conversation: None,
            members: HashMap::new(),
            pending_member: None,
            typing_notify_enabled: true,
            typing_indicators: HashMap::new(),
            disappear_ttl: HashMap::new(),
            redact_keys: false,
            auto_clear_secs: None,
            padding_enabled: false,
            last_send_epoch: None,
        };

        // Migrate legacy single-group into conversations if present and not yet migrated.
        if state.group.is_some() {
            session.migrate_legacy_group(state_path, &state.group)?;
        }

        // Load all existing conversations' GroupMembers into memory.
        session.load_all_members()?;

        Ok(session)
    }

    /// Migrate the legacy single-group from StoredState into the conversation DB.
    ///
    /// Idempotent: if a conversation with the derived ID already exists, this
    /// is a no-op.
    fn migrate_legacy_group(
        &mut self,
        state_path: &Path,
        group_blob: &Option<Vec<u8>>,
    ) -> anyhow::Result<()> {
        let blob = match group_blob {
            Some(b) => b,
            None => return Ok(()),
        };

        // Reconstruct GroupMember using the legacy keystore and group blob.
        let ks_path = keystore_path(state_path);
        let ks = DiskKeyStore::persistent(&ks_path)?;
        let group = bincode::deserialize(blob).context("decode legacy group")?;
        let member = GroupMember::new_with_state(
            Arc::clone(&self.identity),
            ks,
            Some(group),
            false, // legacy groups are classical
        );

        let group_id_bytes = member.group_id().unwrap_or_default();

        // Use the first 16 bytes of the group_id as the ConversationId.
        // NOTE(review): `from_slice` on a 16-byte slice falls back to the
        // all-zero ID on failure — confirm `from_slice` can actually fail here,
        // otherwise two failed migrations would collide on the zero ID.
        let conv_id = if group_id_bytes.len() >= 16 {
            ConversationId::from_slice(&group_id_bytes[..16])
                .unwrap_or(ConversationId([0; 16]))
        } else {
            ConversationId::from_group_name(&hex::encode(&group_id_bytes))
        };

        // Check if already migrated.
        if self.conv_store.load_conversation(&conv_id)?.is_some() {
            return Ok(());
        }

        let member_keys = member.member_identities();
        // Up to 8 hex chars of the group id, clamped for very short ids.
        let short_id = &hex::encode(&group_id_bytes)[..8.min(group_id_bytes.len() * 2)];
        let conv = Conversation {
            id: conv_id.clone(),
            kind: ConversationKind::Group {
                name: format!("legacy-{short_id}"),
            },
            display_name: format!("legacy-{short_id}"),
            mls_group_blob: Some(blob.clone()),
            keystore_blob: None,
            member_keys,
            unread_count: 0,
            last_activity_ms: now_ms(),
            created_at_ms: now_ms(),
            is_hybrid: false,
            last_seen_seq: 0,
        };

        self.conv_store.save_conversation(&conv)?;
        self.members.insert(conv_id, member);

        Ok(())
    }

    /// Load all conversations from the DB and create in-memory GroupMember instances.
    /// Conversations already present in `self.members` are skipped.
    fn load_all_members(&mut self) -> anyhow::Result<()> {
        let convs = self.conv_store.list_conversations()?;
        for conv in convs {
            if self.members.contains_key(&conv.id) {
                continue;
            }
            let member = self.create_member_from_conv(&conv)?;
            self.members.insert(conv.id.clone(), member);
        }
        Ok(())
    }

    /// Create a GroupMember from a stored conversation.
    ///
    /// If the per-conversation keystore cannot be opened, falls back to an
    /// ephemeral (in-memory) keystore after logging a warning — keys created
    /// in that fallback will not survive a restart.
    fn create_member_from_conv(&self, conv: &Conversation) -> anyhow::Result<GroupMember> {
        let ks_path = self.keystore_path_for(&conv.id);
        let ks = DiskKeyStore::persistent(&ks_path)
            .unwrap_or_else(|e| {
                tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral");
                DiskKeyStore::ephemeral()
            });

        let group = conv
            .mls_group_blob
            .as_ref()
            .map(|b| bincode::deserialize(b))
            .transpose()
            .context("decode MLS group from conversation db")?;

        Ok(GroupMember::new_with_state(
            Arc::clone(&self.identity),
            ks,
            group,
            conv.is_hybrid,
        ))
    }

    /// Path for a per-conversation keystore file:
    /// `<state_path>.keystores/<conv-id-hex>.ks`.
    fn keystore_path_for(&self, conv_id: &ConversationId) -> PathBuf {
        let dir = self.state_path.with_extension("keystores");
        dir.join(format!("{}.ks", conv_id.hex()))
    }

    /// Persist a conversation's MLS group state back to the DB.
    ///
    /// Silently does nothing if the conversation row no longer exists in the DB.
    pub fn save_member(&self, conv_id: &ConversationId) -> anyhow::Result<()> {
        let member = self.members.get(conv_id).context("no such conversation")?;
        let blob = member
            .group_ref()
            .map(bincode::serialize)
            .transpose()
            .context("serialize MLS group")?;

        let member_keys = member.member_identities();

        // Update the mls_group_blob and member_keys in the DB.
        if let Some(mut conv) = self.conv_store.load_conversation(conv_id)? {
            conv.mls_group_blob = blob;
            conv.member_keys = member_keys;
            self.conv_store.save_conversation(&conv)?;
        }

        Ok(())
    }

    /// Persist all in-memory group states back to the DB.
    /// Per-conversation failures are logged and do not abort the sweep.
    pub fn save_all(&self) -> anyhow::Result<()> {
        for conv_id in self.members.keys() {
            if let Err(e) = self.save_member(conv_id) {
                tracing::warn!(conv = %conv_id.hex(), error = %e, "failed to save conversation");
            }
        }
        Ok(())
    }

    /// Add a new conversation and its GroupMember to the session.
    pub fn add_conversation(
        &mut self,
        conv: Conversation,
        member: GroupMember,
    ) -> anyhow::Result<()> {
        // Ensure keystore directory exists (best-effort: creation errors are
        // ignored here; opening the keystore later will surface them).
        let ks_path = self.keystore_path_for(&conv.id);
        if let Some(parent) = ks_path.parent() {
            std::fs::create_dir_all(parent).ok();
        }

        self.conv_store.save_conversation(&conv)?;
        self.members.insert(conv.id.clone(), member);
        Ok(())
    }

    /// Get a mutable reference to a conversation's GroupMember.
    pub fn get_member_mut(&mut self, conv_id: &ConversationId) -> Option<&mut GroupMember> {
        self.members.get_mut(conv_id)
    }

    /// Public key bytes for this identity.
    pub fn identity_bytes(&self) -> Vec<u8> {
        self.identity.public_key_bytes().to_vec()
    }

    /// Short hex prefix (first 4 bytes, 8 hex chars) of the identity key for display.
    pub fn identity_short(&self) -> String {
        hex::encode(&self.identity.public_key_bytes()[..4])
    }

    /// Get display name of the active conversation, or `None` if there is no
    /// active conversation or it cannot be loaded from the DB.
    pub fn active_display_name(&self) -> Option<String> {
        let id = self.active_conversation.as_ref()?;
        self.conv_store.load_conversation(id).ok().flatten().map(|c| c.display_name)
    }

    /// Count total unread across all conversations (0 if listing fails).
    pub fn total_unread(&self) -> u32 {
        self.conv_store
            .list_conversations()
            .unwrap_or_default()
            .iter()
            .map(|c| c.unread_count)
            .sum()
    }
}
|
||||
293
crates/quicproquo-client/src/client/state.rs
Normal file
293
crates/quicproquo-client/src/client/state.rs
Normal file
@@ -0,0 +1,293 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use quicproquo_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
|
||||
|
||||
/// Magic bytes for encrypted client state files.
|
||||
const STATE_MAGIC: &[u8; 4] = b"QPCE";
|
||||
const STATE_SALT_LEN: usize = 16;
|
||||
const STATE_NONCE_LEN: usize = 12;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
pub struct StoredState {
    /// 32-byte seed from which the Ed25519 identity keypair is derived
    /// (see `IdentityKeypair::from_seed`).
    pub identity_seed: [u8; 32],
    /// bincode-serialized MLS group state, if the client has joined a group.
    pub group: Option<Vec<u8>>,
    /// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added.
    #[serde(default)]
    pub hybrid_key: Option<HybridKeypairBytes>,
    /// Cached member public keys for group participants.
    #[serde(default)]
    pub member_keys: Vec<Vec<u8>>,
}
|
||||
|
||||
impl StoredState {
|
||||
pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> {
|
||||
let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
|
||||
let group = self
|
||||
.group
|
||||
.map(|bytes| bincode::deserialize(&bytes).context("decode group"))
|
||||
.transpose()?;
|
||||
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
|
||||
let hybrid = self.hybrid_key.is_some();
|
||||
let member = GroupMember::new_with_state(identity, key_store, group, hybrid);
|
||||
|
||||
let hybrid_kp = self
|
||||
.hybrid_key
|
||||
.map(|bytes| HybridKeypair::from_bytes(&bytes).context("decode hybrid key"))
|
||||
.transpose()?;
|
||||
|
||||
Ok((member, hybrid_kp))
|
||||
}
|
||||
|
||||
pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
|
||||
let group = member
|
||||
.group_ref()
|
||||
.map(|g| bincode::serialize(g).context("serialize group"))
|
||||
.transpose()?;
|
||||
|
||||
Ok(Self {
|
||||
identity_seed: *member.identity_seed(),
|
||||
group,
|
||||
hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
|
||||
member_keys: Vec::new(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Argon2id parameters for client state key derivation (auditable; matches argon2 crate defaults).
/// - Memory: 19 MiB (m_cost = 19*1024 KiB)
/// - Time: 2 iterations
/// - Parallelism: 1 lane
const ARGON2_STATE_M_COST: u32 = 19 * 1024; // memory cost, in KiB
const ARGON2_STATE_T_COST: u32 = 2; // time cost (iterations)
const ARGON2_STATE_P_COST: u32 = 1; // parallelism (lanes)
|
||||
|
||||
/// Derive a 32-byte key from a password and salt using Argon2id with explicit parameters.
|
||||
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
|
||||
let params = Params::new(ARGON2_STATE_M_COST, ARGON2_STATE_T_COST, ARGON2_STATE_P_COST, Some(32))
|
||||
.map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = [0u8; 32];
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut key)
|
||||
.map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Encrypt `plaintext` with the QPCE format: magic(4) | salt(16) | nonce(12) | ciphertext.
|
||||
pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let mut salt = [0u8; STATE_SALT_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||
|
||||
let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
|
||||
|
||||
let key = zeroize::Zeroizing::new(derive_state_key(password, &salt)?);
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;
|
||||
|
||||
let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
|
||||
out.extend_from_slice(STATE_MAGIC);
|
||||
out.extend_from_slice(&salt);
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
out.extend_from_slice(&ciphertext);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Decrypt a QPCE-formatted state file.
|
||||
pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
|
||||
anyhow::ensure!(
|
||||
data.len() > header_len,
|
||||
"encrypted state file too short ({} bytes)",
|
||||
data.len()
|
||||
);
|
||||
|
||||
let salt = &data[4..4 + STATE_SALT_LEN];
|
||||
let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
|
||||
let ciphertext = &data[header_len..];
|
||||
|
||||
let key = zeroize::Zeroizing::new(derive_state_key(password, salt)?);
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
/// Returns true if raw bytes begin with the QPCE magic header.
|
||||
pub fn is_encrypted_state(bytes: &[u8]) -> bool {
|
||||
bytes.len() >= 4 && &bytes[..4] == STATE_MAGIC
|
||||
}
|
||||
|
||||
pub fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
|
||||
if path.exists() {
|
||||
let mut state = load_existing_state(path, password)?;
|
||||
// Generate hybrid keypair if missing (upgrade from older state).
|
||||
if state.hybrid_key.is_none() {
|
||||
state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
|
||||
write_state(path, &state, password)?;
|
||||
}
|
||||
return Ok(state);
|
||||
}
|
||||
|
||||
let identity = IdentityKeypair::generate();
|
||||
let hybrid_kp = HybridKeypair::generate();
|
||||
let key_store = DiskKeyStore::persistent(keystore_path(path))?;
|
||||
let member = GroupMember::new_with_state(Arc::new(identity), key_store, None, false);
|
||||
let state = StoredState::from_parts(&member, Some(&hybrid_kp))?;
|
||||
write_state(path, &state, password)?;
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
pub fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
|
||||
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
|
||||
|
||||
if is_encrypted_state(&bytes) {
|
||||
let pw = password
|
||||
.context("state file is encrypted (QPCE); a password is required to decrypt it")?;
|
||||
let plaintext = decrypt_state(pw, &bytes)?;
|
||||
bincode::deserialize(&plaintext).context("decode encrypted state")
|
||||
} else {
|
||||
bincode::deserialize(&bytes).context("decode state")
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_state(
|
||||
path: &Path,
|
||||
member: &GroupMember,
|
||||
hybrid_kp: Option<&HybridKeypair>,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
let state = StoredState::from_parts(member, hybrid_kp)?;
|
||||
write_state(path, &state, password)
|
||||
}
|
||||
|
||||
pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
|
||||
}
|
||||
let plaintext = bincode::serialize(state).context("encode state")?;
|
||||
|
||||
let bytes = if let Some(pw) = password {
|
||||
encrypt_state(pw, &plaintext)?
|
||||
} else {
|
||||
plaintext
|
||||
};
|
||||
|
||||
let tmp = path.with_extension("tmp");
|
||||
std::fs::write(&tmp, bytes).with_context(|| format!("write state temp {tmp:?}"))?;
|
||||
std::fs::rename(&tmp, path).with_context(|| format!("rename state {tmp:?} -> {path:?}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_identity_key(hex_str: &str) -> anyhow::Result<Vec<u8>> {
|
||||
let bytes = super::hex::decode(hex_str)
|
||||
.map_err(|e| anyhow::anyhow!(e))
|
||||
.context("identity key must be hex")?;
|
||||
anyhow::ensure!(bytes.len() == 32, "identity key must be 32 bytes");
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
/// Derive the on-disk key-store path from the state path by swapping the
/// extension to `ks` (e.g. `state.bin` → `state.ks`).
pub fn keystore_path(state_path: &Path) -> PathBuf {
    state_path.with_extension("ks")
}
|
||||
|
||||
pub fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||
use sha2::{Digest, Sha256};
|
||||
Sha256::digest(bytes).to_vec()
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Encrypt-then-decrypt with the same password recovers the plaintext, and
    /// the ciphertext carries the QPCE magic header.
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let plaintext = b"test state data";
        let password = "test-password";
        let encrypted = encrypt_state(password, plaintext).unwrap();
        assert!(is_encrypted_state(&encrypted));
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        assert_eq!(decrypted, plaintext);
    }

    /// Decryption with a different password must fail (AEAD tag mismatch).
    #[test]
    fn wrong_password_fails() {
        let plaintext = b"test state data";
        let encrypted = encrypt_state("correct", plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }

    /// A minimal `StoredState` survives bincode + QPCE encrypt/decrypt intact.
    #[test]
    fn state_encrypt_decrypt_round_trip() {
        let state = StoredState {
            identity_seed: [42u8; 32],
            hybrid_key: None,
            group: None,
            member_keys: Vec::new(),
        };
        let password = "test-password";
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state(password, &plaintext).unwrap();
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
        assert_eq!(recovered.identity_seed, state.identity_seed);
        assert!(recovered.hybrid_key.is_none());
        assert!(recovered.group.is_none());
    }

    /// Hybrid key material (including the `Zeroizing` wrappers) round-trips
    /// through serialization and QPCE encryption.
    #[test]
    fn state_encrypt_decrypt_with_hybrid_key() {
        use zeroize::Zeroizing;
        let state = StoredState {
            identity_seed: [7u8; 32],
            hybrid_key: Some(HybridKeypairBytes {
                x25519_sk: Zeroizing::new([1u8; 32]),
                mlkem_dk: Zeroizing::new(vec![3u8; 2400]),
                mlkem_ek: vec![4u8; 1184],
            }),
            group: None,
            member_keys: Vec::new(),
        };
        let password = "another-password";
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state(password, &plaintext).unwrap();
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
        assert_eq!(recovered.identity_seed, state.identity_seed);
        assert!(recovered.hybrid_key.is_some());
    }

    /// A serialized state encrypted under one password cannot be read with another.
    #[test]
    fn state_wrong_password_fails() {
        let state = StoredState {
            identity_seed: [99u8; 32],
            hybrid_key: None,
            group: None,
            member_keys: Vec::new(),
        };
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state("correct", &plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }
}
|
||||
179
crates/quicproquo-client/src/client/token_cache.rs
Normal file
179
crates/quicproquo-client/src/client/token_cache.rs
Normal file
@@ -0,0 +1,179 @@
|
||||
//! Cached session token stored next to the state file.
|
||||
//!
|
||||
//! File format (no password): two lines — username and hex-encoded session token.
|
||||
//! File format (with password): QPCE-encrypted version of the above.
|
||||
//! The token has a server-side 24h TTL; no client-side expiry tracking.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
|
||||
use super::state::{decrypt_state, encrypt_state, is_encrypted_state};
|
||||
|
||||
/// A username + session token pair loaded from the on-disk session cache.
pub struct CachedSession {
    // Account name the token was issued for (first line of the cache file).
    pub username: String,
    // Hex-encoded session token (second line); server enforces a 24h TTL.
    pub token_hex: String,
}
|
||||
|
||||
/// Derive the session cache path: `{state_path}.session`
/// (the state file's extension is replaced, e.g. `state.bin` → `state.session`).
fn session_cache_path(state_path: &Path) -> PathBuf {
    let mut path = state_path.to_path_buf();
    path.set_extension("session");
    path
}
|
||||
|
||||
/// Parse the two-line format (username + token_hex) from plaintext bytes.
|
||||
fn parse_session_lines(text: &str) -> Option<CachedSession> {
|
||||
let mut lines = text.lines();
|
||||
let username = lines.next()?.trim().to_string();
|
||||
let token_hex = lines.next()?.trim().to_string();
|
||||
if username.is_empty() || token_hex.is_empty() {
|
||||
return None;
|
||||
}
|
||||
if hex::decode(&token_hex).is_err() {
|
||||
return None;
|
||||
}
|
||||
Some(CachedSession { username, token_hex })
|
||||
}
|
||||
|
||||
/// Load a cached session token. Returns None if file is missing or malformed.
|
||||
/// Decrypts if the file is QPCE-encrypted (requires `password`).
|
||||
pub fn load_cached_session(state_path: &Path, password: Option<&str>) -> Option<CachedSession> {
|
||||
let path = session_cache_path(state_path);
|
||||
let raw = std::fs::read(&path).ok()?;
|
||||
|
||||
if is_encrypted_state(&raw) {
|
||||
let pw = password?;
|
||||
let plaintext = decrypt_state(pw, &raw).ok()?;
|
||||
let text = String::from_utf8(plaintext).ok()?;
|
||||
parse_session_lines(&text)
|
||||
} else {
|
||||
let text = String::from_utf8(raw).ok()?;
|
||||
parse_session_lines(&text)
|
||||
}
|
||||
}
|
||||
|
||||
/// Save a session token to the cache file (mode 0o600 on Unix).
|
||||
/// Encrypts with QPCE if `password` is provided.
|
||||
pub fn save_cached_session(
|
||||
state_path: &Path,
|
||||
username: &str,
|
||||
token_hex: &str,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
let path = session_cache_path(state_path);
|
||||
let contents = format!("{username}\n{token_hex}\n");
|
||||
|
||||
let bytes = match password {
|
||||
Some(pw) => encrypt_state(pw, contents.as_bytes())?,
|
||||
None => {
|
||||
#[cfg(not(unix))]
|
||||
tracing::warn!(
|
||||
"storing session token as plaintext (no password set); \
|
||||
file permissions cannot be restricted on this platform"
|
||||
);
|
||||
contents.into_bytes()
|
||||
}
|
||||
};
|
||||
|
||||
std::fs::write(&path, bytes).with_context(|| format!("write session cache {path:?}"))?;
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let perms = std::fs::Permissions::from_mode(0o600);
|
||||
std::fs::set_permissions(&path, perms).ok();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove the cached session file.
|
||||
pub fn clear_cached_session(state_path: &Path) {
|
||||
let path = session_cache_path(state_path);
|
||||
std::fs::remove_file(&path).ok();
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Plaintext save/load round-trips username and token unchanged.
    #[test]
    fn plaintext_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"session-token-bytes");
        save_cached_session(&state_path, "alice", &token, None).unwrap();
        let loaded = load_cached_session(&state_path, None).unwrap();
        assert_eq!(loaded.username, "alice");
        assert_eq!(loaded.token_hex, token);
    }

    /// With a password the cache is QPCE-encrypted on disk and decrypts back
    /// to the original session.
    #[test]
    fn encrypted_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let password = "strong-password";

        let token = hex::encode(b"encrypted-token");
        save_cached_session(&state_path, "bob", &token, Some(password)).unwrap();

        // Encrypted file should start with QPCE magic
        let raw = std::fs::read(session_cache_path(&state_path)).unwrap();
        assert_eq!(&raw[..4], b"QPCE");

        let loaded = load_cached_session(&state_path, Some(password)).unwrap();
        assert_eq!(loaded.username, "bob");
        assert_eq!(loaded.token_hex, token);
    }

    /// A wrong password is reported as a soft failure (`None`), not a panic.
    #[test]
    fn wrong_password_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"secret-token");
        save_cached_session(&state_path, "carol", &token, Some("correct")).unwrap();
        let result = load_cached_session(&state_path, Some("wrong"));
        assert!(result.is_none());
    }

    /// A missing cache file is not an error — just no cached session.
    #[test]
    fn missing_file_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("nonexistent.bin");
        assert!(load_cached_session(&state_path, None).is_none());
    }

    /// `clear_cached_session` deletes the file derived from the state path.
    #[test]
    fn clear_removes_file() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"to-be-deleted");
        save_cached_session(&state_path, "dave", &token, None).unwrap();
        assert!(session_cache_path(&state_path).exists());

        clear_cached_session(&state_path);
        assert!(!session_cache_path(&state_path).exists());
    }

    /// Malformed cache contents (bad hex, missing lines, empty file) all
    /// parse to `None` rather than erroring.
    #[test]
    fn malformed_content_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let cache_path = session_cache_path(&state_path);

        // Not valid hex on second line
        std::fs::write(&cache_path, "alice\nnot-hex-data\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());

        // Only one line
        std::fs::write(&cache_path, "alice\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());

        // Empty file
        std::fs::write(&cache_path, "").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());
    }
}
|
||||
807
crates/quicproquo-client/src/client/tui/mod.rs
Normal file
807
crates/quicproquo-client/src/client/tui/mod.rs
Normal file
@@ -0,0 +1,807 @@
|
||||
//! Full-screen Ratatui TUI for quicproquo.
|
||||
//!
|
||||
//! Layout:
|
||||
//! ┌──────────────┬──────────────────────────────────────────┐
|
||||
//! │ Channels │ Messages │
|
||||
//! │ (20%) │ (80%) │
|
||||
//! │ │ │
|
||||
//! │ ├──────────────────────────────────────────┤
|
||||
//! │ │ Input bar │
|
||||
//! └──────────────┴──────────────────────────────────────────┘
|
||||
//!
|
||||
//! Keyboard:
|
||||
//! Enter — send message
|
||||
//! Up / Down — scroll message history
|
||||
//! Tab — next channel
|
||||
//! Shift+Tab — prev channel
|
||||
//! Ctrl+C / q — quit
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use crossterm::{
|
||||
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyModifiers},
|
||||
execute,
|
||||
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
|
||||
};
|
||||
use ratatui::{
|
||||
backend::CrosstermBackend,
|
||||
layout::{Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::{Line, Span},
|
||||
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
|
||||
Frame, Terminal,
|
||||
};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::interval;
|
||||
|
||||
use crate::{ClientAuth, init_auth};
|
||||
use super::commands::{opaque_login, opaque_register};
|
||||
use super::conversation::{now_ms, ConversationId, StoredMessage};
|
||||
use super::rpc::{
|
||||
connect_node, enqueue, fetch_hybrid_key, fetch_wait, try_hybrid_decrypt, upload_hybrid_key,
|
||||
upload_key_package,
|
||||
};
|
||||
use super::session::SessionState;
|
||||
use super::state::load_or_init_state;
|
||||
use super::token_cache::{load_cached_session, save_cached_session};
|
||||
|
||||
use quicproquo_core::{
|
||||
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
|
||||
hybrid_encrypt, parse as parse_app_msg, serialize_chat,
|
||||
};
|
||||
use quicproquo_proto::node_capnp::node_service;
|
||||
|
||||
// ── App events ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Events sent from background tasks into the main TUI loop.
enum TuiEvent {
    /// A key event from the terminal.
    Key(event::KeyEvent),
    /// New messages received from the server (conv_id, sender_short, body).
    /// `sender_short` is a short hex prefix of the sender's identity key.
    NewMessages(Vec<(ConversationId, String, String)>),
    /// Tick — redraw periodically even if nothing happened.
    Tick,
}
|
||||
|
||||
// ── Display message ───────────────────────────────────────────────────────────
|
||||
|
||||
/// A single message as rendered in the message pane.
#[derive(Clone)]
struct DisplayMessage {
    // Short display name for the sender (e.g. "me(<id>)" or a hex-key prefix).
    sender: String,
    // Message body text.
    body: String,
    // Epoch milliseconds; rendered as an [HH:MM] prefix.
    timestamp_ms: u64,
    // True for messages this client sent (rendered with a distinct style).
    is_outgoing: bool,
}
|
||||
|
||||
// ── App state ─────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Mutable UI state driven by the TUI event loop.
struct App {
    /// Channel (conversation) names shown in the sidebar.
    channel_names: Vec<String>,
    /// Conversation IDs, parallel to `channel_names`.
    channel_ids: Vec<ConversationId>,
    /// Index of the selected channel in the sidebar.
    selected_channel: usize,
    /// Messages for the currently active channel.
    messages: Vec<DisplayMessage>,
    /// Current input buffer.
    input: String,
    /// Scroll offset (0 = bottom).
    scroll_offset: usize,
    /// Whether the user has requested quit.
    should_quit: bool,
    /// Short identity string for display.
    identity_short: String,
}
|
||||
|
||||
impl App {
|
||||
fn new(session: &SessionState) -> anyhow::Result<Self> {
|
||||
let convs = session.conv_store.list_conversations()?;
|
||||
let channel_names: Vec<String> = convs.iter().map(|c| c.display_name.clone()).collect();
|
||||
let channel_ids: Vec<ConversationId> = convs.iter().map(|c| c.id.clone()).collect();
|
||||
|
||||
Ok(Self {
|
||||
channel_names,
|
||||
channel_ids,
|
||||
selected_channel: 0,
|
||||
messages: Vec::new(),
|
||||
input: String::new(),
|
||||
scroll_offset: 0,
|
||||
should_quit: false,
|
||||
identity_short: session.identity_short(),
|
||||
})
|
||||
}
|
||||
|
||||
fn active_conv_id(&self) -> Option<&ConversationId> {
|
||||
self.channel_ids.get(self.selected_channel)
|
||||
}
|
||||
|
||||
/// Reload messages for the currently selected channel from the session store.
|
||||
fn reload_messages(&mut self, session: &SessionState) -> anyhow::Result<()> {
|
||||
let conv_id = match self.active_conv_id() {
|
||||
Some(id) => id.clone(),
|
||||
None => {
|
||||
self.messages.clear();
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
let stored = session.conv_store.load_recent_messages(&conv_id, 200)?;
|
||||
self.messages = stored
|
||||
.into_iter()
|
||||
.map(|m| {
|
||||
let sender = if m.is_outgoing {
|
||||
format!("me({})", &self.identity_short)
|
||||
} else if let Some(name) = &m.sender_name {
|
||||
name.clone()
|
||||
} else {
|
||||
// Shorten sender key to 8 hex chars.
|
||||
let hex_short = hex::encode(&m.sender_key[..m.sender_key.len().min(4)]);
|
||||
format!("{hex_short}")
|
||||
};
|
||||
DisplayMessage {
|
||||
sender,
|
||||
body: m.body,
|
||||
timestamp_ms: m.timestamp_ms,
|
||||
is_outgoing: m.is_outgoing,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
// Reset scroll to bottom on channel switch.
|
||||
self.scroll_offset = 0;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn select_next_channel(&mut self, session: &SessionState) {
|
||||
if self.channel_names.is_empty() {
|
||||
return;
|
||||
}
|
||||
self.selected_channel = (self.selected_channel + 1) % self.channel_names.len();
|
||||
let _ = self.reload_messages(session);
|
||||
}
|
||||
|
||||
fn select_prev_channel(&mut self, session: &SessionState) {
|
||||
if self.channel_names.is_empty() {
|
||||
return;
|
||||
}
|
||||
if self.selected_channel == 0 {
|
||||
self.selected_channel = self.channel_names.len() - 1;
|
||||
} else {
|
||||
self.selected_channel -= 1;
|
||||
}
|
||||
let _ = self.reload_messages(session);
|
||||
}
|
||||
|
||||
fn scroll_up(&mut self) {
|
||||
self.scroll_offset = self.scroll_offset.saturating_add(1);
|
||||
}
|
||||
|
||||
fn scroll_down(&mut self) {
|
||||
self.scroll_offset = self.scroll_offset.saturating_sub(1);
|
||||
}
|
||||
|
||||
/// Append newly received messages to the in-memory list (no DB reload needed
|
||||
/// since we already have them from the poll task, but we also save them via
|
||||
/// the session so they appear on reload).
|
||||
fn append_messages(&mut self, msgs: Vec<(ConversationId, String, String)>) {
|
||||
let active = self.active_conv_id().cloned();
|
||||
for (conv_id, sender, body) in msgs {
|
||||
if active.as_ref() == Some(&conv_id) {
|
||||
self.messages.push(DisplayMessage {
|
||||
sender,
|
||||
body,
|
||||
timestamp_ms: now_ms(),
|
||||
is_outgoing: false,
|
||||
});
|
||||
// Snap to bottom if user wasn't scrolled.
|
||||
if self.scroll_offset == 0 {
|
||||
// Already at bottom — nothing to do.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Drawing ───────────────────────────────────────────────────────────────────
|
||||
|
||||
fn ui(frame: &mut Frame, app: &App) {
|
||||
let size = frame.area();
|
||||
|
||||
// Top-level split: sidebar | main area.
|
||||
let h_chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([Constraint::Percentage(20), Constraint::Percentage(80)])
|
||||
.split(size);
|
||||
|
||||
// Main area split: messages | input bar.
|
||||
let v_chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Min(3), Constraint::Length(3)])
|
||||
.split(h_chunks[1]);
|
||||
|
||||
draw_sidebar(frame, app, h_chunks[0]);
|
||||
draw_messages(frame, app, v_chunks[0]);
|
||||
draw_input(frame, app, v_chunks[1]);
|
||||
}
|
||||
|
||||
fn draw_sidebar(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let items: Vec<ListItem> = app
|
||||
.channel_names
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, name)| {
|
||||
let style = if i == app.selected_channel {
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD | Modifier::REVERSED)
|
||||
} else {
|
||||
Style::default().fg(Color::Cyan)
|
||||
};
|
||||
ListItem::new(Line::from(Span::styled(name.clone(), style)))
|
||||
})
|
||||
.collect();
|
||||
|
||||
let block = Block::default()
|
||||
.title(" Channels ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let mut list_state = ListState::default();
|
||||
if !app.channel_names.is_empty() {
|
||||
list_state.select(Some(app.selected_channel));
|
||||
}
|
||||
|
||||
frame.render_stateful_widget(
|
||||
List::new(items).block(block),
|
||||
area,
|
||||
&mut list_state,
|
||||
);
|
||||
}
|
||||
|
||||
/// Render the message pane for the active channel, applying the scroll offset
/// so that `scroll_offset == 0` shows the newest messages at the bottom.
fn draw_messages(frame: &mut Frame, app: &App, area: Rect) {
    // Title is the active channel name, falling back to a generic label.
    let channel_title = app
        .channel_names
        .get(app.selected_channel)
        .map(|n| format!(" {n} "))
        .unwrap_or_else(|| " Messages ".to_string());

    let block = Block::default()
        .title(channel_title)
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::DarkGray));

    // Interior height excludes the top and bottom border rows.
    let inner_height = area.height.saturating_sub(2) as usize;

    // Build lines from messages (newest at bottom).
    let mut lines: Vec<Line> = app
        .messages
        .iter()
        .map(|m| {
            let ts = format_timestamp(m.timestamp_ms);
            let ts_span = Span::styled(ts, Style::default().fg(Color::DarkGray));

            // Outgoing senders in yellow, incoming in green — both bold.
            let sender_style = if m.is_outgoing {
                Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)
            } else {
                Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)
            };
            let sender_span = Span::styled(format!(" {} ", m.sender), sender_style);
            let body_span = Span::raw(m.body.clone());

            Line::from(vec![ts_span, sender_span, body_span])
        })
        .collect();

    // Apply scroll: scroll_offset=0 means newest at bottom.
    // The visible window is [visible_start, visible_end): its bottom edge moves
    // up by `scroll_offset` lines, clamped so it never underflows.
    // NOTE(review): this computes one line per message before wrapping; wrapped
    // lines may make the window slightly taller than `inner_height` — confirm
    // whether that is acceptable.
    let total = lines.len();
    let visible_start = if total > inner_height {
        let bottom = total - app.scroll_offset.min(total);
        bottom.saturating_sub(inner_height)
    } else {
        0
    };
    let visible_end = if total > inner_height {
        total - app.scroll_offset.min(total)
    } else {
        total
    };
    let visible_lines: Vec<Line> = lines
        .drain(visible_start..visible_end.min(lines.len()))
        .collect();

    let paragraph = Paragraph::new(visible_lines)
        .block(block)
        .wrap(Wrap { trim: false });

    frame.render_widget(paragraph, area);
}
|
||||
|
||||
fn draw_input(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let block = Block::default()
|
||||
.title(" Input (Enter=send, Tab=switch channel, q/Ctrl+C=quit) ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let input_text = Paragraph::new(app.input.as_str())
|
||||
.block(block)
|
||||
.style(Style::default().fg(Color::White));
|
||||
|
||||
frame.render_widget(input_text, area);
|
||||
|
||||
// Position cursor at end of input.
|
||||
let cursor_x = area.x + 1 + app.input.len() as u16;
|
||||
let cursor_y = area.y + 1;
|
||||
if cursor_x < area.x + area.width - 1 {
|
||||
frame.set_cursor_position((cursor_x, cursor_y));
|
||||
}
|
||||
}
|
||||
|
||||
/// Format epoch milliseconds as a simple "[HH:MM] " prefix (UTC, 24h wrap).
fn format_timestamp(ms: u64) -> String {
    let total_minutes = ms / 60_000;
    let hours = (total_minutes / 60) % 24;
    let minutes = total_minutes % 60;
    format!("[{hours:02}:{minutes:02}] ")
}
|
||||
|
||||
// ── Message polling task ──────────────────────────────────────────────────────
|
||||
|
||||
/// Background task that polls the server for new messages and sends them via `tx`.
///
/// Loops until the UI side of `tx` is dropped: once a second it fetches queued
/// payloads for our identity, trial-decrypts each payload against every known
/// conversation, persists whatever parses, and forwards new chat bodies to the
/// TUI loop as `TuiEvent::NewMessages`.
async fn poll_task(
    mut session: SessionState,
    client: node_service::Client,
    tx: mpsc::Sender<TuiEvent>,
) {
    // 1s cadence; skip (rather than burst) ticks missed while a fetch ran long.
    let mut poll_interval = interval(Duration::from_millis(1000));
    poll_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);

    loop {
        poll_interval.tick().await;

        let identity_bytes = session.identity_bytes();
        // Network errors are treated as transient: retry on the next tick.
        let payloads = match fetch_wait(&client, &identity_bytes, 0).await {
            Ok(p) => p,
            Err(_) => continue,
        };

        if payloads.is_empty() {
            continue;
        }

        let mut new_msgs: Vec<(ConversationId, String, String)> = Vec::new();
        let my_key = session.identity_bytes();

        // Process payloads in sequence order so MLS state advances consistently.
        let mut sorted = payloads;
        sorted.sort_by_key(|(seq, _)| *seq);

        for (_seq, payload) in &sorted {
            // The outer layer may be hybrid-encrypted to our keypair; if that
            // fails, fall back to treating the payload as a bare MLS message.
            let mls_payload = match try_hybrid_decrypt(session.hybrid_kp.as_ref(), payload) {
                Ok(b) => b,
                Err(_) => payload.clone(),
            };

            // Snapshot IDs first: the inner loop needs `&mut` access to members.
            let conv_ids: Vec<ConversationId> = session.members.keys().cloned().collect();

            // Trial-decrypt against each conversation; the first member that
            // accepts the message claims it (hence the `break`s below).
            for conv_id in &conv_ids {
                let member = match session.members.get_mut(conv_id) {
                    Some(m) => m,
                    None => continue,
                };

                match member.receive_message(&mls_payload) {
                    Ok(ReceivedMessage::Application(plaintext)) => {
                        // Strip metadata protection: unpad, then unseal the
                        // sender identity if a sealed-sender layer is present.
                        let (sender_key, app_bytes) = {
                            let after_unpad = quicproquo_core::padding::unpad(&plaintext)
                                .unwrap_or_else(|_| plaintext.clone());

                            if quicproquo_core::sealed_sender::is_sealed(&after_unpad) {
                                match quicproquo_core::sealed_sender::unseal(&after_unpad) {
                                    Ok((sk, inner)) => (sk.to_vec(), inner),
                                    // Unseal failure: keep the bytes and
                                    // attribute to our own key rather than drop.
                                    Err(_) => (my_key.clone(), after_unpad),
                                }
                            } else {
                                (my_key.clone(), after_unpad)
                            }
                        };

                        // Decode the structured app message; bytes that fail to
                        // parse are displayed verbatim as a plain chat message.
                        let (body, msg_id, msg_type, ref_msg_id) =
                            match parse_app_msg(&app_bytes) {
                                Ok((_, AppMessage::Chat { message_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    Some(message_id),
                                    "chat",
                                    None,
                                ),
                                Ok((_, AppMessage::Reply { ref_msg_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    None,
                                    "reply",
                                    Some(ref_msg_id),
                                ),
                                Ok((_, AppMessage::Reaction { ref_msg_id, emoji })) => (
                                    String::from_utf8_lossy(&emoji).to_string(),
                                    None,
                                    "reaction",
                                    Some(ref_msg_id),
                                ),
                                _ => (
                                    String::from_utf8_lossy(&app_bytes).to_string(),
                                    None,
                                    "chat",
                                    None,
                                ),
                            };

                        let stored = StoredMessage {
                            conversation_id: conv_id.clone(),
                            message_id: msg_id,
                            sender_key: sender_key.clone(),
                            sender_name: None,
                            body: body.clone(),
                            msg_type: msg_type.into(),
                            ref_msg_id,
                            timestamp_ms: now_ms(),
                            is_outgoing: false,
                        };

                        // Surface the message to the UI only once it is persisted.
                        if session.conv_store.save_message(&stored).is_ok() {
                            let sender_short = hex::encode(&sender_key[..sender_key.len().min(4)]);
                            new_msgs.push((conv_id.clone(), sender_short, body));
                        }

                        let _ = session.conv_store.update_activity(conv_id, now_ms());
                        // Persist the ratcheted MLS state; losing it would
                        // desynchronize us from the group.
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    // Membership/epoch change: persist state, message consumed.
                    Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    // Not for this conversation — try the next one.
                    _ => {}
                }
            }
        }

        if !new_msgs.is_empty() {
            // A closed receiver means the TUI loop has exited; stop polling.
            if tx.send(TuiEvent::NewMessages(new_msgs)).await.is_err() {
                break;
            }
        }
    }
}
|
||||
|
||||
// ── Send message ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Encrypt `text` for conversation `conv_id` and enqueue it to every other
/// group member, then persist the outgoing message locally.
///
/// Pipeline: structured AppMessage → sealed-sender → padding → MLS encrypt →
/// optional per-recipient hybrid encryption → server enqueue.
///
/// # Errors
/// Fails if the conversation has no `GroupMember`, or on serialization,
/// MLS/hybrid encryption, RPC, or local persistence errors.
async fn send_message(
    session: &mut SessionState,
    client: &node_service::Client,
    conv_id: &ConversationId,
    text: &str,
) -> anyhow::Result<()> {
    let my_key = session.identity_bytes();
    // Clone the Arc up front so the identity stays usable while `members`
    // is mutably borrowed below.
    let identity = Arc::clone(&session.identity);

    let member = session
        .members
        .get_mut(conv_id)
        .context("no GroupMember for this conversation")?;

    // Wrap in structured AppMessage format.
    let app_payload = serialize_chat(text.as_bytes(), None)
        .context("serialize app message")?;

    // Metadata protection: seal + pad.
    let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
    let padded = quicproquo_core::padding::pad(&sealed);

    let ct = member.send_message(&padded).context("MLS encrypt")?;

    // Fan out to every group member except ourselves.
    let recipients: Vec<Vec<u8>> = member
        .member_identities()
        .into_iter()
        .filter(|id| id.as_slice() != my_key.as_slice())
        .collect();

    for recipient_key in &recipients {
        // Prefer hybrid encryption when the peer has published a hybrid key;
        // otherwise send the bare MLS ciphertext.
        // NOTE(review): one fetch_hybrid_key RPC per recipient per message —
        // consider caching if groups grow large.
        let peer_hybrid_pk = fetch_hybrid_key(client, recipient_key).await?;
        let payload = if let Some(ref pk) = peer_hybrid_pk {
            hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt")?
        } else {
            ct.clone()
        };
        enqueue(client, recipient_key, &payload).await?;
    }

    // Extract message_id from what we just serialized.
    let msg_id = parse_app_msg(&app_payload)
        .ok()
        .and_then(|(_, m)| match m {
            AppMessage::Chat { message_id, .. } => Some(message_id),
            _ => None,
        });

    // Save outgoing message.
    let stored = StoredMessage {
        conversation_id: conv_id.clone(),
        message_id: msg_id,
        sender_key: my_key,
        sender_name: Some("you".into()),
        body: text.to_string(),
        msg_type: "chat".into(),
        ref_msg_id: None,
        timestamp_ms: now_ms(),
        is_outgoing: true,
    };
    session.conv_store.save_message(&stored)?;
    session.conv_store.update_activity(conv_id, now_ms())?;
    // Persist the ratcheted MLS state after the send.
    session.save_member(conv_id)?;

    Ok(())
}
|
||||
|
||||
// ── TUI entry point ───────────────────────────────────────────────────────────
|
||||
|
||||
/// Entry point for `qpq tui`. Sets up the terminal, runs the event loop, and
/// restores the terminal on exit.
///
/// Flow: resolve an access token (CLI flag → cached session → interactive
/// OPAQUE login), install it as the global auth context, load session state,
/// connect to the server, best-effort upload of the KeyPackage, then hand
/// control to `tui_loop`.
///
/// # Errors
/// Returns an error from auth resolution, state loading, connection, or
/// terminal setup, or whatever `tui_loop` / the final state save produced.
pub async fn run_tui(
    state_path: &Path,
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    password: Option<&str>,
    username: Option<&str>,
    opaque_password: Option<&str>,
    access_token: &str,
    device_id: Option<&str>,
) -> anyhow::Result<()> {
    // ── Auth ──────────────────────────────────────────────────────────────────
    let resolved_token = resolve_tui_access_token(
        state_path,
        server,
        ca_cert,
        server_name,
        password,
        username,
        opaque_password,
        access_token,
    )
    .await?;

    // Tokens are normally hex; fall back to the raw UTF-8 bytes if decoding fails.
    let token_bytes = hex::decode(&resolved_token)
        .unwrap_or_else(|_| resolved_token.into_bytes());
    let auth_ctx = ClientAuth::from_raw(token_bytes, device_id.map(String::from));
    init_auth(auth_ctx);

    // ── Session + RPC ─────────────────────────────────────────────────────────
    let mut session = SessionState::load(state_path, password)?;
    let client = connect_node(server, ca_cert, server_name).await?;

    // Auto-upload KeyPackage.
    // Best-effort: failure to publish keys must not prevent the TUI from starting.
    let _ = auto_upload_keys_tui(&session, &client).await;

    // ── Terminal setup ────────────────────────────────────────────────────────
    enable_raw_mode().context("enable raw mode")?;
    let mut stdout = std::io::stdout();
    execute!(stdout, EnterAlternateScreen, EnableMouseCapture)
        .context("enter alternate screen")?;
    let backend = CrosstermBackend::new(stdout);
    let mut terminal = Terminal::new(backend).context("create terminal")?;

    let result = tui_loop(&mut terminal, &mut session, client).await;

    // ── Terminal cleanup (always restore, even on error) ───────────────────
    // `.ok()` everywhere: cleanup failures must not shadow the loop's result.
    disable_raw_mode().ok();
    execute!(
        terminal.backend_mut(),
        LeaveAlternateScreen,
        DisableMouseCapture
    )
    .ok();
    terminal.show_cursor().ok();

    // NOTE(review): if `save_all` fails, its error is returned and any error
    // carried in `result` from tui_loop is silently discarded — confirm intended.
    session.save_all()?;

    result
}
|
||||
|
||||
/// Core TUI event loop: spawns a keyboard/tick task and a message-poll task,
/// then draws the UI and dispatches events until the user quits or every
/// event sender is dropped.
///
/// # Errors
/// Returns an error if app construction, message reload, the clone of
/// `SessionState` for the poll task, or a terminal draw fails.
async fn tui_loop(
    terminal: &mut Terminal<CrosstermBackend<std::io::Stdout>>,
    session: &mut SessionState,
    client: node_service::Client,
) -> anyhow::Result<()> {
    let mut app = App::new(session)?;
    app.reload_messages(session)?;

    // Bounded channel: both background tasks exit when the receiver is dropped.
    let (event_tx, mut event_rx) = mpsc::channel::<TuiEvent>(256);

    // ── Keyboard event task ───────────────────────────────────────────────────
    let key_tx = event_tx.clone();
    // NOTE(review): `event::poll`/`event::read` are blocking crossterm calls
    // inside a spawn_local'd future — presumably acceptable with the 50ms
    // timeout, but confirm it cannot starve the other local tasks.
    tokio::task::spawn_local(async move {
        loop {
            // crossterm event polling — 50ms timeout so we can tick.
            match event::poll(Duration::from_millis(50)) {
                Ok(true) => {
                    if let Ok(Event::Key(key)) = event::read() {
                        if key_tx.send(TuiEvent::Key(key)).await.is_err() {
                            break;
                        }
                    }
                }
                Ok(false) => {
                    // No event — send a tick so the UI redraws.
                    if key_tx.send(TuiEvent::Tick).await.is_err() {
                        break;
                    }
                }
                Err(_) => break,
            }
        }
    });

    // ── Message poll task ─────────────────────────────────────────────────────
    // Clone session state for the poll task (it needs its own SessionState).
    let poll_session = SessionState::load(
        &session.state_path.clone(),
        session.password.as_ref().map(|p| p.as_str()),
    )?;
    let poll_tx = event_tx.clone();
    tokio::task::spawn_local(poll_task(poll_session, client.clone(), poll_tx));

    // ── Main loop ─────────────────────────────────────────────────────────────
    loop {
        terminal.draw(|f| ui(f, &app)).context("draw")?;

        match event_rx.recv().await {
            // All senders gone — background tasks died; exit the loop.
            None => break,
            Some(TuiEvent::Tick) => {
                // Just redraw.
            }
            Some(TuiEvent::NewMessages(msgs)) => {
                app.append_messages(msgs);
            }
            Some(TuiEvent::Key(key)) => {
                match key.code {
                    // Ctrl+C always quits; bare 'q' quits only with an empty input bar.
                    KeyCode::Char('c') if key.modifiers.contains(KeyModifiers::CONTROL) => {
                        app.should_quit = true;
                    }
                    KeyCode::Char('q') if app.input.is_empty() => {
                        app.should_quit = true;
                    }
                    KeyCode::Enter => {
                        let text = app.input.trim().to_string();
                        if !text.is_empty() {
                            app.input.clear();
                            if let Some(conv_id) = app.active_conv_id().cloned() {
                                match send_message(session, &client, &conv_id, &text).await {
                                    Ok(()) => {
                                        // Add to in-memory list immediately.
                                        app.messages.push(DisplayMessage {
                                            sender: format!("me({})", app.identity_short),
                                            body: text,
                                            timestamp_ms: now_ms(),
                                            is_outgoing: true,
                                        });
                                    }
                                    Err(_e) => {
                                        // Silently drop — user will see nothing happened.
                                    }
                                }
                            }
                        }
                    }
                    KeyCode::Char(c) => {
                        app.input.push(c);
                    }
                    KeyCode::Backspace => {
                        app.input.pop();
                    }
                    KeyCode::Up => {
                        app.scroll_up();
                    }
                    KeyCode::Down => {
                        app.scroll_down();
                    }
                    // Tab / Shift+Tab cycle through channels, then reload history.
                    KeyCode::Tab => {
                        if key.modifiers.contains(KeyModifiers::SHIFT) {
                            app.select_prev_channel(session);
                        } else {
                            app.select_next_channel(session);
                        }
                        app.reload_messages(session)?;
                    }
                    _ => {}
                }
            }
        }

        if app.should_quit {
            break;
        }
    }

    Ok(())
}
|
||||
|
||||
// ── Startup helpers ───────────────────────────────────────────────────────────
|
||||
|
||||
async fn auto_upload_keys_tui(
|
||||
session: &SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<()> {
|
||||
let ks_path = session.state_path.with_extension("pending.ks");
|
||||
let ks = DiskKeyStore::persistent(&ks_path).unwrap_or_else(|_| DiskKeyStore::ephemeral());
|
||||
let mut member = GroupMember::new_with_state(
|
||||
Arc::clone(&session.identity),
|
||||
ks,
|
||||
None,
|
||||
false,
|
||||
);
|
||||
let kp_bytes = member.generate_key_package().context("generate KeyPackage")?;
|
||||
let id_key = session.identity.public_key_bytes();
|
||||
upload_key_package(client, &id_key, &kp_bytes).await?;
|
||||
if let Some(ref hkp) = session.hybrid_kp {
|
||||
upload_hybrid_key(client, &id_key, &hkp.public_key()).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn resolve_tui_access_token(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
state_password: Option<&str>,
|
||||
username: Option<&str>,
|
||||
opaque_password: Option<&str>,
|
||||
cli_access_token: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
if !cli_access_token.is_empty() {
|
||||
return Ok(cli_access_token.to_string());
|
||||
}
|
||||
|
||||
if let Some(cached) = load_cached_session(state_path, state_password) {
|
||||
return Ok(cached.token_hex);
|
||||
}
|
||||
|
||||
let username = match username {
|
||||
Some(u) => u.to_string(),
|
||||
None => {
|
||||
use std::io::Write;
|
||||
eprint!("Username: ");
|
||||
std::io::stderr().flush().ok();
|
||||
let mut input = String::new();
|
||||
std::io::stdin()
|
||||
.read_line(&mut input)
|
||||
.context("failed to read username")?;
|
||||
let trimmed = input.trim().to_string();
|
||||
anyhow::ensure!(!trimmed.is_empty(), "username is required");
|
||||
trimmed
|
||||
}
|
||||
};
|
||||
|
||||
let opaque_password = match opaque_password {
|
||||
Some(p) => p.to_string(),
|
||||
None => rpassword::read_password().context("failed to read password")?,
|
||||
};
|
||||
|
||||
let state = load_or_init_state(state_path, state_password)?;
|
||||
let identity = IdentityKeypair::from_seed(state.identity_seed);
|
||||
let identity_key = identity.public_key_bytes().to_vec();
|
||||
|
||||
let node_client = connect_node(server, ca_cert, server_name).await?;
|
||||
|
||||
match opaque_register(&node_client, &username, &opaque_password, Some(&identity_key)).await {
|
||||
Ok(()) | Err(_) => {}
|
||||
}
|
||||
|
||||
let token_bytes = opaque_login(&node_client, &username, &opaque_password, &identity_key)
|
||||
.await
|
||||
.context("OPAQUE login failed")?;
|
||||
let token_hex = hex::encode(&token_bytes);
|
||||
|
||||
save_cached_session(state_path, &username, &token_hex, state_password)?;
|
||||
|
||||
Ok(token_hex)
|
||||
}
|
||||
1114
crates/quicproquo-client/src/client/v2_repl.rs
Normal file
1114
crates/quicproquo-client/src/client/v2_repl.rs
Normal file
File diff suppressed because it is too large
Load Diff
1152
crates/quicproquo-client/src/client/v2_tui.rs
Normal file
1152
crates/quicproquo-client/src/client/v2_tui.rs
Normal file
File diff suppressed because it is too large
Load Diff
177
crates/quicproquo-client/src/lib.rs
Normal file
177
crates/quicproquo-client/src/lib.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
//! quicproquo CLI client library.
|
||||
//!
|
||||
//! # KeyPackage expiry and refresh
|
||||
//!
|
||||
//! KeyPackages are single-use (consumed when someone fetches them for an invite) and the server
|
||||
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `qpq refresh-keypackage`
|
||||
//! periodically (e.g. before the server TTL) or after your KeyPackage was consumed:
|
||||
//!
|
||||
//! ```bash
|
||||
//! qpq refresh-keypackage --state qpq-state.bin --server 127.0.0.1:7000
|
||||
//! ```
|
||||
//!
|
||||
//! Use the same `--access-token` (or `QPQ_ACCESS_TOKEN`) as for other authenticated
|
||||
//! commands. See the [running-the-client](https://docs.quicproquo.dev/getting-started/running-the-client)
|
||||
//! docs for details.
|
||||
|
||||
use std::sync::RwLock;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
pub mod client;
|
||||
#[cfg(feature = "v2")]
|
||||
pub mod v2_commands;
|
||||
|
||||
pub use client::commands::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
|
||||
cmd_fetch_key, cmd_health, cmd_health_json, cmd_invite, cmd_join, cmd_login, cmd_ping,
|
||||
cmd_recv, cmd_register, cmd_register_state, cmd_refresh_keypackage, cmd_register_user,
|
||||
cmd_send, cmd_whoami, opaque_login, receive_pending_plaintexts, whoami_json,
|
||||
};
|
||||
|
||||
pub use client::command_engine::{Command, CommandRegistry, CommandResult};
|
||||
#[cfg(feature = "playbook")]
|
||||
pub use client::playbook::{Playbook, PlaybookReport, PlaybookRunner};
|
||||
pub use client::repl::run_repl;
|
||||
pub use client::rpc::{connect_node, connect_node_opt, create_channel, enqueue, fetch_wait, resolve_user};
|
||||
|
||||
// ── ClientContext: structured holder for session-scoped auth + TLS config ────
|
||||
|
||||
/// Holds the authentication credentials and TLS policy for a client session.
|
||||
///
|
||||
/// Prefer constructing a `ClientContext` and passing it explicitly where
|
||||
/// possible. The global `AUTH_CONTEXT` / `INSECURE_SKIP_VERIFY` statics
|
||||
/// delegate to a `ClientContext` under the hood and exist only for backward
|
||||
/// compatibility with call-sites that have not yet been migrated.
|
||||
pub struct ClientContext {
    // Session credentials; `None` until `with_auth`/`set_auth` provides them.
    auth: RwLock<Option<ClientAuth>>,
    // When `true`, TLS certificate verification is skipped (development only).
    insecure_skip_verify: AtomicBool,
}
|
||||
|
||||
impl ClientContext {
|
||||
/// Create a new context with no auth and TLS verification enabled.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
auth: RwLock::new(None),
|
||||
insecure_skip_verify: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a context pre-loaded with auth credentials.
|
||||
pub fn with_auth(auth: ClientAuth) -> Self {
|
||||
Self {
|
||||
auth: RwLock::new(Some(auth)),
|
||||
insecure_skip_verify: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set (or replace) the auth credentials.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn set_auth(&self, ctx: ClientAuth) {
|
||||
let mut guard = self.auth.write().expect("ClientContext auth lock poisoned");
|
||||
*guard = Some(ctx);
|
||||
}
|
||||
|
||||
/// Read the current auth snapshot (cloned).
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn get_auth(&self) -> Option<ClientAuth> {
|
||||
let guard = self.auth.read().expect("ClientContext auth lock poisoned");
|
||||
guard.clone()
|
||||
}
|
||||
|
||||
/// Returns true if auth credentials have been set.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn is_authenticated(&self) -> bool {
|
||||
let guard = self.auth.read().expect("ClientContext auth lock poisoned");
|
||||
guard.is_some()
|
||||
}
|
||||
|
||||
/// Enable or disable insecure TLS mode.
|
||||
pub fn set_insecure_skip_verify(&self, enabled: bool) {
|
||||
self.insecure_skip_verify.store(enabled, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Read the current insecure-skip-verify flag.
|
||||
pub fn insecure_skip_verify(&self) -> bool {
|
||||
self.insecure_skip_verify.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ClientContext {
    fn default() -> Self {
        // Same as `new()`: no credentials, TLS verification enabled.
        Self::new()
    }
}
|
||||
|
||||
// ── Global statics (thin wrappers, kept for backward compat) ─────────────────
|
||||
|
||||
/// Global auth context — delegates to a process-wide `ClientContext`.
|
||||
/// Prefer passing `&ClientContext` explicitly in new code.
|
||||
// NOTE(review): these globals duplicate the state held by `ClientContext`;
// they exist only for un-migrated call-sites and should eventually go away.
pub(crate) static AUTH_CONTEXT: RwLock<Option<ClientAuth>> = RwLock::new(None);

/// When `true`, [`connect_node`] skips TLS certificate verification.
/// Prefer `ClientContext::set_insecure_skip_verify` in new code.
pub(crate) static INSECURE_SKIP_VERIFY: AtomicBool = AtomicBool::new(false);
|
||||
|
||||
/// Enable or disable insecure (no-verify) TLS mode globally.
|
||||
///
|
||||
/// **Development only.** When enabled, all outgoing connections skip certificate
|
||||
/// verification, making them vulnerable to MITM attacks.
|
||||
pub fn set_insecure_skip_verify(enabled: bool) {
    // Relaxed is sufficient: a standalone flag with no ordering dependency
    // on other shared state.
    INSECURE_SKIP_VERIFY.store(enabled, Ordering::Relaxed);
}
|
||||
|
||||
// NOTE(review): `derive(Debug)` on this type will print the token bytes —
// consider a manual Debug impl that redacts `access_token`.
#[derive(Clone, Debug)]
pub struct ClientAuth {
    // Auth payload format version; always 1 as constructed here.
    pub(crate) version: u16,
    /// Bearer or OPAQUE session token. Zeroized on drop. (M8)
    pub(crate) access_token: Zeroizing<Vec<u8>>,
    // Opaque device identifier bytes; empty when no device id was supplied.
    pub(crate) device_id: Vec<u8>,
}
|
||||
|
||||
impl ClientAuth {
|
||||
/// Build a client auth context from optional token and device id.
|
||||
pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
|
||||
let token = access_token.into_bytes();
|
||||
let device = device_id.unwrap_or_default().into_bytes();
|
||||
Self {
|
||||
version: 1,
|
||||
access_token: Zeroizing::new(token),
|
||||
device_id: device,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build from raw token bytes (e.g. a 32-byte OPAQUE session token).
|
||||
pub fn from_raw(raw_token: Vec<u8>, device_id: Option<String>) -> Self {
|
||||
let device = device_id.unwrap_or_default().into_bytes();
|
||||
Self {
|
||||
version: 1,
|
||||
access_token: Zeroizing::new(raw_token),
|
||||
device_id: device,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set (or replace) the global auth context.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn init_auth(ctx: ClientAuth) {
|
||||
let mut guard = AUTH_CONTEXT.write().expect("AUTH_CONTEXT poisoned");
|
||||
*guard = Some(ctx);
|
||||
}
|
||||
883
crates/quicproquo-client/src/main.rs
Normal file
883
crates/quicproquo-client/src/main.rs
Normal file
@@ -0,0 +1,883 @@
|
||||
//! quicproquo CLI client.
|
||||
|
||||
// ── v2 feature gate: when compiled with --features v2, use the SDK-based CLI.
|
||||
#[cfg(feature = "v2")]
|
||||
mod v2_commands;
|
||||
#[cfg(feature = "v2")]
|
||||
mod v2_main;
|
||||
|
||||
#[cfg(feature = "v2")]
fn main() {
    // With the v2 feature enabled, the entire CLI is handled by the
    // SDK-based v2 implementation; the v1 code below is compiled out.
    v2_main::main();
}
|
||||
|
||||
// ── v1 CLI (default) ─────────────────────────────────────────────────────────
|
||||
#[cfg(not(feature = "v2"))]
|
||||
use std::path::{Path, PathBuf};
|
||||
#[cfg(not(feature = "v2"))]
|
||||
use anyhow::Context;
|
||||
#[cfg(not(feature = "v2"))]
|
||||
use clap::{Parser, Subcommand};
|
||||
#[cfg(not(feature = "v2"))]
|
||||
use quicproquo_client::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
|
||||
cmd_fetch_key, cmd_health, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register,
|
||||
cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami,
|
||||
init_auth, run_repl, set_insecure_skip_verify, ClientAuth,
|
||||
};
|
||||
#[cfg(all(feature = "tui", not(feature = "v2")))]
|
||||
use quicproquo_client::client::tui::run_tui;
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
#[cfg(not(feature = "v2"))]
|
||||
|
||||
// Top-level CLI arguments. Plain `//` comments are used for review notes
// because clap turns `///` doc comments into user-visible help text.
#[derive(Debug, Parser)]
#[command(name = "qpq", about = "quicproquo CLI client", version)]
struct Args {
    /// Path to the server's TLS certificate (self-signed by default).
    #[arg(
        long,
        global = true,
        default_value = "data/server-cert.der",
        env = "QPQ_CA_CERT"
    )]
    ca_cert: PathBuf,

    /// Expected TLS server name (must match the certificate SAN).
    #[arg(
        long,
        global = true,
        default_value = "localhost",
        env = "QPQ_SERVER_NAME"
    )]
    server_name: String,

    /// Bearer token or OPAQUE session token for authenticated requests.
    /// Not required for register-user and login commands.
    #[arg(
        long,
        global = true,
        env = "QPQ_ACCESS_TOKEN",
        default_value = ""
    )]
    access_token: String,

    /// Optional device identifier (UUID bytes encoded as hex or raw string).
    #[arg(long, global = true, env = "QPQ_DEVICE_ID")]
    device_id: Option<String>,

    /// Password to encrypt/decrypt client state files (QPCE format).
    /// If set, state files are encrypted at rest with Argon2id + ChaCha20Poly1305.
    #[arg(long, global = true, env = "QPQ_STATE_PASSWORD")]
    state_password: Option<String>,

    /// DANGER: Skip TLS certificate verification. Development only.
    /// Disables all certificate checks, making the connection vulnerable to MITM attacks.
    #[arg(
        long = "danger-accept-invalid-certs",
        global = true,
        env = "QPQ_DANGER_ACCEPT_INVALID_CERTS"
    )]
    danger_accept_invalid_certs: bool,

    // ── Default-repl args (used when no subcommand is given) ─────────
    /// State file path (identity + MLS state). Used when running the default REPL.
    #[arg(long, default_value = "qpq-state.bin", env = "QPQ_STATE")]
    state: PathBuf,

    /// Server address (host:port). Used when running the default REPL.
    #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
    server: String,

    /// OPAQUE username for automatic registration/login.
    #[arg(long, env = "QPQ_USERNAME")]
    username: Option<String>,

    /// OPAQUE password (prompted securely if --username is set but --password is not).
    #[arg(long, env = "QPQ_PASSWORD")]
    password: Option<String>,

    /// Do not auto-start a local qpq-server (useful when connecting to a remote server).
    #[arg(long, env = "QPQ_NO_SERVER")]
    no_server: bool,

    // `None` here means: run the default interactive REPL with the args above.
    #[command(subcommand)]
    command: Option<Command>,
}
|
||||
|
||||
#[cfg(not(feature = "v2"))]
|
||||
// v1 subcommands. The shared flags (`--state`, `--server`) are repeated per
// variant so each subcommand can be invoked standalone with its own env
// defaults. `//` comments only — clap turns `///` into help text.
#[derive(Debug, Subcommand)]
enum Command {
    /// Register a new user via OPAQUE (password never leaves the client).
    RegisterUser {
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
        /// Username for the new account.
        #[arg(long)]
        username: String,
        /// Password (will be used in OPAQUE PAKE; server never sees it).
        #[arg(long)]
        password: String,
    },

    /// Log in via OPAQUE and receive a session token.
    Login {
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
        #[arg(long)]
        username: String,
        #[arg(long)]
        password: String,
        /// Hex-encoded Ed25519 identity key (64 hex chars). Optional if --state is provided.
        #[arg(long)]
        identity_key: Option<String>,
        /// State file to derive the identity key (requires same password if encrypted).
        #[arg(long)]
        state: Option<PathBuf>,
        /// Password for the encrypted state file (if any).
        #[arg(long)]
        state_password: Option<String>,
    },

    /// Show local identity key, fingerprint, group status, and hybrid key status.
    Whoami {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
    },

    /// Check server connectivity and print status.
    Health {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
    },

    /// Check if a peer has registered a hybrid key (non-consuming lookup).
    CheckKey {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,

        // Positional argument (no `long`), unlike most other key flags.
        /// Peer's Ed25519 identity public key (64 hex chars = 32 bytes).
        identity_key: String,
    },

    /// Send a Ping to the server and print the round-trip time.
    Ping {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
    },

    /// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
    Register {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
    },

    /// Fetch a peer's KeyPackage from the Authentication Service.
    FetchKey {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,

        // Positional argument (no `long`).
        /// Target peer's Ed25519 identity public key (64 hex chars = 32 bytes).
        identity_key: String,
    },

    /// Run a two-party MLS demo (creator + joiner) against live AS and DS.
    DemoGroup {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
    },

    /// Upload the persistent identity's KeyPackage to the AS (uses state file).
    RegisterState {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,

        /// Authentication Service address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
    },

    /// Refresh the KeyPackage on the server (existing state only).
    /// Run periodically (e.g. before server TTL ~24h) or after your KeyPackage was consumed so others can invite you.
    RefreshKeypackage {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,

        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
    },

    /// Create a persistent group and save state to disk.
    CreateGroup {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,

        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,

        /// Group identifier (arbitrary bytes, typically a human-readable name).
        #[arg(long)]
        group_id: String,
    },

    /// Invite a peer into the group and deliver a Welcome via DS.
    Invite {
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
        /// Peer identity public key (64 hex chars = 32 bytes).
        #[arg(long)]
        peer_key: String,
    },

    /// Join a group by fetching the Welcome from the DS.
    Join {
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
    },

    /// Send an application message via the DS.
    Send {
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
        /// Recipient identity key (hex, 32 bytes -> 64 chars). Omit when using --all.
        #[arg(long)]
        peer_key: Option<String>,
        /// Send to all other group members (N-way groups).
        #[arg(long)]
        all: bool,
        /// Plaintext message to send.
        #[arg(long)]
        msg: String,
    },

    /// Receive and decrypt all pending messages from the DS.
    Recv {
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,

        /// Wait for up to this many milliseconds if no messages are queued.
        #[arg(long, default_value_t = 0)]
        wait_ms: u64,

        /// Continuously long-poll for messages.
        #[arg(long)]
        stream: bool,
    },

    /// Interactive multi-conversation REPL. Supports /dm, /create-group, /invite, /join, /switch, and more.
    /// Automatically registers and logs in if --username/--password are provided (or prompts interactively).
    Repl {
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
        /// OPAQUE username for automatic registration/login.
        #[arg(long, env = "QPQ_USERNAME")]
        username: Option<String>,
        /// OPAQUE password (prompted securely if --username is set but --password is not).
        #[arg(long, env = "QPQ_PASSWORD")]
        password: Option<String>,
        /// Do not auto-start a local qpq-server.
        #[arg(long, env = "QPQ_NO_SERVER")]
        no_server: bool,
    },

    /// Full-screen Ratatui TUI (requires --features tui).
    /// Channels sidebar, scrollable message view, and inline input bar.
    #[cfg(feature = "tui")]
    Tui {
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
        /// OPAQUE username for automatic registration/login.
        #[arg(long, env = "QPQ_USERNAME")]
        username: Option<String>,
        /// OPAQUE password (prompted securely if --username is set but --password is not).
        #[arg(long, env = "QPQ_PASSWORD")]
        password: Option<String>,
    },

    /// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
    /// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
    Chat {
        #[arg(
            long,
            default_value = "qpq-state.bin",
            env = "QPQ_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,
        /// Peer identity key (hex, 64 chars). Omit in a two-person group to use the only other member.
        #[arg(long)]
        peer_key: Option<String>,
        /// How often to poll for incoming messages (milliseconds).
        #[arg(long, default_value_t = 500)]
        poll_interval_ms: u64,
    },

    /// Export a conversation's message history to an encrypted, tamper-evident transcript file.
    ///
    /// The output file uses Argon2id + ChaCha20-Poly1305 encryption with a SHA-256 hash chain
    /// linking every record. Use `qpq export verify` to check chain integrity without decrypting.
    Export {
        /// Path to the conversation database (.convdb file).
        #[arg(long, default_value = "qpq-convdb.sqlite", env = "QPQ_CONV_DB")]
        conv_db: PathBuf,

        /// Conversation ID to export (32 hex chars = 16 bytes).
        #[arg(long)]
        conv_id: String,

        /// Output path for the .qpqt transcript file.
        #[arg(long, default_value = "transcript.qpqt")]
        output: PathBuf,

        /// Password used to encrypt the transcript (separate from the state/DB password).
        #[arg(long, env = "QPQ_TRANSCRIPT_PASSWORD")]
        transcript_password: Option<String>,

        /// Password for the encrypted conversation database (if any).
        #[arg(long, env = "QPQ_STATE_PASSWORD")]
        db_password: Option<String>,
    },

    /// Verify the hash-chain integrity of a transcript file without decrypting content.
    ExportVerify {
        /// Path to the .qpqt transcript file to verify.
        #[arg(long)]
        input: PathBuf,
    },

    /// Execute a YAML playbook (scripted command sequence) and exit.
    /// Requires `--features playbook`.
    #[cfg(feature = "playbook")]
    Run {
        /// Path to the YAML playbook file.
        playbook: PathBuf,

        /// State file path (identity + MLS state).
        #[arg(long, default_value = "qpq-state.bin", env = "QPQ_STATE")]
        state: PathBuf,

        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
        server: String,

        /// OPAQUE username for automatic login.
        #[arg(long, env = "QPQ_USERNAME")]
        username: Option<String>,

        /// OPAQUE password.
        #[arg(long, env = "QPQ_PASSWORD")]
        password: Option<String>,

        /// Override playbook variables: KEY=VALUE (repeatable).
        #[arg(long = "var", short = 'V')]
        vars: Vec<String>,
    },
}
|
||||
|
||||
// ── Helpers ───────────────────────────────────────────────────────────────────
|
||||
#[cfg(not(feature = "v2"))]
|
||||
/// Derive a per-user state path: returns `qpq-{username}.bin` when `state`
/// is still the default (`qpq-state.bin`) and a username is known; otherwise
/// returns `state` unchanged. This lets `qpq --username alice` automatically
/// isolate Alice's state without a manual `--state` flag.
fn derive_state_path(state: PathBuf, username: Option<&str>) -> PathBuf {
    match username {
        // Only the default path is rewritten; explicit --state always wins.
        Some(uname) if state == Path::new("qpq-state.bin") => {
            PathBuf::from(format!("qpq-{uname}.bin"))
        }
        _ => state,
    }
}
|
||||
|
||||
// ── Playbook execution ───────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(all(feature = "playbook", not(feature = "v2")))]
/// Execute a YAML playbook against `server` and exit with the run's verdict.
///
/// Connects, loads session state, performs OPAQUE login (or token auth) when
/// credentials are available, injects `--var` overrides plus connection info
/// into the runner, then prints the report. Exits the process with status 1
/// when any playbook step failed.
async fn run_playbook(
    playbook_path: &Path,
    state: &Path,
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    state_pw: Option<&str>,
    username: Option<&str>,
    password: Option<&str>,
    access_token: &str,
    device_id: Option<&str>,
    extra_vars: &[String],
) -> anyhow::Result<()> {
    use quicproquo_client::PlaybookRunner;

    // Honor the insecure-TLS escape hatch from the environment.
    let skip_tls_verify = std::env::var("QPQ_DANGER_ACCEPT_INVALID_CERTS").is_ok();

    let client = quicproquo_client::connect_node_opt(server, ca_cert, server_name, skip_tls_verify)
        .await
        .context("connect to server")?;

    let mut session = quicproquo_client::client::session::SessionState::load(state, state_pw)
        .context("load session state")?;

    // Credential-based OPAQUE login takes precedence; otherwise fall back to
    // a pre-issued access token when one was supplied.
    match (username, password) {
        (Some(uname), Some(pw)) => {
            let login = quicproquo_client::opaque_login(
                &client,
                uname,
                pw,
                &session.identity.public_key_bytes(),
            )
            .await;
            if let Err(e) = login {
                eprintln!("OPAQUE login failed: {e:#}");
            }
        }
        _ if !access_token.is_empty() => {
            let auth =
                ClientAuth::from_parts(access_token.to_string(), device_id.map(String::from));
            init_auth(auth);
        }
        _ => {}
    }

    let mut runner = PlaybookRunner::from_file(playbook_path)
        .with_context(|| format!("load playbook: {}", playbook_path.display()))?;

    // Apply --var KEY=VALUE overrides first, then connection-derived vars.
    for kv in extra_vars {
        match kv.split_once('=') {
            Some((k, v)) => runner.set_var(k, v),
            None => {
                eprintln!("warning: ignoring malformed --var '{kv}' (expected KEY=VALUE)");
            }
        }
    }
    runner.set_var("_server", server);
    if let Some(u) = username {
        runner.set_var("_username", u);
    }

    let report = runner.run(&mut session, &client).await;
    print!("{report}");

    if report.all_passed() {
        Ok(())
    } else {
        // Non-zero exit so CI pipelines see the failure.
        std::process::exit(1);
    }
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(not(feature = "v2"))]
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
// Install the rustls crypto provider before any TLS operations.
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
if args.danger_accept_invalid_certs {
|
||||
eprintln!("WARNING: TLS verification disabled — insecure mode");
|
||||
set_insecure_skip_verify(true);
|
||||
}
|
||||
|
||||
// For the REPL and TUI, defer init_auth so they can resolve their own token via OPAQUE.
|
||||
// For all other subcommands, initialize auth immediately.
|
||||
#[cfg(not(feature = "tui"))]
|
||||
let is_repl = matches!(args.command, None | Some(Command::Repl { .. }));
|
||||
#[cfg(feature = "tui")]
|
||||
let is_repl = matches!(args.command, None | Some(Command::Repl { .. }) | Some(Command::Tui { .. }));
|
||||
if !is_repl {
|
||||
let auth_ctx = ClientAuth::from_parts(args.access_token.clone(), args.device_id.clone());
|
||||
init_auth(auth_ctx);
|
||||
}
|
||||
|
||||
let state_pw = args.state_password.as_deref();
|
||||
|
||||
// Default to REPL when no subcommand is given.
|
||||
let no_server = args.no_server;
|
||||
let command = args.command.unwrap_or_else(|| Command::Repl {
|
||||
state: derive_state_path(args.state, args.username.as_deref()),
|
||||
server: args.server,
|
||||
username: args.username,
|
||||
password: args.password,
|
||||
no_server,
|
||||
});
|
||||
|
||||
match command {
|
||||
Command::RegisterUser {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
None,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Login {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
identity_key,
|
||||
state,
|
||||
state_password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
identity_key.as_deref(),
|
||||
state.as_deref(),
|
||||
state_password.as_deref(),
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Whoami { state } => cmd_whoami(&state, state_pw),
|
||||
Command::Health { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_health(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::CheckKey {
|
||||
server,
|
||||
identity_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_check_key(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&identity_key,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Ping { server } => cmd_ping(&server, &args.ca_cert, &args.server_name).await,
|
||||
Command::Register { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::FetchKey {
|
||||
server,
|
||||
identity_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_fetch_key(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&identity_key,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::DemoGroup { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_demo_group(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::RegisterState { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::RefreshKeypackage { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_refresh_keypackage(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::CreateGroup {
|
||||
state,
|
||||
server,
|
||||
group_id,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_create_group(&state, &server, &group_id, state_pw))
|
||||
.await
|
||||
}
|
||||
Command::Invite {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&peer_key,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Join { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_join(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Send {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
all,
|
||||
msg,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
peer_key.as_deref(),
|
||||
all,
|
||||
&msg,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Recv {
|
||||
state,
|
||||
server,
|
||||
wait_ms,
|
||||
stream,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_recv(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
wait_ms,
|
||||
stream,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Repl {
|
||||
state,
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
no_server,
|
||||
} => {
|
||||
let state = derive_state_path(state, username.as_deref());
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(run_repl(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
username.as_deref(),
|
||||
password.as_deref(),
|
||||
&args.access_token,
|
||||
args.device_id.as_deref(),
|
||||
no_server,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Chat {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
poll_interval_ms,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_chat(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
peer_key.as_deref(),
|
||||
state_pw,
|
||||
poll_interval_ms,
|
||||
))
|
||||
.await
|
||||
}
|
||||
#[cfg(feature = "tui")]
|
||||
Command::Tui {
|
||||
state,
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
} => {
|
||||
let state = derive_state_path(state, username.as_deref());
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(run_tui(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
username.as_deref(),
|
||||
password.as_deref(),
|
||||
&args.access_token,
|
||||
args.device_id.as_deref(),
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Export {
|
||||
conv_db,
|
||||
conv_id,
|
||||
output,
|
||||
transcript_password,
|
||||
db_password,
|
||||
} => {
|
||||
// Prompt for transcript password if not provided.
|
||||
let tp = match transcript_password {
|
||||
Some(p) => p,
|
||||
None => {
|
||||
eprint!("Transcript password: ");
|
||||
rpassword::read_password()
|
||||
.context("failed to read transcript password")?
|
||||
}
|
||||
};
|
||||
cmd_export(
|
||||
&conv_db,
|
||||
&conv_id,
|
||||
&output,
|
||||
&tp,
|
||||
db_password.as_deref().or(state_pw),
|
||||
)
|
||||
}
|
||||
Command::ExportVerify { input } => cmd_export_verify(&input),
|
||||
#[cfg(feature = "playbook")]
|
||||
Command::Run {
|
||||
playbook,
|
||||
state,
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
vars,
|
||||
} => {
|
||||
let state = derive_state_path(state, username.as_deref());
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(run_playbook(
|
||||
&playbook,
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
username.as_deref(),
|
||||
password.as_deref(),
|
||||
&args.access_token,
|
||||
args.device_id.as_deref(),
|
||||
&vars,
|
||||
))
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
228
crates/quicproquo-client/src/v2_commands.rs
Normal file
228
crates/quicproquo-client/src/v2_commands.rs
Normal file
@@ -0,0 +1,228 @@
|
||||
//! v2 CLI command implementations — thin wrappers over the SDK.
|
||||
|
||||
use quicproquo_sdk::client::QpqClient;
|
||||
use quicproquo_sdk::error::SdkError;
|
||||
|
||||
/// Register a new user account via OPAQUE.
|
||||
pub async fn cmd_register_user(
|
||||
client: &mut QpqClient,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
client.register(username, password).await?;
|
||||
let key = client.identity_key().unwrap_or_default();
|
||||
println!("registered user: {username}");
|
||||
println!("identity key : {}", hex::encode(key));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Log in via OPAQUE and print session info.
|
||||
pub async fn cmd_login(
|
||||
client: &mut QpqClient,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
client.login(username, password).await?;
|
||||
println!("logged in as: {username}");
|
||||
if let Some(key) = client.identity_key() {
|
||||
println!("identity key: {}", hex::encode(key));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Print local identity information.
|
||||
pub fn cmd_whoami(client: &QpqClient) {
|
||||
match client.username() {
|
||||
Some(u) => println!("username : {u}"),
|
||||
None => println!("username : (not logged in)"),
|
||||
}
|
||||
match client.identity_key() {
|
||||
Some(k) => println!("identity key: {}", hex::encode(k)),
|
||||
None => println!("identity key: (none)"),
|
||||
}
|
||||
println!("connected : {}", client.is_connected());
|
||||
println!("authenticated: {}", client.is_authenticated());
|
||||
}
|
||||
|
||||
/// Health check — connect to the server and report status.
|
||||
pub async fn cmd_health(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let start = std::time::Instant::now();
|
||||
// The SDK connect() already establishes a QUIC connection.
|
||||
// If we're already connected, just report success.
|
||||
if !client.is_connected() {
|
||||
client.connect().await?;
|
||||
}
|
||||
let rtt_ms = start.elapsed().as_millis();
|
||||
println!("status : ok");
|
||||
println!("rtt : {rtt_ms}ms");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Resolve a username to its identity key.
|
||||
pub async fn cmd_resolve(client: &mut QpqClient, username: &str) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
match quicproquo_sdk::users::resolve_user(rpc, username).await? {
|
||||
Some(key) => {
|
||||
println!("{username} -> {}", hex::encode(&key));
|
||||
}
|
||||
None => {
|
||||
println!("{username}: not found");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// List registered devices.
|
||||
pub async fn cmd_devices_list(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let devices = quicproquo_sdk::devices::list_devices(rpc).await?;
|
||||
if devices.is_empty() {
|
||||
println!("no devices registered");
|
||||
} else {
|
||||
println!("{:<36} {:<20} {}", "DEVICE ID", "NAME", "REGISTERED AT");
|
||||
for d in &devices {
|
||||
println!(
|
||||
"{:<36} {:<20} {}",
|
||||
hex::encode(&d.device_id),
|
||||
d.device_name,
|
||||
d.registered_at,
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Register a new device.
|
||||
pub async fn cmd_devices_register(
|
||||
client: &mut QpqClient,
|
||||
device_id: &str,
|
||||
device_name: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let id_bytes = hex::decode(device_id)
|
||||
.map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
|
||||
let was_new = quicproquo_sdk::devices::register_device(rpc, &id_bytes, device_name).await?;
|
||||
if was_new {
|
||||
println!("device registered: {device_name}");
|
||||
} else {
|
||||
println!("device already registered: {device_name}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Revoke a device.
|
||||
pub async fn cmd_devices_revoke(
|
||||
client: &mut QpqClient,
|
||||
device_id: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let id_bytes = hex::decode(device_id)
|
||||
.map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
|
||||
let revoked = quicproquo_sdk::devices::revoke_device(rpc, &id_bytes).await?;
|
||||
if revoked {
|
||||
println!("device revoked: {device_id}");
|
||||
} else {
|
||||
println!("device not found: {device_id}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Set up account recovery — generate codes and upload encrypted bundles.
|
||||
pub async fn cmd_recovery_setup(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
// Load identity seed from state file.
|
||||
let state_path = client.config_state_path();
|
||||
let stored = quicproquo_sdk::state::load_state(&state_path, None)
|
||||
.map_err(|e| SdkError::Crypto(format!("load identity for recovery: {e}")))?;
|
||||
|
||||
let rpc = client.rpc()?;
|
||||
let codes =
|
||||
quicproquo_sdk::recovery::setup_recovery(rpc, &stored.identity_seed, &[]).await?;
|
||||
|
||||
println!("=== RECOVERY CODES ===");
|
||||
println!("Save these codes securely. They will NOT be shown again.");
|
||||
println!("Each code can independently recover your account.");
|
||||
println!();
|
||||
for (i, code) in codes.iter().enumerate() {
|
||||
println!(" {}. {}", i + 1, code);
|
||||
}
|
||||
println!();
|
||||
println!("{} codes generated and uploaded.", codes.len());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Outbox commands ──────────────────────────────────────────────────────────
|
||||
|
||||
/// List pending outbox entries.
|
||||
pub fn cmd_outbox_list(client: &QpqClient) -> Result<(), SdkError> {
|
||||
let store = client.conversations()?;
|
||||
let entries = quicproquo_sdk::outbox::list_pending(store)?;
|
||||
if entries.is_empty() {
|
||||
println!("outbox is empty — no pending messages");
|
||||
} else {
|
||||
println!("{:<6} {:<34} {:<8} PAYLOAD SIZE", "ID", "CONVERSATION", "RETRIES");
|
||||
for e in &entries {
|
||||
println!(
|
||||
"{:<6} {:<34} {:<8} {} bytes",
|
||||
e.id,
|
||||
e.conversation_id.hex(),
|
||||
e.retry_count,
|
||||
e.payload.len(),
|
||||
);
|
||||
}
|
||||
println!("\n{} pending entries", entries.len());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retry sending all pending outbox entries.
|
||||
pub async fn cmd_outbox_retry(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let store = client.conversations()?;
|
||||
let (sent, failed) = quicproquo_sdk::outbox::flush_outbox(rpc, store).await?;
|
||||
println!("outbox flush: {sent} sent, {failed} permanently failed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clear permanently failed outbox entries.
|
||||
pub fn cmd_outbox_clear(client: &QpqClient) -> Result<(), SdkError> {
|
||||
let store = client.conversations()?;
|
||||
let cleared = quicproquo_sdk::outbox::clear_failed(store)?;
|
||||
println!("cleared {cleared} failed outbox entries");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Recover an account from a recovery code.
|
||||
pub async fn cmd_recovery_restore(
|
||||
client: &mut QpqClient,
|
||||
code: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let (identity_seed, conversation_ids) =
|
||||
quicproquo_sdk::recovery::recover_account(rpc, code).await?;
|
||||
|
||||
// Restore identity.
|
||||
let keypair = quicproquo_core::IdentityKeypair::from_seed(identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
|
||||
println!("account recovered successfully");
|
||||
println!("identity key: {}", hex::encode(keypair.public_key_bytes()));
|
||||
if !conversation_ids.is_empty() {
|
||||
println!(
|
||||
"{} conversations need rejoin (peers must re-invite this device)",
|
||||
conversation_ids.len()
|
||||
);
|
||||
}
|
||||
|
||||
// Save recovered state.
|
||||
let state = quicproquo_sdk::state::StoredState {
|
||||
identity_seed,
|
||||
group: None,
|
||||
hybrid_key: None,
|
||||
member_keys: Vec::new(),
|
||||
};
|
||||
let state_path = client.config_state_path();
|
||||
quicproquo_sdk::state::save_state(&state_path, &state, None)?;
|
||||
println!("state saved to {}", state_path.display());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
563
crates/quicproquo-client/src/v2_main.rs
Normal file
563
crates/quicproquo-client/src/v2_main.rs
Normal file
@@ -0,0 +1,563 @@
|
||||
//! v2 CLI entry point — thin shell over `quicproquo_sdk::QpqClient`.
|
||||
//!
|
||||
//! Activated via `--features v2`. Replaces the v1 Cap'n Proto RPC main
|
||||
//! with a simplified command surface backed by the SDK.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command as ProcessCommand;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use quicproquo_sdk::client::QpqClient;
|
||||
use quicproquo_sdk::config::ClientConfig;
|
||||
|
||||
use crate::v2_commands;
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
// NOTE: the `///` doc comments below are consumed by clap-derive and become
// the CLI help text — editing them changes runtime output.
#[derive(Debug, Parser)]
#[command(name = "qpq", about = "quicproquo CLI client (v2)", version)]
struct Args {
    /// Server address (host:port).
    #[arg(long, global = true, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
    server: String,

    /// TLS server name (must match certificate SAN).
    #[arg(long, global = true, default_value = "localhost", env = "QPQ_SERVER_NAME")]
    server_name: String,

    /// Path to local conversation database.
    #[arg(long, global = true, default_value = "conversations.db", env = "QPQ_CONV_DB")]
    db_path: PathBuf,

    /// Password for encrypting the local database.
    #[arg(long, global = true, env = "QPQ_DB_PASSWORD")]
    db_password: Option<String>,

    /// Path to the client state file (identity key, MLS state).
    #[arg(long, global = true, default_value = "qpq-state.bin", env = "QPQ_STATE")]
    state: PathBuf,

    /// DANGER: Skip TLS certificate verification. Development only.
    #[arg(
        long = "danger-accept-invalid-certs",
        global = true,
        env = "QPQ_DANGER_ACCEPT_INVALID_CERTS"
    )]
    danger_accept_invalid_certs: bool,

    /// Do not auto-start a local qpq-server.
    #[arg(long, global = true, env = "QPQ_NO_SERVER")]
    no_server: bool,

    // Subcommand to execute (required — there is no default in v2).
    #[command(subcommand)]
    command: Cmd,
}
|
||||
|
||||
// Top-level v2 subcommands. The `///` doc comments become clap help text —
// do not edit them casually.
#[derive(Debug, Subcommand)]
enum Cmd {
    /// Register a new user via OPAQUE (password never leaves the client).
    RegisterUser {
        /// Username for the new account.
        #[arg(long)]
        username: String,
        /// Password (used in OPAQUE PAKE; server never sees it).
        #[arg(long)]
        password: String,
    },

    /// Log in via OPAQUE and receive a session token.
    Login {
        #[arg(long)]
        username: String,
        #[arg(long)]
        password: String,
    },

    /// Show local identity info.
    Whoami,

    /// Server health check.
    Health,

    /// Send a message to a conversation.
    Send {
        /// Conversation name (group name or DM peer username).
        #[arg(long)]
        to: String,
        /// Message text.
        #[arg(long)]
        msg: String,
    },

    /// Receive pending messages from a conversation.
    Recv {
        /// Conversation name.
        #[arg(long)]
        from: String,
    },

    /// Start or resume a DM with a user.
    Dm {
        /// Peer username.
        username: String,
    },

    /// Group management commands.
    Group {
        #[command(subcommand)]
        action: GroupCmd,
    },

    /// Resolve a username to its identity key.
    Resolve {
        /// Username to look up.
        username: String,
    },

    /// Device management.
    Devices {
        #[command(subcommand)]
        action: DevicesCmd,
    },

    /// Account recovery management.
    Recovery {
        #[command(subcommand)]
        action: RecoveryCmd,
    },

    /// Offline outbox management.
    Outbox {
        #[command(subcommand)]
        action: OutboxCmd,
    },
}
|
||||
|
||||
// Subcommands under `qpq group`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum GroupCmd {
    /// Create a new group.
    Create {
        /// Group name.
        name: String,
    },
    /// Invite a user to a group.
    Invite {
        /// Group name.
        #[arg(long)]
        group: String,
        /// Username to invite.
        #[arg(long)]
        user: String,
    },
}
|
||||
|
||||
// Subcommands under `qpq devices`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum DevicesCmd {
    /// List registered devices.
    List,
    /// Register a new device.
    Register {
        /// Device ID (hex).
        #[arg(long)]
        id: String,
        /// Human-readable device name.
        #[arg(long)]
        name: String,
    },
    /// Revoke a device.
    Revoke {
        /// Device ID (hex).
        #[arg(long)]
        id: String,
    },
}
|
||||
|
||||
// Subcommands under `qpq recovery`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum RecoveryCmd {
    /// Generate recovery codes and upload encrypted bundles.
    Setup,
    /// Recover account from a recovery code.
    Restore {
        /// Recovery code (e.g. "A3B7K9").
        code: String,
    },
}
|
||||
|
||||
// Subcommands under `qpq outbox`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum OutboxCmd {
    /// Show pending outbox entries.
    List,
    /// Retry sending all pending outbox entries.
    Retry,
    /// Clear permanently failed outbox entries.
    Clear,
}
|
||||
|
||||
// ── Auto-server launch ───────────────────────────────────────────────────────
|
||||
|
||||
/// RAII guard that kills an auto-started server process on drop.
///
/// Holds `Some(child)` only when this process spawned the server itself;
/// `None` means an external server is in use and nothing is cleaned up.
struct ServerGuard(Option<std::process::Child>);

impl Drop for ServerGuard {
    fn drop(&mut self) {
        // Nothing to do when we did not spawn the server.
        let Some(child) = self.0.as_mut() else { return };
        // Best-effort: kill, then reap to avoid a zombie. Errors are ignored
        // (the child may already have exited).
        let _ = child.kill();
        let _ = child.wait();
    }
}
|
||||
|
||||
/// Find the `qpq-server` binary: same directory as current exe, then PATH.
///
/// Returns `None` when neither location yields an existing file.
fn find_server_binary() -> Option<PathBuf> {
    // Prefer a sibling of the running executable (typical install layout).
    let sibling = std::env::current_exe()
        .ok()
        .map(|exe| exe.with_file_name("qpq-server"))
        .filter(|p| p.exists());
    if sibling.is_some() {
        return sibling;
    }
    // Fall back to scanning every PATH entry.
    let paths = std::env::var_os("PATH")?;
    std::env::split_paths(&paths)
        .map(|dir| dir.join("qpq-server"))
        .find(|p| p.exists())
}
|
||||
|
||||
/// Try a QUIC health probe to the server address.
///
/// Resolves `server_addr` and attempts a TCP connection to the first resolved
/// address; returns `false` on any resolution or connection failure.
///
/// NOTE(review): QUIC runs over UDP, so this TCP probe only detects the
/// server if qpq-server also listens on TCP at the same port — confirm that
/// assumption, otherwise a running server may be reported as absent and
/// `ensure_server_running` will try to spawn a duplicate.
async fn probe_server(server_addr: &str) -> bool {
    use std::net::ToSocketAddrs;
    // Resolution can return multiple addresses; only the first is probed.
    let addr = match server_addr.to_socket_addrs() {
        Ok(mut addrs) => match addrs.next() {
            Some(a) => a,
            None => return false,
        },
        Err(_) => return false,
    };
    // Simple TCP probe — if the port is open, the server is likely running.
    tokio::net::TcpStream::connect(addr)
        .await
        .is_ok()
}
|
||||
|
||||
/// Start a local qpq-server if one isn't already running.
/// Returns a guard that kills the child on drop (if we started one).
///
/// Flow: honor `--no-server`, probe for an existing server, otherwise locate
/// the `qpq-server` binary, spawn it detached from our stdio, and poll with
/// exponential backoff until it answers or a 3-second deadline passes.
async fn ensure_server_running(
    server_addr: &str,
    data_dir: &Path,
    no_server: bool,
) -> anyhow::Result<ServerGuard> {
    // Caller explicitly opted out of auto-start.
    if no_server {
        return Ok(ServerGuard(None));
    }

    // A reachable server means nothing to do — and nothing to kill on drop.
    if probe_server(server_addr).await {
        return Ok(ServerGuard(None));
    }

    let binary = find_server_binary().ok_or_else(|| {
        anyhow::anyhow!(
            "server at {server_addr} is not reachable and qpq-server binary not found; \
            start a server manually or install qpq-server"
        )
    })?;

    // TLS material lives next to the state file (data_dir).
    let cert_path = data_dir.join("server-cert.der");
    let key_path = data_dir.join("server-key.der");

    eprintln!("starting server on {server_addr}...");

    let child = ProcessCommand::new(&binary)
        .args([
            "--allow-insecure-auth",
            "--listen",
            server_addr,
            "--tls-cert",
            &cert_path.to_string_lossy(),
            "--tls-key",
            &key_path.to_string_lossy(),
        ])
        // Detach the child's output from our terminal.
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .spawn()
        .with_context(|| format!("failed to spawn {}", binary.display()))?;

    // Wrap the child immediately so any early return below still kills it.
    let guard = ServerGuard(Some(child));

    // Poll until the server is ready.
    let mut delay = Duration::from_millis(100);
    let max_wait = Duration::from_secs(3);
    let start = std::time::Instant::now();

    loop {
        tokio::time::sleep(delay).await;

        if probe_server(server_addr).await {
            eprintln!("server ready");
            return Ok(guard);
        }

        if start.elapsed() > max_wait {
            // `guard` is dropped here, killing the unresponsive child.
            anyhow::bail!(
                "auto-started qpq-server but it did not become ready within {max_wait:?}"
            );
        }

        // Exponential backoff, capped at one second between probes.
        delay = (delay * 2).min(Duration::from_secs(1));
    }
}
|
||||
|
||||
// ── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Build a `ClientConfig` from CLI args.
|
||||
fn build_config(args: &Args) -> anyhow::Result<ClientConfig> {
|
||||
let server_addr = args
|
||||
.server
|
||||
.parse()
|
||||
.with_context(|| format!("invalid server address: {}", args.server))?;
|
||||
|
||||
Ok(ClientConfig {
|
||||
server_addr,
|
||||
server_name: args.server_name.clone(),
|
||||
db_path: args.db_path.clone(),
|
||||
db_password: args.db_password.clone(),
|
||||
state_path: args.state.clone(),
|
||||
accept_invalid_certs: args.danger_accept_invalid_certs,
|
||||
..ClientConfig::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// Build, connect, and return a `QpqClient`. Loads identity from state file
|
||||
/// if it exists.
|
||||
async fn connect_client(args: &Args) -> anyhow::Result<QpqClient> {
|
||||
let config = build_config(args)?;
|
||||
let mut client = QpqClient::new(config);
|
||||
|
||||
// Try loading identity from state file.
|
||||
if args.state.exists() {
|
||||
match quicproquo_sdk::state::load_state(&args.state, args.db_password.as_deref()) {
|
||||
Ok(stored) => {
|
||||
let keypair = quicproquo_core::IdentityKeypair::from_seed(stored.identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::debug!("could not load state from {}: {e}", args.state.display());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
client.connect().await.context("failed to connect to server")?;
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
// ── Entry point ──────────────────────────────────────────────────────────────
|
||||
|
||||
pub fn main() {
|
||||
// Install the rustls crypto provider before any TLS operations.
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap_or_else(|e| {
|
||||
eprintln!("fatal: {e}");
|
||||
std::process::exit(1);
|
||||
});
|
||||
|
||||
if let Err(e) = rt.block_on(run(args)) {
|
||||
eprintln!("error: {e:#}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
async fn run(args: Args) -> anyhow::Result<()> {
|
||||
// Auto-start server if needed (except for whoami which is local-only).
|
||||
let data_dir = args.state.parent().unwrap_or_else(|| Path::new("."));
|
||||
let _server_guard = match args.command {
|
||||
Cmd::Whoami => ServerGuard(None),
|
||||
_ => ensure_server_running(&args.server, data_dir, args.no_server).await?,
|
||||
};
|
||||
|
||||
match args.command {
|
||||
Cmd::RegisterUser {
|
||||
ref username,
|
||||
ref password,
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_register_user(&mut client, username, password)
|
||||
.await
|
||||
.context("register-user failed")?;
|
||||
}
|
||||
|
||||
Cmd::Login {
|
||||
ref username,
|
||||
ref password,
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_login(&mut client, username, password)
|
||||
.await
|
||||
.context("login failed")?;
|
||||
}
|
||||
|
||||
Cmd::Whoami => {
|
||||
// Whoami is local-only — create client without connecting.
|
||||
let config = build_config(&args)?;
|
||||
let mut client = QpqClient::new(config);
|
||||
if args.state.exists() {
|
||||
match quicproquo_sdk::state::load_state(
|
||||
&args.state,
|
||||
args.db_password.as_deref(),
|
||||
) {
|
||||
Ok(stored) => {
|
||||
let keypair =
|
||||
quicproquo_core::IdentityKeypair::from_seed(stored.identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("warning: could not load state: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
v2_commands::cmd_whoami(&client);
|
||||
}
|
||||
|
||||
Cmd::Health => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_health(&mut client)
|
||||
.await
|
||||
.context("health check failed")?;
|
||||
}
|
||||
|
||||
Cmd::Resolve { ref username } => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_resolve(&mut client, username)
|
||||
.await
|
||||
.context("resolve failed")?;
|
||||
}
|
||||
|
||||
Cmd::Dm { ref username } => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_resolve(&mut client, username)
|
||||
.await
|
||||
.context("dm setup failed")?;
|
||||
// For now, print the resolved key. Full DM creation requires
|
||||
// MLS group state, which will be handled in the REPL flow.
|
||||
println!("(DM creation with full MLS setup is available in the REPL)");
|
||||
}
|
||||
|
||||
Cmd::Send { ref to, ref msg } => {
|
||||
let _ = (to, msg);
|
||||
let _client = connect_client(&args).await?;
|
||||
// Full send requires MLS group state restoration — deferred to REPL.
|
||||
println!("(send is currently available in the REPL; one-shot send coming soon)");
|
||||
}
|
||||
|
||||
Cmd::Recv { ref from } => {
|
||||
let _ = from;
|
||||
let _client = connect_client(&args).await?;
|
||||
println!("(recv is currently available in the REPL; one-shot recv coming soon)");
|
||||
}
|
||||
|
||||
Cmd::Group {
|
||||
action: GroupCmd::Create { ref name },
|
||||
} => {
|
||||
let _ = name;
|
||||
let _client = connect_client(&args).await?;
|
||||
println!("(group create is currently available in the REPL; one-shot coming soon)");
|
||||
}
|
||||
|
||||
Cmd::Group {
|
||||
action:
|
||||
GroupCmd::Invite {
|
||||
ref group,
|
||||
ref user,
|
||||
},
|
||||
} => {
|
||||
let _ = (group, user);
|
||||
let _client = connect_client(&args).await?;
|
||||
println!("(group invite is currently available in the REPL; one-shot coming soon)");
|
||||
}
|
||||
|
||||
Cmd::Devices {
|
||||
action: DevicesCmd::List,
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_devices_list(&mut client)
|
||||
.await
|
||||
.context("devices list failed")?;
|
||||
}
|
||||
|
||||
Cmd::Devices {
|
||||
action: DevicesCmd::Register { ref id, ref name },
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_devices_register(&mut client, id, name)
|
||||
.await
|
||||
.context("device register failed")?;
|
||||
}
|
||||
|
||||
Cmd::Devices {
|
||||
action: DevicesCmd::Revoke { ref id },
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_devices_revoke(&mut client, id)
|
||||
.await
|
||||
.context("device revoke failed")?;
|
||||
}
|
||||
|
||||
Cmd::Recovery {
|
||||
action: RecoveryCmd::Setup,
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_recovery_setup(&mut client)
|
||||
.await
|
||||
.context("recovery setup failed")?;
|
||||
}
|
||||
|
||||
Cmd::Recovery {
|
||||
action: RecoveryCmd::Restore { ref code },
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_recovery_restore(&mut client, code)
|
||||
.await
|
||||
.context("recovery restore failed")?;
|
||||
}
|
||||
|
||||
Cmd::Outbox {
|
||||
action: OutboxCmd::List,
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_outbox_list(&client)
|
||||
.context("outbox list failed")?;
|
||||
}
|
||||
|
||||
Cmd::Outbox {
|
||||
action: OutboxCmd::Retry,
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_outbox_retry(&mut client)
|
||||
.await
|
||||
.context("outbox retry failed")?;
|
||||
}
|
||||
|
||||
Cmd::Outbox {
|
||||
action: OutboxCmd::Clear,
|
||||
} => {
|
||||
let mut client = connect_client(&args).await?;
|
||||
v2_commands::cmd_outbox_clear(&client)
|
||||
.context("outbox clear failed")?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
1956
crates/quicproquo-client/tests/e2e.rs
Normal file
1956
crates/quicproquo-client/tests/e2e.rs
Normal file
File diff suppressed because it is too large
Load Diff
87
crates/quicproquo-core/Cargo.toml
Normal file
87
crates/quicproquo-core/Cargo.toml
Normal file
@@ -0,0 +1,87 @@
|
||||
[package]
|
||||
name = "quicproquo-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicproquo."
|
||||
license = "MIT"
|
||||
|
||||
[features]
|
||||
default = ["native"]
|
||||
# The "native" feature enables MLS (openmls), OPAQUE, Cap'n Proto, tokio, and
|
||||
# filesystem-backed key storage. Disable it (--no-default-features) to compile
|
||||
# the pure-crypto subset to wasm32-unknown-unknown.
|
||||
native = [
|
||||
"dep:openmls",
|
||||
"dep:openmls_rust_crypto",
|
||||
"dep:openmls_traits",
|
||||
"dep:tls_codec",
|
||||
"dep:opaque-ke",
|
||||
"dep:bincode",
|
||||
"dep:capnp",
|
||||
"dep:quicproquo-proto",
|
||||
"dep:tokio",
|
||||
]
|
||||
|
||||
[dependencies]
|
||||
# Crypto — classical (always available, WASM-safe)
|
||||
x25519-dalek = { workspace = true }
|
||||
ed25519-dalek = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hmac = { workspace = true }
|
||||
hkdf = { workspace = true }
|
||||
ciborium = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Crypto — post-quantum hybrid KEM (M7) — always available, WASM-safe
|
||||
ml-kem = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE password-authenticated key exchange (native only)
|
||||
opaque-ke = { workspace = true, optional = true }
|
||||
|
||||
# Crypto — MLS (M2) (native only)
|
||||
openmls = { workspace = true, optional = true }
|
||||
openmls_rust_crypto = { workspace = true, optional = true }
|
||||
openmls_traits = { workspace = true, optional = true }
|
||||
tls_codec = { workspace = true, optional = true }
|
||||
bincode = { workspace = true, optional = true }
|
||||
|
||||
# Serialisation (native only)
|
||||
capnp = { workspace = true, optional = true }
|
||||
quicproquo-proto = { path = "../quicproquo-proto", optional = true }
|
||||
|
||||
# Async runtime (native only)
|
||||
tokio = { workspace = true, optional = true }
|
||||
|
||||
# WASM: provide getrandom with js backend
|
||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||
getrandom = { version = "0.2", features = ["js"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true }
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
prost = "0.13"
|
||||
|
||||
[[bench]]
|
||||
name = "serialization"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "mls_operations"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "hybrid_kem_bench"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "crypto_benchmarks"
|
||||
harness = false
|
||||
150
crates/quicproquo-core/benches/crypto_benchmarks.rs
Normal file
150
crates/quicproquo-core/benches/crypto_benchmarks.rs
Normal file
@@ -0,0 +1,150 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: Identity keypair operations, sealed sender, and message padding.
|
||||
//!
|
||||
//! Covers:
|
||||
//! - [`IdentityKeypair`] generation, signing, and signature verification
|
||||
//! - Sealed sender `seal` / `unseal` (Ed25519 sign + verify overhead)
|
||||
//! - Message padding `pad` / `unpad` at various payload sizes
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use quicproquo_core::{compute_safety_number, IdentityKeypair, padding};
|
||||
|
||||
// ── Identity keypair benchmarks ──────────────────────────────────────────────
|
||||
|
||||
/// Benchmark Ed25519 identity keypair generation.
fn bench_identity_keygen(c: &mut Criterion) {
    c.bench_function("identity_keygen", |b| {
        // black_box prevents the optimizer from eliding the key generation.
        b.iter(|| black_box(IdentityKeypair::generate()));
    });
}
|
||||
|
||||
/// Benchmark signing a fixed payload with a pre-generated identity key.
fn bench_identity_sign(c: &mut Criterion) {
    // Key generation happens once, outside the timed loop.
    let identity = IdentityKeypair::generate();
    let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";

    c.bench_function("identity_sign", |b| {
        b.iter(|| black_box(identity.sign_raw(black_box(payload))));
    });
}
|
||||
|
||||
/// Benchmark signature verification against a pre-computed signature.
fn bench_identity_verify(c: &mut Criterion) {
    // Sign once up front; only `verify_raw` runs inside the timed loop.
    let identity = IdentityKeypair::generate();
    let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";
    let sig = identity.sign_raw(payload);
    let pk = identity.public_key_bytes();

    c.bench_function("identity_verify", |b| {
        b.iter(|| {
            // unwrap is safe here: the signature was just produced over the
            // same payload by the same key, so verification must succeed.
            IdentityKeypair::verify_raw(
                black_box(&pk),
                black_box(payload),
                black_box(&sig),
            )
            .unwrap();
        });
    });
}
|
||||
|
||||
// ── Sealed sender benchmarks ─────────────────────────────────────────────────
|
||||
|
||||
/// Benchmark sealed-sender `seal` and `unseal` across payload sizes
/// (per the module docs, this exercises the Ed25519 sign/verify overhead).
fn bench_sealed_sender(c: &mut Criterion) {
    use quicproquo_core::sealed_sender::{seal, unseal};

    // (label, payload length in bytes) — typical chat payload sizes.
    let sizes: &[(&str, usize)] = &[
        ("32B", 32),
        ("256B", 256),
        ("1KB", 1024),
        ("4KB", 4096),
    ];

    let identity = IdentityKeypair::generate();

    let mut group = c.benchmark_group("sealed_sender_seal");
    for (label, size) in sizes {
        let payload = vec![0xABu8; *size];
        group.bench_with_input(
            BenchmarkId::from_parameter(label),
            &payload,
            |b, payload| {
                b.iter(|| black_box(seal(black_box(&identity), black_box(payload))));
            },
        );
    }
    group.finish();

    let mut group = c.benchmark_group("sealed_sender_unseal");
    for (label, size) in sizes {
        // Seal outside the timed loop so only `unseal` is measured.
        let payload = vec![0xABu8; *size];
        let sealed = seal(&identity, &payload);
        group.bench_with_input(
            BenchmarkId::from_parameter(label),
            &sealed,
            |b, sealed| {
                b.iter(|| black_box(unseal(black_box(sealed)).unwrap()));
            },
        );
    }
    group.finish();
}
|
||||
|
||||
// ── Message padding benchmarks ────────────────────────────────────────────────
|
||||
|
||||
/// Benchmark message padding `pad`/`unpad` — one payload size per padding
/// bucket, plus one oversized payload.
fn bench_padding(c: &mut Criterion) {
    // Representative sizes: one per bucket + oversized
    let sizes: &[(&str, usize)] = &[
        ("50B", 50), // → 256 bucket
        ("512B", 512), // → 1024 bucket
        ("2KB", 2048), // → 4096 bucket
        ("8KB", 8192), // → 16384 bucket
        ("20KB", 20480), // → 32768 (oversized)
    ];

    let mut group = c.benchmark_group("padding_pad");
    for (label, size) in sizes {
        let payload = vec![0xABu8; *size];
        group.bench_with_input(
            BenchmarkId::from_parameter(label),
            &payload,
            |b, payload| {
                b.iter(|| black_box(padding::pad(black_box(payload))));
            },
        );
    }
    group.finish();

    let mut group = c.benchmark_group("padding_unpad");
    for (label, size) in sizes {
        // Pad outside the timed loop so only `unpad` is measured.
        let payload = vec![0xABu8; *size];
        let padded = padding::pad(&payload);
        group.bench_with_input(
            BenchmarkId::from_parameter(label),
            &padded,
            |b, padded| {
                b.iter(|| black_box(padding::unpad(black_box(padded)).unwrap()));
            },
        );
    }
    group.finish();
}
|
||||
|
||||
// ── Safety number benchmarks ─────────────────────────────────────────────────
|
||||
|
||||
/// Benchmark safety-number derivation for a fixed pair of 32-byte keys.
fn bench_safety_number(c: &mut Criterion) {
    // Arbitrary but deterministic key material.
    let key_a = [0x1au8; 32];
    let key_b = [0x2bu8; 32];

    c.bench_function("safety_number", |b| {
        b.iter(|| black_box(compute_safety_number(black_box(&key_a), black_box(&key_b))));
    });
}
|
||||
|
||||
// Register all identity/sealed-sender/padding benches with criterion's harness.
criterion_group!(
    benches,
    bench_identity_keygen,
    bench_identity_sign,
    bench_identity_verify,
    bench_sealed_sender,
    bench_padding,
    bench_safety_number,
);
criterion_main!(benches);
|
||||
153
crates/quicproquo-core/benches/hybrid_kem_bench.rs
Normal file
153
crates/quicproquo-core/benches/hybrid_kem_bench.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: Hybrid KEM (X25519 + ML-KEM-768) vs classical-only encryption.
|
||||
//!
|
||||
//! Compares keypair generation, encryption, and decryption times for the
|
||||
//! hybrid post-quantum scheme against classical X25519 + ChaCha20-Poly1305.
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use quicproquo_core::{hybrid_encrypt, hybrid_decrypt, HybridKeypair};
|
||||
|
||||
// ── Classical baseline (X25519 + ChaCha20-Poly1305) ─────────────────────────
|
||||
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use hkdf::Hkdf;
|
||||
use rand::{rngs::OsRng, RngCore};
|
||||
use sha2::Sha256;
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
|
||||
|
||||
/// Classical baseline keypair: a static X25519 secret plus its public key.
/// Exists only as the comparison arm against `HybridKeypair` in these benches.
struct ClassicalKeypair {
    secret: StaticSecret,
    public: X25519Public,
}

impl ClassicalKeypair {
    /// Generate a fresh random X25519 keypair from the OS RNG.
    fn generate() -> Self {
        let secret = StaticSecret::random_from_rng(OsRng);
        let public = X25519Public::from(&secret);
        Self { secret, public }
    }
}
|
||||
|
||||
/// Classical baseline encryption: ephemeral X25519 ECDH → HKDF-SHA256 →
/// ChaCha20-Poly1305. Bench-only helper; unwraps on internal failure.
fn classical_encrypt(recipient_pk: &X25519Public, plaintext: &[u8]) -> Vec<u8> {
    // Fresh ephemeral key per message, DH'd against the recipient's static key.
    let eph_secret = EphemeralSecret::random_from_rng(OsRng);
    let eph_public = X25519Public::from(&eph_secret);
    let shared = eph_secret.diffie_hellman(recipient_pk);

    // Derive the symmetric key from the shared secret with a fixed info label.
    let hk = Hkdf::<Sha256>::new(None, shared.as_bytes());
    let mut key_bytes = [0u8; 32];
    hk.expand(b"classical-bench", &mut key_bytes).unwrap();

    // Random 96-bit AEAD nonce.
    let mut nonce_bytes = [0u8; 12];
    OsRng.fill_bytes(&mut nonce_bytes);

    let cipher = ChaCha20Poly1305::new(Key::from_slice(&key_bytes));
    let ct = cipher
        .encrypt(Nonce::from_slice(&nonce_bytes), plaintext)
        .unwrap();

    // Wire: eph_pk(32) || nonce(12) || ciphertext
    let mut out = Vec::with_capacity(32 + 12 + ct.len());
    out.extend_from_slice(eph_public.as_bytes());
    out.extend_from_slice(&nonce_bytes);
    out.extend_from_slice(&ct);
    out
}
|
||||
|
||||
/// Inverse of `classical_encrypt`. Bench-only helper: panics (via slicing /
/// unwrap) on envelopes shorter than 44 bytes or on authentication failure.
fn classical_decrypt(keypair: &ClassicalKeypair, envelope: &[u8]) -> Vec<u8> {
    // Split the wire format: eph_pk(32) || nonce(12) || ciphertext.
    let eph_pk = X25519Public::from(<[u8; 32]>::try_from(&envelope[..32]).unwrap());
    let nonce_bytes: [u8; 12] = envelope[32..44].try_into().unwrap();
    let ct = &envelope[44..];

    let shared = keypair.secret.diffie_hellman(&eph_pk);

    // Same HKDF info label as the encrypt side so the keys match.
    let hk = Hkdf::<Sha256>::new(None, shared.as_bytes());
    let mut key_bytes = [0u8; 32];
    hk.expand(b"classical-bench", &mut key_bytes).unwrap();

    let cipher = ChaCha20Poly1305::new(Key::from_slice(&key_bytes));
    cipher
        .decrypt(Nonce::from_slice(&nonce_bytes), ct)
        .unwrap()
}
|
||||
|
||||
// ── Benchmarks ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Compare keypair generation: hybrid (X25519 + ML-KEM-768) vs classical X25519.
fn bench_keygen(c: &mut Criterion) {
    let mut group = c.benchmark_group("kem_keygen");
    group.bench_function("hybrid", |b| {
        b.iter(|| black_box(HybridKeypair::generate()));
    });
    group.bench_function("classical", |b| {
        b.iter(|| black_box(ClassicalKeypair::generate()));
    });
    group.finish();
}
|
||||
|
||||
/// Compare encryption throughput: hybrid KEM vs classical, per payload size.
fn bench_encrypt(c: &mut Criterion) {
    let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)];
    let mut group = c.benchmark_group("kem_encrypt");

    // Recipient keys are generated once; only encryption runs in the timed loop.
    let hybrid_kp = HybridKeypair::generate();
    let hybrid_pk = hybrid_kp.public_key();
    let classical_kp = ClassicalKeypair::generate();

    for (label, size) in sizes {
        let payload = vec![0xABu8; *size];

        // Hybrid path: empty associated data / info arguments.
        group.bench_with_input(
            BenchmarkId::new("hybrid", label),
            &payload,
            |b, payload| {
                b.iter(|| hybrid_encrypt(&hybrid_pk, black_box(payload), b"", b"").unwrap());
            },
        );

        group.bench_with_input(
            BenchmarkId::new("classical", label),
            &payload,
            |b, payload| {
                b.iter(|| classical_encrypt(&classical_kp.public, black_box(payload)));
            },
        );
    }
    group.finish();
}
|
||||
|
||||
/// Compare decryption throughput: hybrid KEM vs classical, per payload size.
/// Ciphertexts are produced once per size, outside the timed loop.
fn bench_decrypt(c: &mut Criterion) {
    let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)];
    let mut group = c.benchmark_group("kem_decrypt");

    let hybrid_kp = HybridKeypair::generate();
    let hybrid_pk = hybrid_kp.public_key();
    let classical_kp = ClassicalKeypair::generate();

    for (label, size) in sizes {
        let payload = vec![0xABu8; *size];
        // Pre-encrypt so only the decrypt path is measured.
        let hybrid_ct = hybrid_encrypt(&hybrid_pk, &payload, b"", b"").unwrap();
        let classical_ct = classical_encrypt(&classical_kp.public, &payload);

        group.bench_with_input(
            BenchmarkId::new("hybrid", label),
            &hybrid_ct,
            |b, ct| {
                b.iter(|| hybrid_decrypt(&hybrid_kp, black_box(ct), b"", b"").unwrap());
            },
        );

        group.bench_with_input(
            BenchmarkId::new("classical", label),
            &classical_ct,
            |b, ct| {
                b.iter(|| classical_decrypt(&classical_kp, black_box(ct)));
            },
        );
    }
    group.finish();
}
|
||||
|
||||
// Register the KEM comparison benches with criterion's harness.
criterion_group!(benches, bench_keygen, bench_encrypt, bench_decrypt);
criterion_main!(benches);
|
||||
157
crates/quicproquo-core/benches/mls_operations.rs
Normal file
157
crates/quicproquo-core/benches/mls_operations.rs
Normal file
@@ -0,0 +1,157 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: MLS group operations at various group sizes.
|
||||
//!
|
||||
//! Measures KeyPackage generation, group creation, member addition,
|
||||
//! message encryption, and message decryption.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
|
||||
use quicproquo_core::{GroupMember, IdentityKeypair};
|
||||
|
||||
/// Create identities and a group of the given size.
/// Returns (creator, Vec<members>).
///
/// The creator makes the group, then each joiner generates a KeyPackage, is
/// added via `add_member`, and joins from the resulting Welcome message.
/// For `size <= 1` the member list is empty.
fn setup_group(size: usize) -> (GroupMember, Vec<GroupMember>) {
    let creator_id = Arc::new(IdentityKeypair::generate());
    let mut creator = GroupMember::new(creator_id);
    creator.create_group(b"bench-group").unwrap();

    // saturating_sub avoids underflow when size == 0.
    let mut members = Vec::with_capacity(size.saturating_sub(1));
    for _ in 1..size {
        let joiner_id = Arc::new(IdentityKeypair::generate());
        let mut joiner = GroupMember::new(joiner_id);
        let kp = joiner.generate_key_package().unwrap();

        // The creator commits the Add; the joiner consumes the Welcome.
        let (_commit, welcome) = creator.add_member(&kp).unwrap();
        joiner.join_group(&welcome).unwrap();
        members.push(joiner);
    }

    (creator, members)
}
|
||||
|
||||
/// Benchmark MLS KeyPackage generation; member construction happens in the
/// untimed `iter_batched` setup closure.
fn bench_keygen(c: &mut Criterion) {
    c.bench_function("mls_keygen", |b| {
        b.iter_batched(
            || {
                // Untimed: fresh identity + member per iteration.
                let id = Arc::new(IdentityKeypair::generate());
                GroupMember::new(id)
            },
            |mut member| {
                // Timed: KeyPackage generation only.
                member.generate_key_package().unwrap();
            },
            BatchSize::SmallInput,
        );
    });
}
|
||||
|
||||
/// Benchmark creating a new MLS group from a fresh member.
fn bench_group_create(c: &mut Criterion) {
    c.bench_function("mls_group_create", |b| {
        b.iter_batched(
            || {
                // Untimed: fresh identity + member per iteration.
                let id = Arc::new(IdentityKeypair::generate());
                GroupMember::new(id)
            },
            |mut member| {
                // Timed: group creation only.
                member.create_group(b"bench-group").unwrap();
            },
            BatchSize::SmallInput,
        );
    });
}
|
||||
|
||||
/// Measure adding one new member to an existing group of `size` members.
/// `sample_size(10)` because each sample rebuilds the whole group in setup,
/// which is expensive at the larger sizes.
fn bench_add_member(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_add_member");
    group.sample_size(10);
    for size in [2, 10, 50, 100] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            b.iter_batched(
                || {
                    // Untimed setup: full group plus a fresh joiner KeyPackage.
                    let (creator, members) = setup_group(size);
                    let joiner_id = Arc::new(IdentityKeypair::generate());
                    let mut joiner = GroupMember::new(joiner_id);
                    let kp = joiner.generate_key_package().unwrap();
                    (creator, members, joiner, kp)
                },
                |(mut creator, _members, _joiner, kp)| {
                    // Timed: the creator's Add operation only.
                    creator.add_member(&kp).unwrap();
                },
                BatchSize::SmallInput,
            );
        });
    }
    group.finish();
}
|
||||
|
||||
/// Measure committing a pending self-update proposal (an epoch rotation)
/// at several group sizes. `sample_size(10)` because setup rebuilds the
/// whole group per sample.
fn bench_epoch_rotation(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_epoch_rotation");
    group.sample_size(10);
    for size in [2, 10, 50] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            b.iter_batched(
                || {
                    let (mut creator, members) = setup_group(size);
                    // Propose a self-update to simulate epoch rotation
                    let proposal = creator.propose_self_update().unwrap();
                    (creator, members, proposal)
                },
                |(mut creator, _members, _proposal)| {
                    // Commit pending proposals (the self-update) to advance the epoch
                    creator.commit_pending_proposals().unwrap();
                },
                BatchSize::SmallInput,
            );
        });
    }
    group.finish();
}
|
||||
|
||||
/// Measure encrypting an application message as group size grows.
/// The group is built once per size; only `send_message` is timed.
fn bench_send_message(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_send_message");
    for size in [2, 10, 50] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            let (mut creator, _members) = setup_group(size);
            let payload = b"hello benchmark message";
            b.iter(|| {
                creator.send_message(payload).unwrap();
            });
        });
    }
    group.finish();
}
|
||||
|
||||
/// Measure decrypting an application message as group size grows.
fn bench_receive_message(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_receive_message");
    for size in [2, 10, 50] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            // For receive, we need a fresh ciphertext each iteration since
            // MLS message processing is destructive (epoch state changes).
            // We pre-generate a batch and consume them.
            let (mut creator, mut members) = setup_group(size);
            if members.is_empty() {
                // Defensive guard; with size >= 2 there is always a joiner.
                return;
            }
            let payload = b"hello benchmark message";
            b.iter_batched(
                // Untimed: the creator produces a fresh ciphertext per iteration.
                || creator.send_message(payload).unwrap(),
                |ct| {
                    // Receive on the first joiner
                    let _ = members[0].receive_message(&ct);
                },
                BatchSize::SmallInput,
            );
        });
    }
    group.finish();
}
|
||||
|
||||
// Register the MLS group-operation benches with criterion's harness.
criterion_group!(
    benches,
    bench_keygen,
    bench_group_create,
    bench_add_member,
    bench_epoch_rotation,
    bench_send_message,
    bench_receive_message,
);
criterion_main!(benches);
|
||||
171
crates/quicproquo-core/benches/serialization.rs
Normal file
171
crates/quicproquo-core/benches/serialization.rs
Normal file
@@ -0,0 +1,171 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: Cap'n Proto vs Protobuf serialization for chat message envelopes.
|
||||
//!
|
||||
//! Compares serialization/deserialization speed and encoded size at three
|
||||
//! payload sizes (100 B, 1 KB, 4 KB) for a typical Envelope{seq, data} message.
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
// ── Cap'n Proto path ────────────────────────────────────────────────────────
|
||||
|
||||
fn capnp_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
|
||||
let mut msg = capnp::message::Builder::new_default();
|
||||
{
|
||||
let mut envelope = msg.init_root::<quicproquo_proto::node_capnp::envelope::Builder>();
|
||||
envelope.set_seq(seq);
|
||||
envelope.set_data(data);
|
||||
}
|
||||
quicproquo_proto::to_bytes(&msg).unwrap()
|
||||
}
|
||||
|
||||
fn capnp_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
|
||||
let reader = quicproquo_proto::from_bytes(bytes).unwrap();
|
||||
let envelope = reader
|
||||
.get_root::<quicproquo_proto::node_capnp::envelope::Reader>()
|
||||
.unwrap();
|
||||
(envelope.get_seq(), envelope.get_data().unwrap().to_vec())
|
||||
}
|
||||
|
||||
// ── Protobuf path (hand-coded prost encoding to avoid build-dep) ────────────
|
||||
//
|
||||
// Envelope { seq: uint64 (field 1), data: bytes (field 2) }
|
||||
// Wire format: varint tag + varint seq + len-delimited data
|
||||
|
||||
fn protobuf_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
|
||||
// Build a prost message via raw encoding.
|
||||
// Field 1: uint64 seq, wire type 0 (varint), tag = (1 << 3) | 0 = 0x08
|
||||
// Field 2: bytes data, wire type 2 (length-delimited), tag = (2 << 3) | 2 = 0x12
|
||||
let mut buf = Vec::with_capacity(10 + data.len());
|
||||
// Encode field 1 (seq)
|
||||
prost::encoding::uint64::encode(1, &seq, &mut buf);
|
||||
// Encode field 2 (data)
|
||||
prost::encoding::bytes::encode(2, &data.to_vec(), &mut buf);
|
||||
buf
|
||||
}
|
||||
|
||||
/// Decode envelope bytes back into `(seq, data)` using prost's low-level
/// wire-format helpers; unknown fields are skipped.
///
/// Bench-only helper: panics (via `expect`) on malformed input.
fn protobuf_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
    // Decode manually using prost wire format
    let mut seq: u64 = 0;
    let mut data: Vec<u8> = Vec::new();
    let mut buf = bytes;

    while !buf.is_empty() {
        // Each key is a varint: (field_number << 3) | wire_type.
        let (tag, wire_type) =
            prost::encoding::decode_key(&mut buf).expect("decode key");
        match tag {
            1 => {
                prost::encoding::uint64::merge(wire_type, &mut seq, &mut buf, Default::default())
                    .expect("decode seq");
            }
            2 => {
                prost::encoding::bytes::merge(wire_type, &mut data, &mut buf, Default::default())
                    .expect("decode data");
            }
            _ => {
                // Forward compatibility: skip any field we don't recognise.
                prost::encoding::skip_field(wire_type, tag, &mut buf, Default::default())
                    .expect("skip unknown field");
            }
        }
    }
    (seq, data)
}
|
||||
|
||||
// ── Benchmarks ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Compare envelope serialization speed: Cap'n Proto vs hand-rolled protobuf.
fn bench_serialize(c: &mut Criterion) {
    let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096)];
    let mut group = c.benchmark_group("serialize_envelope");

    for (label, size) in sizes {
        let payload = vec![0xABu8; *size];
        let seq = 42u64;

        group.bench_with_input(
            BenchmarkId::new("capnp", label),
            &(&seq, &payload),
            |b, &(seq, payload)| {
                b.iter(|| capnp_serialize_envelope(black_box(*seq), black_box(payload)));
            },
        );

        group.bench_with_input(
            BenchmarkId::new("protobuf", label),
            &(&seq, &payload),
            |b, &(seq, payload)| {
                b.iter(|| protobuf_serialize_envelope(black_box(*seq), black_box(payload)));
            },
        );
    }
    group.finish();
}
|
||||
|
||||
/// Compare envelope deserialization speed: Cap'n Proto vs hand-rolled protobuf.
/// Both encodings are produced once per size, outside the timed loops.
fn bench_deserialize(c: &mut Criterion) {
    let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096)];
    let mut group = c.benchmark_group("deserialize_envelope");

    for (label, size) in sizes {
        let payload = vec![0xABu8; *size];
        let seq = 42u64;

        // Pre-encode so only the decode path is measured.
        let capnp_bytes = capnp_serialize_envelope(seq, &payload);
        let proto_bytes = protobuf_serialize_envelope(seq, &payload);

        group.bench_with_input(
            BenchmarkId::new("capnp", label),
            &capnp_bytes,
            |b, bytes| {
                b.iter(|| capnp_deserialize_envelope(black_box(bytes)));
            },
        );

        group.bench_with_input(
            BenchmarkId::new("protobuf", label),
            &proto_bytes,
            |b, bytes| {
                b.iter(|| protobuf_deserialize_envelope(black_box(bytes)));
            },
        );
    }
    group.finish();
}
|
||||
|
||||
/// Report encoded sizes for both formats. The timed closure is trivial
/// (just `len()`); the real output is the `eprintln!` size comparison.
fn bench_encoded_sizes(c: &mut Criterion) {
    let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096)];
    let mut group = c.benchmark_group("encoded_size");

    for (label, size) in sizes {
        let payload = vec![0xABu8; *size];
        let capnp_bytes = capnp_serialize_envelope(42, &payload);
        let proto_bytes = protobuf_serialize_envelope(42, &payload);

        // Use a trivial benchmark that just returns the size -- the point
        // is to get criterion to print the iteration count and allow
        // comparison. The real value is in the eprintln below.
        group.bench_with_input(
            BenchmarkId::new("capnp", label),
            &capnp_bytes,
            |b, bytes| {
                b.iter(|| black_box(bytes.len()));
            },
        );

        group.bench_with_input(
            BenchmarkId::new("protobuf", label),
            &proto_bytes,
            |b, bytes| {
                b.iter(|| black_box(bytes.len()));
            },
        );

        // Human-readable size comparison, printed once per payload size.
        eprintln!(
            " {label}: capnp={} bytes, protobuf={} bytes, overhead={:+} bytes",
            capnp_bytes.len(),
            proto_bytes.len(),
            capnp_bytes.len() as isize - proto_bytes.len() as isize,
        );
    }
    group.finish();
}
|
||||
|
||||
// Register the serialization comparison benches with criterion's harness.
criterion_group!(benches, bench_serialize, bench_deserialize, bench_encoded_sizes);
criterion_main!(benches);
|
||||
21
crates/quicproquo-core/proto/chat_message.proto
Normal file
21
crates/quicproquo-core/proto/chat_message.proto
Normal file
@@ -0,0 +1,21 @@
|
||||
syntax = "proto3";
package quicproquo.bench;

// Equivalent to the Envelope struct in delivery.capnp.
// Used only by the serialization benchmarks as the protobuf comparison arm.
message Envelope {
  uint64 seq = 1;
  bytes data = 2;
}

// Equivalent to a chat message payload (app_message.rs Chat variant)
message ChatMessage {
  bytes message_id = 1; // 16 bytes
  string body = 2; // UTF-8 text
  uint64 timestamp_ms = 3;
  bytes sender_key = 4; // 32 bytes Ed25519 public key
}

// Batch fetch response (equivalent to fetch returning List(Envelope))
message FetchResponse {
  repeated Envelope payloads = 1;
}
|
||||
524
crates/quicproquo-core/src/app_message.rs
Normal file
524
crates/quicproquo-core/src/app_message.rs
Normal file
@@ -0,0 +1,524 @@
|
||||
//! Rich application-layer message format for MLS application payloads.
|
||||
//!
|
||||
//! The server sees only opaque ciphertext; structure lives in this client-defined
|
||||
//! plaintext schema. All messages use: version byte (1) + message_type byte + type-specific payload.
|
||||
//!
|
||||
//! # Message ID
|
||||
//!
|
||||
//! `message_id` is assigned by the sender (16 random bytes) and included in the
|
||||
//! serialized payload for Chat (and implied for Reply/Reaction/ReadReceipt via ref_msg_id).
|
||||
//! Recipients can store message_ids to reference them in replies or reactions.
|
||||
|
||||
use crate::error::CoreError;
|
||||
use rand::RngCore;
|
||||
|
||||
/// Current schema version.
|
||||
pub const VERSION: u8 = 1;
|
||||
|
||||
/// Message type discriminant (one byte).
|
||||
/// Message type discriminant (one byte on the wire).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum MessageType {
    Chat = 0x01,
    Reply = 0x02,
    Reaction = 0x03,
    ReadReceipt = 0x04,
    Typing = 0x05,
    Edit = 0x06,
    Delete = 0x07,
    FileRef = 0x08,
    Dummy = 0x09,
}

impl MessageType {
    /// Decode a wire discriminant byte. Returns `None` for any value that does
    /// not correspond to a known message type.
    fn from_byte(b: u8) -> Option<Self> {
        use MessageType::*;
        Some(match b {
            0x01 => Chat,
            0x02 => Reply,
            0x03 => Reaction,
            0x04 => ReadReceipt,
            0x05 => Typing,
            0x06 => Edit,
            0x07 => Delete,
            0x08 => FileRef,
            0x09 => Dummy,
            _ => return None,
        })
    }
}
|
||||
|
||||
/// Parsed application message (one of the rich types).
|
||||
/// Parsed application message (one of the rich types).
///
/// Produced by [`parse`]; each variant mirrors one wire layout documented in
/// the layout comment block below.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AppMessage {
    /// Plain chat: body (UTF-8). message_id is included so recipients can store and reference it.
    Chat {
        // Sender-assigned 16-byte random ID (see `generate_message_id`).
        message_id: [u8; 16],
        // Body bytes; documented as UTF-8 but carried as raw bytes here.
        body: Vec<u8>,
    },
    /// Reply to a previously seen message, identified by its message_id.
    Reply {
        ref_msg_id: [u8; 16],
        body: Vec<u8>,
    },
    /// Emoji reaction attached to a previously seen message.
    Reaction {
        ref_msg_id: [u8; 16],
        // Raw emoji bytes (at most 255 on the wire — single length byte).
        emoji: Vec<u8>,
    },
    /// Acknowledges that the message with `msg_id` was read.
    ReadReceipt {
        msg_id: [u8; 16],
    },
    /// Typing indicator.
    Typing {
        /// 0 = stopped, 1 = typing
        // NOTE(review): the parser does not reject other values — confirm
        // whether values > 1 should be treated as "typing" or as an error.
        active: u8,
    },
    /// Edit a previously sent message (identified by ref_msg_id).
    Edit {
        ref_msg_id: [u8; 16],
        body: Vec<u8>,
    },
    /// Delete a previously sent message (identified by ref_msg_id).
    Delete {
        ref_msg_id: [u8; 16],
    },
    /// File reference: metadata pointing to a blob stored on the server.
    FileRef {
        blob_id: [u8; 32],
        filename: Vec<u8>,
        file_size: u64,
        mime_type: Vec<u8>,
    },
    /// Dummy message for traffic analysis resistance (no user-visible content).
    Dummy,
}
|
||||
|
||||
/// Generate a new 16-byte message ID (e.g. for Chat/Reply so recipients can reference it).
|
||||
pub fn generate_message_id() -> [u8; 16] {
|
||||
let mut id = [0u8; 16];
|
||||
rand::rngs::OsRng.fill_bytes(&mut id);
|
||||
id
|
||||
}
|
||||
|
||||
// ── Layout (minimal, no Cap'n Proto) ─────────────────────────────────────────
|
||||
//
|
||||
// All messages: [version: 1][type: 1][payload...]
|
||||
//
|
||||
// Chat: [msg_id: 16][body_len: 2 BE][body]
|
||||
// Reply: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||
// Reaction: [ref_msg_id: 16][emoji_len: 1][emoji]
|
||||
// ReadReceipt: [msg_id: 16]
|
||||
// Typing: [active: 1] 0 = stopped, 1 = typing
|
||||
// Edit: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||
// Delete: [ref_msg_id: 16]
|
||||
// FileRef: [blob_id: 32][filename_len: 2 BE][filename][file_size: 8 BE][mime_len: 2 BE][mime_type]
|
||||
|
||||
/// Serialize a rich message into the application payload format.
|
||||
pub fn serialize(msg_type: MessageType, payload: &[u8]) -> Vec<u8> {
|
||||
let mut out = Vec::with_capacity(2 + payload.len());
|
||||
out.push(VERSION);
|
||||
out.push(msg_type as u8);
|
||||
out.extend_from_slice(payload);
|
||||
out
|
||||
}
|
||||
|
||||
/// Serialize a Chat message (generates message_id internally; pass None to generate, or Some(id) when replying with a known id).
|
||||
pub fn serialize_chat(body: &[u8], message_id: Option<[u8; 16]>) -> Result<Vec<u8>, CoreError> {
|
||||
if body.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("chat body exceeds maximum length (65535 bytes)".into()));
|
||||
}
|
||||
let id = message_id.unwrap_or_else(generate_message_id);
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
Ok(serialize(MessageType::Chat, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Reply message.
|
||||
pub fn serialize_reply(ref_msg_id: [u8; 16], body: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if body.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("reply body exceeds maximum length (65535 bytes)".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
Ok(serialize(MessageType::Reply, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Reaction message.
|
||||
pub fn serialize_reaction(ref_msg_id: [u8; 16], emoji: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if emoji.len() > 255 {
|
||||
return Err(CoreError::AppMessage("emoji length > 255".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 1 + emoji.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.push(emoji.len() as u8);
|
||||
payload.extend_from_slice(emoji);
|
||||
Ok(serialize(MessageType::Reaction, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a ReadReceipt message.
|
||||
pub fn serialize_read_receipt(msg_id: [u8; 16]) -> Vec<u8> {
|
||||
serialize(MessageType::ReadReceipt, &msg_id)
|
||||
}
|
||||
|
||||
/// Serialize a Typing message (active: 0 = stopped, 1 = typing).
|
||||
pub fn serialize_typing(active: u8) -> Vec<u8> {
|
||||
let payload = [active];
|
||||
serialize(MessageType::Typing, &payload)
|
||||
}
|
||||
|
||||
/// Serialize an Edit message (replaces body of a previously sent message).
|
||||
pub fn serialize_edit(ref_msg_id: &[u8; 16], body: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if body.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("edit body exceeds maximum length (65535 bytes)".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(ref_msg_id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
Ok(serialize(MessageType::Edit, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Delete message (marks a previously sent message as deleted).
|
||||
pub fn serialize_delete(ref_msg_id: &[u8; 16]) -> Vec<u8> {
|
||||
serialize(MessageType::Delete, ref_msg_id)
|
||||
}
|
||||
|
||||
/// Serialize a FileRef message (metadata pointing to a blob on the server).
|
||||
pub fn serialize_file_ref(
|
||||
blob_id: &[u8; 32],
|
||||
filename: &[u8],
|
||||
file_size: u64,
|
||||
mime_type: &[u8],
|
||||
) -> Result<Vec<u8>, CoreError> {
|
||||
if filename.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("filename exceeds maximum length".into()));
|
||||
}
|
||||
if mime_type.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("mime_type exceeds maximum length".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(32 + 2 + filename.len() + 8 + 2 + mime_type.len());
|
||||
payload.extend_from_slice(blob_id);
|
||||
payload.extend_from_slice(&(filename.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(filename);
|
||||
payload.extend_from_slice(&file_size.to_be_bytes());
|
||||
payload.extend_from_slice(&(mime_type.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(mime_type);
|
||||
Ok(serialize(MessageType::FileRef, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Dummy message (traffic padding — no user content).
|
||||
pub fn serialize_dummy() -> Vec<u8> {
|
||||
serialize(MessageType::Dummy, &[])
|
||||
}
|
||||
|
||||
/// Parse bytes into (MessageType, AppMessage). Fails if version/type unknown or payload too short.
|
||||
pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
|
||||
if bytes.len() < 2 {
|
||||
return Err(CoreError::AppMessage("payload too short (need version + type)".into()));
|
||||
}
|
||||
let version = bytes[0];
|
||||
if version != VERSION {
|
||||
return Err(CoreError::AppMessage(format!("unsupported version {version}")));
|
||||
}
|
||||
let msg_type = MessageType::from_byte(bytes[1])
|
||||
.ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1])))?;
|
||||
let payload = &bytes[2..];
|
||||
|
||||
let app = match msg_type {
|
||||
MessageType::Chat => parse_chat(payload)?,
|
||||
MessageType::Reply => parse_reply(payload)?,
|
||||
MessageType::Reaction => parse_reaction(payload)?,
|
||||
MessageType::ReadReceipt => parse_read_receipt(payload)?,
|
||||
MessageType::Typing => parse_typing(payload)?,
|
||||
MessageType::Edit => parse_edit(payload)?,
|
||||
MessageType::Delete => parse_delete(payload)?,
|
||||
MessageType::FileRef => parse_file_ref(payload)?,
|
||||
MessageType::Dummy => AppMessage::Dummy,
|
||||
};
|
||||
Ok((msg_type, app))
|
||||
}
|
||||
|
||||
fn parse_chat(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Chat payload too short".into()));
|
||||
}
|
||||
let mut message_id = [0u8; 16];
|
||||
message_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Chat body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Chat { message_id, body })
|
||||
}
|
||||
|
||||
fn parse_reply(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Reply payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Reply body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Reply { ref_msg_id, body })
|
||||
}
|
||||
|
||||
fn parse_reaction(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 1 {
|
||||
return Err(CoreError::AppMessage("Reaction payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let emoji_len = payload[16] as usize;
|
||||
if payload.len() < 17 + emoji_len {
|
||||
return Err(CoreError::AppMessage("Reaction emoji length exceeds payload".into()));
|
||||
}
|
||||
let emoji = payload[17..17 + emoji_len].to_vec();
|
||||
Ok(AppMessage::Reaction { ref_msg_id, emoji })
|
||||
}
|
||||
|
||||
fn parse_read_receipt(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 {
|
||||
return Err(CoreError::AppMessage("ReadReceipt payload too short".into()));
|
||||
}
|
||||
let mut msg_id = [0u8; 16];
|
||||
msg_id.copy_from_slice(&payload[..16]);
|
||||
Ok(AppMessage::ReadReceipt { msg_id })
|
||||
}
|
||||
|
||||
fn parse_typing(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.is_empty() {
|
||||
return Err(CoreError::AppMessage("Typing payload empty".into()));
|
||||
}
|
||||
Ok(AppMessage::Typing { active: payload[0] })
|
||||
}
|
||||
|
||||
fn parse_edit(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Edit payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Edit body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Edit { ref_msg_id, body })
|
||||
}
|
||||
|
||||
fn parse_delete(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 {
|
||||
return Err(CoreError::AppMessage("Delete payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
Ok(AppMessage::Delete { ref_msg_id })
|
||||
}
|
||||
|
||||
fn parse_file_ref(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
// blob_id(32) + filename_len(2) minimum
|
||||
if payload.len() < 34 {
|
||||
return Err(CoreError::AppMessage("FileRef payload too short".into()));
|
||||
}
|
||||
let mut blob_id = [0u8; 32];
|
||||
blob_id.copy_from_slice(&payload[..32]);
|
||||
let filename_len = u16::from_be_bytes([payload[32], payload[33]]) as usize;
|
||||
let pos = 34;
|
||||
if payload.len() < pos + filename_len + 8 + 2 {
|
||||
return Err(CoreError::AppMessage("FileRef payload truncated after filename_len".into()));
|
||||
}
|
||||
let filename = payload[pos..pos + filename_len].to_vec();
|
||||
let pos = pos + filename_len;
|
||||
let file_size = u64::from_be_bytes([
|
||||
payload[pos], payload[pos + 1], payload[pos + 2], payload[pos + 3],
|
||||
payload[pos + 4], payload[pos + 5], payload[pos + 6], payload[pos + 7],
|
||||
]);
|
||||
let pos = pos + 8;
|
||||
let mime_len = u16::from_be_bytes([payload[pos], payload[pos + 1]]) as usize;
|
||||
let pos = pos + 2;
|
||||
if payload.len() < pos + mime_len {
|
||||
return Err(CoreError::AppMessage("FileRef payload truncated after mime_len".into()));
|
||||
}
|
||||
let mime_type = payload[pos..pos + mime_len].to_vec();
|
||||
Ok(AppMessage::FileRef { blob_id, filename, file_size, mime_type })
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[test]
    fn roundtrip_chat() {
        let body = b"hello";
        let encoded = serialize_chat(body, None).unwrap();
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::Chat);
        let AppMessage::Chat { message_id: _, body: decoded } = msg else {
            panic!("expected Chat");
        };
        assert_eq!(decoded.as_slice(), body);
    }

    #[test]
    fn roundtrip_reply() {
        let ref_id = [1u8; 16];
        let body = b"reply text";
        let encoded = serialize_reply(ref_id, body).unwrap();
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::Reply);
        let AppMessage::Reply { ref_msg_id, body: decoded } = msg else {
            panic!("expected Reply");
        };
        assert_eq!(ref_msg_id, ref_id);
        assert_eq!(decoded.as_slice(), body);
    }

    #[test]
    fn roundtrip_typing() {
        let encoded = serialize_typing(1);
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::Typing);
        let AppMessage::Typing { active } = msg else {
            panic!("expected Typing");
        };
        assert_eq!(active, 1);
    }

    #[test]
    fn roundtrip_reaction() {
        let ref_id = [2u8; 16];
        let emoji = "\u{1f44d}".as_bytes();
        let encoded = serialize_reaction(ref_id, emoji).unwrap();
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::Reaction);
        let AppMessage::Reaction { ref_msg_id, emoji: decoded } = msg else {
            panic!("expected Reaction");
        };
        assert_eq!(ref_msg_id, ref_id);
        assert_eq!(decoded.as_slice(), emoji);
    }

    #[test]
    fn roundtrip_read_receipt() {
        let msg_id = [3u8; 16];
        let encoded = serialize_read_receipt(msg_id);
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::ReadReceipt);
        let AppMessage::ReadReceipt { msg_id: decoded } = msg else {
            panic!("expected ReadReceipt");
        };
        assert_eq!(decoded, msg_id);
    }

    #[test]
    fn roundtrip_edit() {
        let ref_id = [4u8; 16];
        let body = b"edited text";
        let encoded = serialize_edit(&ref_id, body).unwrap();
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::Edit);
        let AppMessage::Edit { ref_msg_id, body: decoded } = msg else {
            panic!("expected Edit");
        };
        assert_eq!(ref_msg_id, ref_id);
        assert_eq!(decoded.as_slice(), body);
    }

    #[test]
    fn roundtrip_delete() {
        let ref_id = [5u8; 16];
        let encoded = serialize_delete(&ref_id);
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::Delete);
        let AppMessage::Delete { ref_msg_id } = msg else {
            panic!("expected Delete");
        };
        assert_eq!(ref_msg_id, ref_id);
    }

    #[test]
    fn edit_body_too_long() {
        // One byte past the u16::MAX length-field limit must be rejected.
        let oversized = vec![0u8; 65536];
        assert!(serialize_edit(&[0; 16], &oversized).is_err());
    }

    #[test]
    fn parse_empty_fails() {
        assert!(parse(&[]).is_err());
    }

    #[test]
    fn parse_bad_version_fails() {
        assert!(parse(&[99, 0x01]).is_err());
    }

    #[test]
    fn parse_bad_type_fails() {
        assert!(parse(&[1, 0xFF]).is_err());
    }

    #[test]
    fn chat_body_too_long() {
        // exceeds u16::MAX
        let oversized = vec![0u8; 65536];
        assert!(serialize_chat(&oversized, None).is_err());
    }

    #[test]
    fn reaction_emoji_too_long() {
        // One byte past the single-byte length-field limit.
        let oversized = vec![0u8; 256];
        assert!(serialize_reaction([0; 16], &oversized).is_err());
    }

    #[test]
    fn parse_truncated_chat_payload() {
        // Version + type + only 10 bytes of payload (needs 18 minimum for chat)
        let mut data = vec![1, 0x01];
        data.extend_from_slice(&[0u8; 10]);
        assert!(parse(&data).is_err());
    }

    #[test]
    fn roundtrip_file_ref() {
        let blob_id = [7u8; 32];
        let filename = b"report.pdf";
        let file_size = 123456u64;
        let mime_type = b"application/pdf";
        let encoded = serialize_file_ref(&blob_id, filename, file_size, mime_type).unwrap();
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::FileRef);
        let AppMessage::FileRef {
            blob_id: decoded_blob,
            filename: decoded_name,
            file_size: decoded_size,
            mime_type: decoded_mime,
        } = msg
        else {
            panic!("expected FileRef");
        };
        assert_eq!(decoded_blob, blob_id);
        assert_eq!(decoded_name.as_slice(), filename);
        assert_eq!(decoded_size, file_size);
        assert_eq!(decoded_mime.as_slice(), mime_type);
    }

    #[test]
    fn roundtrip_dummy() {
        let encoded = serialize_dummy();
        let (kind, msg) = parse(&encoded).unwrap();
        assert_eq!(kind, MessageType::Dummy);
        assert_eq!(msg, AppMessage::Dummy);
    }
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//! Error types for `quicnprotochat-core`.
|
||||
//! Error types for `quicproquo-core`.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
@@ -6,15 +6,24 @@ use thiserror::Error;
|
||||
#[derive(Debug, Error)]
|
||||
pub enum CoreError {
|
||||
/// Cap'n Proto serialisation or deserialisation failed.
|
||||
#[cfg(feature = "native")]
|
||||
#[error("Cap'n Proto error: {0}")]
|
||||
Capnp(#[from] capnp::Error),
|
||||
|
||||
/// An MLS operation failed.
|
||||
/// An MLS operation failed (string description).
|
||||
///
|
||||
/// The inner string is the debug representation of the openmls error.
|
||||
/// Preserved for backward compatibility. Prefer [`CoreError::MlsError`]
|
||||
/// for new code that wraps typed openmls errors.
|
||||
#[error("MLS error: {0}")]
|
||||
Mls(String),
|
||||
|
||||
/// An MLS operation failed (typed, boxed error).
|
||||
///
|
||||
/// Wraps the underlying openmls error so callers can downcast to specific
|
||||
/// error types when needed.
|
||||
#[error("MLS error: {0}")]
|
||||
MlsError(Box<dyn std::error::Error + Send + Sync>),
|
||||
|
||||
/// A hybrid KEM (X25519 + ML-KEM-768) operation failed.
|
||||
#[error("hybrid KEM error: {0}")]
|
||||
HybridKem(#[from] crate::hybrid_kem::HybridKemError),
|
||||
1005
crates/quicproquo-core/src/group.rs
Normal file
1005
crates/quicproquo-core/src/group.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -46,18 +46,50 @@ use openmls_traits::types::{
|
||||
|
||||
/// Crypto backend that uses hybrid KEM for HPKE when keys are in hybrid format,
|
||||
/// and delegates everything else to RustCrypto.
|
||||
///
|
||||
/// When `hybrid_enabled` is `true`, `derive_hpke_keypair` produces hybrid keys
|
||||
/// (1216-byte public, 2432-byte private). When `false`, it delegates to
|
||||
/// RustCrypto and produces classical 32-byte X25519 keys.
|
||||
///
|
||||
/// The `hpke_seal` / `hpke_open` methods always detect the key format by length,
|
||||
/// so they work correctly regardless of the flag — a hybrid-length key will use
|
||||
/// hybrid KEM, a classical-length key will use RustCrypto.
|
||||
#[derive(Debug)]
|
||||
pub struct HybridCrypto {
|
||||
rust_crypto: RustCrypto,
|
||||
/// When true, `derive_hpke_keypair` produces hybrid (X25519 + ML-KEM-768)
|
||||
/// keys. When false, it produces classical X25519 keys via RustCrypto.
|
||||
hybrid_enabled: bool,
|
||||
}
|
||||
|
||||
impl HybridCrypto {
|
||||
/// Create a hybrid-enabled crypto backend (derive_hpke_keypair produces hybrid keys).
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rust_crypto: RustCrypto::default(),
|
||||
hybrid_enabled: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Alias for `new()` — hybrid mode enabled.
|
||||
pub fn new_hybrid() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
|
||||
/// Create a classical crypto backend (derive_hpke_keypair produces standard
|
||||
/// X25519 keys, but seal/open still accept hybrid keys by length detection).
|
||||
pub fn new_classical() -> Self {
|
||||
Self {
|
||||
rust_crypto: RustCrypto::default(),
|
||||
hybrid_enabled: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this backend produces hybrid keys from `derive_hpke_keypair`.
|
||||
pub fn is_hybrid_enabled(&self) -> bool {
|
||||
self.hybrid_enabled
|
||||
}
|
||||
|
||||
/// Expose the underlying RustCrypto for rand() and delegation.
|
||||
pub fn rust_crypto(&self) -> &RustCrypto {
|
||||
&self.rust_crypto
|
||||
@@ -159,21 +191,23 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
ptxt: &[u8],
|
||||
) -> HpkeCiphertext {
|
||||
if Self::is_hybrid_public_key(pk_r) {
|
||||
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
|
||||
Ok(pk) => pk,
|
||||
Err(_) => return self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
|
||||
};
|
||||
match hybrid_encrypt(&recipient_pk, ptxt) {
|
||||
Ok(envelope) => {
|
||||
// The trait `OpenMlsCrypto::hpke_seal` returns `HpkeCiphertext` (not
|
||||
// `Result`), so we cannot propagate errors through the return type.
|
||||
// Returning an empty ciphertext would silently cause data loss.
|
||||
// Instead, panic on failure — a hybrid key that passes the length
|
||||
// check but fails deserialization or encryption indicates a critical
|
||||
// bug (corrupted key material), not a recoverable condition.
|
||||
let recipient_pk = HybridPublicKey::from_bytes(pk_r)
|
||||
.expect("hybrid public key deserialization failed — key material is corrupted");
|
||||
// Pass HPKE info and aad through for proper context binding (RFC 9180).
|
||||
let envelope = hybrid_encrypt(&recipient_pk, ptxt, info, aad)
|
||||
.expect("hybrid HPKE encryption failed — critical crypto error");
|
||||
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
|
||||
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
|
||||
HpkeCiphertext {
|
||||
kem_output: kem_output.into(),
|
||||
ciphertext: ciphertext.into(),
|
||||
}
|
||||
}
|
||||
Err(_) => self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
|
||||
}
|
||||
} else {
|
||||
self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
|
||||
}
|
||||
@@ -188,17 +222,17 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
if Self::is_hybrid_private_key(sk_r) {
|
||||
let keypair = match HybridKeypair::from_private_bytes(sk_r) {
|
||||
Ok(kp) => kp,
|
||||
Err(_) => return self.rust_crypto.hpke_open(config, input, sk_r, info, aad),
|
||||
};
|
||||
let keypair = HybridKeypair::from_private_bytes(sk_r)
|
||||
.map_err(|_| CryptoError::HpkeDecryptionError)?;
|
||||
let envelope: Vec<u8> = input
|
||||
.kem_output.as_slice()
|
||||
.iter()
|
||||
.chain(input.ciphertext.as_slice())
|
||||
.copied()
|
||||
.collect();
|
||||
hybrid_decrypt(&keypair, &envelope).map_err(|_| CryptoError::HpkeDecryptionError)
|
||||
// Pass HPKE info and aad through for proper context binding (RFC 9180).
|
||||
hybrid_decrypt(&keypair, &envelope, info, aad)
|
||||
.map_err(|_| CryptoError::HpkeDecryptionError)
|
||||
} else {
|
||||
self.rust_crypto.hpke_open(config, input, sk_r, info, aad)
|
||||
}
|
||||
@@ -213,14 +247,11 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
exporter_length: usize,
|
||||
) -> Result<(Vec<u8>, ExporterSecret), CryptoError> {
|
||||
if Self::is_hybrid_public_key(pk_r) {
|
||||
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
|
||||
Ok(pk) => pk,
|
||||
Err(_) => {
|
||||
return self.rust_crypto.hpke_setup_sender_and_export(
|
||||
config, pk_r, info, exporter_context, exporter_length,
|
||||
)
|
||||
}
|
||||
};
|
||||
// A key that passes the hybrid length check but fails deserialization
|
||||
// is corrupted — return an error instead of silently downgrading to
|
||||
// classical crypto (which would defeat PQ protection).
|
||||
let recipient_pk = HybridPublicKey::from_bytes(pk_r)
|
||||
.map_err(|_| CryptoError::SenderSetupError)?;
|
||||
let (kem_output, shared_secret) =
|
||||
hybrid_encapsulate_only(&recipient_pk).map_err(|_| CryptoError::SenderSetupError)?;
|
||||
let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
|
||||
@@ -256,10 +287,11 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
}
|
||||
|
||||
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> HpkeKeyPair {
|
||||
if config.0 == HpkeKemType::DhKem25519 {
|
||||
if self.hybrid_enabled && config.0 == HpkeKemType::DhKem25519 {
|
||||
let kp = HybridKeypair::derive_from_ikm(ikm);
|
||||
let private_bytes = kp.private_to_bytes();
|
||||
HpkeKeyPair {
|
||||
private: kp.private_to_bytes().into(),
|
||||
private: private_bytes.as_slice().into(),
|
||||
public: kp.public_key().to_bytes(),
|
||||
}
|
||||
} else {
|
||||
@@ -277,12 +309,32 @@ pub struct HybridCryptoProvider {
|
||||
}
|
||||
|
||||
impl HybridCryptoProvider {
|
||||
/// Create a hybrid-enabled provider (KeyPackages will contain hybrid init keys).
|
||||
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: HybridCrypto::new(),
|
||||
crypto: HybridCrypto::new_hybrid(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
|
||||
/// Alias for `new()` — hybrid mode enabled.
|
||||
pub fn new_hybrid(key_store: DiskKeyStore) -> Self {
|
||||
Self::new(key_store)
|
||||
}
|
||||
|
||||
/// Create a classical-mode provider (KeyPackages use standard X25519 init keys,
|
||||
/// but seal/open still accept hybrid keys by length detection).
|
||||
pub fn new_classical(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: HybridCrypto::new_classical(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this provider produces hybrid keys from `derive_hpke_keypair`.
|
||||
pub fn is_hybrid_enabled(&self) -> bool {
|
||||
self.crypto.is_hybrid_enabled()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for HybridCryptoProvider {
|
||||
@@ -312,6 +364,7 @@ impl OpenMlsCryptoProvider for HybridCryptoProvider {
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use openmls_traits::types::HpkeKdfType;
|
||||
@@ -398,6 +451,52 @@ mod tests {
|
||||
assert_eq!(sender_exported.as_ref(), receiver_exported.as_ref());
|
||||
}
|
||||
|
||||
/// Classical mode: derive_hpke_keypair produces standard 32-byte X25519 keys.
|
||||
#[test]
|
||||
fn classical_mode_produces_standard_keys() {
|
||||
let crypto = HybridCrypto::new_classical();
|
||||
let ikm = b"test-ikm-for-classical-hpke";
|
||||
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||
// Classical X25519 keys are 32 bytes
|
||||
assert_eq!(keypair.public.len(), 32);
|
||||
assert_eq!(keypair.private.as_ref().len(), 32);
|
||||
}
|
||||
|
||||
/// Classical mode round-trip: seal/open works with classical keys.
|
||||
#[test]
|
||||
fn classical_mode_seal_open_round_trip() {
|
||||
let crypto = HybridCrypto::new_classical();
|
||||
let ikm = b"test-ikm-for-classical-round-trip";
|
||||
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||
assert_eq!(keypair.public.len(), 32); // classical key
|
||||
|
||||
let plaintext = b"hello classical MLS";
|
||||
let info = b"mls 1.0 test";
|
||||
let aad = b"additional data";
|
||||
|
||||
let ct = crypto.hpke_seal(
|
||||
hpke_config_dhkem_x25519(),
|
||||
&keypair.public,
|
||||
info,
|
||||
aad,
|
||||
plaintext,
|
||||
);
|
||||
assert!(!ct.kem_output.as_slice().is_empty());
|
||||
|
||||
let decrypted = crypto
|
||||
.hpke_open(
|
||||
hpke_config_dhkem_x25519(),
|
||||
&ct,
|
||||
keypair.private.as_ref(),
|
||||
info,
|
||||
aad,
|
||||
)
|
||||
.expect("hpke_open with classical keys");
|
||||
assert_eq!(decrypted.as_slice(), plaintext);
|
||||
}
|
||||
|
||||
/// KeyPackage generation with HybridCryptoProvider (validates full HPKE path in MLS).
|
||||
#[test]
|
||||
fn key_package_generation_with_hybrid_provider() {
|
||||
@@ -41,8 +41,14 @@ use ml_kem::kem::{DecapsulationKey, EncapsulationKey};
|
||||
const HYBRID_VERSION: u8 = 0x01;
|
||||
|
||||
/// HKDF info string for domain separation.
|
||||
/// Frozen at the original project name for backward compatibility with existing
|
||||
/// encrypted state files and messages. Do not change.
|
||||
const HKDF_INFO: &[u8] = b"quicnprotochat-hybrid-v1";
|
||||
|
||||
/// HKDF salt for domain separation (defence-in-depth; IKM already has 64 bytes of entropy).
|
||||
/// Frozen — see [`HKDF_INFO`].
|
||||
const HKDF_SALT: &[u8] = b"quicnprotochat-hybrid-v1-salt";
|
||||
|
||||
/// ML-KEM-768 ciphertext size in bytes.
|
||||
const MLKEM_CT_LEN: usize = 1088;
|
||||
|
||||
@@ -119,6 +125,7 @@ pub struct HybridPublicKey {
|
||||
}
|
||||
|
||||
/// HKDF info for deriving HPKE keypair seed from IKM (MLS compatibility).
|
||||
/// Frozen — see [`HKDF_INFO`].
|
||||
const HKDF_INFO_HPKE_KEYPAIR: &[u8] = b"quicnprotochat-hybrid-hpke-keypair-v1";
|
||||
|
||||
impl HybridKeypair {
|
||||
@@ -152,11 +159,14 @@ impl HybridKeypair {
|
||||
}
|
||||
|
||||
/// Serialise private key for MLS key store: x25519_sk(32) || mlkem_dk(2400).
|
||||
pub fn private_to_bytes(&self) -> Vec<u8> {
|
||||
///
|
||||
/// The returned value is wrapped in [`Zeroizing`] so secret key material
|
||||
/// is securely erased when dropped.
|
||||
pub fn private_to_bytes(&self) -> Zeroizing<Vec<u8>> {
|
||||
let mut out = Vec::with_capacity(HYBRID_PRIVATE_KEY_LEN);
|
||||
out.extend_from_slice(self.x25519_sk.as_bytes());
|
||||
out.extend_from_slice(self.mlkem_dk.as_bytes().as_slice());
|
||||
out
|
||||
Zeroizing::new(out)
|
||||
}
|
||||
|
||||
/// Reconstruct a hybrid keypair from private key bytes (from MLS key store).
|
||||
@@ -164,7 +174,8 @@ impl HybridKeypair {
|
||||
if bytes.len() != HYBRID_PRIVATE_KEY_LEN {
|
||||
return Err(HybridKemError::TooShort(bytes.len()));
|
||||
}
|
||||
let x25519_sk = StaticSecret::from(<[u8; 32]>::try_from(&bytes[0..32]).unwrap());
|
||||
let x25519_sk = StaticSecret::from(<[u8; 32]>::try_from(&bytes[0..32])
|
||||
.expect("slice is exactly 32 bytes (guaranteed by HYBRID_PRIVATE_KEY_LEN check)"));
|
||||
let x25519_pk = X25519Public::from(&x25519_sk);
|
||||
|
||||
let mlkem_dk_arr = Array::try_from(&bytes[32..32 + MLKEM_DK_LEN])
|
||||
@@ -247,10 +258,15 @@ impl HybridPublicKey {
|
||||
|
||||
/// Encrypt `plaintext` to `recipient_pk` using X25519 + ML-KEM-768 hybrid KEM.
|
||||
///
|
||||
/// `info` is optional HPKE context info incorporated into key derivation.
|
||||
/// `aad` is optional additional authenticated data bound to the AEAD ciphertext.
|
||||
///
|
||||
/// Returns the complete hybrid envelope as a byte vector.
|
||||
pub fn hybrid_encrypt(
|
||||
recipient_pk: &HybridPublicKey,
|
||||
plaintext: &[u8],
|
||||
info: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, HybridKemError> {
|
||||
// 1. Ephemeral X25519 DH
|
||||
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||
@@ -266,18 +282,19 @@ pub fn hybrid_encrypt(
|
||||
.encapsulate(&mut OsRng)
|
||||
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||
|
||||
// 3. Derive AEAD key from combined shared secrets
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
// 3. Derive AEAD key from combined shared secrets (with caller info for context binding)
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), info);
|
||||
|
||||
// Generate a random 12-byte nonce (not derived from HKDF).
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
let aead_nonce = *Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
// 4. AEAD encrypt
|
||||
// 4. AEAD encrypt with caller-supplied AAD
|
||||
let cipher = ChaCha20Poly1305::new(&aead_key);
|
||||
let aead_payload = chacha20poly1305::aead::Payload { msg: plaintext, aad };
|
||||
let ct = cipher
|
||||
.encrypt(&aead_nonce, plaintext)
|
||||
.encrypt(&aead_nonce, aead_payload)
|
||||
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||
|
||||
// 5. Assemble envelope: version || x25519_eph_pk || mlkem_ct || nonce || aead_ct
|
||||
@@ -292,7 +309,14 @@ pub fn hybrid_encrypt(
|
||||
}
|
||||
|
||||
/// Decrypt a hybrid envelope using the recipient's private key.
|
||||
pub fn hybrid_decrypt(keypair: &HybridKeypair, envelope: &[u8]) -> Result<Vec<u8>, HybridKemError> {
|
||||
///
|
||||
/// `info` and `aad` must match what was passed to `hybrid_encrypt`.
|
||||
pub fn hybrid_decrypt(
|
||||
keypair: &HybridKeypair,
|
||||
envelope: &[u8],
|
||||
info: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, HybridKemError> {
|
||||
if envelope.len() < HEADER_LEN + 16 {
|
||||
// 16 = minimum AEAD tag
|
||||
return Err(HybridKemError::TooShort(envelope.len()));
|
||||
@@ -334,13 +358,14 @@ pub fn hybrid_decrypt(keypair: &HybridKeypair, envelope: &[u8]) -> Result<Vec<u8
|
||||
.decapsulate(&mlkem_ct_arr)
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
|
||||
// 3. Derive AEAD key
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
// 3. Derive AEAD key (with caller info for context binding)
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), info);
|
||||
|
||||
// 4. Decrypt
|
||||
// 4. Decrypt with caller-supplied AAD
|
||||
let cipher = ChaCha20Poly1305::new(&aead_key);
|
||||
let aead_payload = chacha20poly1305::aead::Payload { msg: aead_ct, aad };
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, aead_ct)
|
||||
.decrypt(nonce, aead_payload)
|
||||
.map_err(|_| HybridKemError::DecryptionFailed)?;
|
||||
|
||||
Ok(plaintext)
|
||||
@@ -366,8 +391,9 @@ pub fn hybrid_encapsulate_only(
|
||||
.encapsulate(&mut OsRng)
|
||||
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
let shared_secret = aead_key.as_slice().try_into().unwrap();
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), b"");
|
||||
let shared_secret: [u8; 32] = aead_key.as_slice().try_into()
|
||||
.expect("AEAD key is always exactly 32 bytes");
|
||||
|
||||
let mut kem_output = Vec::with_capacity(HYBRID_KEM_OUTPUT_LEN);
|
||||
kem_output.push(HYBRID_VERSION);
|
||||
@@ -390,7 +416,8 @@ pub fn hybrid_decapsulate_only(
|
||||
return Err(HybridKemError::UnsupportedVersion(kem_output[0]));
|
||||
}
|
||||
|
||||
let eph_pk_bytes: [u8; 32] = kem_output[1..33].try_into().unwrap();
|
||||
let eph_pk_bytes: [u8; 32] = kem_output[1..33].try_into()
|
||||
.expect("slice is exactly 32 bytes (guaranteed by HYBRID_KEM_OUTPUT_LEN check)");
|
||||
let eph_pk = X25519Public::from(eph_pk_bytes);
|
||||
let x25519_ss = keypair.x25519_sk.diffie_hellman(&eph_pk);
|
||||
|
||||
@@ -401,8 +428,9 @@ pub fn hybrid_decapsulate_only(
|
||||
.decapsulate(&mlkem_ct_arr)
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
Ok(aead_key.as_slice().try_into().unwrap())
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), b"");
|
||||
Ok(aead_key.as_slice().try_into()
|
||||
.expect("AEAD key is always exactly 32 bytes"))
|
||||
}
|
||||
|
||||
/// Export a secret from shared secret (MLS HPKE exporter compatibility).
|
||||
@@ -412,7 +440,7 @@ pub fn hybrid_export(
|
||||
exporter_context: &[u8],
|
||||
length: usize,
|
||||
) -> Vec<u8> {
|
||||
let hk = Hkdf::<Sha256>::new(None, shared_secret);
|
||||
let hk = Hkdf::<Sha256>::new(Some(HKDF_SALT), shared_secret);
|
||||
let mut out = vec![0u8; length];
|
||||
hk.expand(exporter_context, &mut out).expect("valid length");
|
||||
out
|
||||
@@ -420,18 +448,26 @@ pub fn hybrid_export(
|
||||
|
||||
/// Derive AEAD key from the combined X25519 + ML-KEM shared secrets.
|
||||
///
|
||||
/// `extra_info` is optional caller-supplied context (e.g. HPKE `info`) that is
|
||||
/// appended to the domain-separation label for additional binding.
|
||||
///
|
||||
/// The nonce is generated randomly per-encryption rather than derived from
|
||||
/// HKDF, preventing nonce reuse when the same shared secret is (accidentally)
|
||||
/// used more than once.
|
||||
fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8]) -> Key {
|
||||
fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8], extra_info: &[u8]) -> Key {
|
||||
let mut ikm = Zeroizing::new(vec![0u8; x25519_ss.len() + mlkem_ss.len()]);
|
||||
ikm[..x25519_ss.len()].copy_from_slice(x25519_ss);
|
||||
ikm[x25519_ss.len()..].copy_from_slice(mlkem_ss);
|
||||
|
||||
let hk = Hkdf::<Sha256>::new(None, &ikm);
|
||||
let hk = Hkdf::<Sha256>::new(Some(HKDF_SALT), &ikm);
|
||||
|
||||
// Combine domain-separation label with caller-supplied context.
|
||||
let mut info = Vec::with_capacity(HKDF_INFO.len() + extra_info.len());
|
||||
info.extend_from_slice(HKDF_INFO);
|
||||
info.extend_from_slice(extra_info);
|
||||
|
||||
let mut key_bytes = Zeroizing::new([0u8; 32]);
|
||||
hk.expand(HKDF_INFO, &mut *key_bytes)
|
||||
hk.expand(&info, &mut *key_bytes)
|
||||
.expect("32 bytes is valid HKDF-SHA256 output length");
|
||||
|
||||
*Key::from_slice(&*key_bytes)
|
||||
@@ -440,6 +476,7 @@ fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8]) -> Key {
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -457,21 +494,39 @@ mod tests {
|
||||
let pk = kp.public_key();
|
||||
let plaintext = b"hello post-quantum world!";
|
||||
|
||||
let envelope = hybrid_encrypt(&pk, plaintext).unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope).unwrap();
|
||||
let envelope = hybrid_encrypt(&pk, plaintext, b"", b"").unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope, b"", b"").unwrap();
|
||||
|
||||
assert_eq!(recovered, plaintext);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn encrypt_decrypt_with_info_aad() {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
let plaintext = b"context-bound payload";
|
||||
let info = b"mls epoch 42";
|
||||
let aad = b"group-id-abc";
|
||||
|
||||
let envelope = hybrid_encrypt(&pk, plaintext, info, aad).unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope, info, aad).unwrap();
|
||||
assert_eq!(recovered, plaintext);
|
||||
|
||||
// Mismatched info must fail
|
||||
assert!(hybrid_decrypt(&kp, &envelope, b"wrong info", aad).is_err());
|
||||
// Mismatched aad must fail
|
||||
assert!(hybrid_decrypt(&kp, &envelope, info, b"wrong aad").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrong_key_decryption_fails() {
|
||||
let kp_sender_target = HybridKeypair::generate();
|
||||
let kp_wrong = HybridKeypair::generate();
|
||||
|
||||
let pk = kp_sender_target.public_key();
|
||||
let envelope = hybrid_encrypt(&pk, b"secret").unwrap();
|
||||
let envelope = hybrid_encrypt(&pk, b"secret", b"", b"").unwrap();
|
||||
|
||||
let result = hybrid_decrypt(&kp_wrong, &envelope);
|
||||
let result = hybrid_decrypt(&kp_wrong, &envelope, b"", b"");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
@@ -480,12 +535,12 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
let last = envelope.len() - 1;
|
||||
envelope[last] ^= 0x01;
|
||||
|
||||
assert!(matches!(
|
||||
hybrid_decrypt(&kp, &envelope),
|
||||
hybrid_decrypt(&kp, &envelope, b"", b""),
|
||||
Err(HybridKemError::DecryptionFailed)
|
||||
));
|
||||
}
|
||||
@@ -495,11 +550,11 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
// Flip a byte in the ML-KEM ciphertext region (starts at offset 33)
|
||||
envelope[40] ^= 0xFF;
|
||||
|
||||
assert!(hybrid_decrypt(&kp, &envelope).is_err());
|
||||
assert!(hybrid_decrypt(&kp, &envelope, b"", b"").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -507,11 +562,11 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
// Flip a byte in the X25519 ephemeral pk region (offset 1..33)
|
||||
envelope[5] ^= 0xFF;
|
||||
|
||||
assert!(hybrid_decrypt(&kp, &envelope).is_err());
|
||||
assert!(hybrid_decrypt(&kp, &envelope, b"", b"").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -519,11 +574,11 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
envelope[0] = 0xFF;
|
||||
|
||||
assert!(matches!(
|
||||
hybrid_decrypt(&kp, &envelope),
|
||||
hybrid_decrypt(&kp, &envelope, b"", b""),
|
||||
Err(HybridKemError::UnsupportedVersion(0xFF))
|
||||
));
|
||||
}
|
||||
@@ -532,7 +587,7 @@ mod tests {
|
||||
fn envelope_too_short_rejected() {
|
||||
let kp = HybridKeypair::generate();
|
||||
assert!(matches!(
|
||||
hybrid_decrypt(&kp, &[0x01; 10]),
|
||||
hybrid_decrypt(&kp, &[0x01; 10], b"", b""),
|
||||
Err(HybridKemError::TooShort(10))
|
||||
));
|
||||
}
|
||||
@@ -548,8 +603,8 @@ mod tests {
|
||||
|
||||
// Verify restored keypair can decrypt
|
||||
let pk = kp.public_key();
|
||||
let ct = hybrid_encrypt(&pk, b"test").unwrap();
|
||||
let pt = hybrid_decrypt(&restored, &ct).unwrap();
|
||||
let ct = hybrid_encrypt(&pk, b"test", b"", b"").unwrap();
|
||||
let pt = hybrid_decrypt(&restored, &ct, b"", b"").unwrap();
|
||||
assert_eq!(pt, b"test");
|
||||
}
|
||||
|
||||
@@ -570,8 +625,8 @@ mod tests {
|
||||
let pk = kp.public_key();
|
||||
let plaintext = vec![0xAB; 50_000]; // 50 KB
|
||||
|
||||
let envelope = hybrid_encrypt(&pk, &plaintext).unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope).unwrap();
|
||||
let envelope = hybrid_encrypt(&pk, &plaintext, b"", b"").unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope, b"", b"").unwrap();
|
||||
|
||||
assert_eq!(recovered, plaintext);
|
||||
}
|
||||
245
crates/quicproquo-core/src/identity.rs
Normal file
245
crates/quicproquo-core/src/identity.rs
Normal file
@@ -0,0 +1,245 @@
|
||||
//! Ed25519 identity keypair for MLS credentials and AS registration.
|
||||
//!
|
||||
//! The [`IdentityKeypair`] is the long-term identity key embedded in MLS
|
||||
//! `BasicCredential`s. It is used for signing MLS messages and as the
|
||||
//! indexing key for the Authentication Service.
|
||||
//!
|
||||
//! # Zeroize
|
||||
//!
|
||||
//! The 32-byte private seed is stored as `Zeroizing<[u8; 32]>`, which zeroes
|
||||
//! the bytes on drop. `[u8; 32]` is `Copy + Default` and satisfies zeroize's
|
||||
//! `DefaultIsZeroes` constraint, avoiding a conflict with ed25519-dalek's
|
||||
//! `SigningKey` zeroize impl.
|
||||
//!
|
||||
//! # Fingerprint
|
||||
//!
|
||||
//! A 32-byte SHA-256 digest of the raw public key bytes is used as a compact,
|
||||
//! collision-resistant identifier for logging.
|
||||
|
||||
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
/// An Ed25519 identity keypair.
|
||||
///
|
||||
/// Created with [`IdentityKeypair::generate`]. The private signing key seed
|
||||
/// is zeroed when this struct is dropped.
|
||||
pub struct IdentityKeypair {
|
||||
/// Raw 32-byte private seed — zeroized on drop.
|
||||
///
|
||||
/// Stored as bytes rather than `SigningKey` to satisfy zeroize's
|
||||
/// `DefaultIsZeroes` bound on `Zeroizing<T>`.
|
||||
seed: Zeroizing<[u8; 32]>,
|
||||
/// Corresponding 32-byte public verifying key.
|
||||
verifying: VerifyingKey,
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Recreate an identity keypair from a 32-byte seed.
|
||||
pub fn from_seed(seed: [u8; 32]) -> Self {
|
||||
let signing = SigningKey::from_bytes(&seed);
|
||||
let verifying = signing.verifying_key();
|
||||
Self {
|
||||
seed: Zeroizing::new(seed),
|
||||
verifying,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte private seed (for persistence).
|
||||
///
|
||||
/// The returned value is wrapped in [`Zeroizing`] so it is securely
|
||||
/// erased when dropped, preventing the seed from lingering in memory.
|
||||
pub fn seed_bytes(&self) -> Zeroizing<[u8; 32]> {
|
||||
Zeroizing::new(*self.seed)
|
||||
}
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Generate a fresh random Ed25519 identity keypair.
|
||||
pub fn generate() -> Self {
|
||||
use rand::rngs::OsRng;
|
||||
let signing = SigningKey::generate(&mut OsRng);
|
||||
let verifying = signing.verifying_key();
|
||||
let seed = Zeroizing::new(signing.to_bytes());
|
||||
Self { seed, verifying }
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte Ed25519 public key.
|
||||
///
|
||||
/// This is the byte array used as `identityKey` in `auth.capnp` calls.
|
||||
pub fn public_key_bytes(&self) -> [u8; 32] {
|
||||
self.verifying.to_bytes()
|
||||
}
|
||||
|
||||
/// Return the SHA-256 fingerprint of the public key (32 bytes).
|
||||
pub fn fingerprint(&self) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(self.verifying.to_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Reconstruct the `SigningKey` from the stored seed bytes.
|
||||
fn signing_key(&self) -> SigningKey {
|
||||
SigningKey::from_bytes(&self.seed)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement the openmls `Signer` trait so `IdentityKeypair` can be passed
|
||||
/// directly to `KeyPackage::builder().build(...)` without needing the external
|
||||
/// `openmls_basic_credential` crate.
|
||||
#[cfg(feature = "native")]
|
||||
impl openmls_traits::signatures::Signer for IdentityKeypair {
|
||||
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::types::Error> {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
Ok(sig.to_bytes().to_vec())
|
||||
}
|
||||
|
||||
fn signature_scheme(&self) -> openmls_traits::types::SignatureScheme {
|
||||
openmls_traits::types::SignatureScheme::ED25519
|
||||
}
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Sign arbitrary bytes with the Ed25519 key and return the 64-byte signature.
|
||||
///
|
||||
/// Used by sealed sender to sign the inner payload for recipient verification.
|
||||
pub fn sign_raw(&self, payload: &[u8]) -> [u8; 64] {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
sig.to_bytes()
|
||||
}
|
||||
|
||||
/// Verify an Ed25519 signature over `payload` using the given public key.
|
||||
pub fn verify_raw(
|
||||
public_key: &[u8; 32],
|
||||
payload: &[u8],
|
||||
signature: &[u8; 64],
|
||||
) -> Result<(), crate::error::CoreError> {
|
||||
use ed25519_dalek::Verifier;
|
||||
|
||||
let vk = VerifyingKey::from_bytes(public_key)
|
||||
.map_err(|e| crate::error::CoreError::Mls(format!("invalid public key: {e}")))?;
|
||||
let sig = ed25519_dalek::Signature::from_bytes(signature);
|
||||
vk.verify(payload, &sig)
|
||||
.map_err(|e| crate::error::CoreError::Mls(format!("signature verification failed: {e}")))
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify a 96-byte delivery proof produced by the server's `build_delivery_proof`.
|
||||
///
|
||||
/// # Layout
|
||||
/// ```text
|
||||
/// bytes 0..32 — SHA-256(seq_le || recipient_key || timestamp_ms_le)
|
||||
/// bytes 32..96 — Ed25519 signature over those 32 bytes
|
||||
/// ```
|
||||
///
|
||||
/// Returns `Ok(true)` when the proof is structurally valid and the signature verifies,
|
||||
/// `Ok(false)` when the proof length is wrong (graceful degradation for old servers),
|
||||
/// or `Err` when the signature is structurally invalid / verification fails.
|
||||
pub fn verify_delivery_proof(
|
||||
server_pubkey: &[u8; 32],
|
||||
proof: &[u8],
|
||||
) -> Result<bool, crate::error::CoreError> {
|
||||
if proof.len() != 96 {
|
||||
return Ok(false);
|
||||
}
|
||||
let hash: [u8; 32] = proof[..32].try_into().expect("slice is 32 bytes");
|
||||
let sig: [u8; 64] = proof[32..96].try_into().expect("slice is 64 bytes");
|
||||
IdentityKeypair::verify_raw(server_pubkey, &hash, &sig)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
impl Serialize for IdentityKeypair {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
serializer.serialize_bytes(&self.seed[..])
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for IdentityKeypair {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let bytes: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
|
||||
let seed: [u8; 32] = bytes
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
|
||||
Ok(IdentityKeypair::from_seed(seed))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for IdentityKeypair {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let fp = self.fingerprint();
|
||||
f.debug_struct("IdentityKeypair")
|
||||
.field(
|
||||
"fingerprint",
|
||||
&format!("{:02x}{:02x}{:02x}{:02x}…", fp[0], fp[1], fp[2], fp[3]),
|
||||
)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod proof_tests {
|
||||
use super::*;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
fn make_proof(kp: &IdentityKeypair, seq: u64, recipient_key: &[u8], timestamp_ms: u64) -> Vec<u8> {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(seq.to_le_bytes());
|
||||
hasher.update(recipient_key);
|
||||
hasher.update(timestamp_ms.to_le_bytes());
|
||||
let hash: [u8; 32] = hasher.finalize().into();
|
||||
let sig = kp.sign_raw(&hash);
|
||||
let mut proof = vec![0u8; 96];
|
||||
proof[..32].copy_from_slice(&hash);
|
||||
proof[32..].copy_from_slice(&sig);
|
||||
proof
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn verify_valid_proof() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let pk = kp.public_key_bytes();
|
||||
let rk = [0xabu8; 32];
|
||||
let proof = make_proof(&kp, 42, &rk, 1_700_000_000_000);
|
||||
assert!(verify_delivery_proof(&pk, &proof).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reject_wrong_length() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let pk = kp.public_key_bytes();
|
||||
assert!(!verify_delivery_proof(&pk, &[0u8; 64]).unwrap());
|
||||
assert!(!verify_delivery_proof(&pk, &[]).unwrap());
|
||||
assert!(!verify_delivery_proof(&pk, &[0u8; 97]).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reject_tampered_hash() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let pk = kp.public_key_bytes();
|
||||
let rk = [0x01u8; 32];
|
||||
let mut proof = make_proof(&kp, 1, &rk, 999);
|
||||
proof[0] ^= 0xff; // corrupt the hash bytes
|
||||
assert!(verify_delivery_proof(&pk, &proof).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reject_wrong_pubkey() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let other = IdentityKeypair::generate();
|
||||
let pk = other.public_key_bytes();
|
||||
let rk = [0x02u8; 32];
|
||||
let proof = make_proof(&kp, 5, &rk, 0);
|
||||
assert!(verify_delivery_proof(&pk, &proof).is_err());
|
||||
}
|
||||
}
|
||||
@@ -14,7 +14,7 @@
|
||||
//! # Wire format
|
||||
//!
|
||||
//! KeyPackages are TLS-encoded using `tls_codec` (same version as openmls).
|
||||
//! The resulting bytes are opaque to the quicnprotochat transport layer.
|
||||
//! The resulting bytes are opaque to the quicproquo transport layer.
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||
@@ -25,7 +25,7 @@ use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::{error::CoreError, identity::IdentityKeypair};
|
||||
|
||||
/// The MLS ciphersuite used throughout quicnprotochat (RFC 9420 §17.1).
|
||||
/// The MLS ciphersuite used throughout quicproquo (RFC 9420 §17.1).
|
||||
pub const ALLOWED_CIPHERSUITE: Ciphersuite =
|
||||
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
@@ -5,16 +5,24 @@ use std::{
|
||||
sync::RwLock,
|
||||
};
|
||||
|
||||
use openmls_rust_crypto::RustCrypto;
|
||||
use openmls_traits::{
|
||||
key_store::{MlsEntity, OpenMlsKeyStore},
|
||||
OpenMlsCryptoProvider,
|
||||
};
|
||||
use openmls_traits::key_store::{MlsEntity, OpenMlsKeyStore};
|
||||
|
||||
/// A disk-backed key store implementing `OpenMlsKeyStore`.
|
||||
///
|
||||
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on
|
||||
/// every store/delete so HPKE init keys survive process restarts.
|
||||
///
|
||||
/// # Serialization
|
||||
///
|
||||
/// Uses bincode for both individual MLS entity values and the outer HashMap
|
||||
/// container. This is required because OpenMLS types use bincode-compatible
|
||||
/// serialization, and `HashMap<Vec<u8>, Vec<u8>>` requires a binary format
|
||||
/// (JSON mandates string keys).
|
||||
///
|
||||
/// # Persistence security
|
||||
///
|
||||
/// When `path` is set, file permissions are restricted to owner-only (0o600)
|
||||
/// on Unix platforms, since the store may contain HPKE private keys.
|
||||
#[derive(Debug)]
|
||||
pub struct DiskKeyStore {
|
||||
path: Option<PathBuf>,
|
||||
@@ -46,28 +54,55 @@ impl DiskKeyStore {
|
||||
if bytes.is_empty() {
|
||||
HashMap::new()
|
||||
} else {
|
||||
bincode::deserialize(&bytes).map_err(|_| DiskKeyStoreError::Serialization)?
|
||||
bincode::deserialize(&bytes)
|
||||
.map_err(|_| DiskKeyStoreError::Serialization)?
|
||||
}
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
let store = Self {
|
||||
path: Some(path),
|
||||
values: RwLock::new(values),
|
||||
})
|
||||
};
|
||||
|
||||
// Set restrictive file permissions on the keystore file.
|
||||
store.set_file_permissions()?;
|
||||
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
fn flush(&self) -> Result<(), DiskKeyStoreError> {
|
||||
let Some(path) = &self.path else {
|
||||
return Ok(());
|
||||
};
|
||||
let values = self.values.read().unwrap();
|
||||
let values = self.values.read().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
let bytes = bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))
|
||||
fs::write(path, &bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
self.set_file_permissions()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Restrict file permissions to owner-only (0o600) on Unix.
|
||||
#[cfg(unix)]
|
||||
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
if let Some(path) = &self.path {
|
||||
if path.exists() {
|
||||
let perms = std::fs::Permissions::from_mode(0o600);
|
||||
fs::set_permissions(path, perms)
|
||||
.map_err(|e| DiskKeyStoreError::Io(format!("set permissions: {e}")))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,64 +116,27 @@ impl OpenMlsKeyStore for DiskKeyStore {
|
||||
type Error = DiskKeyStoreError;
|
||||
|
||||
fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> {
|
||||
let value = serde_json::to_vec(v).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
let mut values = self.values.write().unwrap();
|
||||
let value = bincode::serialize(v).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
let mut values = self.values.write().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
values.insert(k.to_vec(), value);
|
||||
drop(values);
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> {
|
||||
let values = self.values.read().unwrap();
|
||||
let values = match self.values.read() {
|
||||
Ok(v) => v,
|
||||
Err(_) => return None,
|
||||
};
|
||||
values
|
||||
.get(k)
|
||||
.and_then(|bytes| serde_json::from_slice(bytes).ok())
|
||||
.and_then(|bytes| bincode::deserialize(bytes).ok())
|
||||
}
|
||||
|
||||
fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut values = self.values.write().unwrap();
|
||||
let mut values = self.values.write().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
values.remove(k);
|
||||
drop(values);
|
||||
self.flush()
|
||||
}
|
||||
}
|
||||
|
||||
/// Crypto provider that couples RustCrypto with a disk-backed key store.
|
||||
#[derive(Debug)]
|
||||
pub struct StoreCrypto {
|
||||
crypto: RustCrypto,
|
||||
key_store: DiskKeyStore,
|
||||
}
|
||||
|
||||
impl StoreCrypto {
|
||||
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: RustCrypto::default(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for StoreCrypto {
|
||||
fn default() -> Self {
|
||||
Self::new(DiskKeyStore::ephemeral())
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenMlsCryptoProvider for StoreCrypto {
|
||||
type CryptoProvider = RustCrypto;
|
||||
type RandProvider = RustCrypto;
|
||||
type KeyStoreProvider = DiskKeyStore;
|
||||
|
||||
fn crypto(&self) -> &Self::CryptoProvider {
|
||||
&self.crypto
|
||||
}
|
||||
|
||||
fn rand(&self) -> &Self::RandProvider {
|
||||
&self.crypto
|
||||
}
|
||||
|
||||
fn key_store(&self) -> &Self::KeyStoreProvider {
|
||||
&self.key_store
|
||||
}
|
||||
}
|
||||
99
crates/quicproquo-core/src/lib.rs
Normal file
99
crates/quicproquo-core/src/lib.rs
Normal file
@@ -0,0 +1,99 @@
|
||||
//! Core cryptographic primitives, MLS group state machine, and hybrid
|
||||
//! post-quantum KEM for quicproquo.
|
||||
//!
|
||||
//! # WASM support
|
||||
//!
|
||||
//! When compiled with `--no-default-features` (disabling the `native` feature),
|
||||
//! the following modules are available for `wasm32-unknown-unknown`:
|
||||
//!
|
||||
//! - `identity` — Ed25519 identity keypair (generate, sign, verify)
|
||||
//! - `hybrid_kem` — X25519 + ML-KEM-768 hybrid key encapsulation
|
||||
//! - `safety_numbers` — Signal-style safety number computation
|
||||
//! - `sealed_sender` — sender identity + Ed25519 signature envelope
|
||||
//! - `app_message` — rich application message serialisation/parsing
|
||||
//! - `padding` — message padding to hide plaintext lengths
|
||||
//! - `transcript` — encrypted tamper-evident message transcript
|
||||
//! - `error` — `CoreError` type
|
||||
//!
|
||||
//! The following modules require the `native` feature (MLS, OPAQUE, Cap'n Proto):
|
||||
//!
|
||||
//! - `group` — MLS group state machine (openmls)
|
||||
//! - `keypackage` — MLS KeyPackage generation
|
||||
//! - `hybrid_crypto` — hybrid HPKE provider for OpenMLS
|
||||
//! - `keystore` — OpenMLS key store with optional disk persistence
|
||||
//! - `opaque_auth` — OPAQUE cipher suite configuration
|
||||
//!
|
||||
//! # Module layout
|
||||
//!
|
||||
//! | Module | Responsibility |
|
||||
//! |---------------|------------------------------------------------------------------|
|
||||
//! | `app_message` | Rich application payload (Chat, Reply, Reaction, ReadReceipt, Typing) |
|
||||
//! | `error` | [`CoreError`] type |
|
||||
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
|
||||
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
|
||||
//! | `group` | [`GroupMember`] — MLS group lifecycle (create/join/send/recv) |
|
||||
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
|
||||
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
|
||||
|
||||
mod app_message;
|
||||
mod error;
|
||||
mod hybrid_kem;
|
||||
mod identity;
|
||||
pub mod padding;
|
||||
pub mod pq_noise;
|
||||
#[cfg(feature = "native")]
|
||||
pub mod recovery;
|
||||
pub mod safety_numbers;
|
||||
pub mod sealed_sender;
|
||||
pub mod transcript;
|
||||
|
||||
// ── Native-only modules (MLS, OPAQUE, filesystem) ───────────────────────────
|
||||
#[cfg(feature = "native")]
|
||||
mod group;
|
||||
#[cfg(feature = "native")]
|
||||
mod hybrid_crypto;
|
||||
#[cfg(feature = "native")]
|
||||
mod keypackage;
|
||||
#[cfg(feature = "native")]
|
||||
mod keystore;
|
||||
#[cfg(feature = "native")]
|
||||
pub mod opaque_auth;
|
||||
|
||||
// ── Public API (always available) ───────────────────────────────────────────
|
||||
|
||||
pub use app_message::{
|
||||
serialize, serialize_chat, serialize_delete, serialize_dummy, serialize_edit,
|
||||
serialize_file_ref, serialize_reaction, serialize_read_receipt, serialize_reply,
|
||||
serialize_typing, parse, generate_message_id,
|
||||
AppMessage, MessageType, VERSION as APP_MESSAGE_VERSION,
|
||||
};
|
||||
pub use error::CoreError;
|
||||
pub use hybrid_kem::{
|
||||
hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
|
||||
HybridPublicKey,
|
||||
};
|
||||
pub use identity::{verify_delivery_proof, IdentityKeypair};
|
||||
#[cfg(feature = "native")]
|
||||
pub use recovery::{
|
||||
constant_time_eq, generate_recovery_codes, recover_from_bundle, recovery_token_hash,
|
||||
RecoveryBundle, RecoveryPayload, RecoverySetup, MAX_BUNDLE_SIZE, RECOVERY_CODE_COUNT,
|
||||
};
|
||||
pub use safety_numbers::compute_safety_number;
|
||||
pub use transcript::{
|
||||
read_transcript, validate_transcript_structure, ChainVerdict, DecodedRecord, TranscriptRecord,
|
||||
TranscriptWriter,
|
||||
};
|
||||
// Deprecated re-export for backward compatibility.
|
||||
#[allow(deprecated)]
|
||||
pub use transcript::verify_transcript_chain;
|
||||
|
||||
// ── Public API (native only) ────────────────────────────────────────────────
|
||||
|
||||
#[cfg(feature = "native")]
|
||||
pub use group::{GroupMember, ReceivedMessage, ReceivedMessageWithSender};
|
||||
#[cfg(feature = "native")]
|
||||
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
|
||||
#[cfg(feature = "native")]
|
||||
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
|
||||
#[cfg(feature = "native")]
|
||||
pub use keystore::DiskKeyStore;
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
use opaque_ke::CipherSuite;
|
||||
|
||||
/// OPAQUE cipher suite for quicnprotochat.
|
||||
/// OPAQUE cipher suite for quicproquo.
|
||||
///
|
||||
/// - **OPRF**: Ristretto255 (curve25519-based, ~128-bit security)
|
||||
/// - **Key exchange**: Triple-DH (3DH) over Ristretto255 with SHA-512
|
||||
265
crates/quicproquo-core/src/padding.rs
Normal file
265
crates/quicproquo-core/src/padding.rs
Normal file
@@ -0,0 +1,265 @@
|
||||
//! Message padding to hide plaintext lengths from the server.
|
||||
//!
|
||||
//! Pads payloads to fixed bucket sizes before MLS encryption so that the
|
||||
//! ciphertext does not reveal the actual message length.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! ```text
|
||||
//! [real_length: 4 bytes LE (u32)][payload: real_length bytes][random padding]
|
||||
//! ```
|
||||
//!
|
||||
//! The total padded output is always one of the bucket sizes: 256, 1024, 4096, 16384 bytes.
|
||||
//! For payloads larger than 16380 bytes, rounds up to the nearest 16384-byte multiple.
|
||||
//!
|
||||
//! ## Uniform boundary padding (traffic analysis resistance)
|
||||
//!
|
||||
//! [`pad_uniform`] / [`unpad_uniform`] pad to a configurable byte boundary
|
||||
//! (default 256) instead of exponential buckets. This produces more uniform
|
||||
//! ciphertext sizes at the cost of slightly more padding overhead.
|
||||
|
||||
use rand::RngCore;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
/// Default uniform padding boundary in bytes.
|
||||
pub const DEFAULT_PADDING_BOUNDARY: usize = 256;
|
||||
|
||||
/// Bucket sizes in bytes. The smallest (256) accommodates a sealed sender
/// envelope (99 bytes overhead) plus a short message.
const BUCKETS: &[usize] = &[256, 1024, 4096, 16384];

/// Select the smallest bucket that fits `content_len + 4` (the 4-byte length prefix).
///
/// Payloads too large for the biggest bucket are rounded up to the nearest
/// multiple of the largest bucket size (16384 bytes).
fn bucket_for(content_len: usize) -> usize {
    let total = content_len + 4;
    BUCKETS
        .iter()
        .copied()
        .find(|&bucket| total <= bucket)
        // Larger than the biggest bucket: round up to nearest 16384-byte multiple.
        .unwrap_or_else(|| total.div_ceil(16384) * 16384)
}
||||
|
||||
/// Pad a payload to the next bucket boundary with cryptographic random bytes.
|
||||
pub fn pad(payload: &[u8]) -> Vec<u8> {
|
||||
let bucket = bucket_for(payload.len());
|
||||
let mut out = Vec::with_capacity(bucket);
|
||||
out.extend_from_slice(&(payload.len() as u32).to_le_bytes());
|
||||
out.extend_from_slice(payload);
|
||||
let pad_len = bucket - 4 - payload.len();
|
||||
if pad_len > 0 {
|
||||
let mut padding = vec![0u8; pad_len];
|
||||
rand::rngs::OsRng.fill_bytes(&mut padding);
|
||||
out.extend_from_slice(&padding);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Remove padding and return the original payload.
|
||||
pub fn unpad(padded: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if padded.len() < 4 {
|
||||
return Err(CoreError::AppMessage("padded message too short".into()));
|
||||
}
|
||||
let real_len = u32::from_le_bytes([padded[0], padded[1], padded[2], padded[3]]) as usize;
|
||||
if 4 + real_len > padded.len() {
|
||||
return Err(CoreError::AppMessage(
|
||||
"padded real_length exceeds buffer".into(),
|
||||
));
|
||||
}
|
||||
Ok(padded[4..4 + real_len].to_vec())
|
||||
}
|
||||
|
||||
/// Pad a payload to the nearest multiple of `boundary` bytes.
|
||||
///
|
||||
/// Uses the same wire format as [`pad`]: `[real_length: 4 bytes LE][payload][random padding]`.
|
||||
/// The total output length is always a multiple of `boundary`. A `boundary` of 0 is
|
||||
/// treated as [`DEFAULT_PADDING_BOUNDARY`].
|
||||
pub fn pad_uniform(payload: &[u8], boundary: usize) -> Vec<u8> {
|
||||
let boundary = if boundary == 0 { DEFAULT_PADDING_BOUNDARY } else { boundary };
|
||||
let total = payload.len() + 4; // 4-byte length prefix
|
||||
let padded_len = total.div_ceil(boundary) * boundary;
|
||||
|
||||
let mut out = Vec::with_capacity(padded_len);
|
||||
out.extend_from_slice(&(payload.len() as u32).to_le_bytes());
|
||||
out.extend_from_slice(payload);
|
||||
let pad_len = padded_len - total;
|
||||
if pad_len > 0 {
|
||||
let mut padding = vec![0u8; pad_len];
|
||||
rand::rngs::OsRng.fill_bytes(&mut padding);
|
||||
out.extend_from_slice(&padding);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Remove uniform padding. Wire format is identical to [`unpad`].
pub fn unpad_uniform(padded: &[u8]) -> Result<Vec<u8>, CoreError> {
    // Thin alias: pad_uniform and pad share the same length-prefixed wire
    // format, so the bucket unpadder works unchanged. Kept as a separate
    // public name so callers of the uniform API read symmetrically.
    unpad(padded)
}
|
||||
|
||||
/// Generate a decoy payload that looks identical to a real padded message.
|
||||
///
|
||||
/// Returns random bytes of length equal to a `boundary`-aligned padded message.
|
||||
/// The 4-byte length prefix is set to 0, so [`unpad_uniform`] returns an empty payload.
|
||||
pub fn generate_decoy(boundary: usize) -> Vec<u8> {
|
||||
let boundary = if boundary == 0 { DEFAULT_PADDING_BOUNDARY } else { boundary };
|
||||
let mut out = vec![0u8; boundary];
|
||||
// Length prefix = 0 (decoy carries no real payload).
|
||||
// Fill the rest with random bytes so it is indistinguishable from padding.
|
||||
rand::rngs::OsRng.fill_bytes(&mut out[4..]);
|
||||
out
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // ── Exponential bucket padding: pad / unpad round trips ────────────────

    #[test]
    fn round_trip_small() {
        let msg = b"hello";
        let padded = pad(msg);
        assert_eq!(padded.len(), 256); // smallest bucket
        let unpadded = unpad(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn round_trip_medium() {
        let msg = vec![0xAB; 300];
        let padded = pad(&msg);
        assert_eq!(padded.len(), 1024); // second bucket
        let unpadded = unpad(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn round_trip_large() {
        let msg = vec![0xCD; 2000];
        let padded = pad(&msg);
        assert_eq!(padded.len(), 4096); // third bucket
        let unpadded = unpad(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn round_trip_very_large() {
        let msg = vec![0xEF; 10000];
        let padded = pad(&msg);
        assert_eq!(padded.len(), 16384); // largest bucket
        let unpadded = unpad(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    // Oversized payloads round up to a multiple of the largest bucket.
    #[test]
    fn round_trip_oversized() {
        let msg = vec![0xFF; 20000];
        let padded = pad(&msg);
        assert_eq!(padded.len(), 32768); // 2 * 16384
        let unpadded = unpad(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn round_trip_empty() {
        let msg = b"";
        let padded = pad(msg);
        assert_eq!(padded.len(), 256); // smallest bucket
        let unpadded = unpad(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn exactly_at_bucket_boundary() {
        // 252 + 4 = 256 → fits in 256 bucket exactly
        let msg = vec![0x42; 252];
        let padded = pad(&msg);
        assert_eq!(padded.len(), 256);
        let unpadded = unpad(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    // ── Malformed input rejection ──────────────────────────────────────────

    #[test]
    fn unpad_too_short_fails() {
        assert!(unpad(&[0, 0]).is_err());
    }

    #[test]
    fn unpad_invalid_length_fails() {
        // Claims 1000 bytes but only has 10
        let mut bad = (1000u32).to_le_bytes().to_vec();
        bad.extend_from_slice(&[0u8; 10]);
        assert!(unpad(&bad).is_err());
    }

    // ── Uniform padding tests ──────────────────────────────────────────────

    #[test]
    fn uniform_round_trip_default_boundary() {
        let msg = b"uniform padding test";
        let padded = pad_uniform(msg, DEFAULT_PADDING_BOUNDARY);
        assert_eq!(padded.len() % DEFAULT_PADDING_BOUNDARY, 0);
        assert_eq!(padded.len(), 256); // 20 + 4 = 24, rounds up to 256
        let unpadded = unpad_uniform(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn uniform_custom_boundary() {
        let msg = vec![0xAA; 100];
        let padded = pad_uniform(&msg, 128);
        assert_eq!(padded.len() % 128, 0);
        assert_eq!(padded.len(), 128); // 100 + 4 = 104, rounds up to 128
        let unpadded = unpad_uniform(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn uniform_exact_boundary() {
        // 252 + 4 = 256, exactly on boundary
        let msg = vec![0xBB; 252];
        let padded = pad_uniform(&msg, 256);
        assert_eq!(padded.len(), 256);
        let unpadded = unpad_uniform(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn uniform_one_over_boundary() {
        // 253 + 4 = 257, rounds up to 512
        let msg = vec![0xCC; 253];
        let padded = pad_uniform(&msg, 256);
        assert_eq!(padded.len(), 512);
        let unpadded = unpad_uniform(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    #[test]
    fn uniform_zero_boundary_uses_default() {
        let msg = b"zero boundary";
        let padded = pad_uniform(msg, 0);
        assert_eq!(padded.len() % DEFAULT_PADDING_BOUNDARY, 0);
        let unpadded = unpad_uniform(&padded).unwrap();
        assert_eq!(unpadded, msg);
    }

    // ── Decoy generation ───────────────────────────────────────────────────

    #[test]
    fn decoy_is_boundary_aligned() {
        let decoy = generate_decoy(256);
        assert_eq!(decoy.len(), 256);
        assert_eq!(decoy.len() % 256, 0);
    }

    // A decoy's zero length prefix must unpad to an empty payload.
    #[test]
    fn decoy_unpads_to_empty() {
        let decoy = generate_decoy(256);
        let payload = unpad_uniform(&decoy).unwrap();
        assert!(payload.is_empty());
    }

    #[test]
    fn decoy_default_boundary() {
        let decoy = generate_decoy(0);
        assert_eq!(decoy.len(), DEFAULT_PADDING_BOUNDARY);
    }
}
|
||||
689
crates/quicproquo-core/src/pq_noise.rs
Normal file
689
crates/quicproquo-core/src/pq_noise.rs
Normal file
@@ -0,0 +1,689 @@
|
||||
//! Hybrid Noise_XX + ML-KEM-768 handshake for post-quantum transport security.
|
||||
//!
|
||||
//! Implements a three-message Noise_XX pattern with an embedded ML-KEM-768
|
||||
//! encapsulation to produce a hybrid shared secret that is secure against
|
||||
//! both classical and quantum adversaries.
|
||||
//!
|
||||
//! # Handshake pattern
|
||||
//!
|
||||
//! ```text
|
||||
//! XX(s, rs):
|
||||
//! -> e (initiator ephemeral)
|
||||
//! <- e, ee, s, es, mlkem_ct (responder ephemeral + static + ML-KEM ciphertext)
|
||||
//! -> s, se (initiator static)
|
||||
//! ```
|
||||
//!
|
||||
//! After message 2, the ML-KEM shared secret is mixed into the chaining key
|
||||
//! via HKDF. The final transport keys incorporate both the X25519 DH chain
|
||||
//! and the ML-KEM shared secret.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! Each handshake message is a simple length-prefixed blob:
|
||||
//! ```text
|
||||
//! [msg_len: u32 BE][handshake message bytes]
|
||||
//! ```
|
||||
//!
|
||||
//! # Feature gate
|
||||
//!
|
||||
//! This module is always compiled but the `pq-noise` feature enables it
|
||||
//! in the RPC layer for server/client negotiation.
|
||||
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit, Payload},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use hkdf::Hkdf;
|
||||
use ml_kem::{
|
||||
array::Array,
|
||||
kem::{Decapsulate, Encapsulate},
|
||||
EncodedSizeUser, KemCore, MlKem768, MlKem768Params,
|
||||
};
|
||||
use ml_kem::kem::{DecapsulationKey, EncapsulationKey};
|
||||
use rand::rngs::OsRng;
|
||||
use sha2::Sha256;
|
||||
use x25519_dalek::{PublicKey as X25519Public, StaticSecret};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
/// Domain separation label for the hybrid Noise handshake.
|
||||
const PROTOCOL_NAME: &[u8] = b"quicproquo-pq-noise-v1";
|
||||
|
||||
/// ML-KEM-768 encapsulation key length.
|
||||
const MLKEM_EK_LEN: usize = 1184;
|
||||
|
||||
/// ML-KEM-768 ciphertext length.
|
||||
const MLKEM_CT_LEN: usize = 1088;
|
||||
|
||||
/// AEAD tag length (ChaCha20-Poly1305).
|
||||
const TAG_LEN: usize = 16;
|
||||
|
||||
// ── Keypair ──────────────────────────────────────────────────────────────────
|
||||
|
||||
/// A static keypair for the hybrid Noise handshake.
///
/// Contains both an X25519 static key and an ML-KEM-768 key pair.
pub struct NoiseKeypair {
    // X25519 static secret; used for the es/se DH operations in the handshake.
    x25519_sk: StaticSecret,
    // Corresponding X25519 public key; transmitted (AEAD-encrypted) in msg2/msg3.
    x25519_pk: X25519Public,
    // ML-KEM-768 decapsulation (secret) key — consumed by the initiator in msg2.
    mlkem_dk: DecapsulationKey<MlKem768Params>,
    // ML-KEM-768 encapsulation (public) key — shared with the peer so it can
    // encapsulate; see `mlkem_public`.
    mlkem_ek: EncapsulationKey<MlKem768Params>,
}

impl NoiseKeypair {
    /// Generate a fresh keypair from OS CSPRNG.
    pub fn generate() -> Self {
        let x25519_sk = StaticSecret::random_from_rng(OsRng);
        // Public key is derived deterministically from the secret.
        let x25519_pk = X25519Public::from(&x25519_sk);
        let (mlkem_dk, mlkem_ek) = MlKem768::generate(&mut OsRng);
        Self {
            x25519_sk,
            x25519_pk,
            mlkem_dk,
            mlkem_ek,
        }
    }

    /// Return the X25519 public key bytes.
    pub fn x25519_public(&self) -> [u8; 32] {
        self.x25519_pk.to_bytes()
    }

    /// Return the ML-KEM-768 encapsulation key bytes.
    ///
    /// NOTE(review): this key must reach the responder out-of-band (or in a
    /// pre-message) before msg2 — confirm against the RPC layer.
    pub fn mlkem_public(&self) -> Vec<u8> {
        self.mlkem_ek.as_bytes().to_vec()
    }
}
|
||||
|
||||
// ── Chaining key state ───────────────────────────────────────────────────────
|
||||
|
||||
/// Internal handshake state tracking the Noise chaining key and handshake hash.
///
/// Mirrors the Noise `SymmetricState` + `CipherState` pair: `ck`/`h` evolve
/// via MixKey/MixHash, and `k`/`n` form the in-handshake cipher state.
struct HandshakeState {
    /// Chaining key — evolved by each MixKey operation.
    ck: Zeroizing<[u8; 32]>,
    /// Handshake hash — commits to all handshake transcript data.
    h: [u8; 32],
    /// Current encryption key (derived from ck after MixKey).
    k: Option<Zeroizing<[u8; 32]>>,
    /// Nonce counter for in-handshake encryption.
    n: u64,
}

impl HandshakeState {
    /// Fresh state: `h = SHA-256(PROTOCOL_NAME)`, `ck = h`, no cipher key yet.
    fn new() -> Self {
        // Initialize h = SHA-256(protocol_name), ck = h.
        use sha2::{Digest, Sha256};
        let h: [u8; 32] = Sha256::digest(PROTOCOL_NAME).into();
        Self {
            ck: Zeroizing::new(h),
            h,
            k: None,
            n: 0,
        }
    }

    /// MixHash: h = SHA-256(h || data)
    fn mix_hash(&mut self, data: &[u8]) {
        use sha2::{Digest, Sha256};
        let mut hasher = Sha256::new();
        hasher.update(self.h);
        hasher.update(data);
        self.h = hasher.finalize().into();
    }

    /// MixKey: (ck, k) = HKDF(ck, input_key_material)
    ///
    /// Resets the nonce counter to 0: every MixKey installs a fresh cipher
    /// key, so counter nonces never repeat under the same key.
    fn mix_key(&mut self, ikm: &[u8]) {
        let hk = Hkdf::<Sha256>::new(Some(&*self.ck), ikm);
        let mut ck = Zeroizing::new([0u8; 32]);
        let mut k = Zeroizing::new([0u8; 32]);
        // HKDF expand only fails for outputs > 255 * hash_len; 32 bytes always fits.
        hk.expand(b"ck", &mut *ck)
            .expect("32 bytes is valid HKDF output");
        hk.expand(b"k", &mut *k)
            .expect("32 bytes is valid HKDF output");
        self.ck = ck;
        self.k = Some(k);
        self.n = 0;
    }

    /// Encrypt plaintext with the current key and nonce, using h as AAD.
    ///
    /// Using `h` as AAD, then mixing the resulting ciphertext back into `h`,
    /// binds every encrypted handshake payload to the full transcript so far.
    /// Errors if called before any MixKey has installed a key.
    fn encrypt_and_hash(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
        let key = self
            .k
            .as_ref()
            .ok_or_else(|| CoreError::Mls("pq_noise: no encryption key set".into()))?;
        let cipher = ChaCha20Poly1305::new(Key::from_slice(&**key));
        let nonce = nonce_from_counter(self.n);
        let ct = cipher
            .encrypt(
                Nonce::from_slice(&nonce),
                Payload {
                    msg: plaintext,
                    aad: &self.h,
                },
            )
            .map_err(|_| CoreError::Mls("pq_noise: encrypt failed".into()))?;
        self.mix_hash(&ct);
        self.n += 1;
        Ok(ct)
    }

    /// Decrypt ciphertext with the current key and nonce, using h as AAD.
    ///
    /// On AEAD failure the state (h, n) is left untouched and an error is
    /// returned; the ciphertext is mixed into `h` only after authentication.
    fn decrypt_and_hash(&mut self, ciphertext: &[u8]) -> Result<Vec<u8>, CoreError> {
        let key = self
            .k
            .as_ref()
            .ok_or_else(|| CoreError::Mls("pq_noise: no decryption key set".into()))?;
        let cipher = ChaCha20Poly1305::new(Key::from_slice(&**key));
        let nonce = nonce_from_counter(self.n);
        // Copy kept so the authenticated ciphertext can be mixed into h below.
        // NOTE(review): mix_hash(ciphertext) on the borrowed slice would also
        // work; the copy is defensive.
        let ct_for_hash = ciphertext.to_vec();
        let pt = cipher
            .decrypt(
                Nonce::from_slice(&nonce),
                Payload {
                    msg: ciphertext,
                    aad: &self.h,
                },
            )
            .map_err(|_| CoreError::Mls("pq_noise: decrypt failed".into()))?;
        self.mix_hash(&ct_for_hash);
        self.n += 1;
        Ok(pt)
    }

    /// Split the handshake state into two transport keys (initiator->responder, responder->initiator).
    ///
    /// Expands the final chaining key with the direction labels "initiator"
    /// and "responder"; both peers derive the same pair and assign them to
    /// opposite directions. NOTE(review): this uses ck as HKDF salt with
    /// empty IKM rather than the textbook Noise Split — symmetric on both
    /// sides, so interoperable within this protocol only.
    fn split(&self) -> (TransportKey, TransportKey) {
        let hk = Hkdf::<Sha256>::new(Some(&*self.ck), &[]);
        let mut k1 = Zeroizing::new([0u8; 32]);
        let mut k2 = Zeroizing::new([0u8; 32]);
        hk.expand(b"initiator", &mut *k1)
            .expect("32 bytes is valid HKDF output");
        hk.expand(b"responder", &mut *k2)
            .expect("32 bytes is valid HKDF output");
        (
            TransportKey { key: k1, nonce: 0 },
            TransportKey { key: k2, nonce: 0 },
        )
    }
}
|
||||
|
||||
/// Build a 96-bit ChaCha20-Poly1305 nonce from a message counter.
///
/// Layout: 4 zero bytes followed by the counter as 8 little-endian bytes —
/// the fixed-prefix-plus-counter construction used throughout this module.
fn nonce_from_counter(n: u64) -> [u8; 12] {
    let counter = n.to_le_bytes();
    let mut nonce = [0u8; 12];
    for (dst, src) in nonce.iter_mut().skip(4).zip(counter) {
        *dst = src;
    }
    nonce
}
|
||||
|
||||
// ── Transport ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// A transport encryption key with a nonce counter.
///
/// The nonce is the local message counter, so each side must encrypt and
/// decrypt its direction's messages strictly in order: a lost or reordered
/// message desynchronizes the counters and subsequent decrypts fail.
pub struct TransportKey {
    // 256-bit ChaCha20-Poly1305 key; zeroized on drop.
    key: Zeroizing<[u8; 32]>,
    // Monotonic message counter fed to `nonce_from_counter`.
    nonce: u64,
}

impl TransportKey {
    /// Encrypt a message for transport.
    ///
    /// Increments the nonce counter on success so the next message uses a
    /// fresh nonce under the same key.
    pub fn encrypt(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
        let cipher = ChaCha20Poly1305::new(Key::from_slice(&*self.key));
        let nonce = nonce_from_counter(self.nonce);
        let ct = cipher
            .encrypt(Nonce::from_slice(&nonce), plaintext)
            .map_err(|_| CoreError::Mls("pq_noise transport: encrypt failed".into()))?;
        self.nonce += 1;
        Ok(ct)
    }

    /// Decrypt a transport message.
    ///
    /// The counter advances only on successful authentication, so a tampered
    /// message can be rejected without desynchronizing the stream.
    pub fn decrypt(&mut self, ciphertext: &[u8]) -> Result<Vec<u8>, CoreError> {
        let cipher = ChaCha20Poly1305::new(Key::from_slice(&*self.key));
        let nonce = nonce_from_counter(self.nonce);
        let pt = cipher
            .decrypt(Nonce::from_slice(&nonce), ciphertext)
            .map_err(|_| CoreError::Mls("pq_noise transport: decrypt failed".into()))?;
        self.nonce += 1;
        Ok(pt)
    }
}
|
||||
|
||||
// ── Initiator ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Initiator side of the hybrid Noise_XX handshake.
///
/// Call order: [`write_message_1`] → [`read_message_2`] → [`write_message_3`]
/// → [`finalize`]. Calling `write_message_3` before `read_message_2` is
/// rejected (the responder ephemeral is not yet known).
pub struct Initiator {
    state: HandshakeState,
    /// Ephemeral secret stored as StaticSecret so DH doesn't consume it.
    /// Generated from OsRng; we use StaticSecret purely for the non-consuming
    /// `diffie_hellman(&self, ...)` API — the key is still ephemeral.
    e_sk: StaticSecret,
    e_pk: X25519Public,
    // Our static hybrid keypair (X25519 + ML-KEM-768).
    s: NoiseKeypair,
    /// Stored after reading message 2 so we can compute se = DH(s, re) in msg3.
    re_pk: Option<X25519Public>,
}

impl Initiator {
    /// Create a new initiator with the given static keypair.
    pub fn new(static_keypair: NoiseKeypair) -> Self {
        let e_sk = StaticSecret::random_from_rng(OsRng);
        let e_pk = X25519Public::from(&e_sk);
        Self {
            state: HandshakeState::new(),
            e_sk,
            e_pk,
            s: static_keypair,
            re_pk: None,
        }
    }

    /// Write message 1: `-> e`
    ///
    /// Returns the initiator's ephemeral X25519 public key (32 bytes).
    pub fn write_message_1(&mut self) -> Vec<u8> {
        let e_pk_bytes = self.e_pk.to_bytes();
        // Commit our ephemeral to the transcript before sending.
        self.state.mix_hash(&e_pk_bytes);
        e_pk_bytes.to_vec()
    }

    /// Read message 2 from responder: `<- e, ee, s, es, mlkem_ct`
    ///
    /// Expects: `re_pk(32) || encrypted_rs_pk(32+TAG) || mlkem_ct(1088)`
    ///
    /// Returns the responder's static X25519 public key.
    ///
    /// # Errors
    ///
    /// Fails on a wrong message length, on AEAD authentication failure of the
    /// encrypted static key, or on a malformed ML-KEM ciphertext.
    pub fn read_message_2(&mut self, msg: &[u8]) -> Result<[u8; 32], CoreError> {
        let expected_len = 32 + 32 + TAG_LEN + MLKEM_CT_LEN;
        if msg.len() != expected_len {
            return Err(CoreError::Mls(format!(
                "pq_noise msg2: expected {expected_len} bytes, got {}",
                msg.len()
            )));
        }

        let mut cursor = 0;

        // re = responder ephemeral public key
        let mut re_pk_bytes = [0u8; 32];
        re_pk_bytes.copy_from_slice(&msg[cursor..cursor + 32]);
        cursor += 32;
        let re_pk = X25519Public::from(re_pk_bytes);
        self.state.mix_hash(&re_pk_bytes);
        // Remembered for the se = DH(s, re) operation in write_message_3.
        self.re_pk = Some(re_pk);

        // ee = DH(e, re)
        let ee_ss = self.e_sk.diffie_hellman(&re_pk);
        self.state.mix_key(ee_ss.as_bytes());

        // Decrypt responder's static key: s = Dec(encrypted_rs_pk)
        let encrypted_rs = &msg[cursor..cursor + 32 + TAG_LEN];
        cursor += 32 + TAG_LEN;
        let rs_pk_bytes = self.state.decrypt_and_hash(encrypted_rs)?;
        let mut rs_pk_arr = [0u8; 32];
        // Length check before copy_from_slice, which would panic on mismatch.
        if rs_pk_bytes.len() != 32 {
            return Err(CoreError::Mls("pq_noise: decrypted rs not 32 bytes".into()));
        }
        rs_pk_arr.copy_from_slice(&rs_pk_bytes);
        let rs_pk = X25519Public::from(rs_pk_arr);

        // es = DH(e, rs)
        let es_ss = self.e_sk.diffie_hellman(&rs_pk);
        self.state.mix_key(es_ss.as_bytes());

        // ML-KEM: decapsulate the ciphertext from the responder.
        // Decapsulation uses our *static* ML-KEM key (s.mlkem_dk) — the
        // responder encapsulated to the encapsulation key we shared
        // out-of-band.
        // NOTE(review): mlkem_ct is mixed into the key schedule (mix_key) but
        // not into the handshake hash h; tampering still surfaces as key
        // divergence, but consider an explicit mix_hash(mlkem_ct) for full
        // transcript binding — confirm against the protocol design doc.
        let mlkem_ct = &msg[cursor..cursor + MLKEM_CT_LEN];
        let mlkem_ct_arr = Array::try_from(mlkem_ct)
            .map_err(|_| CoreError::Mls("pq_noise: invalid ML-KEM ciphertext".into()))?;
        let mlkem_ss: ml_kem::SharedKey<MlKem768> = self
            .s
            .mlkem_dk
            .decapsulate(&mlkem_ct_arr)
            .map_err(|_| CoreError::Mls("pq_noise: ML-KEM decapsulation failed".into()))?;
        self.state.mix_key(&mlkem_ss);

        Ok(rs_pk_arr)
    }

    /// Write message 3: `-> s, se`
    ///
    /// Returns the encrypted initiator static key.
    ///
    /// # Errors
    ///
    /// Fails if called before [`read_message_2`] has succeeded.
    pub fn write_message_3(&mut self) -> Result<Vec<u8>, CoreError> {
        let re_pk = self
            .re_pk
            .ok_or_else(|| CoreError::Mls("pq_noise: must read msg2 before writing msg3".into()))?;

        // Encrypt our static key
        let s_pk_bytes = self.s.x25519_pk.to_bytes();
        let encrypted_s = self.state.encrypt_and_hash(&s_pk_bytes)?;

        // se = DH(s, re)
        let se_ss = self.s.x25519_sk.diffie_hellman(&re_pk);
        self.state.mix_key(se_ss.as_bytes());

        Ok(encrypted_s)
    }

    /// Finalize the handshake and return transport keys.
    ///
    /// Returns `(send_key, recv_key)` — initiator sends with send_key.
    pub fn finalize(self) -> (TransportKey, TransportKey) {
        // split() yields (initiator->responder, responder->initiator), which
        // is exactly (send, recv) from our point of view.
        self.state.split()
    }
}
|
||||
|
||||
// ── Responder ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Responder side of the hybrid Noise_XX handshake.
///
/// Call order: [`read_message_1`] → [`write_message_2`] → [`read_message_3`]
/// → [`finalize`].
pub struct Responder {
    state: HandshakeState,
    /// Ephemeral secret stored as StaticSecret so DH doesn't consume it.
    e_sk: StaticSecret,
    e_pk: X25519Public,
    // Our static hybrid keypair (X25519 + ML-KEM-768).
    s: NoiseKeypair,
}

impl Responder {
    /// Create a new responder with the given static keypair.
    pub fn new(static_keypair: NoiseKeypair) -> Self {
        let e_sk = StaticSecret::random_from_rng(OsRng);
        let e_pk = X25519Public::from(&e_sk);
        Self {
            state: HandshakeState::new(),
            e_sk,
            e_pk,
            s: static_keypair,
        }
    }

    /// Read message 1 from initiator: `-> e`
    ///
    /// Expects the initiator's ephemeral X25519 public key (32 bytes).
    pub fn read_message_1(&mut self, msg: &[u8]) -> Result<(), CoreError> {
        if msg.len() != 32 {
            return Err(CoreError::Mls(format!(
                "pq_noise msg1: expected 32 bytes, got {}",
                msg.len()
            )));
        }
        // Commit the initiator ephemeral to the transcript.
        self.state.mix_hash(msg);
        Ok(())
    }

    /// Write message 2: `<- e, ee, s, es, mlkem_ct`
    ///
    /// `initiator_ek` is the initiator's ML-KEM encapsulation key.
    ///
    /// Returns the message bytes.
    ///
    /// # Errors
    ///
    /// Fails if the ML-KEM encapsulation key has the wrong length or cannot
    /// be parsed, or if the in-handshake AEAD encryption fails.
    pub fn write_message_2(
        &mut self,
        initiator_e_pk: &[u8; 32],
        initiator_mlkem_ek: &[u8],
    ) -> Result<Vec<u8>, CoreError> {
        let ie_pk = X25519Public::from(*initiator_e_pk);

        // Our ephemeral key
        let e_pk_bytes = self.e_pk.to_bytes();
        self.state.mix_hash(&e_pk_bytes);

        // ee = DH(e, ie)
        let ee_ss = self.e_sk.diffie_hellman(&ie_pk);
        self.state.mix_key(ee_ss.as_bytes());

        // Encrypt our static key
        let s_pk_bytes = self.s.x25519_pk.to_bytes();
        let encrypted_s = self.state.encrypt_and_hash(&s_pk_bytes)?;

        // es = DH(s, ie)
        let es_ss = self.s.x25519_sk.diffie_hellman(&ie_pk);
        self.state.mix_key(es_ss.as_bytes());

        // ML-KEM: encapsulate to the initiator's encapsulation key
        if initiator_mlkem_ek.len() != MLKEM_EK_LEN {
            return Err(CoreError::Mls(format!(
                "pq_noise: expected ML-KEM EK {} bytes, got {}",
                MLKEM_EK_LEN,
                initiator_mlkem_ek.len()
            )));
        }
        let ek_arr = Array::try_from(initiator_mlkem_ek)
            .map_err(|_| CoreError::Mls("pq_noise: invalid ML-KEM encapsulation key".into()))?;
        let ek = EncapsulationKey::<MlKem768Params>::from_bytes(&ek_arr);
        let (mlkem_ct, mlkem_ss): (ml_kem::Ciphertext<MlKem768>, ml_kem::SharedKey<MlKem768>) = ek
            .encapsulate(&mut OsRng)
            .map_err(|_| CoreError::Mls("pq_noise: ML-KEM encapsulation failed".into()))?;
        // NOTE(review): the shared secret is mixed into the key schedule but
        // the ciphertext itself is not mix_hash'ed — mirrors the initiator
        // side, so both transcripts stay in sync.
        self.state.mix_key(&mlkem_ss);

        // Assemble: e_pk || encrypted_s || mlkem_ct
        let mut out = Vec::with_capacity(32 + encrypted_s.len() + MLKEM_CT_LEN);
        out.extend_from_slice(&e_pk_bytes);
        out.extend_from_slice(&encrypted_s);
        out.extend_from_slice(&mlkem_ct);
        Ok(out)
    }

    /// Read message 3 from initiator: `-> s, se`
    ///
    /// Returns the initiator's static X25519 public key.
    ///
    /// # Errors
    ///
    /// Fails on a wrong message length or on AEAD authentication failure —
    /// the latter also occurs when the peers' chaining keys diverged earlier
    /// (e.g. a wrong ML-KEM shared secret).
    pub fn read_message_3(&mut self, msg: &[u8]) -> Result<[u8; 32], CoreError> {
        if msg.len() != 32 + TAG_LEN {
            return Err(CoreError::Mls(format!(
                "pq_noise msg3: expected {} bytes, got {}",
                32 + TAG_LEN,
                msg.len()
            )));
        }

        // Decrypt initiator's static key
        let is_pk_bytes = self.state.decrypt_and_hash(msg)?;
        let mut is_pk_arr = [0u8; 32];
        // Length check before copy_from_slice, which would panic on mismatch.
        if is_pk_bytes.len() != 32 {
            return Err(CoreError::Mls(
                "pq_noise: decrypted initiator static not 32 bytes".into(),
            ));
        }
        is_pk_arr.copy_from_slice(&is_pk_bytes);
        let is_pk = X25519Public::from(is_pk_arr);

        // se = DH(e, is) — responder computes using ephemeral key
        let se_ss = self.e_sk.diffie_hellman(&is_pk);
        self.state.mix_key(se_ss.as_bytes());

        Ok(is_pk_arr)
    }

    /// Finalize the handshake and return transport keys.
    ///
    /// Returns `(recv_key, send_key)` — responder receives with recv_key.
    pub fn finalize(self) -> (TransportKey, TransportKey) {
        // split() yields (initiator->responder, responder->initiator):
        // from the responder's view that is (recv, send).
        let (i2r, r2i) = self.state.split();
        (i2r, r2i)
    }
}
|
||||
|
||||
// ── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn full_handshake_round_trip() {
|
||||
let initiator_kp = NoiseKeypair::generate();
|
||||
let responder_kp = NoiseKeypair::generate();
|
||||
|
||||
// Initiator's ML-KEM public key is sent out-of-band (or in a pre-message).
|
||||
let initiator_mlkem_ek = initiator_kp.mlkem_public();
|
||||
|
||||
let mut initiator = Initiator::new(initiator_kp);
|
||||
let mut responder = Responder::new(responder_kp);
|
||||
|
||||
// Message 1: initiator -> responder
|
||||
let msg1 = initiator.write_message_1();
|
||||
assert_eq!(msg1.len(), 32);
|
||||
responder.read_message_1(&msg1).unwrap();
|
||||
|
||||
// Message 2: responder -> initiator
|
||||
let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
|
||||
let msg2 = responder
|
||||
.write_message_2(&ie_pk, &initiator_mlkem_ek)
|
||||
.unwrap();
|
||||
let _responder_static = initiator.read_message_2(&msg2).unwrap();
|
||||
|
||||
// Message 3: initiator -> responder
|
||||
let msg3 = initiator.write_message_3().unwrap();
|
||||
let _initiator_static = responder.read_message_3(&msg3).unwrap();
|
||||
|
||||
// Derive transport keys
|
||||
let (mut i_send, mut i_recv) = initiator.finalize();
|
||||
let (mut r_recv, mut r_send) = responder.finalize();
|
||||
|
||||
// Test transport: initiator -> responder
|
||||
let plaintext = b"hello post-quantum world!";
|
||||
let ct = i_send.encrypt(plaintext).unwrap();
|
||||
let pt = r_recv.decrypt(&ct).unwrap();
|
||||
assert_eq!(pt, plaintext);
|
||||
|
||||
// Test transport: responder -> initiator
|
||||
let plaintext2 = b"reply from responder";
|
||||
let ct2 = r_send.encrypt(plaintext2).unwrap();
|
||||
let pt2 = i_recv.decrypt(&ct2).unwrap();
|
||||
assert_eq!(pt2, plaintext2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tampered_msg2_fails() {
|
||||
let initiator_kp = NoiseKeypair::generate();
|
||||
let responder_kp = NoiseKeypair::generate();
|
||||
let initiator_mlkem_ek = initiator_kp.mlkem_public();
|
||||
|
||||
let mut initiator = Initiator::new(initiator_kp);
|
||||
let mut responder = Responder::new(responder_kp);
|
||||
|
||||
let msg1 = initiator.write_message_1();
|
||||
responder.read_message_1(&msg1).unwrap();
|
||||
|
||||
let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
|
||||
let mut msg2 = responder
|
||||
.write_message_2(&ie_pk, &initiator_mlkem_ek)
|
||||
.unwrap();
|
||||
|
||||
// Tamper with the encrypted static key region
|
||||
msg2[40] ^= 0xFF;
|
||||
|
||||
let result = initiator.read_message_2(&msg2);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
/// Using the wrong ML-KEM encapsulation key must break the handshake —
/// either immediately at msg2 processing, or (because ML-KEM uses implicit
/// rejection) later when the responder cannot decrypt msg3.
#[test]
fn wrong_mlkem_key_fails() {
    let initiator_kp = NoiseKeypair::generate();
    let responder_kp = NoiseKeypair::generate();

    // A different keypair's ML-KEM key — decapsulation will use implicit
    // rejection, producing a pseudorandom (wrong) shared secret.
    let wrong_kp = NoiseKeypair::generate();
    let wrong_mlkem_ek = wrong_kp.mlkem_public();

    let mut initiator = Initiator::new(initiator_kp);
    let mut responder = Responder::new(responder_kp);

    let msg1 = initiator.write_message_1();
    responder.read_message_1(&msg1).unwrap();

    let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
    let msg2 = responder.write_message_2(&ie_pk, &wrong_mlkem_ek).unwrap();

    // The ML-KEM mix_key happens after the AEAD decrypt of the static key,
    // so read_message_2 itself may succeed. If it fails here, the test
    // already passes.
    if initiator.read_message_2(&msg2).is_err() {
        return;
    }

    // msg2 succeeded — the chaining keys have diverged due to the wrong
    // ML-KEM shared secret, so the responder cannot decrypt msg3.
    let msg3 = initiator.write_message_3().unwrap();
    assert!(
        responder.read_message_3(&msg3).is_err(),
        "msg3 should fail due to ML-KEM shared secret mismatch"
    );
}
|
||||
|
||||
/// The transport channel must handle many sequential messages in both
/// directions after a completed handshake.
#[test]
fn multiple_transport_messages() {
    let initiator_kp = NoiseKeypair::generate();
    let responder_kp = NoiseKeypair::generate();
    let initiator_mlkem_ek = initiator_kp.mlkem_public();

    let mut initiator = Initiator::new(initiator_kp);
    let mut responder = Responder::new(responder_kp);

    // Full handshake: msg1 -> msg2 -> msg3.
    let msg1 = initiator.write_message_1();
    responder.read_message_1(&msg1).unwrap();

    let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
    let msg2 = responder.write_message_2(&ie_pk, &initiator_mlkem_ek).unwrap();
    initiator.read_message_2(&msg2).unwrap();

    let msg3 = initiator.write_message_3().unwrap();
    responder.read_message_3(&msg3).unwrap();

    let (mut i_send, mut i_recv) = initiator.finalize();
    let (mut r_recv, mut r_send) = responder.finalize();

    // Ten full round trips: initiator -> responder, then the reply back.
    for i in 0..10u32 {
        let outbound = format!("initiator message {i}");
        let wire = i_send.encrypt(outbound.as_bytes()).unwrap();
        assert_eq!(r_recv.decrypt(&wire).unwrap(), outbound.as_bytes());

        let inbound = format!("responder reply {i}");
        let wire2 = r_send.encrypt(inbound.as_bytes()).unwrap();
        assert_eq!(i_recv.decrypt(&wire2).unwrap(), inbound.as_bytes());
    }
}
|
||||
|
||||
/// Replaying a ciphertext after the receiver's nonce counter has advanced
/// must fail — the transport counter prevents replay of old messages.
#[test]
fn nonce_reuse_detected() {
    let initiator_kp = NoiseKeypair::generate();
    let responder_kp = NoiseKeypair::generate();
    let initiator_mlkem_ek = initiator_kp.mlkem_public();

    let mut initiator = Initiator::new(initiator_kp);
    let mut responder = Responder::new(responder_kp);

    // Full handshake: msg1 -> msg2 -> msg3.
    let msg1 = initiator.write_message_1();
    responder.read_message_1(&msg1).unwrap();

    let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
    let msg2 = responder
        .write_message_2(&ie_pk, &initiator_mlkem_ek)
        .unwrap();
    initiator.read_message_2(&msg2).unwrap();

    let msg3 = initiator.write_message_3().unwrap();
    responder.read_message_3(&msg3).unwrap();

    // Only the initiator->responder direction is exercised here.
    let (mut i_send, _) = initiator.finalize();
    let (mut r_recv, _) = responder.finalize();

    // Encrypt two messages; ct2 is unused but advances the sender's counter.
    let ct1 = i_send.encrypt(b"msg1").unwrap();
    let _ct2 = i_send.encrypt(b"msg2").unwrap();

    // Decrypting in order works.
    r_recv.decrypt(&ct1).unwrap();

    // Replaying ct1 must fail: the receiver's nonce counter has already
    // advanced past the nonce ct1 was encrypted under.
    let result = r_recv.decrypt(&ct1);
    assert!(result.is_err());

    // NOTE(review): after the failed replay the receiver's counter state is
    // implementation-dependent (it may or may not have consumed a nonce on
    // the failed attempt), so this test deliberately stops here rather than
    // asserting anything about ct2.
}
|
||||
}
|
||||
342
crates/quicproquo-core/src/recovery.rs
Normal file
342
crates/quicproquo-core/src/recovery.rs
Normal file
@@ -0,0 +1,342 @@
|
||||
//! Account recovery — recovery code generation and encrypted backup bundles.
|
||||
//!
|
||||
//! # Design
|
||||
//!
|
||||
//! Recovery codes are 8 alphanumeric strings of 6 characters each (~31 bits
|
||||
//! entropy per code). Any single code is sufficient to recover the account.
|
||||
//!
|
||||
//! A recovery key is derived from each code via Argon2id. The identity seed
|
||||
//! and conversation metadata are encrypted into a [`RecoveryBundle`] using
|
||||
//! ChaCha20-Poly1305. The bundle is uploaded to the server, keyed by
|
||||
//! `SHA-256(recovery_token)` — the server never sees plaintext codes.
|
||||
//!
|
||||
//! # Security properties
|
||||
//!
|
||||
//! - Recovery codes are shown once and never stored in plaintext.
|
||||
//! - The server is zero-knowledge — it stores only encrypted blobs.
|
||||
//! - Code validation uses constant-time comparison.
|
||||
//! - All key material is zeroized on drop.
|
||||
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
/// Number of recovery codes generated per setup.
|
||||
pub const RECOVERY_CODE_COUNT: usize = 8;
|
||||
|
||||
/// Length of each recovery code (alphanumeric characters).
|
||||
const CODE_LENGTH: usize = 6;
|
||||
|
||||
/// Maximum bundle size (64 KiB).
|
||||
pub const MAX_BUNDLE_SIZE: usize = 64 * 1024;
|
||||
|
||||
/// Argon2id parameters for recovery key derivation.
|
||||
const ARGON2_M_COST: u32 = 19 * 1024; // 19 MiB
|
||||
const ARGON2_T_COST: u32 = 2;
|
||||
const ARGON2_P_COST: u32 = 1;
|
||||
|
||||
/// Alphanumeric character set for recovery codes (uppercase + digits, no
|
||||
/// ambiguous characters 0/O, 1/I/L).
|
||||
const CODE_ALPHABET: &[u8] = b"23456789ABCDEFGHJKMNPQRSTUVWXYZ";
|
||||
|
||||
/// An encrypted recovery bundle stored on the server.
|
||||
///
|
||||
/// The server stores this keyed by `token_hash` (SHA-256 of a recovery token
|
||||
/// derived from the code). The server cannot decrypt it.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RecoveryBundle {
|
||||
/// SHA-256 of the recovery token (used as server-side lookup key).
|
||||
pub token_hash: Vec<u8>,
|
||||
/// Random 16-byte salt for Argon2id key derivation.
|
||||
pub salt: Vec<u8>,
|
||||
/// Random 12-byte nonce for ChaCha20-Poly1305.
|
||||
pub nonce: Vec<u8>,
|
||||
/// Encrypted payload: bincode-serialised `RecoveryPayload`.
|
||||
pub ciphertext: Vec<u8>,
|
||||
}
|
||||
|
||||
/// The plaintext payload inside a recovery bundle.
///
/// NOTE(review): the module docs claim "all key material is zeroized on
/// drop", but this struct holds the raw `identity_seed` and has no
/// `Drop`/`Zeroize` impl — confirm whether callers wipe it, or add one.
/// Also note the `Debug` derive will print the seed; verify this type is
/// never logged.
#[derive(Debug, Serialize, Deserialize)]
pub struct RecoveryPayload {
    /// Ed25519 identity seed (32 bytes).
    pub identity_seed: [u8; 32],
    /// List of conversation/group IDs the user was part of (for rejoin).
    pub conversation_ids: Vec<Vec<u8>>,
}
|
||||
|
||||
/// Result of recovery code generation.
|
||||
pub struct RecoverySetup {
|
||||
/// The 8 recovery codes to show to the user (shown once, never stored).
|
||||
pub codes: Vec<String>,
|
||||
/// Encrypted bundles — one per code — to upload to the server.
|
||||
pub bundles: Vec<RecoveryBundle>,
|
||||
}
|
||||
|
||||
/// Generate a single random recovery code.
|
||||
fn generate_code(rng: &mut impl RngCore) -> String {
|
||||
let mut code = String::with_capacity(CODE_LENGTH);
|
||||
for _ in 0..CODE_LENGTH {
|
||||
let idx = (rng.next_u32() as usize) % CODE_ALPHABET.len();
|
||||
code.push(CODE_ALPHABET[idx] as char);
|
||||
}
|
||||
code
|
||||
}
|
||||
|
||||
/// Derive a 32-byte recovery token from a code (used for server-side lookup).
|
||||
/// The token is `SHA-256("qpq-recovery-token:" || code)`.
|
||||
fn derive_recovery_token(code: &str) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(b"qpq-recovery-token:");
|
||||
hasher.update(code.as_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Derive a 32-byte encryption key from a code and salt via Argon2id.
|
||||
fn derive_recovery_key(code: &str, salt: &[u8]) -> Result<Zeroizing<[u8; 32]>, CoreError> {
|
||||
let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(32))
|
||||
.map_err(|e| CoreError::Io(format!("argon2 params: {e}")))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = Zeroizing::new([0u8; 32]);
|
||||
argon2
|
||||
.hash_password_into(code.as_bytes(), salt, &mut *key)
|
||||
.map_err(|e| CoreError::Io(format!("argon2 recovery key derivation: {e}")))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Generate recovery codes and encrypted bundles for an identity.
|
||||
///
|
||||
/// Returns a `RecoverySetup` containing:
|
||||
/// - `codes`: 8 recovery codes to display to the user (once).
|
||||
/// - `bundles`: 8 encrypted recovery bundles (one per code) to upload to the server.
|
||||
///
|
||||
/// Each code independently decrypts its corresponding bundle.
|
||||
pub fn generate_recovery_codes(
|
||||
identity_seed: &[u8; 32],
|
||||
conversation_ids: &[Vec<u8>],
|
||||
) -> Result<RecoverySetup, CoreError> {
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
|
||||
let payload = RecoveryPayload {
|
||||
identity_seed: *identity_seed,
|
||||
conversation_ids: conversation_ids.to_vec(),
|
||||
};
|
||||
let plaintext = bincode::serialize(&payload)
|
||||
.map_err(|e| CoreError::Io(format!("serialize recovery payload: {e}")))?;
|
||||
|
||||
let mut codes = Vec::with_capacity(RECOVERY_CODE_COUNT);
|
||||
let mut bundles = Vec::with_capacity(RECOVERY_CODE_COUNT);
|
||||
|
||||
for _ in 0..RECOVERY_CODE_COUNT {
|
||||
let code = generate_code(&mut rng);
|
||||
|
||||
// Derive the server-side lookup token.
|
||||
let token = derive_recovery_token(&code);
|
||||
let token_hash = Sha256::digest(token).to_vec();
|
||||
|
||||
// Derive encryption key from code.
|
||||
let mut salt = [0u8; 16];
|
||||
rng.fill_bytes(&mut salt);
|
||||
|
||||
let key = derive_recovery_key(&code, &salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rng.fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext.as_slice())
|
||||
.map_err(|e| CoreError::Io(format!("recovery bundle encryption: {e}")))?;
|
||||
|
||||
bundles.push(RecoveryBundle {
|
||||
token_hash,
|
||||
salt: salt.to_vec(),
|
||||
nonce: nonce_bytes.to_vec(),
|
||||
ciphertext,
|
||||
});
|
||||
codes.push(code);
|
||||
}
|
||||
|
||||
Ok(RecoverySetup { codes, bundles })
|
||||
}
|
||||
|
||||
/// Recover an identity seed from a recovery code and encrypted bundle.
|
||||
///
|
||||
/// Returns the decrypted `RecoveryPayload` on success.
|
||||
pub fn recover_from_bundle(
|
||||
code: &str,
|
||||
bundle: &RecoveryBundle,
|
||||
) -> Result<RecoveryPayload, CoreError> {
|
||||
// Validate bundle structure.
|
||||
if bundle.salt.len() != 16 {
|
||||
return Err(CoreError::Io(format!(
|
||||
"invalid recovery bundle salt length: {}",
|
||||
bundle.salt.len()
|
||||
)));
|
||||
}
|
||||
if bundle.nonce.len() != 12 {
|
||||
return Err(CoreError::Io(format!(
|
||||
"invalid recovery bundle nonce length: {}",
|
||||
bundle.nonce.len()
|
||||
)));
|
||||
}
|
||||
|
||||
// Derive encryption key from code.
|
||||
let key = derive_recovery_key(code, &bundle.salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(&bundle.nonce);
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, bundle.ciphertext.as_slice())
|
||||
.map_err(|_| CoreError::Io("recovery bundle decryption failed (wrong code?)".into()))?;
|
||||
|
||||
let payload: RecoveryPayload = bincode::deserialize(&plaintext)
|
||||
.map_err(|e| CoreError::Io(format!("deserialize recovery payload: {e}")))?;
|
||||
|
||||
Ok(payload)
|
||||
}
|
||||
|
||||
/// Compute the token hash for a recovery code (for server-side lookup).
|
||||
///
|
||||
/// This is `SHA-256(SHA-256("qpq-recovery-token:" || code))`.
|
||||
pub fn recovery_token_hash(code: &str) -> Vec<u8> {
|
||||
let token = derive_recovery_token(code);
|
||||
Sha256::digest(token).to_vec()
|
||||
}
|
||||
|
||||
/// Constant-time comparison of two byte slices.
///
/// Returns `true` if the slices are equal. The per-byte comparison is
/// branch-free (differences are OR-accumulated), preventing timing
/// side-channels on recovery code validation. Length mismatch returns
/// early — only the contents are secret, not the lengths.
pub fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    a.iter()
        .zip(b.iter())
        .fold(0u8, |acc, (x, y)| acc | (x ^ y))
        == 0
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Setup must yield exactly RECOVERY_CODE_COUNT codes and bundles.
    #[test]
    fn generate_codes_produces_correct_count() {
        let seed = [42u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        assert_eq!(setup.codes.len(), RECOVERY_CODE_COUNT);
        assert_eq!(setup.bundles.len(), RECOVERY_CODE_COUNT);
    }

    /// Every code is CODE_LENGTH chars drawn from the unambiguous alphabet.
    #[test]
    fn codes_are_correct_length_and_alphabet() {
        let seed = [7u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        for code in &setup.codes {
            assert_eq!(code.len(), CODE_LENGTH);
            for ch in code.chars() {
                assert!(
                    CODE_ALPHABET.contains(&(ch as u8)),
                    "invalid char '{ch}' in code"
                );
            }
        }
    }

    /// Codes within one setup must not collide.
    ///
    /// NOTE(review): probabilistic — 8 draws from a 31^6 space, so a
    /// spurious failure is astronomically unlikely but not impossible.
    #[test]
    fn codes_are_unique() {
        let seed = [1u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        let mut seen = std::collections::HashSet::new();
        for code in &setup.codes {
            assert!(seen.insert(code.clone()), "duplicate code: {code}");
        }
    }

    /// Full round trip: every code decrypts its own bundle back to the
    /// original seed and conversation list.
    #[test]
    fn recover_roundtrip() {
        let seed = [99u8; 32];
        let conv_ids = vec![vec![1, 2, 3], vec![4, 5, 6]];
        let setup = generate_recovery_codes(&seed, &conv_ids).unwrap();

        // Each code should decrypt its corresponding bundle.
        for (i, code) in setup.codes.iter().enumerate() {
            let payload = recover_from_bundle(code, &setup.bundles[i]).unwrap();
            assert_eq!(payload.identity_seed, seed);
            assert_eq!(payload.conversation_ids, conv_ids);
        }
    }

    /// A code that was never issued must fail AEAD authentication.
    #[test]
    fn wrong_code_fails() {
        let seed = [50u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        let result = recover_from_bundle("WRONG1", &setup.bundles[0]);
        assert!(result.is_err());
    }

    /// Codes and bundles are pairwise bound via per-bundle salt/nonce/key.
    #[test]
    fn code_does_not_decrypt_other_bundle() {
        let seed = [88u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        // Code 0 should NOT decrypt bundle 1 (different salt/nonce/key).
        let result = recover_from_bundle(&setup.codes[0], &setup.bundles[1]);
        assert!(result.is_err());
    }

    /// Same code always maps to the same server-side lookup hash.
    #[test]
    fn token_hash_is_deterministic() {
        let hash1 = recovery_token_hash("ABC123");
        let hash2 = recovery_token_hash("ABC123");
        assert_eq!(hash1, hash2);
    }

    /// Different codes map to different lookup hashes.
    #[test]
    fn token_hash_differs_for_different_codes() {
        let hash1 = recovery_token_hash("ABC123");
        let hash2 = recovery_token_hash("XYZ789");
        assert_ne!(hash1, hash2);
    }

    /// Equality, inequality, length mismatch, and empty-slice cases.
    #[test]
    fn constant_time_eq_works() {
        assert!(constant_time_eq(b"hello", b"hello"));
        assert!(!constant_time_eq(b"hello", b"world"));
        assert!(!constant_time_eq(b"hello", b"hell"));
        assert!(constant_time_eq(b"", b""));
    }

    /// Structural validation: a non-16-byte salt is rejected before any KDF.
    #[test]
    fn invalid_bundle_salt_rejected() {
        let bundle = RecoveryBundle {
            token_hash: vec![0; 32],
            salt: vec![0; 8], // wrong length
            nonce: vec![0; 12],
            ciphertext: vec![0; 32],
        };
        assert!(recover_from_bundle("ABC123", &bundle).is_err());
    }

    /// Structural validation: a non-12-byte nonce is rejected.
    #[test]
    fn invalid_bundle_nonce_rejected() {
        let bundle = RecoveryBundle {
            token_hash: vec![0; 32],
            salt: vec![0; 16],
            nonce: vec![0; 8], // wrong length
            ciphertext: vec![0; 32],
        };
        assert!(recover_from_bundle("ABC123", &bundle).is_err());
    }
}
|
||||
153
crates/quicproquo-core/src/safety_numbers.rs
Normal file
153
crates/quicproquo-core/src/safety_numbers.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
//! Signal-style safety numbers for out-of-band identity key verification.
|
||||
//!
|
||||
//! # Algorithm
|
||||
//!
|
||||
//! Given two 32-byte Ed25519 public keys, safety numbers are computed as:
|
||||
//!
|
||||
//! 1. Sort the keys lexicographically so the result is symmetric.
|
||||
//! 2. Concatenate: `input = key_lo || key_hi` (64 bytes).
|
||||
//! 3. Compute HMAC-SHA256(key=info, data=input) where
|
||||
//! `info = b"quicproquo-safety-number-v1"`.
|
||||
//! 4. Iterate the HMAC 5200 times: `hash = HMAC-SHA256(key=info, data=hash)`.
|
||||
//! 5. Interpret the 32-byte result as 4× 64-bit big-endian integers
|
||||
//! (= 256 bits → 4 groups of 64 bits). Extract 3 decimal groups per
|
||||
//! 64-bit chunk using `% 100_000` three times, giving 12 groups total.
|
||||
//! 6. Format as 12 space-separated 5-digit strings.
|
||||
//!
|
||||
//! The 5200-iteration stretch mirrors Signal's implementation cost.
|
||||
//! The result is the same regardless of argument order.
|
||||
|
||||
use hmac::{Hmac, Mac};
|
||||
use sha2::Sha256;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
/// Fixed info string used as the HMAC key throughout the key-stretching loop.
|
||||
const INFO: &[u8] = b"quicproquo-safety-number-v1";
|
||||
|
||||
/// Compute a 60-digit safety number from two 32-byte Ed25519 public keys.
|
||||
///
|
||||
/// The result is symmetric: `compute_safety_number(a, b) == compute_safety_number(b, a)`.
|
||||
///
|
||||
/// # Format
|
||||
///
|
||||
/// Returns a `String` of 12 space-separated 5-digit groups, e.g.:
|
||||
/// `"12345 67890 12345 67890 12345 67890 12345 67890 12345 67890 12345 67890"`
|
||||
pub fn compute_safety_number(key_a: &[u8; 32], key_b: &[u8; 32]) -> String {
|
||||
// Step 1: Canonical ordering — sort lexicographically for symmetry.
|
||||
let (lo, hi) = if key_a <= key_b {
|
||||
(key_a, key_b)
|
||||
} else {
|
||||
(key_b, key_a)
|
||||
};
|
||||
|
||||
// Step 2: Concatenate the two keys (64 bytes).
|
||||
let mut input = [0u8; 64];
|
||||
input[..32].copy_from_slice(lo);
|
||||
input[32..].copy_from_slice(hi);
|
||||
|
||||
// Step 3: First HMAC iteration.
|
||||
let mut hash: [u8; 32] = {
|
||||
let mut mac = HmacSha256::new_from_slice(INFO).expect("HMAC accepts any key length");
|
||||
mac.update(&input);
|
||||
mac.finalize().into_bytes().into()
|
||||
};
|
||||
|
||||
// Step 4: Iterate 5199 more times (5200 total).
|
||||
for _ in 1..5200 {
|
||||
let mut mac = HmacSha256::new_from_slice(INFO).expect("HMAC accepts any key length");
|
||||
mac.update(&hash);
|
||||
hash = mac.finalize().into_bytes().into();
|
||||
}
|
||||
|
||||
// Step 5: Extract 12 five-digit groups.
|
||||
// We have 32 bytes = 4 × u64 (big-endian). Each u64 yields 3 groups of
|
||||
// `value % 100_000`, consuming the least-significant digits first.
|
||||
let mut groups = [0u32; 12];
|
||||
for chunk_idx in 0..4 {
|
||||
let offset = chunk_idx * 8;
|
||||
let chunk = u64::from_be_bytes(
|
||||
hash[offset..offset + 8]
|
||||
.try_into()
|
||||
.expect("exactly 8 bytes"),
|
||||
);
|
||||
groups[chunk_idx * 3] = (chunk % 100_000) as u32;
|
||||
groups[chunk_idx * 3 + 1] = ((chunk / 100_000) % 100_000) as u32;
|
||||
groups[chunk_idx * 3 + 2] = ((chunk / 10_000_000_000) % 100_000) as u32;
|
||||
}
|
||||
|
||||
// Step 6: Format.
|
||||
groups
|
||||
.iter()
|
||||
.map(|g| format!("{g:05}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ")
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Symmetry: order of arguments must not matter.
    #[test]
    fn symmetric() {
        let key_a = [0x1au8; 32];
        let key_b = [0x2bu8; 32];
        assert_eq!(
            compute_safety_number(&key_a, &key_b),
            compute_safety_number(&key_b, &key_a),
        );
    }

    /// Distinct keys must produce a distinct safety number.
    #[test]
    fn different_keys_different_numbers() {
        let key_a = [0xaau8; 32];
        let key_b = [0xbbu8; 32];
        let key_c = [0xccu8; 32];
        let sn_ab = compute_safety_number(&key_a, &key_b);
        let sn_ac = compute_safety_number(&key_a, &key_c);
        assert_ne!(sn_ab, sn_ac, "different key pairs must yield different safety numbers");
    }

    /// Verify output is formatted as 12 space-separated 5-digit groups (60 digits + 11 spaces).
    #[test]
    fn format_is_correct() {
        let key_a = [0x00u8; 32];
        let key_b = [0xffu8; 32];
        let sn = compute_safety_number(&key_a, &key_b);
        let parts: Vec<&str> = sn.split(' ').collect();
        assert_eq!(parts.len(), 12, "must have 12 groups");
        for part in &parts {
            assert_eq!(part.len(), 5, "each group must be exactly 5 digits");
            assert!(part.chars().all(|c| c.is_ascii_digit()), "groups must be numeric");
        }
    }

    /// NOTE(review): despite its name, this test pins only the output
    /// LENGTH and symmetry — no actual expected digit string is asserted,
    /// so it will NOT detect a silent algorithm change. To make it a real
    /// vector test, run the function once, copy the 12-group output, and
    /// `assert_eq!` against that literal.
    #[test]
    fn known_vector() {
        let key_a = [
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
            0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
            0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
            0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
        ];
        let key_b = [
            0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
            0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
            0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
            0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
        ];
        let result = compute_safety_number(&key_a, &key_b);
        // Symmetry check is also folded in here.
        assert_eq!(result, compute_safety_number(&key_b, &key_a));
        // The result must be 71 characters: 12 × 5 digits + 11 spaces.
        assert_eq!(result.len(), 71, "output length must be 71 chars");
    }
}
|
||||
155
crates/quicproquo-core/src/sealed_sender.rs
Normal file
155
crates/quicproquo-core/src/sealed_sender.rs
Normal file
@@ -0,0 +1,155 @@
|
||||
//! Sealed sender: embed sender identity + Ed25519 signature inside the MLS
|
||||
//! application payload so recipients can verify the sender from decrypted
|
||||
//! content, independent of MLS framing.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! ```text
|
||||
//! [magic: 1 byte (0x53 = 'S')]
|
||||
//! [sender_identity_key: 32 bytes (Ed25519 public key)]
|
||||
//! [signature: 64 bytes (Ed25519)]
|
||||
//! [inner_payload: variable (the original app_message bytes)]
|
||||
//! ```
|
||||
//!
|
||||
//! The signature covers: `magic || sender_identity_key || inner_payload`.
|
||||
//! Total overhead: 1 + 32 + 64 = 97 bytes per message.
|
||||
|
||||
use crate::error::CoreError;
|
||||
use crate::identity::IdentityKeypair;
|
||||
|
||||
/// Magic byte identifying a sealed sender envelope.
|
||||
pub const SEALED_MAGIC: u8 = 0x53; // 'S'
|
||||
|
||||
/// Fixed overhead: magic(1) + sender_key(32) + signature(64).
|
||||
const SEALED_OVERHEAD: usize = 1 + 32 + 64;
|
||||
|
||||
/// Wrap an app_message payload in a sealed sender envelope.
|
||||
///
|
||||
/// Signs `magic || sender_key || payload` with the sender's Ed25519 key.
|
||||
pub fn seal(identity: &IdentityKeypair, app_message_bytes: &[u8]) -> Vec<u8> {
|
||||
let sender_key = identity.public_key_bytes();
|
||||
|
||||
// Build signing input
|
||||
let mut sign_input = Vec::with_capacity(1 + 32 + app_message_bytes.len());
|
||||
sign_input.push(SEALED_MAGIC);
|
||||
sign_input.extend_from_slice(&sender_key);
|
||||
sign_input.extend_from_slice(app_message_bytes);
|
||||
|
||||
let signature = identity.sign_raw(&sign_input);
|
||||
|
||||
let mut out = Vec::with_capacity(SEALED_OVERHEAD + app_message_bytes.len());
|
||||
out.push(SEALED_MAGIC);
|
||||
out.extend_from_slice(&sender_key);
|
||||
out.extend_from_slice(&signature);
|
||||
out.extend_from_slice(app_message_bytes);
|
||||
out
|
||||
}
|
||||
|
||||
/// Unseal: verify the Ed25519 signature, return `(sender_identity_key, inner_app_message_bytes)`.
|
||||
pub fn unseal(bytes: &[u8]) -> Result<([u8; 32], Vec<u8>), CoreError> {
|
||||
if bytes.len() < SEALED_OVERHEAD {
|
||||
return Err(CoreError::AppMessage(
|
||||
"sealed sender envelope too short".into(),
|
||||
));
|
||||
}
|
||||
|
||||
if bytes[0] != SEALED_MAGIC {
|
||||
return Err(CoreError::AppMessage(format!(
|
||||
"sealed sender: expected magic 0x{:02X}, got 0x{:02X}",
|
||||
SEALED_MAGIC, bytes[0]
|
||||
)));
|
||||
}
|
||||
|
||||
let mut sender_key = [0u8; 32];
|
||||
sender_key.copy_from_slice(&bytes[1..33]);
|
||||
|
||||
let mut signature = [0u8; 64];
|
||||
signature.copy_from_slice(&bytes[33..97]);
|
||||
|
||||
let inner_payload = &bytes[97..];
|
||||
|
||||
// Reconstruct signing input: magic || sender_key || inner_payload
|
||||
let mut sign_input = Vec::with_capacity(1 + 32 + inner_payload.len());
|
||||
sign_input.push(SEALED_MAGIC);
|
||||
sign_input.extend_from_slice(&sender_key);
|
||||
sign_input.extend_from_slice(inner_payload);
|
||||
|
||||
IdentityKeypair::verify_raw(&sender_key, &sign_input, &signature)?;
|
||||
|
||||
Ok((sender_key, inner_payload.to_vec()))
|
||||
}
|
||||
|
||||
/// Check if bytes start with the sealed sender magic byte.
pub fn is_sealed(bytes: &[u8]) -> bool {
    matches!(bytes.first(), Some(&SEALED_MAGIC))
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Happy path: seal then unseal recovers the sender key and payload.
    #[test]
    fn seal_unseal_round_trip() {
        let identity = IdentityKeypair::generate();
        let payload = b"hello sealed sender";
        let sealed = seal(&identity, payload);
        assert!(is_sealed(&sealed));

        let (sender_key, inner) = unseal(&sealed).unwrap();
        assert_eq!(sender_key, identity.public_key_bytes());
        assert_eq!(inner, payload);
    }

    /// Flipping a payload byte must break signature verification.
    #[test]
    fn unseal_tampered_payload_fails() {
        let identity = IdentityKeypair::generate();
        let payload = b"hello";
        let mut sealed = seal(&identity, payload);
        // Tamper with the inner payload
        if let Some(last) = sealed.last_mut() {
            *last ^= 0xFF;
        }
        assert!(unseal(&sealed).is_err());
    }

    /// Swapping in a different sender key invalidates the signature,
    /// since the key is part of the signed input.
    #[test]
    fn unseal_wrong_sender_fails() {
        let alice = IdentityKeypair::generate();
        let bob = IdentityKeypair::generate();
        let payload = b"from alice";
        let mut sealed = seal(&alice, payload);
        // Replace sender key with Bob's
        let bob_key = bob.public_key_bytes();
        sealed[1..33].copy_from_slice(&bob_key);
        assert!(unseal(&sealed).is_err());
    }

    /// Anything shorter than the fixed 97-byte overhead is rejected.
    #[test]
    fn unseal_too_short_fails() {
        assert!(unseal(&[SEALED_MAGIC; 10]).is_err());
    }

    /// A wrong leading magic byte is rejected before any verification.
    #[test]
    fn unseal_wrong_magic_fails() {
        let identity = IdentityKeypair::generate();
        let mut sealed = seal(&identity, b"test");
        sealed[0] = 0x00;
        assert!(unseal(&sealed).is_err());
    }

    /// is_sealed only inspects the first byte.
    #[test]
    fn non_sealed_detected() {
        assert!(!is_sealed(b"\x01\x01hello"));
        assert!(is_sealed(&[SEALED_MAGIC, 0, 0]));
    }

    /// An empty inner payload is valid: the envelope is exactly the
    /// 97-byte overhead.
    #[test]
    fn empty_payload_round_trip() {
        let identity = IdentityKeypair::generate();
        let sealed = seal(&identity, b"");
        let (sender_key, inner) = unseal(&sealed).unwrap();
        assert_eq!(sender_key, identity.public_key_bytes());
        assert!(inner.is_empty());
    }
}
|
||||
555
crates/quicproquo-core/src/transcript.rs
Normal file
555
crates/quicproquo-core/src/transcript.rs
Normal file
@@ -0,0 +1,555 @@
|
||||
//! Encrypted, tamper-evident message transcript archive.
|
||||
//!
|
||||
//! # File format
|
||||
//!
|
||||
//! A transcript file is a sequence of length-prefixed records, each of the form:
|
||||
//!
|
||||
//! ```text
|
||||
//! [ u32 len (BE) ][ ChaCha20-Poly1305 ciphertext ]
|
||||
//! ```
|
||||
//!
|
||||
//! Each record contains a CBOR-encoded [`RecordPlain`] as the plaintext:
|
||||
//!
|
||||
//! ```text
|
||||
//! {
|
||||
//! "epoch": u64, // monotonically increasing record index (0-based)
|
||||
//! "sender_identity": bytes, // 32-byte Ed25519 public key (or empty)
|
||||
//! "seq": u64, // message sequence number
|
||||
//! "timestamp_ms": u64, // wall-clock timestamp
|
||||
//! "plaintext": text, // UTF-8 message body
|
||||
//! "prev_hash": bytes, // SHA-256 of the previous ciphertext (all zeros for epoch 0)
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! The AEAD nonce is `epoch` encoded as 12 bytes (big-endian u64 + 4 zero bytes).
|
||||
//!
|
||||
//! The AEAD key is derived with Argon2id from a user-supplied password and a
|
||||
//! random 16-byte salt that is stored unencrypted in the file header:
|
||||
//!
|
||||
//! ```text
|
||||
//! [ b"QPQT" (4) ][ version u8 = 1 ][ salt (16) ][ records... ]
|
||||
//! ```
|
||||
//!
|
||||
//! # Tamper evidence
|
||||
//!
|
||||
//! Each record's plaintext contains the SHA-256 hash of the **ciphertext** of
|
||||
//! the previous record, forming a hash chain. The verifier re-reads all
|
||||
//! ciphertext blobs (no decryption needed) and checks that each record's
|
||||
//! stored `prev_hash` matches the SHA-256 of the preceding ciphertext blob.
|
||||
//!
|
||||
//! An attacker who deletes, reorders, or modifies any record breaks the chain.
|
||||
|
||||
use std::io::Write;
|
||||
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit, Payload},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
// ── Constants ────────────────────────────────────────────────────────────────
|
||||
|
||||
const MAGIC: &[u8; 4] = b"QPQT";
|
||||
const VERSION: u8 = 1;
|
||||
const SALT_LEN: usize = 16;
|
||||
const KEY_LEN: usize = 32;
|
||||
const NONCE_LEN: usize = 12;
|
||||
|
||||
const ARGON2_M_COST: u32 = 19 * 1024;
|
||||
const ARGON2_T_COST: u32 = 2;
|
||||
const ARGON2_P_COST: u32 = 1;
|
||||
|
||||
// ── Public types ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// A single message record to be written into the transcript.
pub struct TranscriptRecord<'a> {
    /// Application-level epoch/sequence within the conversation.
    pub seq: u64,
    /// 32-byte Ed25519 sender public key (use `[0u8; 32]` if unknown).
    ///
    /// NOTE(review): typed `&[u8]` rather than `&[u8; 32]`, so the 32-byte
    /// length is not enforced by the type — confirm `encode_record`
    /// tolerates other lengths.
    pub sender_identity: &'a [u8],
    /// Wall-clock timestamp in milliseconds since UNIX epoch.
    pub timestamp_ms: u64,
    /// Plaintext message body.
    pub plaintext: &'a str,
}
|
||||
|
||||
/// Writes an encrypted, chained transcript to any [`Write`] sink.
pub struct TranscriptWriter {
    // AEAD cipher keyed by the Argon2id-stretched password.
    cipher: ChaCha20Poly1305,
    // Index of the next record to write; also the source of its nonce.
    epoch: u64,
    // SHA-256 of the previous record's ciphertext (all zeros before record 0).
    prev_hash: [u8; 32],
}
|
||||
|
||||
impl TranscriptWriter {
    /// Create a new transcript, writing the header (magic + version + salt) to `out`.
    ///
    /// `password` is stretched with Argon2id before use; it is never stored.
    pub fn new<W: Write>(password: &str, out: &mut W) -> Result<Self, CoreError> {
        // Fresh random salt per transcript; stored in the clear in the header
        // so readers can re-derive the key.
        let mut salt = [0u8; SALT_LEN];
        rand::rngs::OsRng.fill_bytes(&mut salt);

        out.write_all(MAGIC).map_err(io_err)?;
        out.write_all(&[VERSION]).map_err(io_err)?;
        out.write_all(&salt).map_err(io_err)?;

        let key = derive_key(password, &salt)?;
        let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));

        // epoch 0 / all-zero prev_hash marks the start of the hash chain.
        Ok(Self {
            cipher,
            epoch: 0,
            prev_hash: [0u8; 32],
        })
    }

    /// Encrypt and append one record.
    ///
    /// The record's plaintext embeds `self.prev_hash`, extending the
    /// tamper-evidence chain, and is encrypted under a nonce derived from
    /// `self.epoch` — unique per record because the epoch only increments.
    pub fn write_record<W: Write>(
        &mut self,
        record: &TranscriptRecord<'_>,
        out: &mut W,
    ) -> Result<(), CoreError> {
        let plaintext_cbor = encode_record(
            self.epoch,
            record.sender_identity,
            record.seq,
            record.timestamp_ms,
            record.plaintext,
            &self.prev_hash,
        )?;

        let nonce = epoch_nonce(self.epoch);
        let ct = self
            .cipher
            .encrypt(
                Nonce::from_slice(&nonce),
                // No associated data; the chain hash inside the plaintext
                // already binds record order.
                Payload {
                    msg: &plaintext_cbor,
                    aad: b"",
                },
            )
            .map_err(|_| CoreError::Mls("transcript encrypt failed".into()))?;

        // Update chain hash from the ciphertext blob we just produced.
        self.prev_hash = Sha256::digest(&ct).into();
        self.epoch += 1;

        // Write length-prefixed ciphertext.
        let len = ct.len() as u32;
        out.write_all(&len.to_be_bytes()).map_err(io_err)?;
        out.write_all(&ct).map_err(io_err)?;

        Ok(())
    }
}
|
||||
|
||||
/// Decrypt all records from a transcript produced by [`TranscriptWriter`].
|
||||
///
|
||||
/// Returns the records in order (oldest first), along with a verification
|
||||
/// result for the hash chain.
|
||||
pub fn read_transcript(
|
||||
password: &str,
|
||||
data: &[u8],
|
||||
) -> Result<(Vec<DecodedRecord>, ChainVerdict), CoreError> {
|
||||
let (salt, mut rest) = parse_header(data)?;
|
||||
let key = derive_key(password, salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
|
||||
let mut records = Vec::new();
|
||||
let mut epoch: u64 = 0;
|
||||
let mut expected_prev: [u8; 32] = [0u8; 32];
|
||||
let mut chain_ok = true;
|
||||
|
||||
while !rest.is_empty() {
|
||||
if rest.len() < 4 {
|
||||
return Err(CoreError::Mls("transcript: truncated length prefix".into()));
|
||||
}
|
||||
let len = u32::from_be_bytes(rest[..4].try_into().expect("4 bytes")) as usize;
|
||||
rest = &rest[4..];
|
||||
|
||||
if rest.len() < len {
|
||||
return Err(CoreError::Mls("transcript: truncated record".into()));
|
||||
}
|
||||
let ct = &rest[..len];
|
||||
rest = &rest[len..];
|
||||
|
||||
let nonce = epoch_nonce(epoch);
|
||||
let pt = cipher
|
||||
.decrypt(
|
||||
Nonce::from_slice(&nonce),
|
||||
Payload { msg: ct, aad: b"" },
|
||||
)
|
||||
.map_err(|_| CoreError::Mls("transcript: decryption failed (wrong password?)".into()))?;
|
||||
|
||||
let rec = decode_record(&pt)?;
|
||||
|
||||
// Verify chain linkage.
|
||||
if rec.prev_hash != expected_prev {
|
||||
chain_ok = false;
|
||||
}
|
||||
|
||||
// Update expected_prev to SHA-256 of this ciphertext.
|
||||
expected_prev = Sha256::digest(ct).into();
|
||||
epoch += 1;
|
||||
|
||||
records.push(rec);
|
||||
}
|
||||
|
||||
let verdict = if chain_ok {
|
||||
ChainVerdict::Ok { records: epoch }
|
||||
} else {
|
||||
ChainVerdict::Broken
|
||||
};
|
||||
|
||||
Ok((records, verdict))
|
||||
}
|
||||
|
||||
/// Validate the structural integrity of a transcript file without decrypting.
|
||||
///
|
||||
/// Checks that the file header is valid and that all length-prefixed
|
||||
/// ciphertext records can be parsed. Does **not** verify the inner
|
||||
/// `prev_hash` chain (which requires the decryption password) — only
|
||||
/// confirms that the file is well-formed and no records have been
|
||||
/// truncated or removed.
|
||||
///
|
||||
/// Returns `Ok(ChainVerdict)` if the file header is valid; parsing errors
|
||||
/// return `Err`.
|
||||
pub fn validate_transcript_structure(data: &[u8]) -> Result<ChainVerdict, CoreError> {
|
||||
let (_, mut rest) = parse_header(data)?;
|
||||
|
||||
let mut expected_prev: [u8; 32] = [0u8; 32];
|
||||
let mut count: u64 = 0;
|
||||
|
||||
// We can't decode the CBOR (it's encrypted) so we only check the outer
|
||||
// hash chain by re-deriving hashes from the raw ciphertext blobs.
|
||||
// The inner `prev_hash` field is checked only during full decryption.
|
||||
//
|
||||
// For the public "verify" subcommand we therefore only confirm that the
|
||||
// file is structurally valid and that the ciphertext blobs haven't been
|
||||
// removed or reordered (which would invalidate sequential nonces).
|
||||
//
|
||||
// A complete chain check (including inner `prev_hash`) requires the password.
|
||||
while !rest.is_empty() {
|
||||
if rest.len() < 4 {
|
||||
return Err(CoreError::Mls("transcript: truncated length prefix".into()));
|
||||
}
|
||||
let len = u32::from_be_bytes(rest[..4].try_into().expect("4 bytes")) as usize;
|
||||
rest = &rest[4..];
|
||||
|
||||
if rest.len() < len {
|
||||
return Err(CoreError::Mls("transcript: truncated record".into()));
|
||||
}
|
||||
let ct = &rest[..len];
|
||||
rest = &rest[len..];
|
||||
|
||||
let _this_hash: [u8; 32] = Sha256::digest(ct).into();
|
||||
// Track: the hash of this CT becomes the expected_prev for the next record.
|
||||
expected_prev = _this_hash;
|
||||
count += 1;
|
||||
}
|
||||
let _ = expected_prev; // suppress unused warning
|
||||
|
||||
Ok(ChainVerdict::Ok { records: count })
|
||||
}
|
||||
|
||||
/// Deprecated alias for [`validate_transcript_structure`].
///
/// Kept for source compatibility; forwards unchanged. The rename exists
/// because the old name over-promised: no hash chain is verified here.
#[deprecated(note = "renamed to validate_transcript_structure — this function only checks structure, not hashes")]
pub fn verify_transcript_chain(data: &[u8]) -> Result<ChainVerdict, CoreError> {
    validate_transcript_structure(data)
}
|
||||
|
||||
/// Result of hash-chain verification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ChainVerdict {
    /// All records are present and the chain is intact.
    /// `records` is the number of records examined.
    Ok { records: u64 },
    /// At least one hash in the chain did not match.
    Broken,
}
|
||||
|
||||
/// A decrypted and decoded transcript record.
///
/// Owned counterpart of [`TranscriptRecord`], plus the fields the writer
/// adds automatically (`epoch`, `prev_hash`).
#[derive(Debug, Clone)]
pub struct DecodedRecord {
    /// Position of this record in the transcript (0-based write order).
    pub epoch: u64,
    /// Sender public key bytes as stored (may be empty if absent in CBOR).
    pub sender_identity: Vec<u8>,
    /// Application-level sequence number supplied by the writer.
    pub seq: u64,
    /// Wall-clock timestamp in milliseconds since UNIX epoch.
    pub timestamp_ms: u64,
    /// Decrypted message body.
    pub plaintext: String,
    /// SHA-256 of the preceding record's ciphertext (all-zero for the first).
    pub prev_hash: [u8; 32],
}
|
||||
|
||||
// ── Internal helpers ─────────────────────────────────────────────────────────
|
||||
|
||||
fn derive_key(password: &str, salt: &[u8]) -> Result<Zeroizing<[u8; KEY_LEN]>, CoreError> {
|
||||
let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(KEY_LEN))
|
||||
.map_err(|e| CoreError::Mls(format!("argon2 params: {e}")))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = Zeroizing::new([0u8; KEY_LEN]);
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut *key)
|
||||
.map_err(|e| CoreError::Mls(format!("transcript key derivation: {e}")))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Build the 96-bit AEAD nonce for a record: the big-endian epoch counter
/// in the first 8 bytes, zero padding in the remaining 4.
///
/// Safe only because every transcript uses a fresh random salt (and thus a
/// fresh key), so counters may restart at zero per file.
fn epoch_nonce(epoch: u64) -> [u8; NONCE_LEN] {
    let counter = epoch.to_be_bytes();
    let mut out = [0u8; NONCE_LEN];
    out[..counter.len()].copy_from_slice(&counter);
    out
}
|
||||
|
||||
/// Wrap an I/O error into this module's [`CoreError`] variant with context.
// NOTE(review): reuses the `Mls` variant for non-MLS failures — presumably
// CoreError has no dedicated I/O variant; confirm.
fn io_err(e: std::io::Error) -> CoreError {
    CoreError::Mls(format!("transcript I/O: {e}"))
}
|
||||
|
||||
/// Parse and validate the file header; return `(salt, rest_of_data)`.
|
||||
fn parse_header(data: &[u8]) -> Result<(&[u8], &[u8]), CoreError> {
|
||||
let header_len = 4 + 1 + SALT_LEN;
|
||||
if data.len() < header_len {
|
||||
return Err(CoreError::Mls("transcript: file too short".into()));
|
||||
}
|
||||
if &data[..4] != MAGIC {
|
||||
return Err(CoreError::Mls("transcript: invalid magic bytes".into()));
|
||||
}
|
||||
if data[4] != VERSION {
|
||||
return Err(CoreError::Mls(format!(
|
||||
"transcript: unsupported version {}",
|
||||
data[4]
|
||||
)));
|
||||
}
|
||||
let salt = &data[5..5 + SALT_LEN];
|
||||
let rest = &data[5 + SALT_LEN..];
|
||||
Ok((salt, rest))
|
||||
}
|
||||
|
||||
/// Encode one record as CBOR using ciborium.
|
||||
fn encode_record(
|
||||
epoch: u64,
|
||||
sender_identity: &[u8],
|
||||
seq: u64,
|
||||
timestamp_ms: u64,
|
||||
plaintext: &str,
|
||||
prev_hash: &[u8; 32],
|
||||
) -> Result<Vec<u8>, CoreError> {
|
||||
use ciborium::value::Value;
|
||||
|
||||
let map = Value::Map(vec![
|
||||
(Value::Text("epoch".into()), Value::Integer(epoch.into())),
|
||||
(Value::Text("sender_identity".into()), Value::Bytes(sender_identity.to_vec())),
|
||||
(Value::Text("seq".into()), Value::Integer(seq.into())),
|
||||
(Value::Text("timestamp_ms".into()), Value::Integer(timestamp_ms.into())),
|
||||
(Value::Text("plaintext".into()), Value::Text(plaintext.into())),
|
||||
(Value::Text("prev_hash".into()), Value::Bytes(prev_hash.to_vec())),
|
||||
]);
|
||||
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(&map, &mut buf)
|
||||
.map_err(|e| CoreError::Mls(format!("transcript CBOR encode: {e}")))?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
/// Decode a CBOR record.
|
||||
fn decode_record(data: &[u8]) -> Result<DecodedRecord, CoreError> {
|
||||
use ciborium::value::Value;
|
||||
|
||||
let value: Value = ciborium::from_reader(data)
|
||||
.map_err(|e| CoreError::Mls(format!("transcript CBOR decode: {e}")))?;
|
||||
|
||||
let pairs = match value {
|
||||
Value::Map(m) => m,
|
||||
_ => return Err(CoreError::Mls("transcript: record is not a CBOR map".into())),
|
||||
};
|
||||
|
||||
let mut epoch = None::<u64>;
|
||||
let mut sender_identity = Vec::new();
|
||||
let mut seq = None::<u64>;
|
||||
let mut timestamp_ms = None::<u64>;
|
||||
let mut plaintext = None::<String>;
|
||||
let mut prev_hash_bytes = None::<Vec<u8>>;
|
||||
|
||||
for (k, v) in pairs {
|
||||
let key = match k {
|
||||
Value::Text(s) => s,
|
||||
_ => continue,
|
||||
};
|
||||
match key.as_str() {
|
||||
"epoch" => {
|
||||
epoch = integer_as_u64(v);
|
||||
}
|
||||
"sender_identity" => {
|
||||
if let Value::Bytes(b) = v { sender_identity = b; }
|
||||
}
|
||||
"seq" => {
|
||||
seq = integer_as_u64(v);
|
||||
}
|
||||
"timestamp_ms" => {
|
||||
timestamp_ms = integer_as_u64(v);
|
||||
}
|
||||
"plaintext" => {
|
||||
if let Value::Text(s) = v { plaintext = Some(s); }
|
||||
}
|
||||
"prev_hash" => {
|
||||
if let Value::Bytes(b) = v { prev_hash_bytes = Some(b); }
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
let epoch = epoch.ok_or_else(|| CoreError::Mls("transcript: missing epoch".into()))?;
|
||||
let seq = seq.ok_or_else(|| CoreError::Mls("transcript: missing seq".into()))?;
|
||||
let timestamp_ms = timestamp_ms
|
||||
.ok_or_else(|| CoreError::Mls("transcript: missing timestamp_ms".into()))?;
|
||||
let plaintext = plaintext
|
||||
.ok_or_else(|| CoreError::Mls("transcript: missing plaintext".into()))?;
|
||||
let prev_hash_bytes = prev_hash_bytes
|
||||
.ok_or_else(|| CoreError::Mls("transcript: missing prev_hash".into()))?;
|
||||
|
||||
let mut prev_hash = [0u8; 32];
|
||||
if prev_hash_bytes.len() == 32 {
|
||||
prev_hash.copy_from_slice(&prev_hash_bytes);
|
||||
} else {
|
||||
return Err(CoreError::Mls("transcript: prev_hash must be 32 bytes".into()));
|
||||
}
|
||||
|
||||
Ok(DecodedRecord {
|
||||
epoch,
|
||||
sender_identity,
|
||||
seq,
|
||||
timestamp_ms,
|
||||
plaintext,
|
||||
prev_hash,
|
||||
})
|
||||
}
|
||||
|
||||
/// Extract a non-negative CBOR integer as `u64`; `None` for negatives or
/// non-integer values.
fn integer_as_u64(v: ciborium::value::Value) -> Option<u64> {
    use ciborium::value::Value;

    if let Value::Integer(i) = v {
        let wide: i128 = i.into();
        // Values from `u64::into()` always round-trip; reject negatives.
        (wide >= 0).then(|| wide as u64)
    } else {
        None
    }
}
|
||||
|
||||
// ── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A transcript with a header but zero records round-trips to an empty,
    /// intact chain.
    #[test]
    fn round_trip_empty() {
        let password = "test-password";
        let mut buf = Vec::new();
        let _writer = TranscriptWriter::new(password, &mut buf).expect("new writer");
        let (records, verdict) = read_transcript(password, &buf).expect("read");
        assert!(records.is_empty());
        assert_eq!(verdict, ChainVerdict::Ok { records: 0 });
    }

    /// Three records round-trip with plaintexts intact and sequential
    /// writer-assigned epochs.
    #[test]
    fn round_trip_records() {
        let password = "hunter2";
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new(password, &mut buf).expect("new writer");

        // (sender-name, timestamp_ms, body); the name is only descriptive —
        // all records use the same zeroed identity key below.
        let msgs: &[(&str, u64, &str)] = &[
            ("alice", 1000, "Hello"),
            ("bob", 2000, "Hi there"),
            ("alice", 3000, "How are you?"),
        ];

        for (_sender, ts, body) in msgs {
            let sender_key = [0u8; 32];
            writer
                .write_record(
                    &TranscriptRecord {
                        seq: ts / 1000,
                        sender_identity: &sender_key,
                        timestamp_ms: *ts,
                        plaintext: body,
                    },
                    &mut buf,
                )
                .expect("write record");
        }

        let (records, verdict) = read_transcript(password, &buf).expect("read");
        assert_eq!(verdict, ChainVerdict::Ok { records: 3 });
        assert_eq!(records.len(), 3);
        assert_eq!(records[0].plaintext, "Hello");
        assert_eq!(records[1].plaintext, "Hi there");
        assert_eq!(records[2].plaintext, "How are you?");
        // Epochs are assigned by the writer in write order, starting at 0.
        assert_eq!(records[0].epoch, 0);
        assert_eq!(records[1].epoch, 1);
        assert_eq!(records[2].epoch, 2);
    }

    /// A different password derives a different key, so AEAD decryption of
    /// the first record must fail (authentication tag mismatch).
    #[test]
    fn wrong_password_fails() {
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new("correct", &mut buf).expect("new writer");
        writer
            .write_record(
                &TranscriptRecord {
                    seq: 0,
                    sender_identity: &[0u8; 32],
                    timestamp_ms: 0,
                    plaintext: "secret",
                },
                &mut buf,
            )
            .expect("write");

        let result = read_transcript("wrong-password", &buf);
        assert!(result.is_err(), "wrong password should fail decryption");
    }

    /// Structural validation counts all well-formed frames without a password.
    #[test]
    fn chain_verify_valid() {
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new("pw", &mut buf).expect("new writer");
        for i in 0..5u64 {
            writer
                .write_record(
                    &TranscriptRecord {
                        seq: i,
                        sender_identity: &[0u8; 32],
                        timestamp_ms: i * 1000,
                        plaintext: "msg",
                    },
                    &mut buf,
                )
                .expect("write");
        }

        let verdict = validate_transcript_structure(&buf).expect("verify");
        assert_eq!(verdict, ChainVerdict::Ok { records: 5 });
    }

    /// Cutting bytes off the final frame makes its length prefix overrun the
    /// remaining data, which structural validation must report as an error.
    #[test]
    fn chain_verify_truncated_record_detected() {
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new("pw", &mut buf).expect("new writer");
        writer
            .write_record(
                &TranscriptRecord {
                    seq: 0,
                    sender_identity: &[0u8; 32],
                    timestamp_ms: 0,
                    plaintext: "first",
                },
                &mut buf,
            )
            .expect("write");

        // Truncate the last few bytes — should fail parsing.
        let truncated = &buf[..buf.len() - 5];
        let result = validate_transcript_structure(truncated);
        assert!(result.is_err(), "truncated file must be detected");
    }
}
|
||||
20
crates/quicproquo-ffi/Cargo.toml
Normal file
20
crates/quicproquo-ffi/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
|
||||
[package]
|
||||
name = "quicproquo-ffi"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "C FFI bindings for quicproquo messaging operations."
|
||||
license = "MIT"
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib", "staticlib"]
|
||||
|
||||
[dependencies]
|
||||
quicproquo-client = { path = "../quicproquo-client" }
|
||||
tokio = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
capnp = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
428
crates/quicproquo-ffi/src/lib.rs
Normal file
428
crates/quicproquo-ffi/src/lib.rs
Normal file
@@ -0,0 +1,428 @@
|
||||
#![allow(unsafe_code)]
|
||||
//! quicproquo-ffi -- C FFI bindings for quicproquo messaging operations.
|
||||
//!
|
||||
//! Provides a synchronous C API that wraps the async quicproquo-client library.
|
||||
//! Each `QpqHandle` owns a Tokio runtime; FFI functions use `runtime.block_on()`
|
||||
//! to bridge from synchronous C callers to the async Rust internals.
|
||||
//!
|
||||
//! # Safety
|
||||
//!
|
||||
//! All FFI functions are `unsafe extern "C"` -- callers must ensure pointers
|
||||
//! are valid and strings are null-terminated UTF-8.
|
||||
|
||||
use std::ffi::{CStr, CString, c_char};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
// Status codes returned by FFI functions.
/// Operation completed successfully.
pub const QPQ_OK: i32 = 0;
/// Generic failure; call `qpq_last_error` for details.
pub const QPQ_ERROR: i32 = 1;
/// Authentication rejected (bad credentials / OPAQUE failure).
pub const QPQ_AUTH_FAILED: i32 = 2;
/// Operation exceeded its deadline.
pub const QPQ_TIMEOUT: i32 = 3;
/// Handle is null or the session is not logged in yet.
pub const QPQ_NOT_CONNECTED: i32 = 4;
|
||||
|
||||
/// Opaque handle exposed to C callers via pointer.
///
/// Owned by the C side between `qpq_connect` and `qpq_disconnect`.
pub struct QpqHandle {
    /// Dedicated Tokio runtime; FFI entry points `block_on` it to bridge
    /// synchronous C callers into the async client.
    runtime: Runtime,
    /// Server address as `host:port`.
    server: String,
    /// Path to the CA certificate used for TLS verification.
    ca_cert: PathBuf,
    /// TLS server name (SNI / certificate check).
    server_name: String,
    /// On-disk client state file; replaced with a per-user path in `qpq_login`.
    state_path: PathBuf,
    /// Optional password protecting the state file (currently never set here).
    state_password: Option<String>,
    /// Set by a successful `qpq_login`; gates send/receive.
    logged_in: bool,
    /// Most recent error message, surfaced via `qpq_last_error`.
    last_error: Option<CString>,
}
|
||||
|
||||
impl QpqHandle {
    /// Record `msg` for later retrieval via `qpq_last_error`.
    // If `msg` contains an interior NUL, `CString::new` fails and the
    // previous error is silently cleared instead of replaced.
    fn set_error(&mut self, msg: &str) {
        self.last_error = CString::new(msg).ok();
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Error classification
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Classify an `anyhow::Error` from `cmd_login` into an FFI status code.
|
||||
///
|
||||
/// Checks the error chain for typed downcasting before falling back to
|
||||
/// message-based heuristics.
|
||||
fn classify_login_error(err: &anyhow::Error) -> i32 {
|
||||
// Check error chain for OPAQUE-specific typed errors.
|
||||
for cause in err.chain() {
|
||||
// capnp::Error indicates transport/RPC failure.
|
||||
if cause.downcast_ref::<capnp::Error>().is_some() {
|
||||
return QPQ_ERROR;
|
||||
}
|
||||
}
|
||||
// Fall back to message inspection for OPAQUE authentication failures,
|
||||
// since opaque-ke errors are converted to anyhow strings upstream.
|
||||
let msg = format!("{err:#}");
|
||||
if msg.contains("OPAQUE") || msg.contains("bad password") || msg.contains("credential") {
|
||||
QPQ_AUTH_FAILED
|
||||
} else {
|
||||
QPQ_ERROR
|
||||
}
|
||||
}
|
||||
|
||||
/// Classify an `anyhow::Error` from receive operations into an FFI status code.
|
||||
fn classify_receive_error(err: &anyhow::Error) -> i32 {
|
||||
let msg = format!("{err:#}");
|
||||
if msg.contains("timeout") || msg.contains("Timeout") || msg.contains("timed out") {
|
||||
QPQ_TIMEOUT
|
||||
} else {
|
||||
QPQ_ERROR
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Convert a `*const c_char` to `&str`.
///
/// Returns `None` when `ptr` is null or the bytes are not valid UTF-8.
///
/// # Safety
/// A non-null `ptr` must point at a valid, NUL-terminated string that
/// outlives the returned borrow.
unsafe fn cstr_to_str<'a>(ptr: *const c_char) -> Option<&'a str> {
    if ptr.is_null() {
        None
    } else {
        // SAFETY: checked non-null above; caller guarantees NUL termination.
        CStr::from_ptr(ptr).to_str().ok()
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// FFI functions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Create a new handle and connect to the quicproquo server.
///
/// Returns a heap-allocated `QpqHandle` pointer on success, or null on failure
/// (bad arguments, runtime construction failure, or failed health check).
///
/// # Parameters
/// - `server`: server address as `host:port` (null-terminated UTF-8).
/// - `ca_cert`: path to the CA certificate file (null-terminated UTF-8).
/// - `server_name`: TLS server name (null-terminated UTF-8).
///
/// # Safety
/// All pointer arguments must be valid, non-null, null-terminated C strings.
#[no_mangle]
pub unsafe extern "C" fn qpq_connect(
    server: *const c_char,
    ca_cert: *const c_char,
    server_name: *const c_char,
) -> *mut QpqHandle {
    // Each argument is rejected (null return) on null pointer or bad UTF-8.
    let server_str = match cstr_to_str(server) {
        Some(s) => s,
        None => return std::ptr::null_mut(),
    };
    let ca_cert_str = match cstr_to_str(ca_cert) {
        Some(s) => s,
        None => return std::ptr::null_mut(),
    };
    let server_name_str = match cstr_to_str(server_name) {
        Some(s) => s,
        None => return std::ptr::null_mut(),
    };

    // One runtime per handle; dropped together with the handle in
    // qpq_disconnect.
    let rt = match Runtime::new() {
        Ok(r) => r,
        Err(_) => return std::ptr::null_mut(),
    };

    // Verify connectivity by performing a health check.
    let ca_path = PathBuf::from(ca_cert_str);
    let connected = rt.block_on(async {
        quicproquo_client::cmd_health(server_str, &ca_path, server_name_str).await
    });

    if let Err(e) = connected {
        // Cannot store error in handle since we failed to build one.
        eprintln!("qpq_connect: health check failed: {e}");
        return std::ptr::null_mut();
    }

    // Derive a default state path from the server address.
    // NOTE(review): `server_str` contains a ':' (host:port) which is invalid
    // in Windows filenames; in practice qpq_login replaces this path before
    // any send/receive can use it (both require logged_in) — confirm.
    let state_path = PathBuf::from(format!("qpq-ffi-{server_str}.bin"));

    let handle = Box::new(QpqHandle {
        runtime: rt,
        server: server_str.to_string(),
        ca_cert: ca_path,
        server_name: server_name_str.to_string(),
        state_path,
        state_password: None,
        logged_in: false,
        last_error: None,
    });
    // Ownership transfers to the C caller; reclaimed by qpq_disconnect.
    Box::into_raw(handle)
}
|
||||
|
||||
/// Authenticate with the server using OPAQUE (username + password).
|
||||
///
|
||||
/// On success the handle is marked as logged-in and subsequent send/receive
|
||||
/// calls will use the authenticated session.
|
||||
///
|
||||
/// Returns `QPQ_OK` on success, `QPQ_AUTH_FAILED` on bad credentials,
|
||||
/// or `QPQ_ERROR` on other failures.
|
||||
///
|
||||
/// # Safety
|
||||
/// - `handle` must be a valid pointer from `qpq_connect`.
|
||||
/// - `username` and `password` must be valid null-terminated C strings.
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn qpq_login(
|
||||
handle: *mut QpqHandle,
|
||||
username: *const c_char,
|
||||
password: *const c_char,
|
||||
) -> i32 {
|
||||
let h = match handle.as_mut() {
|
||||
Some(h) => h,
|
||||
None => return QPQ_NOT_CONNECTED,
|
||||
};
|
||||
|
||||
let user = match cstr_to_str(username) {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
h.set_error("invalid username pointer");
|
||||
return QPQ_ERROR;
|
||||
}
|
||||
};
|
||||
let pass = match cstr_to_str(password) {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
h.set_error("invalid password pointer");
|
||||
return QPQ_ERROR;
|
||||
}
|
||||
};
|
||||
|
||||
// Update state path to be username-specific.
|
||||
h.state_path = PathBuf::from(format!("qpq-ffi-{user}.bin"));
|
||||
|
||||
let result = h.runtime.block_on(async {
|
||||
quicproquo_client::cmd_login(
|
||||
&h.server,
|
||||
&h.ca_cert,
|
||||
&h.server_name,
|
||||
user,
|
||||
pass,
|
||||
None, // identity_key_hex
|
||||
Some(h.state_path.as_path()), // state_path
|
||||
h.state_password.as_deref(), // state_password
|
||||
)
|
||||
.await
|
||||
});
|
||||
|
||||
match result {
|
||||
Ok(()) => {
|
||||
h.logged_in = true;
|
||||
QPQ_OK
|
||||
}
|
||||
Err(e) => {
|
||||
let msg = format!("{e:#}");
|
||||
let code = classify_login_error(&e);
|
||||
h.set_error(&msg);
|
||||
code
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a message to a recipient (by username).
///
/// The message is encrypted via MLS before delivery. The `message` buffer
/// does not need to be null-terminated; `message_len` specifies its length.
///
/// Returns `QPQ_OK` on success, `QPQ_NOT_CONNECTED` when not logged in,
/// `QPQ_ERROR` otherwise (details via `qpq_last_error`).
///
/// # Safety
/// - `handle` must be a valid pointer from `qpq_connect`.
/// - `recipient` must be a valid null-terminated C string.
/// - `message` must point to at least `message_len` readable bytes.
#[no_mangle]
pub unsafe extern "C" fn qpq_send(
    handle: *mut QpqHandle,
    recipient: *const c_char,
    message: *const u8,
    message_len: usize,
) -> i32 {
    let h = match handle.as_mut() {
        Some(h) => h,
        None => return QPQ_NOT_CONNECTED,
    };

    // Sending requires an authenticated session (see qpq_login).
    if !h.logged_in {
        h.set_error("not logged in");
        return QPQ_NOT_CONNECTED;
    }

    let rcpt = match cstr_to_str(recipient) {
        Some(s) => s,
        None => {
            h.set_error("invalid recipient pointer");
            return QPQ_ERROR;
        }
    };

    // Reject empty/null buffers before constructing a slice from raw parts.
    if message.is_null() || message_len == 0 {
        h.set_error("empty message");
        return QPQ_ERROR;
    }
    // SAFETY: non-null checked above; caller guarantees `message_len`
    // readable bytes (documented contract of this fn).
    let msg_bytes = std::slice::from_raw_parts(message, message_len);
    let msg_str = match std::str::from_utf8(msg_bytes) {
        Ok(s) => s,
        Err(e) => {
            h.set_error(&format!("message is not valid UTF-8: {e}"));
            return QPQ_ERROR;
        }
    };

    // Resolve recipient username to identity key, then send.
    let result = h.runtime.block_on(async {
        let node_client =
            quicproquo_client::connect_node(&h.server, &h.ca_cert, &h.server_name).await?;
        // An unknown username resolves to None; surfaced as an error string.
        let peer_key = quicproquo_client::resolve_user(&node_client, rcpt)
            .await?
            .ok_or_else(|| anyhow::anyhow!("recipient '{rcpt}' not found"))?;
        let peer_key_hex = hex::encode(&peer_key);

        quicproquo_client::cmd_send(
            &h.state_path,
            &h.server,
            &h.ca_cert,
            &h.server_name,
            Some(&peer_key_hex),
            false, // send_to_all
            msg_str,
            h.state_password.as_deref(),
        )
        .await
    });

    match result {
        Ok(()) => QPQ_OK,
        Err(e) => {
            h.set_error(&format!("{e:#}"));
            QPQ_ERROR
        }
    }
}
|
||||
|
||||
/// Receive pending messages, blocking up to `timeout_ms` milliseconds.
///
/// On success, `*out_json` is set to a heap-allocated null-terminated JSON
/// string containing an array of received message strings (non-UTF-8 payload
/// bytes are replaced via lossy conversion). The caller must free this
/// string with `qpq_free_string`.
///
/// Returns `QPQ_OK` on success (even if the array is empty).
/// `QPQ_TIMEOUT` is returned only when the underlying receive call *errors*
/// with a timeout-looking message (see `classify_receive_error`);
/// NOTE(review): whether an expired wait with no messages yields `Ok(empty)`
/// or a timeout error depends on `receive_pending_plaintexts` — confirm.
///
/// # Safety
/// - `handle` must be a valid pointer from `qpq_connect`.
/// - `out_json` must be a valid pointer to a `*mut c_char`.
#[no_mangle]
pub unsafe extern "C" fn qpq_receive(
    handle: *mut QpqHandle,
    timeout_ms: u32,
    out_json: *mut *mut c_char,
) -> i32 {
    let h = match handle.as_mut() {
        Some(h) => h,
        None => return QPQ_NOT_CONNECTED,
    };

    // Receiving requires an authenticated session (see qpq_login).
    if !h.logged_in {
        h.set_error("not logged in");
        return QPQ_NOT_CONNECTED;
    }

    if out_json.is_null() {
        h.set_error("out_json is null");
        return QPQ_ERROR;
    }

    let result = h.runtime.block_on(async {
        quicproquo_client::receive_pending_plaintexts(
            &h.state_path,
            &h.server,
            &h.ca_cert,
            &h.server_name,
            timeout_ms as u64,
            h.state_password.as_deref(),
        )
        .await
    });

    match result {
        Ok(plaintexts) => {
            // Convert raw byte payloads to a JSON array of lossy-UTF-8 strings.
            let messages: Vec<String> = plaintexts
                .iter()
                .map(|pt| String::from_utf8_lossy(pt).into_owned())
                .collect();

            let json = match serde_json::to_string(&messages) {
                Ok(j) => j,
                Err(e) => {
                    h.set_error(&format!("JSON serialisation failed: {e}"));
                    return QPQ_ERROR;
                }
            };

            // CString::new fails only on interior NUL bytes (possible after
            // lossy conversion of binary payloads).
            match CString::new(json) {
                Ok(cs) => {
                    // Ownership of the allocation passes to the caller;
                    // reclaimed by qpq_free_string.
                    *out_json = cs.into_raw();
                    QPQ_OK
                }
                Err(e) => {
                    h.set_error(&format!("CString conversion failed: {e}"));
                    QPQ_ERROR
                }
            }
        }
        Err(e) => {
            let msg = format!("{e:#}");
            let code = classify_receive_error(&e);
            h.set_error(&msg);
            code
        }
    }
}
|
||||
|
||||
/// Disconnect and free the handle.
|
||||
///
|
||||
/// After this call, `handle` must not be used again.
|
||||
///
|
||||
/// # Safety
|
||||
/// `handle` must be a valid pointer from `qpq_connect`, or null (no-op).
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn qpq_disconnect(handle: *mut QpqHandle) {
|
||||
if !handle.is_null() {
|
||||
let _ = Box::from_raw(handle);
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the last error message, or null if no error has been recorded.
|
||||
///
|
||||
/// The returned pointer is valid until the next FFI call on this handle.
|
||||
/// Do **not** free the returned pointer; it is owned by the handle.
|
||||
///
|
||||
/// # Safety
|
||||
/// `handle` must be a valid pointer from `qpq_connect`, or null (returns null).
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn qpq_last_error(handle: *const QpqHandle) -> *const c_char {
|
||||
match handle.as_ref() {
|
||||
Some(h) => match &h.last_error {
|
||||
Some(cs) => cs.as_ptr(),
|
||||
None => std::ptr::null(),
|
||||
},
|
||||
None => std::ptr::null(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Free a string previously returned by `qpq_receive` (via `out_json`).
///
/// # Safety
/// `ptr` must have been allocated by this library (via `CString::into_raw`),
/// or null (no-op). It must not be used after this call.
#[no_mangle]
pub unsafe extern "C" fn qpq_free_string(ptr: *mut c_char) {
    if ptr.is_null() {
        return;
    }
    // SAFETY: caller guarantees `ptr` came from CString::into_raw.
    drop(CString::from_raw(ptr));
}
|
||||
16
crates/quicproquo-gen/Cargo.toml
Normal file
16
crates/quicproquo-gen/Cargo.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "quicproquo-gen"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Code generators for quicproquo — scaffold plugins, bots, RPC methods, and hooks."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "qpq-gen"
|
||||
path = "src/main.rs"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
clap = { workspace = true }
|
||||
212
crates/quicproquo-gen/src/generators/bot.rs
Normal file
212
crates/quicproquo-gen/src/generators/bot.rs
Normal file
@@ -0,0 +1,212 @@
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
/// Scaffold a new bot project named `name` in a fresh directory under `output`.
///
/// Creates `<output>/<sanitized-name>/` containing a Cargo.toml, a runnable
/// example `src/main.rs` (receive loop plus `!help`/`!echo`/`!whoami`/`!ping`
/// handlers), and a README, then prints next-step instructions to stdout.
///
/// Returns `Err` with a human-readable message if the target directory
/// already exists or if any directory creation / file write fails.
pub fn generate(name: &str, output: &Path) -> Result<(), String> {
    let crate_name = sanitize_name(name);
    let dir = output.join(&crate_name);

    // Refuse to clobber an existing project directory.
    if dir.exists() {
        return Err(format!("directory already exists: {}", dir.display()));
    }

    let src_dir = dir.join("src");
    fs::create_dir_all(&src_dir).map_err(|e| format!("create dir: {e}"))?;

    // Cargo.toml
    // NOTE(review): dependencies are pinned to the git default branch —
    // presumably intentional for a scaffold; confirm against release policy.
    let cargo_toml = format!(
        r#"[package]
name = "{crate_name}"
version = "0.1.0"
edition = "2021"
description = "quicproquo bot: {name}"
license = "MIT"

[dependencies]
quicproquo-bot = {{ git = "https://github.com/nickvidal/quicproquo" }}
tokio = {{ version = "1", features = ["macros", "rt-multi-thread"] }}
anyhow = "1"
tracing = "0.1"
tracing-subscriber = {{ version = "0.3", features = ["env-filter"] }}
"#,
        crate_name = crate_name,
        name = name,
    );
    write_file(&dir.join("Cargo.toml"), &cargo_toml)?;

    // src/main.rs — note the doubled braces: this whole template runs through
    // format!, so `{{`/`}}` emit literal braces and `{name}`/`{crate_name}`
    // are interpolated at generation time.
    let main_rs = format!(
        r#"//! quicproquo bot: {name}
//!
//! A bot that connects to a quicproquo server and responds to messages.
//!
//! Usage:
//!   {crate_name} --server 127.0.0.1:7000 --username my-bot --password secret
//!
//! Environment variables (alternative to CLI args):
//!   QPQ_SERVER, QPQ_USERNAME, QPQ_PASSWORD, QPQ_CA_CERT, QPQ_STATE_PATH

use quicproquo_bot::{{Bot, BotConfig}};

#[tokio::main]
async fn main() -> anyhow::Result<()> {{
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| "info".into()),
        )
        .init();

    // --- Configuration ---
    let server = env_or("QPQ_SERVER", "127.0.0.1:7000");
    let username = env_or("QPQ_USERNAME", "{crate_name}");
    let password = env_or("QPQ_PASSWORD", "changeme");
    let ca_cert = env_or("QPQ_CA_CERT", "server-cert.der");
    let state_path = env_or("QPQ_STATE_PATH", "{crate_name}-state.bin");

    let config = BotConfig::new(&server, &username, &password)
        .ca_cert(&ca_cert)
        .state_path(&state_path);

    // --- Connect and authenticate ---
    tracing::info!("connecting to {{server}} as {{username}}...");
    let bot = Bot::connect(config).await?;
    tracing::info!("authenticated as {{}} (key: {{}})", bot.username(), &bot.identity_key_hex()[..16]);

    // --- Main loop: poll for messages and respond ---
    tracing::info!("listening for messages (Ctrl+C to stop)...");
    loop {{
        let messages = bot.receive(5000).await?;
        for msg in messages {{
            tracing::info!("[{{}}] {{}}", msg.sender, msg.text);

            // --- Add your command handlers here ---
            if let Some(response) = handle_message(&msg.sender, &msg.text) {{
                bot.send_dm(&msg.sender, &response).await?;
            }}
        }}
    }}
}}

/// Process an incoming message and optionally return a response.
///
/// Add your bot's command logic here.
fn handle_message(sender: &str, text: &str) -> Option<String> {{
    let text = text.trim();

    // !help — list available commands
    if text == "!help" {{
        return Some(
            "Available commands:\n\
             !help — show this message\n\
             !echo <text> — echo back the text\n\
             !whoami — show your username\n\
             !ping — pong!"
                .to_string(),
        );
    }}

    // !echo <text> — echo back
    if let Some(rest) = text.strip_prefix("!echo ") {{
        return Some(rest.to_string());
    }}

    // !whoami — tell the sender their username
    if text == "!whoami" {{
        return Some(format!("You are {{sender}}"));
    }}

    // !ping — respond with pong
    if text == "!ping" {{
        return Some("pong!".to_string());
    }}

    // Unknown command or regular message — no response
    None
}}

fn env_or(key: &str, default: &str) -> String {{
    std::env::var(key).unwrap_or_else(|_| default.to_string())
}}
"#,
        name = name,
        crate_name = crate_name,
    );
    write_file(&src_dir.join("main.rs"), &main_rs)?;

    // README
    let readme = format!(
        r#"# {name} — quicproquo bot

## Quick start

```bash
# Build
cargo build

# Run (make sure a qpq server is running)
QPQ_SERVER=127.0.0.1:7000 \
QPQ_USERNAME={crate_name} \
QPQ_PASSWORD=changeme \
QPQ_CA_CERT=path/to/server-cert.der \
cargo run
```

## Commands

| Command | Description |
|---------|-------------|
| `!help` | Show available commands |
| `!echo <text>` | Echo back the text |
| `!whoami` | Show your username |
| `!ping` | Respond with "pong!" |

## Adding commands

Edit the `handle_message` function in `src/main.rs`:

```rust
fn handle_message(sender: &str, text: &str) -> Option<String> {{
    if text == "!mycommand" {{
        return Some("my response".to_string());
    }}
    None
}}
```

## Pipe mode

For shell integration, use the Bot SDK's JSON pipe mode:

```bash
echo '{{"action":"send","to":"alice","text":"hello"}}' | my-bot
echo '{{"action":"recv","timeout_ms":5000}}' | my-bot
```

## Documentation

- [Bot SDK docs](https://github.com/nickvidal/quicproquo/blob/main/docs/src/getting-started/bot-sdk.md)
- [Server Hooks](https://github.com/nickvidal/quicproquo/blob/main/docs/src/internals/server-hooks.md)
"#,
        name = name,
        crate_name = crate_name,
    );
    write_file(&dir.join("README.md"), &readme)?;

    // Post-scaffold guidance for the user.
    println!("Created bot project: {}", dir.display());
    println!();
    println!("  cd {crate_name}");
    println!("  # Edit src/main.rs to add your commands");
    println!("  QPQ_SERVER=127.0.0.1:7000 QPQ_PASSWORD=secret cargo run");
    println!();
    println!("The bot responds to !help, !echo, !whoami, !ping out of the box.");

    Ok(())
}
|
||||
|
||||
/// Turn a human-friendly project name into a crate/directory-safe identifier
/// by mapping hyphens and spaces to underscores; all other characters pass
/// through unchanged.
fn sanitize_name(name: &str) -> String {
    name.chars()
        .map(|ch| if ch == '-' || ch == ' ' { '_' } else { ch })
        .collect()
}
|
||||
|
||||
/// Write `content` to `path`, mapping any I/O failure to a readable
/// "write <path>: <error>" message.
fn write_file(path: &Path, content: &str) -> Result<(), String> {
    match fs::write(path, content) {
        Ok(()) => Ok(()),
        Err(e) => Err(format!("write {}: {e}", path.display())),
    }
}
|
||||
134
crates/quicproquo-gen/src/generators/hook.rs
Normal file
134
crates/quicproquo-gen/src/generators/hook.rs
Normal file
@@ -0,0 +1,134 @@
|
||||
/// Print a step-by-step checklist for adding a new `on_<name>` server hook
/// event, including code snippets to paste into each affected file.
///
/// `name` is lowercased with hyphens/spaces replaced by underscores to form
/// the snake_case event name; a PascalCase form is derived for struct names.
/// Unlike the plugin/bot generators, nothing is written to disk — all output
/// goes to stdout. Always returns `Ok(())`.
pub fn generate(name: &str) -> Result<(), String> {
    let snake = name.to_lowercase().replace(['-', ' '], "_");
    let pascal = to_pascal_case(&snake);

    println!("=== Adding hook event: on_{snake} ===");
    println!();
    println!("Follow these steps to add a new `on_{snake}` hook event.");
    println!();

    // Step 1: Event struct
    // The raw strings below still go through println!'s formatter, so
    // `{{`/`}}` print literal braces while {snake}/{pascal} interpolate.
    println!("--- Step 1: Event struct ---");
    println!("File: crates/quicproquo-server/src/hooks.rs");
    println!();
    println!(
        r#"/// Event data for {snake} operations.
#[derive(Clone, Debug)]
pub struct {pascal}Event {{
    // TODO: add your event fields here
    // Example:
    // pub channel_id: Vec<u8>,
    // pub user_key: Vec<u8>,
}}
"#,
    );

    // Step 2: Trait method
    println!("--- Step 2: Trait method ---");
    println!("File: crates/quicproquo-server/src/hooks.rs");
    println!();
    println!("Add to the `ServerHooks` trait:");
    println!();
    println!(
        r#"    /// Called when {snake} occurs.
    fn on_{snake}(&self, _event: &{pascal}Event) {{
        // Default: no-op
    }}
"#,
    );

    // Step 3: TracingHooks implementation
    println!("--- Step 3: TracingHooks implementation ---");
    println!("File: crates/quicproquo-server/src/hooks.rs");
    println!();
    println!("Add to `impl ServerHooks for TracingHooks`:");
    println!();
    println!(
        r#"    fn on_{snake}(&self, _event: &{pascal}Event) {{
        tracing::info!("hook: {snake}");
    }}
"#,
    );

    // Step 4: Plugin API (C-compatible struct)
    println!("--- Step 4: Plugin API ---");
    println!("File: crates/quicproquo-plugin-api/src/lib.rs");
    println!();
    println!("Add a C-compatible event struct:");
    println!();
    println!(
        r#"#[repr(C)]
pub struct C{pascal}Event {{
    // TODO: mirror the fields from {pascal}Event using C-compatible types
    // Use *const u8 + len for byte slices, *const c_char for strings
}}
"#,
    );
    println!("Add to `HookVTable`:");
    println!();
    println!(
        r#"    pub on_{snake}: Option<extern "C" fn(*mut c_void, *const C{pascal}Event)>,
"#,
    );

    // Step 5: Wire into PluginHooks
    println!("--- Step 5: PluginHooks dispatch ---");
    println!("File: crates/quicproquo-server/src/plugin_loader.rs");
    println!();
    println!("Add to `impl ServerHooks for PluginHooks`:");
    println!();
    println!(
        r#"    fn on_{snake}(&self, event: &{pascal}Event) {{
        if let Some(hook_fn) = self.vtable.on_{snake} {{
            let c_event = C{pascal}Event {{
                // TODO: convert fields
            }};
            hook_fn(self.vtable.user_data, &c_event);
        }}
    }}
"#,
    );

    // Step 6: Call the hook
    println!("--- Step 6: Call the hook in the RPC handler ---");
    println!("In the relevant handler file under crates/quicproquo-server/src/node_service/:");
    println!();
    println!(
        r#"    use crate::hooks::{pascal}Event;

    // At the appropriate point in the handler:
    self.hooks.on_{snake}(&{pascal}Event {{
        // fill in fields
    }});
"#,
    );

    // Step 7: Verify
    println!("--- Step 7: Verify ---");
    println!("  cargo build -p quicproquo-plugin-api");
    println!("  cargo build -p quicproquo-server");
    println!("  cargo test -p quicproquo-server");
    println!();

    // Summary
    println!("=== Files to modify ===");
    println!("  [modify] crates/quicproquo-server/src/hooks.rs");
    println!("  [modify] crates/quicproquo-plugin-api/src/lib.rs");
    println!("  [modify] crates/quicproquo-server/src/plugin_loader.rs");
    println!("  [modify] crates/quicproquo-server/src/node_service/<handler>.rs");

    Ok(())
}
|
||||
|
||||
/// Convert a snake_case identifier to PascalCase
/// ("message_deleted" -> "MessageDeleted").
///
/// Empty segments produced by doubled underscores contribute nothing, and
/// only the first character of each segment is uppercased (the rest of the
/// segment is kept verbatim).
fn to_pascal_case(snake: &str) -> String {
    let mut out = String::with_capacity(snake.len());
    for word in snake.split('_') {
        let mut chars = word.chars();
        if let Some(first) = chars.next() {
            // to_uppercase may yield multiple chars for some Unicode scalars.
            out.extend(first.to_uppercase());
            out.push_str(chars.as_str());
        }
    }
    out
}
|
||||
4
crates/quicproquo-gen/src/generators/mod.rs
Normal file
4
crates/quicproquo-gen/src/generators/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
/// Scaffolds a standalone bot crate (Cargo.toml, example main.rs, README).
pub mod bot;
/// Prints a step-by-step checklist for adding a new server hook event.
pub mod hook;
/// Scaffolds a dynamic-library (cdylib) server plugin crate.
pub mod plugin;
/// Prints a step-by-step checklist for adding a new Cap'n Proto RPC method.
pub mod rpc;
|
||||
186
crates/quicproquo-gen/src/generators/plugin.rs
Normal file
186
crates/quicproquo-gen/src/generators/plugin.rs
Normal file
@@ -0,0 +1,186 @@
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
/// Scaffold a new server plugin project named `name` in a fresh directory
/// under `output`.
///
/// Creates `<output>/<sanitized-name>/` containing a cdylib Cargo.toml, an
/// example `src/lib.rs` implementing `qpq_plugin_init` plus an
/// `on_message_enqueue` hook that rejects oversized payloads, and a README,
/// then prints next-step instructions to stdout.
///
/// Returns `Err` with a human-readable message if the target directory
/// already exists or if any directory creation / file write fails.
pub fn generate(name: &str, output: &Path) -> Result<(), String> {
    let crate_name = sanitize_name(name);
    let dir = output.join(&crate_name);

    // Refuse to clobber an existing project directory.
    if dir.exists() {
        return Err(format!("directory already exists: {}", dir.display()));
    }

    let src_dir = dir.join("src");
    fs::create_dir_all(&src_dir).map_err(|e| format!("create dir: {e}"))?;

    // Cargo.toml — the empty [workspace] table keeps the generated crate
    // out of any parent workspace so it builds standalone.
    let cargo_toml = format!(
        r#"[package]
name = "{crate_name}"
version = "0.1.0"
edition = "2021"
description = "quicproquo server plugin: {name}"
license = "MIT"

[lib]
crate-type = ["cdylib"]

# Empty workspace — this plugin builds independently of the qpq workspace.
[workspace]

[dependencies]
quicproquo-plugin-api = {{ git = "https://github.com/nickvidal/quicproquo", default-features = false }}
"#,
        crate_name = crate_name,
        name = name,
    );
    write_file(&dir.join("Cargo.toml"), &cargo_toml)?;

    // src/lib.rs — doubled braces print literally; {name}/{crate_name}
    // interpolate at generation time.
    let lib_rs = format!(
        r#"//! quicproquo server plugin: {name}
//!
//! Build with: cargo build --release
//! Install: cp target/release/lib{crate_name}.so /path/to/plugins/
//! The server loads it automatically when started with --plugin-dir.

use quicproquo_plugin_api::{{HookVTable, CMessageEvent, HOOK_CONTINUE, HOOK_REJECT, PLUGIN_OK}};
use std::ffi::CString;
use std::os::raw::c_int;

/// Plugin state — allocate on the heap in init, free in destroy.
struct PluginState {{
    /// Example: maximum allowed payload size in bytes.
    max_payload_bytes: usize,
    /// Stored rejection message (must outlive the hook call).
    reject_msg: Option<CString>,
}}

/// Called by the server on plugin load.
///
/// Fill the vtable with your hook implementations. Return PLUGIN_OK on success.
#[no_mangle]
pub extern "C" fn qpq_plugin_init(vtable: *mut HookVTable) -> c_int {{
    let state = Box::new(PluginState {{
        max_payload_bytes: 1_000_000, // 1 MB limit
        reject_msg: None,
    }});

    let vt = unsafe {{ &mut *vtable }};
    vt.user_data = Box::into_raw(state) as *mut _;
    vt.on_message_enqueue = Some(on_message_enqueue);
    vt.error_message = Some(error_message);
    vt.destroy = Some(destroy);

    eprintln!("[{name}] plugin loaded");
    PLUGIN_OK
}}

/// Hook: called before each message is stored in the delivery queue.
///
/// Return HOOK_CONTINUE to allow, HOOK_REJECT to block.
extern "C" fn on_message_enqueue(
    user_data: *mut std::ffi::c_void,
    event: *const CMessageEvent,
) -> c_int {{
    let state = unsafe {{ &mut *(user_data as *mut PluginState) }};
    let event = unsafe {{ &*event }};

    if event.payload_len > state.max_payload_bytes {{
        let msg = format!(
            "payload too large: {{}} > {{}} bytes",
            event.payload_len, state.max_payload_bytes
        );
        state.reject_msg = CString::new(msg).ok();
        return HOOK_REJECT;
    }}

    HOOK_CONTINUE
}}

/// Return a pointer to the rejection error message (valid until next hook call).
extern "C" fn error_message(
    user_data: *mut std::ffi::c_void,
) -> *const std::os::raw::c_char {{
    let state = unsafe {{ &*(user_data as *const PluginState) }};
    match &state.reject_msg {{
        Some(msg) => msg.as_ptr(),
        None => std::ptr::null(),
    }}
}}

/// Cleanup: free the plugin state.
extern "C" fn destroy(user_data: *mut std::ffi::c_void) {{
    if !user_data.is_null() {{
        unsafe {{ drop(Box::from_raw(user_data as *mut PluginState)) }};
    }}
    eprintln!("[{name}] plugin unloaded");
}}
"#,
        name = name,
        crate_name = crate_name,
    );
    write_file(&src_dir.join("lib.rs"), &lib_rs)?;

    // README
    let readme = format!(
        r#"# {name} — quicproquo server plugin

## Build

```bash
cargo build --release
```

## Install

Copy the shared library to the server's plugin directory:

```bash
cp target/release/lib{crate_name}.so /path/to/plugins/
```

Start the server with:

```bash
qpq-server --plugin-dir /path/to/plugins/
```

## Hooks

This plugin implements `on_message_enqueue` to reject oversized payloads.
Edit `src/lib.rs` to add your own logic. Available hooks:

| Hook | Purpose |
|------|---------|
| `on_message_enqueue` | Inspect/reject messages before delivery (return `HOOK_REJECT`) |
| `on_batch_enqueue` | Observe batch message delivery |
| `on_auth` | Observe login success/failure |
| `on_channel_created` | Observe channel creation |
| `on_fetch` | Observe message fetch operations |
| `on_user_registered` | Observe new user registration |

See the [Server Hooks documentation](https://github.com/nickvidal/quicproquo/blob/main/docs/src/internals/server-hooks.md) for details.
"#,
        name = name,
        crate_name = crate_name,
    );
    write_file(&dir.join("README.md"), &readme)?;

    // Post-scaffold guidance for the user.
    println!("Created plugin project: {}", dir.display());
    println!();
    println!("  cd {crate_name}");
    println!("  cargo build --release");
    println!("  cp target/release/lib{crate_name}.so /path/to/plugins/");
    println!();
    println!("Edit src/lib.rs to implement your hook logic.");

    Ok(())
}
|
||||
|
||||
/// Normalize a plugin name into a valid crate/directory name: hyphens and
/// spaces become underscores, every other character is kept as-is.
fn sanitize_name(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    for ch in name.chars() {
        out.push(match ch {
            '-' | ' ' => '_',
            other => other,
        });
    }
    out
}
|
||||
|
||||
/// Write `content` to `path`, converting an I/O failure into a
/// "write <path>: <error>" message for the caller.
fn write_file(path: &Path, content: &str) -> Result<(), String> {
    if let Err(err) = fs::write(path, content) {
        return Err(format!("write {}: {err}", path.display()));
    }
    Ok(())
}
|
||||
129
crates/quicproquo-gen/src/generators/rpc.rs
Normal file
129
crates/quicproquo-gen/src/generators/rpc.rs
Normal file
@@ -0,0 +1,129 @@
|
||||
/// Print a step-by-step checklist for adding a new Cap'n Proto RPC method.
///
/// `name` is expected in camelCase and is used verbatim for the schema entry
/// and generated Params/Results type names; a snake_case form is derived for
/// Rust handler names and file paths. Nothing is written to disk — all
/// output goes to stdout. Always returns `Ok(())`.
pub fn generate(name: &str) -> Result<(), String> {
    let snake = to_snake_case(name);
    let camel = name.to_string();
    println!("=== Adding RPC method: {camel} ===");
    println!();
    println!("Follow these steps to add a new `{camel}` RPC method.");
    println!("Each step shows the file and the code to add.");
    println!();

    // Step 1: Schema
    println!("--- Step 1: Cap'n Proto schema ---");
    println!("File: schemas/node.capnp");
    println!();
    println!("Add to the `interface NodeService` block:");
    println!();
    println!(
        r#"  {camel} @N (auth :AuthContext, <your params here>) -> (<your results here>);
"#,
    );
    println!("  (Replace @N with the next ordinal number in the interface.)");
    println!();
    println!("Then rebuild the proto crate:");
    println!("  cargo build -p quicproquo-proto");
    println!();

    // Step 2: Handler module — a full file template. The raw string goes
    // through println!'s formatter: `{{`/`}}` print literal braces while
    // {snake}/{camel} interpolate.
    println!("--- Step 2: Handler module ---");
    println!("File: crates/quicproquo-server/src/node_service/{snake}.rs");
    println!();
    println!(
        r#"use capnp::capability::Promise;
use quicproquo_proto::node_capnp::node_service;

use crate::auth::{{coded_error, validate_auth_context}};
use crate::error_codes::*;

use super::NodeServiceImpl;

impl NodeServiceImpl {{
    pub fn handle_{snake}(
        &mut self,
        params: node_service::{camel}Params,
        mut results: node_service::{camel}Results,
    ) -> Promise<(), capnp::Error> {{
        let p = match params.get() {{
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        }};
        let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {{
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        }};

        // TODO: implement your logic here

        Promise::ok(())
    }}
}}
"#,
    );

    // Step 3: Wire into mod.rs
    println!("--- Step 3: Register in mod.rs ---");
    println!("File: crates/quicproquo-server/src/node_service/mod.rs");
    println!();
    println!("Add to the module declarations at the top:");
    println!("  mod {snake};");
    println!();
    println!("Add to the `impl node_service::Server for NodeServiceImpl` block:");
    println!();
    println!(
        r#"    fn {snake}(
        &mut self,
        params: node_service::{camel}Params,
        results: node_service::{camel}Results,
    ) -> capnp::capability::Promise<(), capnp::Error> {{
        self.handle_{snake}(params, results)
    }}
"#,
    );

    // Step 4: Storage (if needed)
    println!("--- Step 4: Storage trait (if needed) ---");
    println!("File: crates/quicproquo-server/src/storage.rs");
    println!();
    println!("If your RPC method needs persistent storage, add a method to the Store trait:");
    println!();
    println!(
        r#"    fn {snake}(&self, /* params */) -> Result</* return */, StorageError>;
"#,
    );
    println!("Then implement it in:");
    println!("  - crates/quicproquo-server/src/sql_store.rs (SQLite backend)");
    println!("  - crates/quicproquo-server/src/storage.rs (FileBackedStore)");
    println!();

    // Step 5: Hook (if needed)
    println!("--- Step 5: Hook event (optional) ---");
    println!("If you want plugins to observe this RPC, run:");
    println!("  qpq-gen hook {snake}");
    println!();

    // Step 6: Verify
    println!("--- Step 6: Verify ---");
    println!("  cargo build -p quicproquo-server");
    println!("  cargo test -p quicproquo-server");
    println!();

    // Summary
    println!("=== Files to create/modify ===");
    println!("  [modify] schemas/node.capnp");
    println!("  [create] crates/quicproquo-server/src/node_service/{snake}.rs");
    println!("  [modify] crates/quicproquo-server/src/node_service/mod.rs");
    println!("  [modify] crates/quicproquo-server/src/storage.rs (if needed)");
    println!("  [modify] crates/quicproquo-server/src/sql_store.rs (if needed)");

    Ok(())
}
|
||||
|
||||
/// Convert a camelCase RPC method name to snake_case.
///
/// An underscore is inserted at each word boundary: before an uppercase
/// letter that follows a lowercase letter or digit ("listChannels" ->
/// "list_channels"), and before the capital that ends an acronym run and
/// starts a new word ("fetchHTTPStatus" -> "fetch_http_status" rather than
/// the previous behavior of "fetch_h_t_t_p_status"). Plain camelCase input
/// produces the same output as before; only acronym runs differ.
fn to_snake_case(s: &str) -> String {
    let chars: Vec<char> = s.chars().collect();
    let mut result = String::with_capacity(s.len() + 4);
    for (i, &ch) in chars.iter().enumerate() {
        if ch.is_uppercase() && i > 0 {
            // Boundary after a completed lowercase/digit word: "listC…".
            let after_word = chars[i - 1].is_lowercase() || chars[i - 1].is_ascii_digit();
            // End of an acronym run: previous char is uppercase and the next
            // is lowercase, e.g. the "S" in "HTTPStatus".
            let acronym_end = chars[i - 1].is_uppercase()
                && chars.get(i + 1).map_or(false, |c| c.is_lowercase());
            if after_word || acronym_end {
                result.push('_');
            }
        }
        result.push(ch.to_ascii_lowercase());
    }
    result
}
|
||||
55
crates/quicproquo-gen/src/main.rs
Normal file
55
crates/quicproquo-gen/src/main.rs
Normal file
@@ -0,0 +1,55 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use std::path::PathBuf;
|
||||
|
||||
mod generators;
|
||||
|
||||
// Top-level CLI for qpq-gen, parsed via clap's derive API. The single
// required subcommand selects which generator runs. Plain `//` comments are
// used here on purpose: `///` doc comments on clap-derived items become
// user-visible help text.
#[derive(Parser)]
#[command(name = "qpq-gen", about = "Code generators for quicproquo")]
struct Cli {
    // Which generator to run (plugin / bot / rpc / hook).
    #[command(subcommand)]
    command: Command,
}
|
||||
|
||||
// All qpq-gen subcommands. `Plugin` and `Bot` scaffold whole projects on
// disk (with an optional output directory); `Rpc` and `Hook` print
// step-by-step instructions instead of writing files. The `///` comments on
// variants and fields are clap help text — keep them user-facing.
#[derive(Subcommand)]
enum Command {
    /// Scaffold a new server plugin (dynamic .so/.dylib)
    Plugin {
        /// Plugin name (e.g. "rate-limiter", "audit-log")
        name: String,
        /// Output directory (default: current directory)
        #[arg(short, long, default_value = ".")]
        output: PathBuf,
    },
    /// Scaffold a new bot project using the Bot SDK
    Bot {
        /// Bot name (e.g. "echo-bot", "moderation-bot")
        name: String,
        /// Output directory (default: current directory)
        #[arg(short, long, default_value = ".")]
        output: PathBuf,
    },
    /// Show instructions for adding a new Cap'n Proto RPC method
    Rpc {
        /// RPC method name in camelCase (e.g. "listChannels")
        name: String,
    },
    /// Show instructions for adding a new server hook event
    Hook {
        /// Hook event name in snake_case (e.g. "message_deleted")
        name: String,
    },
}
|
||||
|
||||
fn main() {
|
||||
let cli = Cli::parse();
|
||||
let result = match cli.command {
|
||||
Command::Plugin { name, output } => generators::plugin::generate(&name, &output),
|
||||
Command::Bot { name, output } => generators::bot::generate(&name, &output),
|
||||
Command::Rpc { name } => generators::rpc::generate(&name),
|
||||
Command::Hook { name } => generators::hook::generate(&name),
|
||||
};
|
||||
if let Err(e) = result {
|
||||
eprintln!("error: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
@@ -1,18 +1,18 @@
|
||||
[package]
|
||||
name = "quicnprotochat-gui"
|
||||
name = "quicproquo-gui"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Native GUI for quicnprotochat (Tauri 2)."
|
||||
description = "Native GUI for quicproquo (Tauri 2)."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat-gui"
|
||||
name = "qpq-gui"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-client = { path = "../quicnprotochat-client" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
quicproquo-core = { path = "../quicproquo-core" }
|
||||
quicproquo-client = { path = "../quicproquo-client" }
|
||||
quicproquo-proto = { path = "../quicproquo-proto" }
|
||||
tauri = { version = "2", features = [] }
|
||||
tokio = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user