Compare commits
6 Commits
feat/m1-no
...
cursor/clo
| Author | SHA1 | Date | |
|---|---|---|---|
| 41c57a1181 | |||
| 6b8b61c6ae | |||
| b5b361e2ff | |||
| 96f4128b32 | |||
| 230205a152 | |||
| 00b0aa92a1 |
1
.cursor/.gitignore
vendored
Normal file
1
.cursor/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
plans/
|
||||||
15
.github/CODEOWNERS
vendored
Normal file
15
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Code owners for quicnprotochat. PRs require review from owners.
|
||||||
|
# See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||||
|
# Replace 'maintainers' with your GitHub user/team handle.
|
||||||
|
|
||||||
|
# Default owners for everything in the repo
|
||||||
|
* @maintainers
|
||||||
|
|
||||||
|
# Crate-specific (uncomment and add handles when you have designated owners)
|
||||||
|
# /crates/quicnprotochat-core/ @owner1
|
||||||
|
# /crates/quicnprotochat-proto/ @owner1
|
||||||
|
# /crates/quicnprotochat-server/ @owner1
|
||||||
|
# /crates/quicnprotochat-client/ @owner1
|
||||||
|
# /crates/quicnprotochat-p2p/ @owner1
|
||||||
|
# /schemas/ @owner1
|
||||||
|
# /docs/ @owner1
|
||||||
9
.github/INSTRUCTIONS.md
vendored
Normal file
9
.github/INSTRUCTIONS.md
vendored
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# Internal Engineering Guidelines
|
||||||
|
|
||||||
|
## Rust file sizing and layout
|
||||||
|
- Soft cap: keep Rust source files at or below ~400 lines; if a change would exceed that, split into modules first.
|
||||||
|
- Hard cap: avoid exceeding 650 lines in any Rust file; refactor before merging (main.rs should stay <350 lines).
|
||||||
|
- Single-responsibility: group code by concern (config, TLS/setup, auth/session, storage adapters, RPC handlers, CLI parsing) instead of piling into one file.
|
||||||
|
- Structure new features as small modules wired from the entrypoint rather than expanding existing large files.
|
||||||
|
- Co-locate unit tests with their module; keep integration tests in `crates/*/tests` with focused scopes.
|
||||||
|
- Prefer descriptive module names and re-exports over deep `mod` trees that hide logic in `main.rs`.
|
||||||
74
.github/workflows/ci.yml
vendored
Normal file
74
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
name: CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [main, master]
|
||||||
|
pull_request:
|
||||||
|
branches: [main, master]
|
||||||
|
|
||||||
|
env:
|
||||||
|
CARGO_TERM_COLOR: always
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
name: Test
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Install Rust
|
||||||
|
uses: dtolnay/rust-action@stable
|
||||||
|
with:
|
||||||
|
components: clippy, rustfmt
|
||||||
|
|
||||||
|
- name: Install capnp
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||||
|
|
||||||
|
- name: Cache cargo
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cargo/registry
|
||||||
|
~/.cargo/git
|
||||||
|
target
|
||||||
|
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-cargo-
|
||||||
|
|
||||||
|
- name: Check format
|
||||||
|
run: cargo fmt --all -- --check
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
run: cargo build --workspace
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
run: cargo test --workspace
|
||||||
|
|
||||||
|
- name: Clippy
|
||||||
|
run: cargo clippy --workspace --all-targets --
|
||||||
|
|
||||||
|
deny:
|
||||||
|
name: cargo-deny
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Install cargo-deny
|
||||||
|
run: cargo install cargo-deny --locked
|
||||||
|
|
||||||
|
- name: Check deny
|
||||||
|
run: cargo deny check
|
||||||
|
|
||||||
|
audit:
|
||||||
|
name: cargo-audit
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Install Rust
|
||||||
|
uses: dtolnay/rust-action@stable
|
||||||
|
|
||||||
|
- name: Run audit
|
||||||
|
run: |
|
||||||
|
cargo install cargo-audit --locked
|
||||||
|
cargo audit
|
||||||
16
.github/workflows/size-lint.yml
vendored
Normal file
16
.github/workflows/size-lint.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
name: rust-file-size-lint
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
push:
|
||||||
|
branches: [ main ]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check-rust-file-sizes:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Run rust file-size guardrail
|
||||||
|
run: bash scripts/check_rust_file_sizes.sh
|
||||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -3,3 +3,8 @@
|
|||||||
.vscode/
|
.vscode/
|
||||||
gitea-mcp.json
|
gitea-mcp.json
|
||||||
docs/book/
|
docs/book/
|
||||||
|
|
||||||
|
# Server/client runtime data — do not commit certs, keys, or DBs
|
||||||
|
data/
|
||||||
|
*.der
|
||||||
|
quicnprotochat-server.toml
|
||||||
|
|||||||
3006
Cargo.lock
generated
3006
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -6,6 +6,7 @@ members = [
|
|||||||
"crates/quicnprotochat-server",
|
"crates/quicnprotochat-server",
|
||||||
"crates/quicnprotochat-client",
|
"crates/quicnprotochat-client",
|
||||||
"crates/quicnprotochat-p2p",
|
"crates/quicnprotochat-p2p",
|
||||||
|
"crates/quicnprotochat-gui",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Shared dependency versions — bump here to affect the whole workspace.
|
# Shared dependency versions — bump here to affect the whole workspace.
|
||||||
@@ -53,6 +54,7 @@ rusqlite = { version = "0.31", features = ["bundled-sqlcipher"] }
|
|||||||
|
|
||||||
# ── Server utilities ──────────────────────────────────────────────────────────
|
# ── Server utilities ──────────────────────────────────────────────────────────
|
||||||
dashmap = { version = "5" }
|
dashmap = { version = "5" }
|
||||||
|
governor = { version = "0.6" }
|
||||||
tracing = { version = "0.1" }
|
tracing = { version = "0.1" }
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||||
|
|
||||||
@@ -62,6 +64,8 @@ thiserror = { version = "1" }
|
|||||||
|
|
||||||
# ── CLI ───────────────────────────────────────────────────────────────────────
|
# ── CLI ───────────────────────────────────────────────────────────────────────
|
||||||
clap = { version = "4", features = ["derive", "env"] }
|
clap = { version = "4", features = ["derive", "env"] }
|
||||||
|
clap_complete = { version = "4" }
|
||||||
|
indicatif = { version = "0.17" }
|
||||||
|
|
||||||
# ── Build-time ────────────────────────────────────────────────────────────────
|
# ── Build-time ────────────────────────────────────────────────────────────────
|
||||||
capnpc = { version = "0.19" }
|
capnpc = { version = "0.19" }
|
||||||
|
|||||||
22
README.md
22
README.md
@@ -45,7 +45,7 @@ mdbook serve docs
|
|||||||
### Highlights
|
### Highlights
|
||||||
|
|
||||||
- **[Architecture Overview](docs/src/architecture/overview.md)** — Two-service model, dual-key design, crate layout
|
- **[Architecture Overview](docs/src/architecture/overview.md)** — Two-service model, dual-key design, crate layout
|
||||||
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS, Noise_XX, Cap'n Proto, MLS, Hybrid KEM
|
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS 1.3, Cap'n Proto, MLS, Hybrid KEM
|
||||||
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — Forward secrecy, post-compromise security, PQ readiness, threat model
|
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — Forward secrecy, post-compromise security, PQ readiness, threat model
|
||||||
- **[Design Rationale](docs/src/design-rationale/overview.md)** — Why MLS over Signal/Matrix, ADRs for all key decisions
|
- **[Design Rationale](docs/src/design-rationale/overview.md)** — Why MLS over Signal/Matrix, ADRs for all key decisions
|
||||||
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — Annotated Cap'n Proto schemas
|
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — Annotated Cap'n Proto schemas
|
||||||
@@ -61,6 +61,9 @@ mdbook serve docs
|
|||||||
brew install capnp # macOS
|
brew install capnp # macOS
|
||||||
# apt-get install capnproto # Debian/Ubuntu
|
# apt-get install capnproto # Debian/Ubuntu
|
||||||
|
|
||||||
|
# GUI prerequisites (Linux only) — WebKitGTK + GTK3 for Tauri 2
|
||||||
|
# sudo apt install -y libwebkit2gtk-4.1-dev libgtk-3-dev libglib2.0-dev libssl-dev libayatana-appindicator3-dev librsvg2-dev patchelf
|
||||||
|
|
||||||
# Build and test
|
# Build and test
|
||||||
cargo build --workspace
|
cargo build --workspace
|
||||||
cargo test --workspace
|
cargo test --workspace
|
||||||
@@ -81,9 +84,14 @@ db_key = ""
|
|||||||
EOF
|
EOF
|
||||||
cargo run -p quicnprotochat-server -- --config quicnprotochat-server.toml
|
cargo run -p quicnprotochat-server -- --config quicnprotochat-server.toml
|
||||||
|
|
||||||
# Run the Alice/Bob demo
|
# Run the two-party demo
|
||||||
cargo run -p quicnprotochat-client -- demo-group \
|
cargo run -p quicnprotochat-client -- demo-group \
|
||||||
--server 127.0.0.1:7000 --ds-server 127.0.0.1:7000
|
--server 127.0.0.1:7000
|
||||||
|
|
||||||
|
# Interactive 1:1 chat (after creating a group and inviting a peer)
|
||||||
|
# Terminal 1: quicnprotochat chat --peer-key <other_identity_hex>
|
||||||
|
# Terminal 2: quicnprotochat chat --peer-key <first_identity_hex>
|
||||||
|
# Type messages and press Enter; incoming messages appear as [peer] <msg>. Ctrl+D to exit.
|
||||||
```
|
```
|
||||||
|
|
||||||
See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) for a step-by-step guide.
|
See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) for a step-by-step guide.
|
||||||
@@ -97,10 +105,10 @@ See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) fo
|
|||||||
| M1 | QUIC/TLS transport | Done | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
|
| M1 | QUIC/TLS transport | Done | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
|
||||||
| M2 | Authentication Service | Done | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
| M2 | Authentication Service | Done | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
||||||
| M3 | Delivery Service + MLS groups | Done | DS relay, `GroupMember` create/join/add/send/recv |
|
| M3 | Delivery Service + MLS groups | Done | DS relay, `GroupMember` create/join/add/send/recv |
|
||||||
| M4 | Group CLI subcommands | Next | Persistent CLI (`create-group`, `invite`, `join`, `send`, `recv`) |
|
| M4 | Group CLI subcommands | Done | Persistent CLI (`create-group`, `invite`, `join`, `send`, `recv`), OPAQUE login |
|
||||||
| M5 | Multi-party groups | Planned | N > 2 members, Commit fan-out, Proposal handling |
|
| M5 | Multi-party groups | Done | N > 2 members, Commit fan-out, send --all, epoch sync |
|
||||||
| M6 | Persistence | Planned | SQLite key store, durable group state |
|
| M6 | Persistence | Done | SQLite/SQLCipher, migrations, durable server + client state |
|
||||||
| M7 | Post-quantum | Planned | PQ hybrid for MLS/HPKE (X25519 + ML-KEM-768) |
|
| M7 | Post-quantum | Next | PQ hybrid for MLS/HPKE (X25519 + ML-KEM-768) |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -48,9 +48,13 @@ tracing-subscriber = { workspace = true }
|
|||||||
|
|
||||||
# CLI
|
# CLI
|
||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
|
clap_complete = { workspace = true }
|
||||||
|
indicatif = { workspace = true }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
dashmap = { workspace = true }
|
dashmap = { workspace = true }
|
||||||
assert_cmd = "2"
|
assert_cmd = "2"
|
||||||
tempfile = "3"
|
tempfile = "3"
|
||||||
portpicker = "0.1"
|
portpicker = "0.1"
|
||||||
|
rand = "0.8"
|
||||||
|
hex = "0.4"
|
||||||
|
|||||||
1204
crates/quicnprotochat-client/src/client/commands.rs
Normal file
1204
crates/quicnprotochat-client/src/client/commands.rs
Normal file
File diff suppressed because it is too large
Load Diff
13
crates/quicnprotochat-client/src/client/hex.rs
Normal file
13
crates/quicnprotochat-client/src/client/hex.rs
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
|
||||||
|
bytes.as_ref().iter().map(|b| format!("{b:02x}")).collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
|
||||||
|
if s.len() % 2 != 0 {
|
||||||
|
return Err("odd-length hex string");
|
||||||
|
}
|
||||||
|
(0..s.len())
|
||||||
|
.step_by(2)
|
||||||
|
.map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(|_| "invalid hex character"))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
9
crates/quicnprotochat-client/src/client/mod.rs
Normal file
9
crates/quicnprotochat-client/src/client/mod.rs
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
pub mod commands;
|
||||||
|
pub mod hex;
|
||||||
|
pub mod retry;
|
||||||
|
pub mod rpc;
|
||||||
|
pub mod state;
|
||||||
|
|
||||||
|
pub use commands::*;
|
||||||
|
pub use rpc::{connect_node, enqueue, fetch_all, fetch_hybrid_key, fetch_key_package, fetch_wait, upload_hybrid_key, upload_key_package};
|
||||||
|
pub use state::{decode_identity_key, load_existing_state, load_or_init_state, save_state};
|
||||||
102
crates/quicnprotochat-client/src/client/retry.rs
Normal file
102
crates/quicnprotochat-client/src/client/retry.rs
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
//! Retry with exponential backoff for transient RPC failures.
|
||||||
|
//!
|
||||||
|
//! Used for `enqueue`, `fetch_all`, and `fetch_wait`. Auth and invalid-param
|
||||||
|
//! errors are not retried. Configure via `QUICNPROTOCHAT_MAX_RETRIES` and
|
||||||
|
//! `QUICNPROTOCHAT_BASE_DELAY_MS` (optional).
|
||||||
|
|
||||||
|
use std::future::Future;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use rand::Rng;
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
/// Default maximum number of retry attempts (including the first try).
|
||||||
|
pub const DEFAULT_MAX_RETRIES: u32 = 3;
|
||||||
|
/// Default base delay in milliseconds for exponential backoff.
|
||||||
|
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;
|
||||||
|
|
||||||
|
/// Read max retries from env or use default.
|
||||||
|
pub fn max_retries_from_env() -> u32 {
|
||||||
|
std::env::var("QUICNPROTOCHAT_MAX_RETRIES")
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| s.parse().ok())
|
||||||
|
.unwrap_or(DEFAULT_MAX_RETRIES)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read base delay (ms) from env or use default.
|
||||||
|
pub fn base_delay_ms_from_env() -> u64 {
|
||||||
|
std::env::var("QUICNPROTOCHAT_BASE_DELAY_MS")
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| s.parse().ok())
|
||||||
|
.unwrap_or(DEFAULT_BASE_DELAY_MS)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
|
||||||
|
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
|
||||||
|
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
|
||||||
|
pub async fn retry_async<F, Fut, T, E, P>(
|
||||||
|
op: F,
|
||||||
|
max_retries: u32,
|
||||||
|
base_delay_ms: u64,
|
||||||
|
is_retriable: P,
|
||||||
|
) -> Result<T, E>
|
||||||
|
where
|
||||||
|
F: Fn() -> Fut,
|
||||||
|
Fut: Future<Output = Result<T, E>>,
|
||||||
|
P: Fn(&E) -> bool,
|
||||||
|
{
|
||||||
|
let mut last_err = None;
|
||||||
|
for attempt in 0..max_retries {
|
||||||
|
match op().await {
|
||||||
|
Ok(t) => return Ok(t),
|
||||||
|
Err(e) => {
|
||||||
|
last_err = Some(e);
|
||||||
|
let err = last_err.as_ref().expect("last_err just set in Err branch");
|
||||||
|
if !is_retriable(err) || attempt + 1 >= max_retries {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
|
||||||
|
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
|
||||||
|
let total_ms = delay_ms + jitter_ms;
|
||||||
|
warn!(
|
||||||
|
attempt = attempt + 1,
|
||||||
|
max_retries,
|
||||||
|
delay_ms = total_ms,
|
||||||
|
"RPC failed, retrying after backoff"
|
||||||
|
);
|
||||||
|
tokio::time::sleep(Duration::from_millis(total_ms)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Loop runs at least once (max_retries >= 1) and we only break after storing an Err, so this is always Some.
|
||||||
|
Err(last_err.expect("retry_async: last_err is Some when breaking after Err"))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
|
||||||
|
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
|
||||||
|
/// When in doubt, returns `true` (retry).
|
||||||
|
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
|
||||||
|
let s = format!("{:#}", err);
|
||||||
|
let s_lower = s.to_lowercase();
|
||||||
|
// Do not retry: auth / permission
|
||||||
|
if s_lower.contains("unauthorized")
|
||||||
|
|| s_lower.contains("auth failed")
|
||||||
|
|| s_lower.contains("access denied")
|
||||||
|
|| s_lower.contains("401")
|
||||||
|
|| s_lower.contains("forbidden")
|
||||||
|
|| s_lower.contains("403")
|
||||||
|
|| s_lower.contains("token")
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Do not retry: bad request / invalid params
|
||||||
|
if s_lower.contains("bad request")
|
||||||
|
|| s_lower.contains("400")
|
||||||
|
|| s_lower.contains("invalid param")
|
||||||
|
|| s_lower.contains("fingerprint mismatch")
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Retry: network, timeout, connection, server error, or anything else
|
||||||
|
true
|
||||||
|
}
|
||||||
369
crates/quicnprotochat-client/src/client/rpc.rs
Normal file
369
crates/quicnprotochat-client/src/client/rpc.rs
Normal file
@@ -0,0 +1,369 @@
|
|||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use quinn::{ClientConfig, Endpoint};
|
||||||
|
use quinn_proto::crypto::rustls::QuicClientConfig;
|
||||||
|
use rustls::pki_types::CertificateDer;
|
||||||
|
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||||
|
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||||
|
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||||
|
|
||||||
|
use quicnprotochat_core::HybridPublicKey;
|
||||||
|
use quicnprotochat_proto::node_capnp::{auth, node_service};
|
||||||
|
|
||||||
|
use crate::AUTH_CONTEXT;
|
||||||
|
|
||||||
|
use super::retry::{
|
||||||
|
anyhow_is_retriable, base_delay_ms_from_env, max_retries_from_env, retry_async,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Establish a QUIC/TLS connection and return a `NodeService` client.
|
||||||
|
///
|
||||||
|
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
|
||||||
|
pub async fn connect_node(
|
||||||
|
server: &str,
|
||||||
|
ca_cert: &Path,
|
||||||
|
server_name: &str,
|
||||||
|
) -> anyhow::Result<node_service::Client> {
|
||||||
|
let addr: SocketAddr = server
|
||||||
|
.parse()
|
||||||
|
.with_context(|| format!("server must be host:port, got {server}"))?;
|
||||||
|
|
||||||
|
let cert_bytes = std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
|
||||||
|
let mut roots = RootCertStore::empty();
|
||||||
|
roots
|
||||||
|
.add(CertificateDer::from(cert_bytes))
|
||||||
|
.context("add root cert")?;
|
||||||
|
|
||||||
|
let mut tls = RustlsClientConfig::builder()
|
||||||
|
.with_root_certificates(roots)
|
||||||
|
.with_no_client_auth();
|
||||||
|
tls.alpn_protocols = vec![b"capnp".to_vec()];
|
||||||
|
|
||||||
|
let crypto = QuicClientConfig::try_from(tls)
|
||||||
|
.map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;
|
||||||
|
|
||||||
|
let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
|
||||||
|
let mut endpoint = Endpoint::client(bind_addr)?;
|
||||||
|
endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));
|
||||||
|
|
||||||
|
let connection = endpoint
|
||||||
|
.connect(addr, server_name)
|
||||||
|
.context("quic connect init")?
|
||||||
|
.await
|
||||||
|
.context("quic connect failed")?;
|
||||||
|
|
||||||
|
let (send, recv) = connection.open_bi().await.context("open bi stream")?;
|
||||||
|
|
||||||
|
let network = twoparty::VatNetwork::new(
|
||||||
|
recv.compat(),
|
||||||
|
send.compat_write(),
|
||||||
|
Side::Client,
|
||||||
|
Default::default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut rpc_system = RpcSystem::new(Box::new(network), None);
|
||||||
|
let client: node_service::Client = rpc_system.bootstrap(Side::Server);
|
||||||
|
|
||||||
|
tokio::task::spawn_local(rpc_system);
|
||||||
|
|
||||||
|
Ok(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
|
||||||
|
let ctx = AUTH_CONTEXT.get().ok_or_else(|| {
|
||||||
|
anyhow::anyhow!("init_auth must be called with a non-empty token before RPCs")
|
||||||
|
})?;
|
||||||
|
auth.set_version(ctx.version);
|
||||||
|
auth.set_access_token(&ctx.access_token);
|
||||||
|
auth.set_device_id(&ctx.device_id);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
|
||||||
|
pub async fn upload_key_package(
|
||||||
|
client: &node_service::Client,
|
||||||
|
identity_key: &[u8],
|
||||||
|
package: &[u8],
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let mut req = client.upload_key_package_request();
|
||||||
|
{
|
||||||
|
let mut p = req.get();
|
||||||
|
p.set_identity_key(identity_key);
|
||||||
|
p.set_package(package);
|
||||||
|
let mut auth = p.reborrow().init_auth();
|
||||||
|
set_auth(&mut auth)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp = req
|
||||||
|
.send()
|
||||||
|
.promise
|
||||||
|
.await
|
||||||
|
.context("upload_key_package RPC failed")?;
|
||||||
|
|
||||||
|
let server_fp = resp
|
||||||
|
.get()
|
||||||
|
.context("upload_key_package: bad response")?
|
||||||
|
.get_fingerprint()
|
||||||
|
.context("upload_key_package: missing fingerprint")?
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
let local_fp = super::state::sha256(package);
|
||||||
|
anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch a KeyPackage for `identity_key` from the AS.
|
||||||
|
pub async fn fetch_key_package(
|
||||||
|
client: &node_service::Client,
|
||||||
|
identity_key: &[u8],
|
||||||
|
) -> anyhow::Result<Vec<u8>> {
|
||||||
|
let mut req = client.fetch_key_package_request();
|
||||||
|
{
|
||||||
|
let mut p = req.get();
|
||||||
|
p.set_identity_key(identity_key);
|
||||||
|
let mut auth = p.reborrow().init_auth();
|
||||||
|
set_auth(&mut auth)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp = req
|
||||||
|
.send()
|
||||||
|
.promise
|
||||||
|
.await
|
||||||
|
.context("fetch_key_package RPC failed")?;
|
||||||
|
|
||||||
|
let pkg = resp
|
||||||
|
.get()
|
||||||
|
.context("fetch_key_package: bad response")?
|
||||||
|
.get_package()
|
||||||
|
.context("fetch_key_package: missing package field")?
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
Ok(pkg)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Enqueue an opaque payload to the DS for `recipient_key`.
|
||||||
|
/// Returns the per-inbox sequence number assigned by the server.
|
||||||
|
/// Retries on transient failures with exponential backoff.
|
||||||
|
pub async fn enqueue(
|
||||||
|
client: &node_service::Client,
|
||||||
|
recipient_key: &[u8],
|
||||||
|
payload: &[u8],
|
||||||
|
) -> anyhow::Result<u64> {
|
||||||
|
let client = client.clone();
|
||||||
|
let recipient_key = recipient_key.to_vec();
|
||||||
|
let payload = payload.to_vec();
|
||||||
|
retry_async(
|
||||||
|
|| {
|
||||||
|
let client = client.clone();
|
||||||
|
let recipient_key = recipient_key.clone();
|
||||||
|
let payload = payload.clone();
|
||||||
|
async move {
|
||||||
|
let mut req = client.enqueue_request();
|
||||||
|
{
|
||||||
|
let mut p = req.get();
|
||||||
|
p.set_recipient_key(&recipient_key);
|
||||||
|
p.set_payload(&payload);
|
||||||
|
p.set_channel_id(&[]);
|
||||||
|
p.set_version(1);
|
||||||
|
let mut auth = p.reborrow().init_auth();
|
||||||
|
set_auth(&mut auth)?;
|
||||||
|
}
|
||||||
|
let resp = req.send().promise.await.context("enqueue RPC failed")?;
|
||||||
|
let seq = resp.get().context("enqueue: bad response")?.get_seq();
|
||||||
|
Ok(seq)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
max_retries_from_env(),
|
||||||
|
base_delay_ms_from_env(),
|
||||||
|
anyhow_is_retriable,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch and drain all payloads for `recipient_key`.
|
||||||
|
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
|
||||||
|
/// Retries on transient failures with exponential backoff.
|
||||||
|
pub async fn fetch_all(
|
||||||
|
client: &node_service::Client,
|
||||||
|
recipient_key: &[u8],
|
||||||
|
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
|
||||||
|
let client = client.clone();
|
||||||
|
let recipient_key = recipient_key.to_vec();
|
||||||
|
retry_async(
|
||||||
|
|| {
|
||||||
|
let client = client.clone();
|
||||||
|
let recipient_key = recipient_key.clone();
|
||||||
|
async move {
|
||||||
|
let mut req = client.fetch_request();
|
||||||
|
{
|
||||||
|
let mut p = req.get();
|
||||||
|
p.set_recipient_key(&recipient_key);
|
||||||
|
p.set_channel_id(&[]);
|
||||||
|
p.set_version(1);
|
||||||
|
p.set_limit(0); // fetch all
|
||||||
|
let mut auth = p.reborrow().init_auth();
|
||||||
|
set_auth(&mut auth)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp = req.send().promise.await.context("fetch RPC failed")?;
|
||||||
|
|
||||||
|
let list = resp
|
||||||
|
.get()
|
||||||
|
.context("fetch: bad response")?
|
||||||
|
.get_payloads()
|
||||||
|
.context("fetch: missing payloads")?;
|
||||||
|
|
||||||
|
let mut payloads = Vec::with_capacity(list.len() as usize);
|
||||||
|
for i in 0..list.len() {
|
||||||
|
let entry = list.get(i);
|
||||||
|
let seq = entry.get_seq();
|
||||||
|
let data = entry
|
||||||
|
.get_data()
|
||||||
|
.context("fetch: envelope data read failed")?
|
||||||
|
.to_vec();
|
||||||
|
payloads.push((seq, data));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(payloads)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
max_retries_from_env(),
|
||||||
|
base_delay_ms_from_env(),
|
||||||
|
anyhow_is_retriable,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Long-poll for payloads with optional timeout (ms).
|
||||||
|
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
|
||||||
|
/// Retries on transient failures with exponential backoff.
|
||||||
|
pub async fn fetch_wait(
|
||||||
|
client: &node_service::Client,
|
||||||
|
recipient_key: &[u8],
|
||||||
|
timeout_ms: u64,
|
||||||
|
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
|
||||||
|
let client = client.clone();
|
||||||
|
let recipient_key = recipient_key.to_vec();
|
||||||
|
retry_async(
|
||||||
|
|| {
|
||||||
|
let client = client.clone();
|
||||||
|
let recipient_key = recipient_key.clone();
|
||||||
|
let timeout_ms = timeout_ms;
|
||||||
|
async move {
|
||||||
|
let mut req = client.fetch_wait_request();
|
||||||
|
{
|
||||||
|
let mut p = req.get();
|
||||||
|
p.set_recipient_key(&recipient_key);
|
||||||
|
p.set_timeout_ms(timeout_ms);
|
||||||
|
p.set_channel_id(&[]);
|
||||||
|
p.set_version(1);
|
||||||
|
p.set_limit(0); // fetch all
|
||||||
|
let mut auth = p.reborrow().init_auth();
|
||||||
|
set_auth(&mut auth)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp = req.send().promise.await.context("fetch_wait RPC failed")?;
|
||||||
|
|
||||||
|
let list = resp
|
||||||
|
.get()
|
||||||
|
.context("fetch_wait: bad response")?
|
||||||
|
.get_payloads()
|
||||||
|
.context("fetch_wait: missing payloads")?;
|
||||||
|
|
||||||
|
let mut payloads = Vec::with_capacity(list.len() as usize);
|
||||||
|
for i in 0..list.len() {
|
||||||
|
let entry = list.get(i);
|
||||||
|
let seq = entry.get_seq();
|
||||||
|
let data = entry
|
||||||
|
.get_data()
|
||||||
|
.context("fetch_wait: envelope data read failed")?
|
||||||
|
.to_vec();
|
||||||
|
payloads.push((seq, data));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(payloads)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
max_retries_from_env(),
|
||||||
|
base_delay_ms_from_env(),
|
||||||
|
anyhow_is_retriable,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
|
||||||
|
pub async fn upload_hybrid_key(
|
||||||
|
client: &node_service::Client,
|
||||||
|
identity_key: &[u8],
|
||||||
|
hybrid_pk: &HybridPublicKey,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let mut req = client.upload_hybrid_key_request();
|
||||||
|
{
|
||||||
|
let mut p = req.get();
|
||||||
|
p.set_identity_key(identity_key);
|
||||||
|
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
|
||||||
|
let mut auth = p.reborrow().init_auth();
|
||||||
|
set_auth(&mut auth)?;
|
||||||
|
}
|
||||||
|
req.send()
|
||||||
|
.promise
|
||||||
|
.await
|
||||||
|
.context("upload_hybrid_key RPC failed")?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch a peer's hybrid public key from the server.
|
||||||
|
///
|
||||||
|
/// Returns `None` if the peer has not uploaded a hybrid key.
|
||||||
|
pub async fn fetch_hybrid_key(
|
||||||
|
client: &node_service::Client,
|
||||||
|
identity_key: &[u8],
|
||||||
|
) -> anyhow::Result<Option<HybridPublicKey>> {
|
||||||
|
let mut req = client.fetch_hybrid_key_request();
|
||||||
|
{
|
||||||
|
let mut p = req.get();
|
||||||
|
p.set_identity_key(identity_key);
|
||||||
|
let mut auth = p.reborrow().init_auth();
|
||||||
|
set_auth(&mut auth)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp = req
|
||||||
|
.send()
|
||||||
|
.promise
|
||||||
|
.await
|
||||||
|
.context("fetch_hybrid_key RPC failed")?;
|
||||||
|
|
||||||
|
let pk_bytes = resp
|
||||||
|
.get()
|
||||||
|
.context("fetch_hybrid_key: bad response")?
|
||||||
|
.get_hybrid_public_key()
|
||||||
|
.context("fetch_hybrid_key: missing field")?
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
if pk_bytes.is_empty() {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
|
||||||
|
Ok(Some(pk))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||||
|
pub fn try_hybrid_decrypt(
|
||||||
|
hybrid_kp: Option<&quicnprotochat_core::HybridKeypair>,
|
||||||
|
payload: &[u8],
|
||||||
|
) -> anyhow::Result<Vec<u8>> {
|
||||||
|
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||||
|
quicnprotochat_core::hybrid_decrypt(kp, payload).map_err(|e| anyhow::anyhow!("{e}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the current Unix timestamp in milliseconds.
|
||||||
|
pub fn current_timestamp_ms() -> u64 {
|
||||||
|
std::time::SystemTime::now()
|
||||||
|
.duration_since(std::time::UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_millis() as u64
|
||||||
|
}
|
||||||
382
crates/quicnprotochat-client/src/client/state.rs
Normal file
382
crates/quicnprotochat-client/src/client/state.rs
Normal file
@@ -0,0 +1,382 @@
|
|||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use argon2::Argon2;
|
||||||
|
use chacha20poly1305::{
|
||||||
|
aead::{Aead, KeyInit},
|
||||||
|
ChaCha20Poly1305, Key, Nonce,
|
||||||
|
};
|
||||||
|
use rand::RngCore;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use quicnprotochat_core::{
|
||||||
|
CoreError, DiskKeyStore, GroupMember, HybridCryptoProvider, HybridKeypair, HybridKeypairBytes,
|
||||||
|
IdentityKeypair, MlsGroup, StoreCrypto,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Magic bytes for encrypted client state files.
const STATE_MAGIC: &[u8; 4] = b"QPCE";
/// Length in bytes of the random Argon2 salt stored in the QPCE header.
const STATE_SALT_LEN: usize = 16;
/// Length in bytes of the ChaCha20-Poly1305 nonce stored in the QPCE header.
const STATE_NONCE_LEN: usize = 12;
|
||||||
|
|
||||||
|
/// Persisted client state (identity, MLS group, optional PQ key).
///
/// Encoded with bincode by `write_state` / decoded by `load_existing_state`,
/// so the field layout is part of the on-disk format — do not reorder fields.
///
/// **Production note:** When loading state, use the same `use_pq_backend` value that was used when
/// the state was created. Loading PQ state with classical backend (or vice versa) will fail or
/// produce incorrect behavior.
#[derive(Serialize, Deserialize)]
pub struct StoredState {
    // 32-byte seed; the Ed25519 identity keypair is rebuilt from it via
    // `IdentityKeypair::from_seed` (see `into_parts`).
    pub identity_seed: [u8; 32],
    // bincode-serialized `MlsGroup`, if the client has created or joined a group.
    pub group: Option<Vec<u8>>,
    /// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added.
    #[serde(default)]
    pub hybrid_key: Option<HybridKeypairBytes>,
    /// Cached member public keys for group participants.
    #[serde(default)]
    pub member_keys: Vec<Vec<u8>>,
    /// If true, MLS uses post-quantum hybrid KEM (HybridCryptoProvider) for HPKE. M7.
    #[serde(default)]
    pub use_pq_backend: bool,
}
|
||||||
|
|
||||||
|
/// MLS member backend: classical (StoreCrypto) or post-quantum hybrid (HybridCryptoProvider).
///
/// Wraps the two `GroupMember` instantiations behind one concrete type so callers
/// don't have to be generic over the crypto provider.
pub enum MemberBackend {
    // Classical MLS member backed by `StoreCrypto`.
    Classical(GroupMember<StoreCrypto>),
    // Hybrid (X25519 + ML-KEM-768) MLS member backed by `HybridCryptoProvider`. M7.
    Hybrid(GroupMember<HybridCryptoProvider>),
}
|
||||||
|
|
||||||
|
impl MemberBackend {
|
||||||
|
pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.generate_key_package(),
|
||||||
|
MemberBackend::Hybrid(m) => m.generate_key_package(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.create_group(group_id),
|
||||||
|
MemberBackend::Hybrid(m) => m.create_group(group_id),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn add_member(&mut self, key_package_bytes: &[u8]) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.add_member(key_package_bytes),
|
||||||
|
MemberBackend::Hybrid(m) => m.add_member(key_package_bytes),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn join_group(&mut self, welcome: &[u8]) -> Result<(), CoreError> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.join_group(welcome),
|
||||||
|
MemberBackend::Hybrid(m) => m.join_group(welcome),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn send_message(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.send_message(plaintext),
|
||||||
|
MemberBackend::Hybrid(m) => m.send_message(plaintext),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn receive_message(&mut self, bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.receive_message(bytes),
|
||||||
|
MemberBackend::Hybrid(m) => m.receive_message(bytes),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn receive_message_with_sender(
|
||||||
|
&mut self,
|
||||||
|
bytes: &[u8],
|
||||||
|
) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.receive_message_with_sender(bytes),
|
||||||
|
MemberBackend::Hybrid(m) => m.receive_message_with_sender(bytes),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn group_id(&self) -> Option<Vec<u8>> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.group_id(),
|
||||||
|
MemberBackend::Hybrid(m) => m.group_id(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn identity(&self) -> &IdentityKeypair {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.identity(),
|
||||||
|
MemberBackend::Hybrid(m) => m.identity(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn identity_seed(&self) -> [u8; 32] {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.identity_seed(),
|
||||||
|
MemberBackend::Hybrid(m) => m.identity_seed(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn group_ref(&self) -> Option<&MlsGroup> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.group_ref(),
|
||||||
|
MemberBackend::Hybrid(m) => m.group_ref(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn member_identities(&self) -> Vec<Vec<u8>> {
|
||||||
|
match self {
|
||||||
|
MemberBackend::Classical(m) => m.member_identities(),
|
||||||
|
MemberBackend::Hybrid(m) => m.member_identities(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn is_pq(&self) -> bool {
|
||||||
|
matches!(self, MemberBackend::Hybrid(_))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl StoredState {
|
||||||
|
/// Rebuild member and hybrid key from stored state. Uses PQ backend if `use_pq_backend` is true.
|
||||||
|
pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(MemberBackend, Option<HybridKeypair>)> {
|
||||||
|
let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
|
||||||
|
let group = self
|
||||||
|
.group
|
||||||
|
.map(|bytes| bincode::deserialize(&bytes).context("decode group"))
|
||||||
|
.transpose()?;
|
||||||
|
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
|
||||||
|
|
||||||
|
let member = if self.use_pq_backend {
|
||||||
|
MemberBackend::Hybrid(GroupMember::<HybridCryptoProvider>::new_with_state_hybrid(
|
||||||
|
identity, key_store, group,
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
MemberBackend::Classical(GroupMember::new_with_state(identity, key_store, group))
|
||||||
|
};
|
||||||
|
|
||||||
|
let hybrid_kp = self
|
||||||
|
.hybrid_key
|
||||||
|
.map(|bytes| HybridKeypair::from_bytes(&bytes).context("decode hybrid key"))
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
Ok((member, hybrid_kp))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build state from a classical GroupMember (backward compat / tests). Prefer [`from_member_backend`](Self::from_member_backend) in production.
|
||||||
|
pub fn from_parts(
|
||||||
|
member: &GroupMember<StoreCrypto>,
|
||||||
|
hybrid_kp: Option<&HybridKeypair>,
|
||||||
|
) -> anyhow::Result<Self> {
|
||||||
|
let group = member
|
||||||
|
.group_ref()
|
||||||
|
.map(|g| bincode::serialize(g).context("serialize group"))
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
identity_seed: member.identity_seed(),
|
||||||
|
group,
|
||||||
|
hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
|
||||||
|
member_keys: Vec::new(),
|
||||||
|
use_pq_backend: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build state from MemberBackend (classical or PQ).
|
||||||
|
pub fn from_member_backend(
|
||||||
|
member: &MemberBackend,
|
||||||
|
hybrid_kp: Option<&HybridKeypair>,
|
||||||
|
) -> anyhow::Result<Self> {
|
||||||
|
let group = member
|
||||||
|
.group_ref()
|
||||||
|
.map(|g| bincode::serialize(g).context("serialize group"))
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
identity_seed: member.identity_seed(),
|
||||||
|
group,
|
||||||
|
hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
|
||||||
|
member_keys: Vec::new(),
|
||||||
|
use_pq_backend: member.is_pq(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Derive a 32-byte key from a password and salt using Argon2id.
|
||||||
|
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
|
||||||
|
let mut key = [0u8; 32];
|
||||||
|
Argon2::default()
|
||||||
|
.hash_password_into(password.as_bytes(), salt, &mut key)
|
||||||
|
.map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
|
||||||
|
Ok(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Encrypt `plaintext` with the QPCE format: magic(4) | salt(16) | nonce(12) | ciphertext.
|
||||||
|
pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||||
|
let mut salt = [0u8; STATE_SALT_LEN];
|
||||||
|
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||||
|
|
||||||
|
let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
|
||||||
|
rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
|
||||||
|
|
||||||
|
let key = derive_state_key(password, &salt)?;
|
||||||
|
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||||
|
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||||
|
|
||||||
|
let ciphertext = cipher
|
||||||
|
.encrypt(nonce, plaintext)
|
||||||
|
.map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;
|
||||||
|
|
||||||
|
let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
|
||||||
|
out.extend_from_slice(STATE_MAGIC);
|
||||||
|
out.extend_from_slice(&salt);
|
||||||
|
out.extend_from_slice(&nonce_bytes);
|
||||||
|
out.extend_from_slice(&ciphertext);
|
||||||
|
Ok(out)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Decrypt a QPCE-formatted state file.
|
||||||
|
pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||||
|
let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
|
||||||
|
anyhow::ensure!(
|
||||||
|
data.len() > header_len,
|
||||||
|
"encrypted state file too short ({} bytes)",
|
||||||
|
data.len()
|
||||||
|
);
|
||||||
|
|
||||||
|
let salt = &data[4..4 + STATE_SALT_LEN];
|
||||||
|
let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
|
||||||
|
let ciphertext = &data[header_len..];
|
||||||
|
|
||||||
|
let key = derive_state_key(password, salt)?;
|
||||||
|
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||||
|
let nonce = Nonce::from_slice(nonce_bytes);
|
||||||
|
|
||||||
|
let plaintext = cipher
|
||||||
|
.decrypt(nonce, ciphertext)
|
||||||
|
.map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
|
||||||
|
|
||||||
|
Ok(plaintext)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if raw bytes begin with the QPCE magic header.
|
||||||
|
pub fn is_encrypted_state(bytes: &[u8]) -> bool {
|
||||||
|
bytes.len() >= 4 && &bytes[..4] == STATE_MAGIC
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create new state with optional post-quantum MLS backend (M7). When `use_pq_backend` is true,
|
||||||
|
/// new state uses `HybridCryptoProvider` for MLS HPKE (X25519 + ML-KEM-768).
|
||||||
|
pub fn load_or_init_state(
|
||||||
|
path: &Path,
|
||||||
|
password: Option<&str>,
|
||||||
|
use_pq_backend: bool,
|
||||||
|
) -> anyhow::Result<StoredState> {
|
||||||
|
if path.exists() {
|
||||||
|
let mut state = load_existing_state(path, password)?;
|
||||||
|
// Generate hybrid keypair if missing (upgrade from older state).
|
||||||
|
if state.hybrid_key.is_none() {
|
||||||
|
let pb = indicatif::ProgressBar::new_spinner();
|
||||||
|
pb.set_message("Generating post-quantum keypair\u{2026}");
|
||||||
|
pb.enable_steady_tick(std::time::Duration::from_millis(80));
|
||||||
|
state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
|
||||||
|
pb.finish_and_clear();
|
||||||
|
write_state(path, &state, password)?;
|
||||||
|
}
|
||||||
|
return Ok(state);
|
||||||
|
}
|
||||||
|
|
||||||
|
let pb = indicatif::ProgressBar::new_spinner();
|
||||||
|
pb.set_message("Generating post-quantum keypair\u{2026}");
|
||||||
|
pb.enable_steady_tick(std::time::Duration::from_millis(80));
|
||||||
|
let identity = IdentityKeypair::generate();
|
||||||
|
let hybrid_kp = HybridKeypair::generate();
|
||||||
|
pb.finish_and_clear();
|
||||||
|
|
||||||
|
let key_store = DiskKeyStore::persistent(keystore_path(path))?;
|
||||||
|
let member = if use_pq_backend {
|
||||||
|
MemberBackend::Hybrid(GroupMember::<HybridCryptoProvider>::new_with_state_hybrid(
|
||||||
|
Arc::new(identity),
|
||||||
|
key_store,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
MemberBackend::Classical(GroupMember::new_with_state(
|
||||||
|
Arc::new(identity),
|
||||||
|
key_store,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
};
|
||||||
|
let state = StoredState::from_member_backend(&member, Some(&hybrid_kp))?;
|
||||||
|
write_state(path, &state, password)?;
|
||||||
|
Ok(state)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
|
||||||
|
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
|
||||||
|
|
||||||
|
if is_encrypted_state(&bytes) {
|
||||||
|
let pw = password
|
||||||
|
.context("state file is encrypted (QPCE); a password is required to decrypt it")?;
|
||||||
|
let plaintext = decrypt_state(pw, &bytes)?;
|
||||||
|
bincode::deserialize(&plaintext).context("decode encrypted state")
|
||||||
|
} else {
|
||||||
|
bincode::deserialize(&bytes).context("decode state")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn save_state(
|
||||||
|
path: &Path,
|
||||||
|
member: &MemberBackend,
|
||||||
|
hybrid_kp: Option<&HybridKeypair>,
|
||||||
|
password: Option<&str>,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let state = StoredState::from_member_backend(member, hybrid_kp)?;
|
||||||
|
write_state(path, &state, password)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
|
||||||
|
if let Some(parent) = path.parent() {
|
||||||
|
std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
|
||||||
|
}
|
||||||
|
let plaintext = bincode::serialize(state).context("encode state")?;
|
||||||
|
|
||||||
|
let bytes = if let Some(pw) = password {
|
||||||
|
encrypt_state(pw, &plaintext)?
|
||||||
|
} else {
|
||||||
|
plaintext
|
||||||
|
};
|
||||||
|
|
||||||
|
std::fs::write(path, bytes).with_context(|| format!("write state {path:?}"))?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn decode_identity_key(hex_str: &str) -> anyhow::Result<Vec<u8>> {
|
||||||
|
let bytes = super::hex::decode(hex_str)
|
||||||
|
.map_err(|e| anyhow::anyhow!(e))
|
||||||
|
.context("identity key must be hex")?;
|
||||||
|
anyhow::ensure!(bytes.len() == 32, "identity key must be 32 bytes");
|
||||||
|
Ok(bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Path of the on-disk key store that sits next to `state_path`
/// (same stem, `.ks` extension).
pub fn keystore_path(state_path: &Path) -> PathBuf {
    let mut ks = PathBuf::from(state_path);
    ks.set_extension("ks");
    ks
}
|
||||||
|
|
||||||
|
pub fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||||
|
use sha2::{Digest, Sha256};
|
||||||
|
Sha256::digest(bytes).to_vec()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    /// Encrypt-then-decrypt must round-trip and produce a QPCE-tagged blob.
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let data = b"test state data";
        let pw = "test-password";
        let sealed = encrypt_state(pw, data).unwrap();
        assert!(is_encrypted_state(&sealed));
        assert_eq!(decrypt_state(pw, &sealed).unwrap(), data);
    }

    /// Decryption with the wrong password must fail, not return garbage.
    #[test]
    fn wrong_password_fails() {
        let sealed = encrypt_state("correct", b"test state data").unwrap();
        assert!(decrypt_state("wrong", &sealed).is_err());
    }
}
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -5,9 +5,9 @@ use std::path::PathBuf;
|
|||||||
use clap::{Parser, Subcommand};
|
use clap::{Parser, Subcommand};
|
||||||
|
|
||||||
use quicnprotochat_client::{
|
use quicnprotochat_client::{
|
||||||
cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_invite, cmd_join, cmd_login, cmd_ping,
|
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
|
||||||
cmd_recv, cmd_register, cmd_register_state, cmd_register_user, cmd_send, ClientAuth,
|
cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register, cmd_register_state,
|
||||||
init_auth,
|
cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami, init_auth, ClientAuth,
|
||||||
};
|
};
|
||||||
|
|
||||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||||
@@ -35,7 +35,12 @@ struct Args {
|
|||||||
|
|
||||||
/// Bearer token or OPAQUE session token for authenticated requests.
|
/// Bearer token or OPAQUE session token for authenticated requests.
|
||||||
/// Not required for register-user and login commands.
|
/// Not required for register-user and login commands.
|
||||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_ACCESS_TOKEN", default_value = "")]
|
#[arg(
|
||||||
|
long,
|
||||||
|
global = true,
|
||||||
|
env = "QUICNPROTOCHAT_ACCESS_TOKEN",
|
||||||
|
default_value = ""
|
||||||
|
)]
|
||||||
access_token: String,
|
access_token: String,
|
||||||
|
|
||||||
/// Optional device identifier (UUID bytes encoded as hex or raw string).
|
/// Optional device identifier (UUID bytes encoded as hex or raw string).
|
||||||
@@ -47,6 +52,10 @@ struct Args {
|
|||||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_STATE_PASSWORD")]
|
#[arg(long, global = true, env = "QUICNPROTOCHAT_STATE_PASSWORD")]
|
||||||
state_password: Option<String>,
|
state_password: Option<String>,
|
||||||
|
|
||||||
|
/// Use post-quantum MLS backend (X25519 + ML-KEM-768) for new state. M7.
|
||||||
|
#[arg(long, global = true, env = "QUICNPROTOCHAT_PQ")]
|
||||||
|
pq: bool,
|
||||||
|
|
||||||
#[command(subcommand)]
|
#[command(subcommand)]
|
||||||
command: Command,
|
command: Command,
|
||||||
}
|
}
|
||||||
@@ -73,6 +82,43 @@ enum Command {
|
|||||||
username: String,
|
username: String,
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
password: String,
|
password: String,
|
||||||
|
/// Hex-encoded Ed25519 identity key (64 hex chars). Optional if --state is provided.
|
||||||
|
#[arg(long)]
|
||||||
|
identity_key: Option<String>,
|
||||||
|
/// State file to derive the identity key (requires same password if encrypted).
|
||||||
|
#[arg(long)]
|
||||||
|
state: Option<PathBuf>,
|
||||||
|
/// Password for the encrypted state file (if any).
|
||||||
|
#[arg(long)]
|
||||||
|
state_password: Option<String>,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Show local identity key, fingerprint, group status, and hybrid key status.
|
||||||
|
Whoami {
|
||||||
|
/// State file path (identity + MLS state).
|
||||||
|
#[arg(
|
||||||
|
long,
|
||||||
|
default_value = "quicnprotochat-state.bin",
|
||||||
|
env = "QUICNPROTOCHAT_STATE"
|
||||||
|
)]
|
||||||
|
state: PathBuf,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Check server connectivity and print status.
|
||||||
|
Health {
|
||||||
|
/// Server address (host:port).
|
||||||
|
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||||
|
server: String,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Check if a peer has registered a hybrid key (non-consuming lookup).
|
||||||
|
CheckKey {
|
||||||
|
/// Server address (host:port).
|
||||||
|
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||||
|
server: String,
|
||||||
|
|
||||||
|
/// Peer's Ed25519 identity public key (64 hex chars = 32 bytes).
|
||||||
|
identity_key: String,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Send a Ping to the server and print the round-trip time.
|
/// Send a Ping to the server and print the round-trip time.
|
||||||
@@ -99,7 +145,7 @@ enum Command {
|
|||||||
identity_key: String,
|
identity_key: String,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Run a full Alice/Bob MLS round-trip against live AS and DS endpoints.
|
/// Run a two-party MLS demo (creator + joiner) against live AS and DS.
|
||||||
DemoGroup {
|
DemoGroup {
|
||||||
/// Server address (host:port).
|
/// Server address (host:port).
|
||||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||||
@@ -121,6 +167,22 @@ enum Command {
|
|||||||
server: String,
|
server: String,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
/// Refresh the KeyPackage on the server (existing state only).
|
||||||
|
/// Run periodically (e.g. before server TTL ~24h) or after your KeyPackage was consumed so others can invite you.
|
||||||
|
RefreshKeypackage {
|
||||||
|
/// State file path (identity + MLS state).
|
||||||
|
#[arg(
|
||||||
|
long,
|
||||||
|
default_value = "quicnprotochat-state.bin",
|
||||||
|
env = "QUICNPROTOCHAT_STATE"
|
||||||
|
)]
|
||||||
|
state: PathBuf,
|
||||||
|
|
||||||
|
/// Server address (host:port).
|
||||||
|
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||||
|
server: String,
|
||||||
|
},
|
||||||
|
|
||||||
/// Create a persistent group and save state to disk.
|
/// Create a persistent group and save state to disk.
|
||||||
CreateGroup {
|
CreateGroup {
|
||||||
/// State file path (identity + MLS state).
|
/// State file path (identity + MLS state).
|
||||||
@@ -177,9 +239,12 @@ enum Command {
|
|||||||
state: PathBuf,
|
state: PathBuf,
|
||||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||||
server: String,
|
server: String,
|
||||||
/// Recipient identity key (hex, 32 bytes -> 64 chars).
|
/// Recipient identity key (hex, 32 bytes -> 64 chars). Omit when using --all.
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
peer_key: String,
|
peer_key: Option<String>,
|
||||||
|
/// Send to all other group members (N-way groups).
|
||||||
|
#[arg(long)]
|
||||||
|
all: bool,
|
||||||
/// Plaintext message to send.
|
/// Plaintext message to send.
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
msg: String,
|
msg: String,
|
||||||
@@ -204,6 +269,32 @@ enum Command {
|
|||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
stream: bool,
|
stream: bool,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
/// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
|
||||||
|
/// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
|
||||||
|
Chat {
|
||||||
|
#[arg(
|
||||||
|
long,
|
||||||
|
default_value = "quicnprotochat-state.bin",
|
||||||
|
env = "QUICNPROTOCHAT_STATE"
|
||||||
|
)]
|
||||||
|
state: PathBuf,
|
||||||
|
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||||
|
server: String,
|
||||||
|
/// Peer identity key (hex, 64 chars). Omit in a two-person group to use the only other member.
|
||||||
|
#[arg(long)]
|
||||||
|
peer_key: Option<String>,
|
||||||
|
/// How often to poll for incoming messages (milliseconds).
|
||||||
|
#[arg(long, default_value_t = 500)]
|
||||||
|
poll_interval_ms: u64,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Generate shell completions for the given shell and print to stdout.
|
||||||
|
#[command(hide = true)]
|
||||||
|
Completions {
|
||||||
|
shell: clap_complete::Shell,
|
||||||
|
},
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||||
@@ -239,6 +330,7 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
&args.server_name,
|
&args.server_name,
|
||||||
&username,
|
&username,
|
||||||
&password,
|
&password,
|
||||||
|
None,
|
||||||
))
|
))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
@@ -246,6 +338,9 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
server,
|
server,
|
||||||
username,
|
username,
|
||||||
password,
|
password,
|
||||||
|
identity_key,
|
||||||
|
state,
|
||||||
|
state_password,
|
||||||
} => {
|
} => {
|
||||||
let local = tokio::task::LocalSet::new();
|
let local = tokio::task::LocalSet::new();
|
||||||
local
|
local
|
||||||
@@ -255,6 +350,30 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
&args.server_name,
|
&args.server_name,
|
||||||
&username,
|
&username,
|
||||||
&password,
|
&password,
|
||||||
|
identity_key.as_deref(),
|
||||||
|
state.as_deref(),
|
||||||
|
state_password.as_deref(),
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Command::Whoami { state } => cmd_whoami(&state, state_pw),
|
||||||
|
Command::Health { server } => {
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
local
|
||||||
|
.run_until(cmd_health(&server, &args.ca_cert, &args.server_name))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Command::CheckKey {
|
||||||
|
server,
|
||||||
|
identity_key,
|
||||||
|
} => {
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
local
|
||||||
|
.run_until(cmd_check_key(
|
||||||
|
&server,
|
||||||
|
&args.ca_cert,
|
||||||
|
&args.server_name,
|
||||||
|
&identity_key,
|
||||||
))
|
))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
@@ -282,7 +401,7 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
Command::DemoGroup { server } => {
|
Command::DemoGroup { server } => {
|
||||||
let local = tokio::task::LocalSet::new();
|
let local = tokio::task::LocalSet::new();
|
||||||
local
|
local
|
||||||
.run_until(cmd_demo_group(&server, &args.ca_cert, &args.server_name))
|
.run_until(cmd_demo_group(&server, &args.ca_cert, &args.server_name, args.pq))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
Command::RegisterState { state, server } => {
|
Command::RegisterState { state, server } => {
|
||||||
@@ -294,6 +413,19 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
&args.ca_cert,
|
&args.ca_cert,
|
||||||
&args.server_name,
|
&args.server_name,
|
||||||
state_pw,
|
state_pw,
|
||||||
|
args.pq,
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Command::RefreshKeypackage { state, server } => {
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
local
|
||||||
|
.run_until(cmd_refresh_keypackage(
|
||||||
|
&state,
|
||||||
|
&server,
|
||||||
|
&args.ca_cert,
|
||||||
|
&args.server_name,
|
||||||
|
state_pw,
|
||||||
))
|
))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
@@ -304,7 +436,7 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
} => {
|
} => {
|
||||||
let local = tokio::task::LocalSet::new();
|
let local = tokio::task::LocalSet::new();
|
||||||
local
|
local
|
||||||
.run_until(cmd_create_group(&state, &server, &group_id, state_pw))
|
.run_until(cmd_create_group(&state, &server, &group_id, state_pw, args.pq))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
Command::Invite {
|
Command::Invite {
|
||||||
@@ -327,13 +459,20 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
Command::Join { state, server } => {
|
Command::Join { state, server } => {
|
||||||
let local = tokio::task::LocalSet::new();
|
let local = tokio::task::LocalSet::new();
|
||||||
local
|
local
|
||||||
.run_until(cmd_join(&state, &server, &args.ca_cert, &args.server_name, state_pw))
|
.run_until(cmd_join(
|
||||||
|
&state,
|
||||||
|
&server,
|
||||||
|
&args.ca_cert,
|
||||||
|
&args.server_name,
|
||||||
|
state_pw,
|
||||||
|
))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
Command::Send {
|
Command::Send {
|
||||||
state,
|
state,
|
||||||
server,
|
server,
|
||||||
peer_key,
|
peer_key,
|
||||||
|
all,
|
||||||
msg,
|
msg,
|
||||||
} => {
|
} => {
|
||||||
let local = tokio::task::LocalSet::new();
|
let local = tokio::task::LocalSet::new();
|
||||||
@@ -343,7 +482,8 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
&server,
|
&server,
|
||||||
&args.ca_cert,
|
&args.ca_cert,
|
||||||
&args.server_name,
|
&args.server_name,
|
||||||
&peer_key,
|
peer_key.as_deref(),
|
||||||
|
all,
|
||||||
&msg,
|
&msg,
|
||||||
state_pw,
|
state_pw,
|
||||||
))
|
))
|
||||||
@@ -368,5 +508,34 @@ async fn main() -> anyhow::Result<()> {
|
|||||||
))
|
))
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
Command::Chat {
|
||||||
|
state,
|
||||||
|
server,
|
||||||
|
peer_key,
|
||||||
|
poll_interval_ms,
|
||||||
|
} => {
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
local
|
||||||
|
.run_until(cmd_chat(
|
||||||
|
&state,
|
||||||
|
&server,
|
||||||
|
&args.ca_cert,
|
||||||
|
&args.server_name,
|
||||||
|
peer_key.as_deref(),
|
||||||
|
state_pw,
|
||||||
|
poll_interval_ms,
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Command::Completions { shell } => {
|
||||||
|
use clap::CommandFactory;
|
||||||
|
clap_complete::generate(
|
||||||
|
shell,
|
||||||
|
&mut Args::command(),
|
||||||
|
"quicnprotochat",
|
||||||
|
&mut std::io::stdout(),
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,13 +1,24 @@
|
|||||||
|
// cargo_bin! only works for current package's binary; we spawn quicnprotochat-server from another package.
|
||||||
|
#![allow(deprecated)]
|
||||||
|
|
||||||
use std::{path::PathBuf, process::Command, time::Duration};
|
use std::{path::PathBuf, process::Command, time::Duration};
|
||||||
|
|
||||||
use assert_cmd::cargo::cargo_bin;
|
use assert_cmd::cargo::cargo_bin;
|
||||||
use portpicker::pick_unused_port;
|
use portpicker::pick_unused_port;
|
||||||
|
use rand::RngCore;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use tokio::time::sleep;
|
use tokio::time::sleep;
|
||||||
|
use hex;
|
||||||
|
|
||||||
|
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
|
||||||
|
fn ensure_rustls_provider() {
|
||||||
|
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||||
|
}
|
||||||
|
|
||||||
use quicnprotochat_client::{
|
use quicnprotochat_client::{
|
||||||
cmd_create_group, cmd_invite, cmd_join, cmd_ping, cmd_register_state, cmd_send, ClientAuth,
|
cmd_create_group, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_register_state,
|
||||||
connect_node, fetch_wait, init_auth,
|
cmd_register_user, cmd_send, connect_node, enqueue, fetch_wait, init_auth,
|
||||||
|
load_existing_state, receive_pending_plaintexts, ClientAuth,
|
||||||
};
|
};
|
||||||
use quicnprotochat_core::IdentityKeypair;
|
use quicnprotochat_core::IdentityKeypair;
|
||||||
|
|
||||||
@@ -15,12 +26,6 @@ fn hex_encode(bytes: &[u8]) -> String {
|
|||||||
bytes.iter().map(|b| format!("{b:02x}")).collect()
|
bytes.iter().map(|b| format!("{b:02x}")).collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(serde::Deserialize)]
|
|
||||||
struct StoredStateCompat {
|
|
||||||
identity_seed: [u8; 32],
|
|
||||||
#[allow(dead_code)]
|
|
||||||
group: Option<Vec<u8>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) -> anyhow::Result<()> {
|
async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) -> anyhow::Result<()> {
|
||||||
let local = tokio::task::LocalSet::new();
|
let local = tokio::task::LocalSet::new();
|
||||||
@@ -37,8 +42,12 @@ async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) ->
|
|||||||
anyhow::bail!("server health never became ready")
|
anyhow::bail!("server health never became ready")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Creator and joiner register; creator creates group and invites joiner; joiner joins;
|
||||||
|
/// creator sends a message; assert joiner's mailbox receives it.
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||||
|
ensure_rustls_provider();
|
||||||
|
|
||||||
let temp = TempDir::new()?;
|
let temp = TempDir::new()?;
|
||||||
let base = temp.path();
|
let base = temp.path();
|
||||||
let port = pick_unused_port().expect("free port");
|
let port = pick_unused_port().expect("free port");
|
||||||
@@ -51,7 +60,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
// Spawn server binary.
|
// Spawn server binary.
|
||||||
let server_bin = cargo_bin("quicnprotochat-server");
|
let server_bin = cargo_bin("quicnprotochat-server");
|
||||||
let mut child = Command::new(server_bin)
|
let child = Command::new(server_bin)
|
||||||
.arg("--listen")
|
.arg("--listen")
|
||||||
.arg(&listen)
|
.arg(&listen)
|
||||||
.arg("--data-dir")
|
.arg("--data-dir")
|
||||||
@@ -62,6 +71,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
|||||||
.arg(&tls_key)
|
.arg(&tls_key)
|
||||||
.arg("--auth-token")
|
.arg("--auth-token")
|
||||||
.arg(auth_token)
|
.arg(auth_token)
|
||||||
|
.arg("--allow-insecure-auth")
|
||||||
.spawn()
|
.spawn()
|
||||||
.expect("spawn server");
|
.expect("spawn server");
|
||||||
|
|
||||||
@@ -81,90 +91,503 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
|||||||
// Set client auth context.
|
// Set client auth context.
|
||||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||||
|
|
||||||
// LocalSet for capnp !Send operations.
|
|
||||||
let local = tokio::task::LocalSet::new();
|
let local = tokio::task::LocalSet::new();
|
||||||
|
|
||||||
let alice_state = base.join("alice.bin");
|
let creator_state = base.join("creator.bin");
|
||||||
let bob_state = base.join("bob.bin");
|
let joiner_state = base.join("joiner.bin");
|
||||||
|
|
||||||
local
|
local
|
||||||
.run_until(cmd_register_state(
|
.run_until(cmd_register_state(
|
||||||
&alice_state,
|
&creator_state,
|
||||||
&server,
|
&server,
|
||||||
&ca_cert,
|
&ca_cert,
|
||||||
"localhost",
|
"localhost",
|
||||||
None,
|
None,
|
||||||
|
false,
|
||||||
))
|
))
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
local
|
local
|
||||||
.run_until(cmd_register_state(
|
.run_until(cmd_register_state(
|
||||||
&bob_state,
|
&joiner_state,
|
||||||
&server,
|
&server,
|
||||||
&ca_cert,
|
&ca_cert,
|
||||||
"localhost",
|
"localhost",
|
||||||
None,
|
None,
|
||||||
|
false,
|
||||||
))
|
))
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
local
|
local
|
||||||
.run_until(cmd_create_group(
|
.run_until(cmd_create_group(&creator_state, &server, "test-group", None, false))
|
||||||
&alice_state,
|
|
||||||
&server,
|
|
||||||
"test-group",
|
|
||||||
None,
|
|
||||||
))
|
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
// Load Bob identity key from persisted state to use as peer key.
|
let joiner_state_loaded = load_existing_state(&joiner_state, None)?;
|
||||||
let bob_bytes = std::fs::read(&bob_state)?;
|
let joiner_identity = IdentityKeypair::from_seed(joiner_state_loaded.identity_seed);
|
||||||
let bob_state_compat: StoredStateCompat = bincode::deserialize(&bob_bytes)?;
|
let joiner_pk_hex = hex_encode(&joiner_identity.public_key_bytes());
|
||||||
let bob_identity = IdentityKeypair::from_seed(bob_state_compat.identity_seed);
|
|
||||||
let bob_pk_hex = hex_encode(&bob_identity.public_key_bytes());
|
|
||||||
|
|
||||||
local
|
local
|
||||||
.run_until(cmd_invite(
|
.run_until(cmd_invite(
|
||||||
&alice_state,
|
&creator_state,
|
||||||
&server,
|
&server,
|
||||||
&ca_cert,
|
&ca_cert,
|
||||||
"localhost",
|
"localhost",
|
||||||
&bob_pk_hex,
|
&joiner_pk_hex,
|
||||||
None,
|
None,
|
||||||
))
|
))
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
local
|
local
|
||||||
.run_until(cmd_join(
|
.run_until(cmd_join(&joiner_state, &server, &ca_cert, "localhost", None))
|
||||||
&bob_state,
|
|
||||||
&server,
|
|
||||||
&ca_cert,
|
|
||||||
"localhost",
|
|
||||||
None,
|
|
||||||
))
|
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
// Send Alice -> Bob.
|
|
||||||
local
|
local
|
||||||
.run_until(cmd_send(
|
.run_until(cmd_send(
|
||||||
&alice_state,
|
&creator_state,
|
||||||
&server,
|
&server,
|
||||||
&ca_cert,
|
&ca_cert,
|
||||||
"localhost",
|
"localhost",
|
||||||
&bob_pk_hex,
|
Some(&joiner_pk_hex),
|
||||||
"hello bob",
|
false,
|
||||||
|
"hello",
|
||||||
None,
|
None,
|
||||||
))
|
))
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
// Confirm Bob can fetch at least one payload.
|
|
||||||
local
|
local
|
||||||
.run_until(async {
|
.run_until(async {
|
||||||
let client = connect_node(&server, &ca_cert, "localhost").await?;
|
let client = connect_node(&server, &ca_cert, "localhost").await?;
|
||||||
let payloads = fetch_wait(&client, &bob_identity.public_key_bytes(), 1000).await?;
|
let payloads = fetch_wait(&client, &joiner_identity.public_key_bytes(), 1000).await?;
|
||||||
anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to Bob");
|
anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to joiner");
|
||||||
Ok::<(), anyhow::Error>(())
|
Ok::<(), anyhow::Error>(())
|
||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Three-party group: A creates group, invites B then C; B and C join; A sends, B and C receive;
|
||||||
|
/// B sends, A and C receive.
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
|
async fn e2e_three_party_group_invite_join_send_recv() -> anyhow::Result<()> {
|
||||||
|
ensure_rustls_provider();
|
||||||
|
|
||||||
|
let temp = TempDir::new()?;
|
||||||
|
let base = temp.path();
|
||||||
|
let port = pick_unused_port().expect("free port");
|
||||||
|
let listen = format!("127.0.0.1:{port}");
|
||||||
|
let server = listen.clone();
|
||||||
|
let ca_cert = base.join("server-cert.der");
|
||||||
|
let tls_key = base.join("server-key.der");
|
||||||
|
let data_dir = base.join("data");
|
||||||
|
let auth_token = "devtoken";
|
||||||
|
|
||||||
|
let server_bin = cargo_bin("quicnprotochat-server");
|
||||||
|
let child = Command::new(server_bin)
|
||||||
|
.arg("--listen")
|
||||||
|
.arg(&listen)
|
||||||
|
.arg("--data-dir")
|
||||||
|
.arg(&data_dir)
|
||||||
|
.arg("--tls-cert")
|
||||||
|
.arg(&ca_cert)
|
||||||
|
.arg("--tls-key")
|
||||||
|
.arg(&tls_key)
|
||||||
|
.arg("--auth-token")
|
||||||
|
.arg(auth_token)
|
||||||
|
.arg("--allow-insecure-auth")
|
||||||
|
.spawn()
|
||||||
|
.expect("spawn server");
|
||||||
|
|
||||||
|
struct ChildGuard(std::process::Child);
|
||||||
|
impl Drop for ChildGuard {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
let _ = self.0.kill();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let _child_guard = ChildGuard(child);
|
||||||
|
|
||||||
|
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||||
|
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||||
|
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
|
||||||
|
let creator_state = base.join("creator.bin");
|
||||||
|
let b_state = base.join("b.bin");
|
||||||
|
let c_state = base.join("c.bin");
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_register_state(
|
||||||
|
&creator_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
None,
|
||||||
|
false,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
local
|
||||||
|
.run_until(cmd_register_state(
|
||||||
|
&b_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
None,
|
||||||
|
false,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
local
|
||||||
|
.run_until(cmd_register_state(
|
||||||
|
&c_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
None,
|
||||||
|
false,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let b_loaded = load_existing_state(&b_state, None)?;
|
||||||
|
let b_pk_hex = hex_encode(&IdentityKeypair::from_seed(b_loaded.identity_seed).public_key_bytes());
|
||||||
|
|
||||||
|
let c_loaded = load_existing_state(&c_state, None)?;
|
||||||
|
let c_pk_hex = hex_encode(&IdentityKeypair::from_seed(c_loaded.identity_seed).public_key_bytes());
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_create_group(&creator_state, &server, "test-group", None, false))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_invite(
|
||||||
|
&creator_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
&b_pk_hex,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_invite(
|
||||||
|
&creator_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
&c_pk_hex,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_join(&b_state, &server, &ca_cert, "localhost", None))
|
||||||
|
.await?;
|
||||||
|
local
|
||||||
|
.run_until(cmd_join(&c_state, &server, &ca_cert, "localhost", None))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_send(
|
||||||
|
&creator_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
None,
|
||||||
|
true,
|
||||||
|
"hello",
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
sleep(Duration::from_millis(150)).await;
|
||||||
|
|
||||||
|
let b_plaintexts = local
|
||||||
|
.run_until(receive_pending_plaintexts(
|
||||||
|
&b_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
1500,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
let c_plaintexts = local
|
||||||
|
.run_until(receive_pending_plaintexts(
|
||||||
|
&c_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
1500,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
anyhow::ensure!(
|
||||||
|
b_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||||
|
"B did not receive 'hello', got {:?}",
|
||||||
|
b_plaintexts
|
||||||
|
);
|
||||||
|
anyhow::ensure!(
|
||||||
|
c_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||||
|
"C did not receive 'hello', got {:?}",
|
||||||
|
c_plaintexts
|
||||||
|
);
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_send(
|
||||||
|
&b_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
None,
|
||||||
|
true,
|
||||||
|
"hi",
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
sleep(Duration::from_millis(200)).await;
|
||||||
|
|
||||||
|
let a_plaintexts = local
|
||||||
|
.run_until(receive_pending_plaintexts(
|
||||||
|
&creator_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
1500,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
let c_plaintexts2 = local
|
||||||
|
.run_until(receive_pending_plaintexts(
|
||||||
|
&c_state,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
1500,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
anyhow::ensure!(
|
||||||
|
a_plaintexts.iter().any(|p| p.as_slice() == b"hi"),
|
||||||
|
"A did not receive 'hi', got {:?}",
|
||||||
|
a_plaintexts
|
||||||
|
);
|
||||||
|
anyhow::ensure!(
|
||||||
|
c_plaintexts2.iter().any(|p| p.as_slice() == b"hi"),
|
||||||
|
"C did not receive 'hi', got {:?}",
|
||||||
|
c_plaintexts2
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Login should refuse if the presented identity key does not match the registered key.
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
|
async fn e2e_login_rejects_mismatched_identity() -> anyhow::Result<()> {
|
||||||
|
ensure_rustls_provider();
|
||||||
|
|
||||||
|
let temp = TempDir::new()?;
|
||||||
|
let base = temp.path();
|
||||||
|
let port = pick_unused_port().expect("free port");
|
||||||
|
let listen = format!("127.0.0.1:{port}");
|
||||||
|
let server = listen.clone();
|
||||||
|
let ca_cert = base.join("server-cert.der");
|
||||||
|
let tls_key = base.join("server-key.der");
|
||||||
|
let data_dir = base.join("data");
|
||||||
|
let auth_token = "devtoken";
|
||||||
|
|
||||||
|
// Spawn server binary.
|
||||||
|
let server_bin = cargo_bin("quicnprotochat-server");
|
||||||
|
let child = Command::new(server_bin)
|
||||||
|
.arg("--listen")
|
||||||
|
.arg(&listen)
|
||||||
|
.arg("--data-dir")
|
||||||
|
.arg(&data_dir)
|
||||||
|
.arg("--tls-cert")
|
||||||
|
.arg(&ca_cert)
|
||||||
|
.arg("--tls-key")
|
||||||
|
.arg(&tls_key)
|
||||||
|
.arg("--auth-token")
|
||||||
|
.arg(auth_token)
|
||||||
|
.arg("--allow-insecure-auth")
|
||||||
|
.spawn()
|
||||||
|
.expect("spawn server");
|
||||||
|
|
||||||
|
struct ChildGuard(std::process::Child);
|
||||||
|
impl Drop for ChildGuard {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
let _ = self.0.kill();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let child_guard = ChildGuard(child);
|
||||||
|
let _ = child_guard;
|
||||||
|
|
||||||
|
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||||
|
|
||||||
|
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||||
|
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
let state_path = base.join("user.bin");
|
||||||
|
|
||||||
|
// Register and persist state (includes identity key binding).
|
||||||
|
local
|
||||||
|
.run_until(cmd_register_state(
|
||||||
|
&state_path,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
None,
|
||||||
|
false,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Register the user with the bound identity so login can enforce mismatches.
|
||||||
|
let stored_state = load_existing_state(&state_path, None)?;
|
||||||
|
let identity_hex = hex::encode(
|
||||||
|
IdentityKeypair::from_seed(stored_state.identity_seed).public_key_bytes(),
|
||||||
|
);
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_register_user(
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
"user1",
|
||||||
|
"pass",
|
||||||
|
Some(&identity_hex),
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Craft an unrelated identity key and attempt login with it.
|
||||||
|
let mut bogus_identity = [0u8; 32];
|
||||||
|
rand::thread_rng().fill_bytes(&mut bogus_identity);
|
||||||
|
let bogus_hex = hex::encode(bogus_identity);
|
||||||
|
|
||||||
|
let result = local
|
||||||
|
.run_until(cmd_login(
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
"user1",
|
||||||
|
"pass",
|
||||||
|
Some(&bogus_hex),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(_) => anyhow::bail!("login unexpectedly succeeded with mismatched identity"),
|
||||||
|
Err(e) => {
|
||||||
|
// Show the full error chain so we can match the server's E016 response.
|
||||||
|
let msg = format!("{e:#}");
|
||||||
|
anyhow::ensure!(
|
||||||
|
msg.contains("identity") || msg.contains("E016"),
|
||||||
|
"login failed but not for identity mismatch: {msg}"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sealed Sender: enqueue with valid token (no identity binding) succeeds; recipient can fetch.
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||||
|
async fn e2e_sealed_sender_enqueue_then_fetch() -> anyhow::Result<()> {
|
||||||
|
ensure_rustls_provider();
|
||||||
|
|
||||||
|
let temp = TempDir::new()?;
|
||||||
|
let base = temp.path();
|
||||||
|
let port = pick_unused_port().expect("free port");
|
||||||
|
let listen = format!("127.0.0.1:{port}");
|
||||||
|
let server = listen.clone();
|
||||||
|
let ca_cert = base.join("server-cert.der");
|
||||||
|
let tls_key = base.join("server-key.der");
|
||||||
|
let data_dir = base.join("data");
|
||||||
|
let auth_token = "devtoken";
|
||||||
|
|
||||||
|
let server_bin = cargo_bin("quicnprotochat-server");
|
||||||
|
let child = Command::new(server_bin)
|
||||||
|
.arg("--listen")
|
||||||
|
.arg(&listen)
|
||||||
|
.arg("--data-dir")
|
||||||
|
.arg(&data_dir)
|
||||||
|
.arg("--tls-cert")
|
||||||
|
.arg(&ca_cert)
|
||||||
|
.arg("--tls-key")
|
||||||
|
.arg(&tls_key)
|
||||||
|
.arg("--auth-token")
|
||||||
|
.arg(auth_token)
|
||||||
|
.arg("--allow-insecure-auth")
|
||||||
|
.arg("--sealed-sender")
|
||||||
|
.spawn()
|
||||||
|
.expect("spawn server");
|
||||||
|
|
||||||
|
struct ChildGuard(std::process::Child);
|
||||||
|
impl Drop for ChildGuard {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
let _ = self.0.kill();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let _child_guard = ChildGuard(child);
|
||||||
|
|
||||||
|
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||||
|
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||||
|
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
let state_path = base.join("recipient.bin");
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_register_state(
|
||||||
|
&state_path,
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
None,
|
||||||
|
false,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let stored = load_existing_state(&state_path, None)?;
|
||||||
|
let recipient_key = IdentityKeypair::from_seed(stored.identity_seed).public_key_bytes();
|
||||||
|
let identity_hex = hex_encode(&recipient_key);
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_register_user(
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
"recipient",
|
||||||
|
"pass",
|
||||||
|
Some(&identity_hex),
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
local
|
||||||
|
.run_until(cmd_login(
|
||||||
|
&server,
|
||||||
|
&ca_cert,
|
||||||
|
"localhost",
|
||||||
|
"recipient",
|
||||||
|
"pass",
|
||||||
|
Some(&identity_hex),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let client = local.run_until(connect_node(&server, &ca_cert, "localhost")).await?;
|
||||||
|
local
|
||||||
|
.run_until(enqueue(&client, &recipient_key, b"sealed-payload"))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let payloads = local
|
||||||
|
.run_until(fetch_wait(&client, &recipient_key, 500))
|
||||||
|
.await?;
|
||||||
|
anyhow::ensure!(
|
||||||
|
payloads.len() == 1 && payloads[0].1.as_slice() == b"sealed-payload",
|
||||||
|
"expected one payload 'sealed-payload', got {:?}",
|
||||||
|
payloads
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|||||||
256
crates/quicnprotochat-core/src/app_message.rs
Normal file
256
crates/quicnprotochat-core/src/app_message.rs
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
//! Rich application-layer message format for MLS application payloads.
|
||||||
|
//!
|
||||||
|
//! The server sees only opaque ciphertext; structure lives in this client-defined
|
||||||
|
//! plaintext schema. All messages use: version byte (1) + message_type byte + type-specific payload.
|
||||||
|
//!
|
||||||
|
//! # Message ID
|
||||||
|
//!
|
||||||
|
//! `message_id` is assigned by the sender (16 random bytes) and included in the
|
||||||
|
//! serialized payload for Chat (and implied for Reply/Reaction/ReadReceipt via ref_msg_id).
|
||||||
|
//! Recipients can store message_ids to reference them in replies or reactions.
|
||||||
|
|
||||||
|
use crate::error::CoreError;
|
||||||
|
use rand::RngCore;
|
||||||
|
|
||||||
|
/// Current schema version.
|
||||||
|
pub const VERSION: u8 = 1;
|
||||||
|
|
||||||
|
/// Message type discriminant (one byte).
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
|
#[repr(u8)]
|
||||||
|
pub enum MessageType {
|
||||||
|
Chat = 0x01,
|
||||||
|
Reply = 0x02,
|
||||||
|
Reaction = 0x03,
|
||||||
|
ReadReceipt = 0x04,
|
||||||
|
Typing = 0x05,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MessageType {
|
||||||
|
fn from_byte(b: u8) -> Option<Self> {
|
||||||
|
match b {
|
||||||
|
0x01 => Some(MessageType::Chat),
|
||||||
|
0x02 => Some(MessageType::Reply),
|
||||||
|
0x03 => Some(MessageType::Reaction),
|
||||||
|
0x04 => Some(MessageType::ReadReceipt),
|
||||||
|
0x05 => Some(MessageType::Typing),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parsed application message (one of the rich types).
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
|
pub enum AppMessage {
|
||||||
|
/// Plain chat: body (UTF-8). message_id is included so recipients can store and reference it.
|
||||||
|
Chat {
|
||||||
|
message_id: [u8; 16],
|
||||||
|
body: Vec<u8>,
|
||||||
|
},
|
||||||
|
Reply {
|
||||||
|
ref_msg_id: [u8; 16],
|
||||||
|
body: Vec<u8>,
|
||||||
|
},
|
||||||
|
Reaction {
|
||||||
|
ref_msg_id: [u8; 16],
|
||||||
|
emoji: Vec<u8>,
|
||||||
|
},
|
||||||
|
ReadReceipt {
|
||||||
|
msg_id: [u8; 16],
|
||||||
|
},
|
||||||
|
Typing {
|
||||||
|
/// 0 = stopped, 1 = typing
|
||||||
|
active: u8,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generate a new 16-byte message ID (e.g. for Chat/Reply so recipients can reference it).
|
||||||
|
pub fn generate_message_id() -> [u8; 16] {
|
||||||
|
let mut id = [0u8; 16];
|
||||||
|
rand::rngs::OsRng.fill_bytes(&mut id);
|
||||||
|
id
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Layout (minimal, no Cap'n Proto) ─────────────────────────────────────────
|
||||||
|
//
|
||||||
|
// All messages: [version: 1][type: 1][payload...]
|
||||||
|
//
|
||||||
|
// Chat: [msg_id: 16][body_len: 2 BE][body]
|
||||||
|
// Reply: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||||
|
// Reaction: [ref_msg_id: 16][emoji_len: 1][emoji]
|
||||||
|
// ReadReceipt: [msg_id: 16]
|
||||||
|
// Typing: [active: 1] 0 = stopped, 1 = typing
|
||||||
|
|
||||||
|
/// Serialize a rich message into the application payload format.
|
||||||
|
pub fn serialize(msg_type: MessageType, payload: &[u8]) -> Vec<u8> {
|
||||||
|
let mut out = Vec::with_capacity(2 + payload.len());
|
||||||
|
out.push(VERSION);
|
||||||
|
out.push(msg_type as u8);
|
||||||
|
out.extend_from_slice(payload);
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serialize a Chat message (generates message_id internally; pass None to generate, or Some(id) when replying with a known id).
|
||||||
|
pub fn serialize_chat(body: &[u8], message_id: Option<[u8; 16]>) -> Vec<u8> {
|
||||||
|
let id = message_id.unwrap_or_else(generate_message_id);
|
||||||
|
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||||
|
payload.extend_from_slice(&id);
|
||||||
|
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||||
|
payload.extend_from_slice(body);
|
||||||
|
serialize(MessageType::Chat, &payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serialize a Reply message.
|
||||||
|
pub fn serialize_reply(ref_msg_id: [u8; 16], body: &[u8]) -> Vec<u8> {
|
||||||
|
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||||
|
payload.extend_from_slice(&ref_msg_id);
|
||||||
|
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||||
|
payload.extend_from_slice(body);
|
||||||
|
serialize(MessageType::Reply, &payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serialize a Reaction message.
|
||||||
|
pub fn serialize_reaction(ref_msg_id: [u8; 16], emoji: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||||
|
if emoji.len() > 255 {
|
||||||
|
return Err(CoreError::AppMessage("emoji length > 255".into()));
|
||||||
|
}
|
||||||
|
let mut payload = Vec::with_capacity(16 + 1 + emoji.len());
|
||||||
|
payload.extend_from_slice(&ref_msg_id);
|
||||||
|
payload.push(emoji.len() as u8);
|
||||||
|
payload.extend_from_slice(emoji);
|
||||||
|
Ok(serialize(MessageType::Reaction, &payload))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serialize a ReadReceipt message.
|
||||||
|
pub fn serialize_read_receipt(msg_id: [u8; 16]) -> Vec<u8> {
|
||||||
|
serialize(MessageType::ReadReceipt, &msg_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serialize a Typing message (active: 0 = stopped, 1 = typing).
|
||||||
|
pub fn serialize_typing(active: u8) -> Vec<u8> {
|
||||||
|
let payload = [active];
|
||||||
|
serialize(MessageType::Typing, &payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse bytes into (MessageType, AppMessage). Fails if version/type unknown or payload too short.
|
||||||
|
pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
|
||||||
|
if bytes.len() < 2 {
|
||||||
|
return Err(CoreError::AppMessage("payload too short (need version + type)".into()));
|
||||||
|
}
|
||||||
|
let version = bytes[0];
|
||||||
|
if version != VERSION {
|
||||||
|
return Err(CoreError::AppMessage(format!("unsupported version {version}").into()));
|
||||||
|
}
|
||||||
|
let msg_type = MessageType::from_byte(bytes[1])
|
||||||
|
.ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1]).into()))?;
|
||||||
|
let payload = &bytes[2..];
|
||||||
|
|
||||||
|
let app = match msg_type {
|
||||||
|
MessageType::Chat => parse_chat(payload)?,
|
||||||
|
MessageType::Reply => parse_reply(payload)?,
|
||||||
|
MessageType::Reaction => parse_reaction(payload)?,
|
||||||
|
MessageType::ReadReceipt => parse_read_receipt(payload)?,
|
||||||
|
MessageType::Typing => parse_typing(payload)?,
|
||||||
|
};
|
||||||
|
Ok((msg_type, app))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_chat(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||||
|
if payload.len() < 16 + 2 {
|
||||||
|
return Err(CoreError::AppMessage("Chat payload too short".into()));
|
||||||
|
}
|
||||||
|
let mut message_id = [0u8; 16];
|
||||||
|
message_id.copy_from_slice(&payload[..16]);
|
||||||
|
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||||
|
if payload.len() < 18 + body_len {
|
||||||
|
return Err(CoreError::AppMessage("Chat body length exceeds payload".into()));
|
||||||
|
}
|
||||||
|
let body = payload[18..18 + body_len].to_vec();
|
||||||
|
Ok(AppMessage::Chat { message_id, body })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_reply(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||||
|
if payload.len() < 16 + 2 {
|
||||||
|
return Err(CoreError::AppMessage("Reply payload too short".into()));
|
||||||
|
}
|
||||||
|
let mut ref_msg_id = [0u8; 16];
|
||||||
|
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||||
|
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||||
|
if payload.len() < 18 + body_len {
|
||||||
|
return Err(CoreError::AppMessage("Reply body length exceeds payload".into()));
|
||||||
|
}
|
||||||
|
let body = payload[18..18 + body_len].to_vec();
|
||||||
|
Ok(AppMessage::Reply { ref_msg_id, body })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_reaction(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||||
|
if payload.len() < 16 + 1 {
|
||||||
|
return Err(CoreError::AppMessage("Reaction payload too short".into()));
|
||||||
|
}
|
||||||
|
let mut ref_msg_id = [0u8; 16];
|
||||||
|
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||||
|
let emoji_len = payload[16] as usize;
|
||||||
|
if payload.len() < 17 + emoji_len {
|
||||||
|
return Err(CoreError::AppMessage("Reaction emoji length exceeds payload".into()));
|
||||||
|
}
|
||||||
|
let emoji = payload[17..17 + emoji_len].to_vec();
|
||||||
|
Ok(AppMessage::Reaction { ref_msg_id, emoji })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_read_receipt(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||||
|
if payload.len() < 16 {
|
||||||
|
return Err(CoreError::AppMessage("ReadReceipt payload too short".into()));
|
||||||
|
}
|
||||||
|
let mut msg_id = [0u8; 16];
|
||||||
|
msg_id.copy_from_slice(&payload[..16]);
|
||||||
|
Ok(AppMessage::ReadReceipt { msg_id })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_typing(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||||
|
if payload.is_empty() {
|
||||||
|
return Err(CoreError::AppMessage("Typing payload empty".into()));
|
||||||
|
}
|
||||||
|
Ok(AppMessage::Typing { active: payload[0] })
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn roundtrip_chat() {
|
||||||
|
let body = b"hello";
|
||||||
|
let encoded = serialize_chat(body, None);
|
||||||
|
let (t, msg) = parse(&encoded).expect("serialize_chat output is valid");
|
||||||
|
assert_eq!(t, MessageType::Chat);
|
||||||
|
assert!(matches!(&msg, AppMessage::Chat { .. }), "expected Chat, got {:?}", msg);
|
||||||
|
if let AppMessage::Chat { body: b, .. } = &msg {
|
||||||
|
assert_eq!(b.as_slice(), body);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn roundtrip_reply() {
|
||||||
|
let ref_id = [1u8; 16];
|
||||||
|
let body = b"reply text";
|
||||||
|
let encoded = serialize_reply(ref_id, body);
|
||||||
|
let (t, msg) = parse(&encoded).expect("serialize_reply output is valid");
|
||||||
|
assert_eq!(t, MessageType::Reply);
|
||||||
|
assert!(matches!(&msg, AppMessage::Reply { .. }), "expected Reply, got {:?}", msg);
|
||||||
|
if let AppMessage::Reply { ref_msg_id, body: b } = &msg {
|
||||||
|
assert_eq!(ref_msg_id, &ref_id);
|
||||||
|
assert_eq!(b.as_slice(), body);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn roundtrip_typing() {
|
||||||
|
let encoded = serialize_typing(1);
|
||||||
|
let (t, msg) = parse(&encoded).expect("serialize_typing output is valid");
|
||||||
|
assert_eq!(t, MessageType::Typing);
|
||||||
|
assert!(matches!(&msg, AppMessage::Typing { .. }), "expected Typing, got {:?}", msg);
|
||||||
|
if let AppMessage::Typing { active } = &msg {
|
||||||
|
assert_eq!(*active, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -18,4 +18,12 @@ pub enum CoreError {
|
|||||||
/// A hybrid KEM (X25519 + ML-KEM-768) operation failed.
|
/// A hybrid KEM (X25519 + ML-KEM-768) operation failed.
|
||||||
#[error("hybrid KEM error: {0}")]
|
#[error("hybrid KEM error: {0}")]
|
||||||
HybridKem(#[from] crate::hybrid_kem::HybridKemError),
|
HybridKem(#[from] crate::hybrid_kem::HybridKemError),
|
||||||
|
|
||||||
|
/// IO or persistence failure.
|
||||||
|
#[error("io error: {0}")]
|
||||||
|
Io(String),
|
||||||
|
|
||||||
|
/// Application message (rich payload) parse or serialisation error.
|
||||||
|
#[error("app message: {0}")]
|
||||||
|
AppMessage(String),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,9 +2,10 @@
|
|||||||
//!
|
//!
|
||||||
//! # Design
|
//! # Design
|
||||||
//!
|
//!
|
||||||
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus the per-client
|
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus a per-client crypto
|
||||||
//! [`StoreCrypto`] backend. The backend is **persistent** — it holds the
|
//! backend ([`StoreCrypto`] or [`HybridCryptoProvider`] for M7). The backend
|
||||||
//! in-memory key store that maps init-key references to HPKE private keys.
|
//! is **persistent** — it holds the key store that maps init-key references
|
||||||
|
//! to HPKE private keys (classical or hybrid).
|
||||||
//! openmls's `new_from_welcome` reads those private keys from the key store to
|
//! openmls's `new_from_welcome` reads those private keys from the key store to
|
||||||
//! decrypt the Welcome, so the same backend instance must be used from
|
//! decrypt the Welcome, so the same backend instance must be used from
|
||||||
//! `generate_key_package` through `join_group`.
|
//! `generate_key_package` through `join_group`.
|
||||||
@@ -25,7 +26,7 @@
|
|||||||
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
|
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
|
||||||
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
|
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::{path::Path, sync::Arc};
|
||||||
|
|
||||||
use openmls::prelude::{
|
use openmls::prelude::{
|
||||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
||||||
@@ -37,6 +38,7 @@ use openmls_traits::OpenMlsCryptoProvider;
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
error::CoreError,
|
error::CoreError,
|
||||||
|
hybrid_crypto::HybridCryptoProvider,
|
||||||
identity::IdentityKeypair,
|
identity::IdentityKeypair,
|
||||||
keystore::{DiskKeyStore, StoreCrypto},
|
keystore::{DiskKeyStore, StoreCrypto},
|
||||||
};
|
};
|
||||||
@@ -49,6 +51,9 @@ const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA2
|
|||||||
|
|
||||||
/// Per-client MLS state: identity keypair, crypto backend, and optional group.
|
/// Per-client MLS state: identity keypair, crypto backend, and optional group.
|
||||||
///
|
///
|
||||||
|
/// Generic over the crypto provider `P`: [`StoreCrypto`] (default, classical)
|
||||||
|
/// or [`HybridCryptoProvider`] (M7, post-quantum hybrid KEM).
|
||||||
|
///
|
||||||
/// # Lifecycle
|
/// # Lifecycle
|
||||||
///
|
///
|
||||||
/// ```text
|
/// ```text
|
||||||
@@ -60,10 +65,10 @@ const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA2
|
|||||||
/// ├─ send_message(msg) → encrypt application data
|
/// ├─ send_message(msg) → encrypt application data
|
||||||
/// └─ receive_message(b) → decrypt; returns Some(plaintext) or None
|
/// └─ receive_message(b) → decrypt; returns Some(plaintext) or None
|
||||||
/// ```
|
/// ```
|
||||||
pub struct GroupMember {
|
pub struct GroupMember<P: OpenMlsCryptoProvider = StoreCrypto> {
|
||||||
/// Persistent crypto backend. Holds the in-memory key store with HPKE
|
/// Crypto backend (classical or hybrid). Holds the key store with HPKE
|
||||||
/// private keys created during `generate_key_package`.
|
/// private keys created during `generate_key_package`.
|
||||||
backend: StoreCrypto,
|
backend: P,
|
||||||
/// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
|
/// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
|
||||||
identity: Arc<IdentityKeypair>,
|
identity: Arc<IdentityKeypair>,
|
||||||
/// Active MLS group, if any.
|
/// Active MLS group, if any.
|
||||||
@@ -72,12 +77,22 @@ pub struct GroupMember {
|
|||||||
config: MlsGroupConfig,
|
config: MlsGroupConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GroupMember {
|
impl GroupMember<StoreCrypto> {
|
||||||
/// Create a new `GroupMember` with a fresh crypto backend.
|
/// Create a new `GroupMember` with a fresh crypto backend (classical X25519).
|
||||||
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
|
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
|
||||||
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
|
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Create a `GroupMember` with a persistent keystore at `path`.
|
||||||
|
pub fn new_persistent(
|
||||||
|
identity: Arc<IdentityKeypair>,
|
||||||
|
path: impl AsRef<Path>,
|
||||||
|
) -> Result<Self, CoreError> {
|
||||||
|
let key_store = DiskKeyStore::persistent(path)
|
||||||
|
.map_err(|e| CoreError::Io(format!("keystore: {e}")))?;
|
||||||
|
Ok(Self::new_with_state(identity, key_store, None))
|
||||||
|
}
|
||||||
|
|
||||||
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
|
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
|
||||||
pub fn new_with_state(
|
pub fn new_with_state(
|
||||||
identity: Arc<IdentityKeypair>,
|
identity: Arc<IdentityKeypair>,
|
||||||
@@ -95,6 +110,41 @@ impl GroupMember {
|
|||||||
config,
|
config,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GroupMember<HybridCryptoProvider> {
|
||||||
|
/// Create a `GroupMember` that uses post-quantum hybrid KEM (X25519 + ML-KEM-768) for HPKE.
|
||||||
|
///
|
||||||
|
/// All members of a group must use the same provider type: if the creator uses
|
||||||
|
/// `new_with_hybrid`, KeyPackages will have hybrid init keys and joiners must
|
||||||
|
/// also use `new_with_hybrid` to decrypt the Welcome.
|
||||||
|
pub fn new_with_hybrid(
|
||||||
|
identity: Arc<IdentityKeypair>,
|
||||||
|
key_store: DiskKeyStore,
|
||||||
|
) -> Self {
|
||||||
|
Self::new_with_state_hybrid(identity, key_store, None)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a PQ `GroupMember` from persisted state (identity, key store, optional group).
|
||||||
|
pub fn new_with_state_hybrid(
|
||||||
|
identity: Arc<IdentityKeypair>,
|
||||||
|
key_store: DiskKeyStore,
|
||||||
|
group: Option<MlsGroup>,
|
||||||
|
) -> Self {
|
||||||
|
let config = MlsGroupConfig::builder()
|
||||||
|
.use_ratchet_tree_extension(true)
|
||||||
|
.build();
|
||||||
|
|
||||||
|
Self {
|
||||||
|
backend: HybridCryptoProvider::new(key_store),
|
||||||
|
identity,
|
||||||
|
group,
|
||||||
|
config,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P: OpenMlsCryptoProvider> GroupMember<P> {
|
||||||
|
|
||||||
// ── KeyPackage ────────────────────────────────────────────────────────────
|
// ── KeyPackage ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
@@ -185,7 +235,7 @@ impl GroupMember {
|
|||||||
/// group exists, or openmls fails.
|
/// group exists, or openmls fails.
|
||||||
pub fn add_member(
|
pub fn add_member(
|
||||||
&mut self,
|
&mut self,
|
||||||
key_package_bytes: &[u8],
|
mut key_package_bytes: &[u8],
|
||||||
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
|
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
|
||||||
let group = self
|
let group = self
|
||||||
.group
|
.group
|
||||||
@@ -196,7 +246,7 @@ impl GroupMember {
|
|||||||
// TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
|
// TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
|
||||||
// which verifies the signature and returns a trusted KeyPackage.
|
// which verifies the signature and returns a trusted KeyPackage.
|
||||||
let key_package: KeyPackage =
|
let key_package: KeyPackage =
|
||||||
KeyPackageIn::tls_deserialize(&mut key_package_bytes.as_ref())
|
KeyPackageIn::tls_deserialize(&mut key_package_bytes)
|
||||||
.map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
|
.map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
|
||||||
.validate(self.backend.crypto(), ProtocolVersion::Mls10)
|
.validate(self.backend.crypto(), ProtocolVersion::Mls10)
|
||||||
.map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;
|
||||||
@@ -234,9 +284,9 @@ impl GroupMember {
|
|||||||
/// KeyPackage, or openmls validation fails.
|
/// KeyPackage, or openmls validation fails.
|
||||||
///
|
///
|
||||||
/// [`generate_key_package`]: Self::generate_key_package
|
/// [`generate_key_package`]: Self::generate_key_package
|
||||||
pub fn join_group(&mut self, welcome_bytes: &[u8]) -> Result<(), CoreError> {
|
pub fn join_group(&mut self, mut welcome_bytes: &[u8]) -> Result<(), CoreError> {
|
||||||
// Deserialise MlsMessageIn, then extract the inner Welcome.
|
// Deserialise MlsMessageIn, then extract the inner Welcome.
|
||||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
|
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
|
||||||
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
|
||||||
|
|
||||||
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
|
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
|
||||||
@@ -291,13 +341,13 @@ impl GroupMember {
|
|||||||
///
|
///
|
||||||
/// Returns [`CoreError::Mls`] if the message is malformed, fails
|
/// Returns [`CoreError::Mls`] if the message is malformed, fails
|
||||||
/// authentication, or the group state is inconsistent.
|
/// authentication, or the group state is inconsistent.
|
||||||
pub fn receive_message(&mut self, bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
|
pub fn receive_message(&mut self, mut bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
|
||||||
let group = self
|
let group = self
|
||||||
.group
|
.group
|
||||||
.as_mut()
|
.as_mut()
|
||||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||||
|
|
||||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
|
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
|
||||||
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
||||||
|
|
||||||
// into_protocol_message() is feature-gated; extract() + manual construction is not.
|
// into_protocol_message() is feature-gated; extract() + manual construction is not.
|
||||||
@@ -332,6 +382,58 @@ impl GroupMember {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Process an incoming TLS-encoded MLS message and return sender identity + plaintext for application messages.
|
||||||
|
///
|
||||||
|
/// Same as [`receive_message`], but for Application messages returns
|
||||||
|
/// `Some((sender_identity_bytes, plaintext))` so the client can display who sent the message.
|
||||||
|
/// `sender_identity_bytes` is the MLS credential identity (e.g. Ed25519 public key for Basic credential).
|
||||||
|
///
|
||||||
|
/// Returns `Ok(None)` for Commit and Proposal messages (group state is updated internally).
|
||||||
|
pub fn receive_message_with_sender(
|
||||||
|
&mut self,
|
||||||
|
mut bytes: &[u8],
|
||||||
|
) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
|
||||||
|
let group = self
|
||||||
|
.group
|
||||||
|
.as_mut()
|
||||||
|
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||||
|
|
||||||
|
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
|
||||||
|
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
||||||
|
|
||||||
|
let protocol_message = match msg_in.extract() {
|
||||||
|
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
|
||||||
|
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
|
||||||
|
_ => return Err(CoreError::Mls("not a protocol message".into())),
|
||||||
|
};
|
||||||
|
|
||||||
|
let processed = group
|
||||||
|
.process_message(&self.backend, protocol_message)
|
||||||
|
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
|
||||||
|
|
||||||
|
let sender_identity = processed.credential().identity().to_vec();
|
||||||
|
|
||||||
|
match processed.into_content() {
|
||||||
|
ProcessedMessageContent::ApplicationMessage(app) => {
|
||||||
|
Ok(Some((sender_identity, app.into_bytes())))
|
||||||
|
}
|
||||||
|
ProcessedMessageContent::StagedCommitMessage(staged) => {
|
||||||
|
group
|
||||||
|
.merge_staged_commit(&self.backend, *staged)
|
||||||
|
.map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
ProcessedMessageContent::ProposalMessage(proposal) => {
|
||||||
|
group.store_pending_proposal(*proposal);
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
|
||||||
|
group.store_pending_proposal(*proposal);
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ── Accessors ─────────────────────────────────────────────────────────────
|
// ── Accessors ─────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
/// Return the MLS group ID bytes, or `None` if no group is active.
|
/// Return the MLS group ID bytes, or `None` if no group is active.
|
||||||
@@ -352,7 +454,7 @@ impl GroupMember {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Return a reference to the underlying crypto backend.
|
/// Return a reference to the underlying crypto backend.
|
||||||
pub fn backend(&self) -> &StoreCrypto {
|
pub fn backend(&self) -> &P {
|
||||||
&self.backend
|
&self.backend
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -398,45 +500,84 @@ impl GroupMember {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
/// Full two-party MLS round-trip: create group → add member → exchange messages.
|
/// Full two-party MLS round-trip: creator creates group, adds joiner, then they exchange messages.
|
||||||
#[test]
|
#[test]
|
||||||
fn two_party_mls_round_trip() {
|
fn two_party_mls_round_trip() {
|
||||||
let alice_id = Arc::new(IdentityKeypair::generate());
|
let creator_id = Arc::new(IdentityKeypair::generate());
|
||||||
let bob_id = Arc::new(IdentityKeypair::generate());
|
let joiner_id = Arc::new(IdentityKeypair::generate());
|
||||||
|
|
||||||
let mut alice = GroupMember::new(Arc::clone(&alice_id));
|
let mut creator = GroupMember::new(Arc::clone(&creator_id));
|
||||||
let mut bob = GroupMember::new(Arc::clone(&bob_id));
|
let mut joiner = GroupMember::new(Arc::clone(&joiner_id));
|
||||||
|
|
||||||
// Bob generates a KeyPackage (stored in bob's backend key store).
|
let joiner_kp = joiner
|
||||||
let bob_kp = bob.generate_key_package().expect("Bob KeyPackage");
|
.generate_key_package()
|
||||||
|
.expect("joiner KeyPackage");
|
||||||
|
|
||||||
// Alice creates the group.
|
creator
|
||||||
alice
|
|
||||||
.create_group(b"test-group-m3")
|
.create_group(b"test-group-m3")
|
||||||
.expect("Alice create group");
|
.expect("creator create group");
|
||||||
|
|
||||||
// Alice adds Bob → (commit, welcome).
|
let (_, welcome) = creator
|
||||||
// Alice is the sole existing member, so she merges the commit herself.
|
.add_member(&joiner_kp)
|
||||||
let (_, welcome) = alice.add_member(&bob_kp).expect("Alice add Bob");
|
.expect("creator add joiner");
|
||||||
|
|
||||||
// Bob joins via the Welcome. His backend holds the matching init key.
|
joiner.join_group(&welcome).expect("joiner join group");
|
||||||
bob.join_group(&welcome).expect("Bob join group");
|
|
||||||
|
|
||||||
// Alice → Bob: application message.
|
let ct_creator = creator.send_message(b"hello").expect("creator send");
|
||||||
let ct_a = alice.send_message(b"hello bob").expect("Alice send");
|
let pt_joiner = joiner
|
||||||
let pt_b = bob
|
.receive_message(&ct_creator)
|
||||||
.receive_message(&ct_a)
|
.expect("joiner recv")
|
||||||
.expect("Bob recv")
|
.expect("application message");
|
||||||
.expect("should be application message");
|
assert_eq!(pt_joiner, b"hello");
|
||||||
assert_eq!(pt_b, b"hello bob");
|
|
||||||
|
|
||||||
// Bob → Alice: reply.
|
let ct_joiner = joiner.send_message(b"hello back").expect("joiner send");
|
||||||
let ct_b = bob.send_message(b"hello alice").expect("Bob send");
|
let pt_creator = creator
|
||||||
let pt_a = alice
|
.receive_message(&ct_joiner)
|
||||||
.receive_message(&ct_b)
|
.expect("creator recv")
|
||||||
.expect("Alice recv")
|
.expect("application message");
|
||||||
.expect("should be application message");
|
assert_eq!(pt_creator, b"hello back");
|
||||||
assert_eq!(pt_a, b"hello alice");
|
}
|
||||||
|
|
||||||
|
/// M7: Full two-party MLS round-trip with post-quantum hybrid KEM (HybridCryptoProvider).
|
||||||
|
#[test]
|
||||||
|
fn two_party_mls_round_trip_hybrid() {
|
||||||
|
let creator_id = Arc::new(IdentityKeypair::generate());
|
||||||
|
let joiner_id = Arc::new(IdentityKeypair::generate());
|
||||||
|
let key_store_creator = DiskKeyStore::ephemeral();
|
||||||
|
let key_store_joiner = DiskKeyStore::ephemeral();
|
||||||
|
|
||||||
|
let mut creator =
|
||||||
|
GroupMember::<HybridCryptoProvider>::new_with_hybrid(Arc::clone(&creator_id), key_store_creator);
|
||||||
|
let mut joiner =
|
||||||
|
GroupMember::<HybridCryptoProvider>::new_with_hybrid(Arc::clone(&joiner_id), key_store_joiner);
|
||||||
|
|
||||||
|
let joiner_kp = joiner
|
||||||
|
.generate_key_package()
|
||||||
|
.expect("joiner KeyPackage (hybrid)");
|
||||||
|
|
||||||
|
creator
|
||||||
|
.create_group(b"test-group-m7-hybrid")
|
||||||
|
.expect("creator create group");
|
||||||
|
|
||||||
|
let (_, welcome) = creator
|
||||||
|
.add_member(&joiner_kp)
|
||||||
|
.expect("creator add joiner");
|
||||||
|
|
||||||
|
joiner.join_group(&welcome).expect("joiner join group");
|
||||||
|
|
||||||
|
let ct_creator = creator.send_message(b"hello pq").expect("creator send");
|
||||||
|
let pt_joiner = joiner
|
||||||
|
.receive_message(&ct_creator)
|
||||||
|
.expect("joiner recv")
|
||||||
|
.expect("application message");
|
||||||
|
assert_eq!(pt_joiner, b"hello pq");
|
||||||
|
|
||||||
|
let ct_joiner = joiner.send_message(b"hello back pq").expect("joiner send");
|
||||||
|
let pt_creator = creator
|
||||||
|
.receive_message(&ct_joiner)
|
||||||
|
.expect("creator recv")
|
||||||
|
.expect("application message");
|
||||||
|
assert_eq!(pt_creator, b"hello back pq");
|
||||||
}
|
}
|
||||||
|
|
||||||
/// `group_id()` returns None before create_group, Some afterwards.
|
/// `group_id()` returns None before create_group, Some afterwards.
|
||||||
|
|||||||
442
crates/quicnprotochat-core/src/hybrid_crypto.rs
Normal file
442
crates/quicnprotochat-core/src/hybrid_crypto.rs
Normal file
@@ -0,0 +1,442 @@
|
|||||||
|
//! Post-quantum hybrid crypto provider for OpenMLS (M7 PoC).
|
||||||
|
//!
|
||||||
|
//! Uses X25519 + ML-KEM-768 hybrid KEM for HPKE operations where openmls
|
||||||
|
//! would use DHKEM(X25519), and delegates all other operations (AEAD, hash,
|
||||||
|
//! signatures, KDF, randomness) to `openmls_rust_crypto::RustCrypto`.
|
||||||
|
//!
|
||||||
|
//! # Key format
|
||||||
|
//!
|
||||||
|
//! When the provider sees a **hybrid public key** (length `HYBRID_PUBLIC_KEY_LEN` =
|
||||||
|
//! 32 + 1184 bytes) or **hybrid private key** (length `HYBRID_PRIVATE_KEY_LEN` =
|
||||||
|
//! 32 + 2400 bytes), it uses `hybrid_kem` for HPKE. Otherwise it delegates to
|
||||||
|
//! RustCrypto (classical X25519 HPKE).
|
||||||
|
//!
|
||||||
|
//! # MLS compatibility
|
||||||
|
//!
|
||||||
|
//! The current MLS ciphersuite (MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519)
|
||||||
|
//! uses 32-byte X25519 init keys in the wire format. This provider can produce
|
||||||
|
//! and consume **hybrid** init keys (1216-byte public, 2432-byte private), but
|
||||||
|
//! that is a non-standard extension: other MLS implementations will not
|
||||||
|
//! accept KeyPackages with hybrid init keys unless they implement the same
|
||||||
|
//! extension. This PoC validates that the OpenMLS trait surface is satisfiable
|
||||||
|
//! with a custom HPKE backend; full interoperability would require a new
|
||||||
|
//! ciphersuite or protocol extension.
|
||||||
|
|
||||||
|
use openmls_rust_crypto::RustCrypto;
|
||||||
|
use openmls_traits::{
|
||||||
|
crypto::OpenMlsCrypto,
|
||||||
|
types::{
|
||||||
|
CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType,
|
||||||
|
},
|
||||||
|
OpenMlsCryptoProvider,
|
||||||
|
};
|
||||||
|
use tls_codec::SecretVLBytes;
|
||||||
|
|
||||||
|
use crate::hybrid_kem::{
|
||||||
|
hybrid_decapsulate_only, hybrid_decrypt, hybrid_encapsulate_only, hybrid_encrypt,
|
||||||
|
hybrid_export, HybridKeypair, HybridPublicKey,
|
||||||
|
HYBRID_KEM_OUTPUT_LEN, HYBRID_PRIVATE_KEY_LEN, HYBRID_PUBLIC_KEY_LEN,
|
||||||
|
};
|
||||||
|
use crate::keystore::DiskKeyStore;
|
||||||
|
|
||||||
|
// Re-export types used by OpenMlsCrypto (full path for clarity).
|
||||||
|
use openmls_traits::types::{
|
||||||
|
AeadType, Ciphersuite, HashType, SignatureScheme,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Crypto backend that uses hybrid KEM for HPKE when keys are in hybrid format,
|
||||||
|
/// and delegates everything else to RustCrypto.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct HybridCrypto {
|
||||||
|
rust_crypto: RustCrypto,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HybridCrypto {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
rust_crypto: RustCrypto::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Expose the underlying RustCrypto for rand() and delegation.
|
||||||
|
pub fn rust_crypto(&self) -> &RustCrypto {
|
||||||
|
&self.rust_crypto
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_hybrid_public_key(pk_r: &[u8]) -> bool {
|
||||||
|
pk_r.len() == HYBRID_PUBLIC_KEY_LEN
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_hybrid_private_key(sk_r: &[u8]) -> bool {
|
||||||
|
sk_r.len() == HYBRID_PRIVATE_KEY_LEN
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for HybridCrypto {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OpenMlsCrypto for HybridCrypto {
|
||||||
|
fn supports(&self, ciphersuite: Ciphersuite) -> Result<(), CryptoError> {
|
||||||
|
self.rust_crypto.supports(ciphersuite)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn supported_ciphersuites(&self) -> Vec<Ciphersuite> {
|
||||||
|
self.rust_crypto.supported_ciphersuites()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hkdf_extract(
|
||||||
|
&self,
|
||||||
|
hash_type: HashType,
|
||||||
|
salt: &[u8],
|
||||||
|
ikm: &[u8],
|
||||||
|
) -> Result<SecretVLBytes, CryptoError> {
|
||||||
|
self.rust_crypto.hkdf_extract(hash_type, salt, ikm)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hkdf_expand(
|
||||||
|
&self,
|
||||||
|
hash_type: HashType,
|
||||||
|
prk: &[u8],
|
||||||
|
info: &[u8],
|
||||||
|
okm_len: usize,
|
||||||
|
) -> Result<SecretVLBytes, CryptoError> {
|
||||||
|
self.rust_crypto.hkdf_expand(hash_type, prk, info, okm_len)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hash(&self, hash_type: HashType, data: &[u8]) -> Result<Vec<u8>, CryptoError> {
|
||||||
|
self.rust_crypto.hash(hash_type, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn aead_encrypt(
|
||||||
|
&self,
|
||||||
|
alg: AeadType,
|
||||||
|
key: &[u8],
|
||||||
|
data: &[u8],
|
||||||
|
nonce: &[u8],
|
||||||
|
aad: &[u8],
|
||||||
|
) -> Result<Vec<u8>, CryptoError> {
|
||||||
|
self.rust_crypto.aead_encrypt(alg, key, data, nonce, aad)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn aead_decrypt(
|
||||||
|
&self,
|
||||||
|
alg: AeadType,
|
||||||
|
key: &[u8],
|
||||||
|
ct_tag: &[u8],
|
||||||
|
nonce: &[u8],
|
||||||
|
aad: &[u8],
|
||||||
|
) -> Result<Vec<u8>, CryptoError> {
|
||||||
|
self.rust_crypto.aead_decrypt(alg, key, ct_tag, nonce, aad)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn signature_key_gen(&self, alg: SignatureScheme) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
|
||||||
|
self.rust_crypto.signature_key_gen(alg)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn verify_signature(
|
||||||
|
&self,
|
||||||
|
alg: SignatureScheme,
|
||||||
|
data: &[u8],
|
||||||
|
pk: &[u8],
|
||||||
|
signature: &[u8],
|
||||||
|
) -> Result<(), CryptoError> {
|
||||||
|
self.rust_crypto.verify_signature(alg, data, pk, signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sign(&self, alg: SignatureScheme, data: &[u8], key: &[u8]) -> Result<Vec<u8>, CryptoError> {
|
||||||
|
self.rust_crypto.sign(alg, data, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hpke_seal(
|
||||||
|
&self,
|
||||||
|
config: HpkeConfig,
|
||||||
|
pk_r: &[u8],
|
||||||
|
info: &[u8],
|
||||||
|
aad: &[u8],
|
||||||
|
ptxt: &[u8],
|
||||||
|
) -> HpkeCiphertext {
|
||||||
|
if Self::is_hybrid_public_key(pk_r) {
|
||||||
|
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
|
||||||
|
Ok(pk) => pk,
|
||||||
|
Err(_) => return self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
|
||||||
|
};
|
||||||
|
match hybrid_encrypt(&recipient_pk, ptxt) {
|
||||||
|
Ok(envelope) => {
|
||||||
|
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
|
||||||
|
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
|
||||||
|
HpkeCiphertext {
|
||||||
|
kem_output: kem_output.into(),
|
||||||
|
ciphertext: ciphertext.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(_) => self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hpke_open(
|
||||||
|
&self,
|
||||||
|
config: HpkeConfig,
|
||||||
|
input: &HpkeCiphertext,
|
||||||
|
sk_r: &[u8],
|
||||||
|
info: &[u8],
|
||||||
|
aad: &[u8],
|
||||||
|
) -> Result<Vec<u8>, CryptoError> {
|
||||||
|
if Self::is_hybrid_private_key(sk_r) {
|
||||||
|
let keypair = match HybridKeypair::from_private_bytes(sk_r) {
|
||||||
|
Ok(kp) => kp,
|
||||||
|
Err(_) => return self.rust_crypto.hpke_open(config, input, sk_r, info, aad),
|
||||||
|
};
|
||||||
|
let envelope: Vec<u8> = input
|
||||||
|
.kem_output.as_slice()
|
||||||
|
.iter()
|
||||||
|
.chain(input.ciphertext.as_slice())
|
||||||
|
.copied()
|
||||||
|
.collect();
|
||||||
|
hybrid_decrypt(&keypair, &envelope).map_err(|_| CryptoError::HpkeDecryptionError)
|
||||||
|
} else {
|
||||||
|
self.rust_crypto.hpke_open(config, input, sk_r, info, aad)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hpke_setup_sender_and_export(
|
||||||
|
&self,
|
||||||
|
config: HpkeConfig,
|
||||||
|
pk_r: &[u8],
|
||||||
|
info: &[u8],
|
||||||
|
exporter_context: &[u8],
|
||||||
|
exporter_length: usize,
|
||||||
|
) -> Result<(Vec<u8>, ExporterSecret), CryptoError> {
|
||||||
|
if Self::is_hybrid_public_key(pk_r) {
|
||||||
|
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
|
||||||
|
Ok(pk) => pk,
|
||||||
|
Err(_) => {
|
||||||
|
return self.rust_crypto.hpke_setup_sender_and_export(
|
||||||
|
config, pk_r, info, exporter_context, exporter_length,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let (kem_output, shared_secret) =
|
||||||
|
hybrid_encapsulate_only(&recipient_pk).map_err(|_| CryptoError::SenderSetupError)?;
|
||||||
|
let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
|
||||||
|
Ok((kem_output, exported.into()))
|
||||||
|
} else {
|
||||||
|
self.rust_crypto.hpke_setup_sender_and_export(
|
||||||
|
config, pk_r, info, exporter_context, exporter_length,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hpke_setup_receiver_and_export(
|
||||||
|
&self,
|
||||||
|
config: HpkeConfig,
|
||||||
|
enc: &[u8],
|
||||||
|
sk_r: &[u8],
|
||||||
|
info: &[u8],
|
||||||
|
exporter_context: &[u8],
|
||||||
|
exporter_length: usize,
|
||||||
|
) -> Result<ExporterSecret, CryptoError> {
|
||||||
|
if Self::is_hybrid_private_key(sk_r) {
|
||||||
|
let keypair = HybridKeypair::from_private_bytes(sk_r)
|
||||||
|
.map_err(|_| CryptoError::ReceiverSetupError)?;
|
||||||
|
let shared_secret =
|
||||||
|
hybrid_decapsulate_only(&keypair, enc).map_err(|_| CryptoError::ReceiverSetupError)?;
|
||||||
|
let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
|
||||||
|
Ok(exported.into())
|
||||||
|
} else {
|
||||||
|
self.rust_crypto.hpke_setup_receiver_and_export(
|
||||||
|
config, enc, sk_r, info, exporter_context, exporter_length,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> HpkeKeyPair {
|
||||||
|
if config.0 == HpkeKemType::DhKem25519 {
|
||||||
|
let kp = HybridKeypair::derive_from_ikm(ikm);
|
||||||
|
HpkeKeyPair {
|
||||||
|
private: kp.private_to_bytes().into(),
|
||||||
|
public: kp.public_key().to_bytes(),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
self.rust_crypto.derive_hpke_keypair(config, ikm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// OpenMLS crypto provider that uses hybrid KEM for HPKE (when keys are in
|
||||||
|
/// hybrid format) and delegates the rest to RustCrypto.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct HybridCryptoProvider {
|
||||||
|
crypto: HybridCrypto,
|
||||||
|
key_store: DiskKeyStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HybridCryptoProvider {
|
||||||
|
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||||
|
Self {
|
||||||
|
crypto: HybridCrypto::new(),
|
||||||
|
key_store,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for HybridCryptoProvider {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new(DiskKeyStore::ephemeral())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OpenMlsCryptoProvider for HybridCryptoProvider {
|
||||||
|
type CryptoProvider = HybridCrypto;
|
||||||
|
type RandProvider = RustCrypto;
|
||||||
|
type KeyStoreProvider = DiskKeyStore;
|
||||||
|
|
||||||
|
fn crypto(&self) -> &Self::CryptoProvider {
|
||||||
|
&self.crypto
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rand(&self) -> &Self::RandProvider {
|
||||||
|
self.crypto.rust_crypto()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key_store(&self) -> &Self::KeyStoreProvider {
|
||||||
|
&self.key_store
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use openmls_traits::types::HpkeKdfType;
|
||||||
|
|
||||||
|
fn hpke_config_dhkem_x25519() -> HpkeConfig {
|
||||||
|
HpkeConfig(
|
||||||
|
HpkeKemType::DhKem25519,
|
||||||
|
HpkeKdfType::HkdfSha256,
|
||||||
|
openmls_traits::types::HpkeAeadType::AesGcm128,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// HPKE path with hybrid keys: derive_hpke_keypair (hybrid) -> hpke_seal -> hpke_open.
|
||||||
|
#[test]
|
||||||
|
fn hybrid_hpke_seal_open_round_trip() {
|
||||||
|
let crypto = HybridCrypto::new();
|
||||||
|
let ikm = b"test-ikm-for-hybrid-hpke-keypair";
|
||||||
|
|
||||||
|
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||||
|
assert_eq!(keypair.public.len(), HYBRID_PUBLIC_KEY_LEN);
|
||||||
|
assert_eq!(keypair.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN);
|
||||||
|
|
||||||
|
let plaintext = b"hello post-quantum MLS";
|
||||||
|
let info = b"mls 1.0 test";
|
||||||
|
let aad = b"additional data";
|
||||||
|
|
||||||
|
let ct = crypto.hpke_seal(
|
||||||
|
hpke_config_dhkem_x25519(),
|
||||||
|
&keypair.public,
|
||||||
|
info,
|
||||||
|
aad,
|
||||||
|
plaintext,
|
||||||
|
);
|
||||||
|
assert!(!ct.kem_output.as_slice().is_empty());
|
||||||
|
assert!(!ct.ciphertext.as_slice().is_empty());
|
||||||
|
|
||||||
|
let decrypted = crypto
|
||||||
|
.hpke_open(
|
||||||
|
hpke_config_dhkem_x25519(),
|
||||||
|
&ct,
|
||||||
|
keypair.private.as_ref(),
|
||||||
|
info,
|
||||||
|
aad,
|
||||||
|
)
|
||||||
|
.expect("hpke_open with hybrid keys");
|
||||||
|
assert_eq!(decrypted.as_slice(), plaintext);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// HPKE exporter path: setup_sender_and_export then setup_receiver_and_export.
|
||||||
|
#[test]
|
||||||
|
fn hybrid_hpke_setup_sender_receiver_export() {
|
||||||
|
let crypto = HybridCrypto::new();
|
||||||
|
let ikm = b"exporter-ikm";
|
||||||
|
|
||||||
|
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||||
|
let info = b"";
|
||||||
|
let exporter_context = b"MLS 1.0 external init";
|
||||||
|
let exporter_length = 32;
|
||||||
|
|
||||||
|
let (kem_output, sender_exported) = crypto
|
||||||
|
.hpke_setup_sender_and_export(
|
||||||
|
hpke_config_dhkem_x25519(),
|
||||||
|
&keypair.public,
|
||||||
|
info,
|
||||||
|
exporter_context,
|
||||||
|
exporter_length,
|
||||||
|
)
|
||||||
|
.expect("sender and export");
|
||||||
|
|
||||||
|
assert_eq!(kem_output.len(), HYBRID_KEM_OUTPUT_LEN);
|
||||||
|
assert_eq!(sender_exported.as_ref().len(), exporter_length);
|
||||||
|
|
||||||
|
let receiver_exported = crypto
|
||||||
|
.hpke_setup_receiver_and_export(
|
||||||
|
hpke_config_dhkem_x25519(),
|
||||||
|
&kem_output,
|
||||||
|
keypair.private.as_ref(),
|
||||||
|
info,
|
||||||
|
exporter_context,
|
||||||
|
exporter_length,
|
||||||
|
)
|
||||||
|
.expect("receiver and export");
|
||||||
|
|
||||||
|
assert_eq!(sender_exported.as_ref(), receiver_exported.as_ref());
|
||||||
|
}
|
||||||
|
|
||||||
|
/// KeyPackage generation with HybridCryptoProvider (validates full HPKE path in MLS).
|
||||||
|
#[test]
|
||||||
|
fn key_package_generation_with_hybrid_provider() {
|
||||||
|
use openmls::prelude::{
|
||||||
|
Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||||
|
};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use tls_codec::Serialize;
|
||||||
|
|
||||||
|
use crate::identity::IdentityKeypair;
|
||||||
|
|
||||||
|
const CIPHERSUITE: Ciphersuite =
|
||||||
|
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||||
|
|
||||||
|
let provider = HybridCryptoProvider::default();
|
||||||
|
let identity = Arc::new(IdentityKeypair::generate());
|
||||||
|
|
||||||
|
let credential = Credential::new(
|
||||||
|
identity.public_key_bytes().to_vec(),
|
||||||
|
CredentialType::Basic,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
let credential_with_key = CredentialWithKey {
|
||||||
|
credential,
|
||||||
|
signature_key: identity.public_key_bytes().to_vec().into(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let key_package = KeyPackage::builder()
|
||||||
|
.build(
|
||||||
|
CryptoConfig::with_default_version(CIPHERSUITE),
|
||||||
|
&provider,
|
||||||
|
identity.as_ref(),
|
||||||
|
credential_with_key,
|
||||||
|
)
|
||||||
|
.expect("KeyPackage with hybrid HPKE");
|
||||||
|
|
||||||
|
let bytes = key_package
|
||||||
|
.tls_serialize_detached()
|
||||||
|
.expect("serialize KeyPackage");
|
||||||
|
assert!(!bytes.is_empty());
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -28,7 +28,7 @@ use ml_kem::{
|
|||||||
kem::{Decapsulate, Encapsulate},
|
kem::{Decapsulate, Encapsulate},
|
||||||
EncodedSizeUser, KemCore, MlKem768, MlKem768Params,
|
EncodedSizeUser, KemCore, MlKem768, MlKem768Params,
|
||||||
};
|
};
|
||||||
use rand::{rngs::OsRng, RngCore};
|
use rand::{rngs::OsRng, rngs::StdRng, CryptoRng, RngCore, SeedableRng};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use sha2::Sha256;
|
use sha2::Sha256;
|
||||||
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
|
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
|
||||||
@@ -55,6 +55,15 @@ pub const MLKEM_DK_LEN: usize = 2400;
|
|||||||
/// Envelope header: version(1) + x25519 eph pk(32) + mlkem ct(1088) + nonce(12).
|
/// Envelope header: version(1) + x25519 eph pk(32) + mlkem ct(1088) + nonce(12).
|
||||||
const HEADER_LEN: usize = 1 + 32 + MLKEM_CT_LEN + 12;
|
const HEADER_LEN: usize = 1 + 32 + MLKEM_CT_LEN + 12;
|
||||||
|
|
||||||
|
/// KEM output length (version + x25519 eph pk + mlkem ct) for HPKE adapter.
|
||||||
|
pub const HYBRID_KEM_OUTPUT_LEN: usize = 1 + 32 + MLKEM_CT_LEN;
|
||||||
|
|
||||||
|
/// Hybrid public key length: x25519(32) + mlkem_ek(1184). Used to detect hybrid keys in MLS.
|
||||||
|
pub const HYBRID_PUBLIC_KEY_LEN: usize = 32 + MLKEM_EK_LEN;
|
||||||
|
|
||||||
|
/// Hybrid private key length: x25519(32) + mlkem_dk(2400). Used to detect hybrid keys in MLS.
|
||||||
|
pub const HYBRID_PRIVATE_KEY_LEN: usize = 32 + MLKEM_DK_LEN;
|
||||||
|
|
||||||
// ── Error type ──────────────────────────────────────────────────────────────
|
// ── Error type ──────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
#[derive(Debug, thiserror::Error)]
|
#[derive(Debug, thiserror::Error)]
|
||||||
@@ -109,12 +118,20 @@ pub struct HybridPublicKey {
|
|||||||
pub mlkem_ek: Vec<u8>,
|
pub mlkem_ek: Vec<u8>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// HKDF info for deriving HPKE keypair seed from IKM (MLS compatibility).
|
||||||
|
const HKDF_INFO_HPKE_KEYPAIR: &[u8] = b"quicnprotochat-hybrid-hpke-keypair-v1";
|
||||||
|
|
||||||
impl HybridKeypair {
|
impl HybridKeypair {
|
||||||
/// Generate a fresh hybrid keypair from OS CSPRNG.
|
/// Generate a fresh hybrid keypair from OS CSPRNG.
|
||||||
pub fn generate() -> Self {
|
pub fn generate() -> Self {
|
||||||
let x25519_sk = StaticSecret::random_from_rng(OsRng);
|
Self::generate_from_rng(&mut OsRng)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generate a hybrid keypair from a seeded RNG (deterministic).
|
||||||
|
pub fn generate_from_rng<R: RngCore + CryptoRng>(rng: &mut R) -> Self {
|
||||||
|
let x25519_sk = StaticSecret::random_from_rng(&mut *rng);
|
||||||
let x25519_pk = X25519Public::from(&x25519_sk);
|
let x25519_pk = X25519Public::from(&x25519_sk);
|
||||||
let (mlkem_dk, mlkem_ek) = MlKem768::generate(&mut OsRng);
|
let (mlkem_dk, mlkem_ek) = MlKem768::generate(rng);
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
x25519_sk,
|
x25519_sk,
|
||||||
@@ -124,6 +141,45 @@ impl HybridKeypair {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Derive a deterministic hybrid keypair from IKM (for MLS HPKE key schedule).
|
||||||
|
pub fn derive_from_ikm(ikm: &[u8]) -> Self {
|
||||||
|
let mut seed = [0u8; 32];
|
||||||
|
let hk = Hkdf::<Sha256>::new(None, ikm);
|
||||||
|
hk.expand(HKDF_INFO_HPKE_KEYPAIR, &mut seed)
|
||||||
|
.expect("32 bytes is valid HKDF output");
|
||||||
|
let mut rng = StdRng::from_seed(seed);
|
||||||
|
Self::generate_from_rng(&mut rng)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serialise private key for MLS key store: x25519_sk(32) || mlkem_dk(2400).
|
||||||
|
pub fn private_to_bytes(&self) -> Vec<u8> {
|
||||||
|
let mut out = Vec::with_capacity(HYBRID_PRIVATE_KEY_LEN);
|
||||||
|
out.extend_from_slice(self.x25519_sk.as_bytes());
|
||||||
|
out.extend_from_slice(self.mlkem_dk.as_bytes().as_slice());
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reconstruct a hybrid keypair from private key bytes (from MLS key store).
|
||||||
|
pub fn from_private_bytes(bytes: &[u8]) -> Result<Self, HybridKemError> {
|
||||||
|
if bytes.len() != HYBRID_PRIVATE_KEY_LEN {
|
||||||
|
return Err(HybridKemError::TooShort(bytes.len()));
|
||||||
|
}
|
||||||
|
let x25519_sk = StaticSecret::from(<[u8; 32]>::try_from(&bytes[0..32]).unwrap());
|
||||||
|
let x25519_pk = X25519Public::from(&x25519_sk);
|
||||||
|
|
||||||
|
let mlkem_dk_arr = Array::try_from(&bytes[32..32 + MLKEM_DK_LEN])
|
||||||
|
.map_err(|_| HybridKemError::InvalidMlKemKey)?;
|
||||||
|
let mlkem_dk = DecapsulationKey::<MlKem768Params>::from_bytes(&mlkem_dk_arr);
|
||||||
|
let mlkem_ek = mlkem_dk.encapsulation_key().clone();
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
x25519_sk,
|
||||||
|
x25519_pk,
|
||||||
|
mlkem_dk,
|
||||||
|
mlkem_ek,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
/// Reconstruct from serialised bytes.
|
/// Reconstruct from serialised bytes.
|
||||||
pub fn from_bytes(bytes: &HybridKeypairBytes) -> Result<Self, HybridKemError> {
|
pub fn from_bytes(bytes: &HybridKeypairBytes) -> Result<Self, HybridKemError> {
|
||||||
let x25519_sk = StaticSecret::from(*bytes.x25519_sk);
|
let x25519_sk = StaticSecret::from(*bytes.x25519_sk);
|
||||||
@@ -236,10 +292,7 @@ pub fn hybrid_encrypt(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Decrypt a hybrid envelope using the recipient's private key.
|
/// Decrypt a hybrid envelope using the recipient's private key.
|
||||||
pub fn hybrid_decrypt(
|
pub fn hybrid_decrypt(keypair: &HybridKeypair, envelope: &[u8]) -> Result<Vec<u8>, HybridKemError> {
|
||||||
keypair: &HybridKeypair,
|
|
||||||
envelope: &[u8],
|
|
||||||
) -> Result<Vec<u8>, HybridKemError> {
|
|
||||||
if envelope.len() < HEADER_LEN + 16 {
|
if envelope.len() < HEADER_LEN + 16 {
|
||||||
// 16 = minimum AEAD tag
|
// 16 = minimum AEAD tag
|
||||||
return Err(HybridKemError::TooShort(envelope.len()));
|
return Err(HybridKemError::TooShort(envelope.len()));
|
||||||
@@ -274,8 +327,8 @@ pub fn hybrid_decrypt(
|
|||||||
|
|
||||||
// 2. ML-KEM decapsulation — convert bytes to the ciphertext array type
|
// 2. ML-KEM decapsulation — convert bytes to the ciphertext array type
|
||||||
// that `DecapsulationKey::decapsulate` expects.
|
// that `DecapsulationKey::decapsulate` expects.
|
||||||
let mlkem_ct_arr = Array::try_from(mlkem_ct_bytes)
|
let mlkem_ct_arr =
|
||||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
Array::try_from(mlkem_ct_bytes).map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||||
let mlkem_ss = keypair
|
let mlkem_ss = keypair
|
||||||
.mlkem_dk
|
.mlkem_dk
|
||||||
.decapsulate(&mlkem_ct_arr)
|
.decapsulate(&mlkem_ct_arr)
|
||||||
@@ -293,6 +346,78 @@ pub fn hybrid_decrypt(
|
|||||||
Ok(plaintext)
|
Ok(plaintext)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Encapsulate only: compute shared secret and KEM output (no AEAD).
|
||||||
|
/// Returns `(kem_output, shared_secret)` where `kem_output` is the first
|
||||||
|
/// `HYBRID_KEM_OUTPUT_LEN` bytes of the hybrid envelope and `shared_secret`
|
||||||
|
/// is the 32-byte derived key (same as used for AEAD in `hybrid_encrypt`).
|
||||||
|
/// Used by MLS HPKE exporter (setup_sender_and_export).
|
||||||
|
pub fn hybrid_encapsulate_only(
|
||||||
|
recipient_pk: &HybridPublicKey,
|
||||||
|
) -> Result<(Vec<u8>, [u8; 32]), HybridKemError> {
|
||||||
|
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||||
|
let eph_public = X25519Public::from(&eph_secret);
|
||||||
|
let x25519_recipient = X25519Public::from(recipient_pk.x25519_pk);
|
||||||
|
let x25519_ss = eph_secret.diffie_hellman(&x25519_recipient);
|
||||||
|
|
||||||
|
let mlkem_ek_arr = Array::try_from(recipient_pk.mlkem_ek.as_slice())
|
||||||
|
.map_err(|_| HybridKemError::InvalidMlKemKey)?;
|
||||||
|
let mlkem_ek = EncapsulationKey::<MlKem768Params>::from_bytes(&mlkem_ek_arr);
|
||||||
|
let (mlkem_ct, mlkem_ss) = mlkem_ek
|
||||||
|
.encapsulate(&mut OsRng)
|
||||||
|
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||||
|
|
||||||
|
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||||
|
let shared_secret = aead_key.as_slice().try_into().unwrap();
|
||||||
|
|
||||||
|
let mut kem_output = Vec::with_capacity(HYBRID_KEM_OUTPUT_LEN);
|
||||||
|
kem_output.push(HYBRID_VERSION);
|
||||||
|
kem_output.extend_from_slice(&eph_public.to_bytes());
|
||||||
|
kem_output.extend_from_slice(mlkem_ct.as_slice());
|
||||||
|
|
||||||
|
Ok((kem_output, shared_secret))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Decapsulate only: recover shared secret from KEM output (no AEAD).
|
||||||
|
/// Used by MLS HPKE exporter (setup_receiver_and_export).
|
||||||
|
pub fn hybrid_decapsulate_only(
|
||||||
|
keypair: &HybridKeypair,
|
||||||
|
kem_output: &[u8],
|
||||||
|
) -> Result<[u8; 32], HybridKemError> {
|
||||||
|
if kem_output.len() < HYBRID_KEM_OUTPUT_LEN {
|
||||||
|
return Err(HybridKemError::TooShort(kem_output.len()));
|
||||||
|
}
|
||||||
|
if kem_output[0] != HYBRID_VERSION {
|
||||||
|
return Err(HybridKemError::UnsupportedVersion(kem_output[0]));
|
||||||
|
}
|
||||||
|
|
||||||
|
let eph_pk_bytes: [u8; 32] = kem_output[1..33].try_into().unwrap();
|
||||||
|
let eph_pk = X25519Public::from(eph_pk_bytes);
|
||||||
|
let x25519_ss = keypair.x25519_sk.diffie_hellman(&eph_pk);
|
||||||
|
|
||||||
|
let mlkem_ct_arr = Array::try_from(&kem_output[33..33 + MLKEM_CT_LEN])
|
||||||
|
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||||
|
let mlkem_ss = keypair
|
||||||
|
.mlkem_dk
|
||||||
|
.decapsulate(&mlkem_ct_arr)
|
||||||
|
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||||
|
|
||||||
|
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||||
|
Ok(aead_key.as_slice().try_into().unwrap())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Export a secret from shared secret (MLS HPKE exporter compatibility).
|
||||||
|
/// Uses HKDF-Expand(prk, exporter_context, length) with prk = HKDF-Extract(0, shared_secret).
|
||||||
|
pub fn hybrid_export(
|
||||||
|
shared_secret: &[u8; 32],
|
||||||
|
exporter_context: &[u8],
|
||||||
|
length: usize,
|
||||||
|
) -> Vec<u8> {
|
||||||
|
let hk = Hkdf::<Sha256>::new(None, shared_secret);
|
||||||
|
let mut out = vec![0u8; length];
|
||||||
|
hk.expand(exporter_context, &mut out).expect("valid length");
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
/// Derive AEAD key from the combined X25519 + ML-KEM shared secrets.
|
/// Derive AEAD key from the combined X25519 + ML-KEM shared secrets.
|
||||||
///
|
///
|
||||||
/// The nonce is generated randomly per-encryption rather than derived from
|
/// The nonce is generated randomly per-encryption rather than derived from
|
||||||
@@ -419,10 +544,7 @@ mod tests {
|
|||||||
let restored = HybridKeypair::from_bytes(&bytes).unwrap();
|
let restored = HybridKeypair::from_bytes(&bytes).unwrap();
|
||||||
|
|
||||||
assert_eq!(kp.x25519_pk.to_bytes(), restored.x25519_pk.to_bytes());
|
assert_eq!(kp.x25519_pk.to_bytes(), restored.x25519_pk.to_bytes());
|
||||||
assert_eq!(
|
assert_eq!(kp.public_key().mlkem_ek, restored.public_key().mlkem_ek);
|
||||||
kp.public_key().mlkem_ek,
|
|
||||||
restored.public_key().mlkem_ek
|
|
||||||
);
|
|
||||||
|
|
||||||
// Verify restored keypair can decrypt
|
// Verify restored keypair can decrypt
|
||||||
let pk = kp.public_key();
|
let pk = kp.public_key();
|
||||||
|
|||||||
@@ -18,15 +18,44 @@
|
|||||||
|
|
||||||
use openmls::prelude::{
|
use openmls::prelude::{
|
||||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||||
TlsSerializeTrait,
|
KeyPackageIn, TlsDeserializeTrait, TlsSerializeTrait,
|
||||||
};
|
};
|
||||||
use openmls_rust_crypto::OpenMlsRustCrypto;
|
use openmls_rust_crypto::OpenMlsRustCrypto;
|
||||||
use sha2::{Digest, Sha256};
|
use sha2::{Digest, Sha256};
|
||||||
|
|
||||||
use crate::{error::CoreError, identity::IdentityKeypair};
|
use crate::{error::CoreError, identity::IdentityKeypair};
|
||||||
|
|
||||||
/// The MLS ciphersuite used throughout quicnprotochat.
|
/// The MLS ciphersuite used throughout quicnprotochat (RFC 9420 §17.1).
|
||||||
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
pub const ALLOWED_CIPHERSUITE: Ciphersuite =
|
||||||
|
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||||
|
|
||||||
|
/// Wire value of the allowed ciphersuite (KeyPackage TLS encoding: version 2B, ciphersuite 2B).
|
||||||
|
const ALLOWED_CIPHERSUITE_WIRE: u16 = 0x0001;
|
||||||
|
|
||||||
|
const CIPHERSUITE: Ciphersuite = ALLOWED_CIPHERSUITE;
|
||||||
|
|
||||||
|
/// Validates that the KeyPackage bytes use an allowed ciphersuite (Phase 2: ciphersuite allowlist).
|
||||||
|
///
|
||||||
|
/// Parses the TLS-encoded KeyPackage and rejects if the ciphersuite is not
|
||||||
|
/// `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519`. Does not verify signatures;
|
||||||
|
/// the server uses this only to enforce policy before storing.
|
||||||
|
pub fn validate_keypackage_ciphersuite(bytes: &[u8]) -> Result<(), CoreError> {
|
||||||
|
if bytes.len() < 4 {
|
||||||
|
return Err(CoreError::Mls("KeyPackage too short for version+ciphersuite".into()));
|
||||||
|
}
|
||||||
|
let cs_wire = u16::from_be_bytes([bytes[2], bytes[3]]);
|
||||||
|
if cs_wire != ALLOWED_CIPHERSUITE_WIRE {
|
||||||
|
return Err(CoreError::Mls(format!(
|
||||||
|
"KeyPackage ciphersuite {:#06x} not in allowlist (only {:#06x} allowed)",
|
||||||
|
cs_wire, ALLOWED_CIPHERSUITE_WIRE
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
// Optionally confirm full parse so we don't accept garbage that happens to have 0x0001 at offset 2.
|
||||||
|
let mut cursor = bytes;
|
||||||
|
let _kp = KeyPackageIn::tls_deserialize(&mut cursor)
|
||||||
|
.map_err(|e| CoreError::Mls(format!("KeyPackage parse: {e:?}")))?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
|
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
|
||||||
///
|
///
|
||||||
|
|||||||
@@ -4,7 +4,8 @@
|
|||||||
//! # Module layout
|
//! # Module layout
|
||||||
//!
|
//!
|
||||||
//! | Module | Responsibility |
|
//! | Module | Responsibility |
|
||||||
//! |--------------|------------------------------------------------------------------|
|
//! |---------------|------------------------------------------------------------------|
|
||||||
|
//! | `app_message` | Rich application payload (Chat, Reply, Reaction, ReadReceipt, Typing) |
|
||||||
//! | `error` | [`CoreError`] type |
|
//! | `error` | [`CoreError`] type |
|
||||||
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
|
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
|
||||||
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
|
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
|
||||||
@@ -12,8 +13,10 @@
|
|||||||
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
|
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
|
||||||
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
|
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
|
||||||
|
|
||||||
|
mod app_message;
|
||||||
mod error;
|
mod error;
|
||||||
mod group;
|
mod group;
|
||||||
|
pub mod hybrid_crypto;
|
||||||
pub mod hybrid_kem;
|
pub mod hybrid_kem;
|
||||||
mod identity;
|
mod identity;
|
||||||
mod keypackage;
|
mod keypackage;
|
||||||
@@ -22,12 +25,18 @@ pub mod opaque_auth;
|
|||||||
|
|
||||||
// ── Public API ────────────────────────────────────────────────────────────────
|
// ── Public API ────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub use app_message::{
|
||||||
|
serialize, serialize_chat, serialize_reaction, serialize_read_receipt, serialize_reply,
|
||||||
|
serialize_typing, parse, generate_message_id, AppMessage, MessageType, VERSION as APP_MESSAGE_VERSION,
|
||||||
|
};
|
||||||
pub use error::CoreError;
|
pub use error::CoreError;
|
||||||
pub use group::GroupMember;
|
pub use group::GroupMember;
|
||||||
pub use hybrid_kem::{
|
pub use hybrid_kem::{
|
||||||
hybrid_decrypt, hybrid_encrypt, HybridKeypair, HybridKeypairBytes, HybridKemError,
|
hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
|
||||||
HybridPublicKey,
|
HybridPublicKey,
|
||||||
};
|
};
|
||||||
|
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
|
||||||
pub use identity::IdentityKeypair;
|
pub use identity::IdentityKeypair;
|
||||||
pub use keypackage::generate_key_package;
|
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
|
||||||
pub use keystore::DiskKeyStore;
|
pub use keystore::{DiskKeyStore, StoreCrypto};
|
||||||
|
pub use openmls::prelude::MlsGroup;
|
||||||
|
|||||||
@@ -14,9 +14,7 @@ pub struct OpaqueSuite;
|
|||||||
|
|
||||||
impl CipherSuite for OpaqueSuite {
|
impl CipherSuite for OpaqueSuite {
|
||||||
type OprfCs = opaque_ke::Ristretto255;
|
type OprfCs = opaque_ke::Ristretto255;
|
||||||
type KeyExchange = opaque_ke::key_exchange::tripledh::TripleDh<
|
type KeyExchange =
|
||||||
opaque_ke::Ristretto255,
|
opaque_ke::key_exchange::tripledh::TripleDh<opaque_ke::Ristretto255, sha2::Sha512>;
|
||||||
sha2::Sha512,
|
|
||||||
>;
|
|
||||||
type Ksf = argon2::Argon2<'static>;
|
type Ksf = argon2::Argon2<'static>;
|
||||||
}
|
}
|
||||||
|
|||||||
22
crates/quicnprotochat-gui/Cargo.toml
Normal file
22
crates/quicnprotochat-gui/Cargo.toml
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
[package]
|
||||||
|
name = "quicnprotochat-gui"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "Native GUI for quicnprotochat (Tauri 2)."
|
||||||
|
license = "MIT"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "quicnprotochat-gui"
|
||||||
|
path = "src/main.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||||
|
quicnprotochat-client = { path = "../quicnprotochat-client" }
|
||||||
|
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||||
|
tauri = { version = "2", features = [] }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
tauri-build = "2"
|
||||||
32
crates/quicnprotochat-gui/README.md
Normal file
32
crates/quicnprotochat-gui/README.md
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
# quicnprotochat-gui
|
||||||
|
|
||||||
|
Native GUI for quicnprotochat using [Tauri 2](https://v2.tauri.app/). The UI runs in a webview; all server-facing work (capnp-rpc, `node_service::Client`) runs on a **dedicated backend thread** with a tokio `LocalSet`, since that code is `!Send`.
|
||||||
|
|
||||||
|
## Backend threading model
|
||||||
|
|
||||||
|
- A single **backend thread** runs a tokio `LocalSet` and a request-response loop.
|
||||||
|
- The UI thread sends commands over an `mpsc` channel: `Whoami { state_path, password }` or `Health { server, ca_cert, server_name }`.
|
||||||
|
- For each request, the backend runs sync code (whoami) or `LocalSet::run_until(async { ... })` (health). It then sends `Result<String, String>` back on the provided reply channel.
|
||||||
|
- Tauri commands (`whoami`, `health`) block on that reply so the frontend gets a simple async-style result.
|
||||||
|
|
||||||
|
## How to run
|
||||||
|
|
||||||
|
From the workspace root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo run -p quicnprotochat-gui
|
||||||
|
```
|
||||||
|
|
||||||
|
**Linux:** Tauri uses GTK. Install development packages if the build fails, e.g.:
|
||||||
|
|
||||||
|
- Debian/Ubuntu: `sudo apt install libgtk-3-dev libwebkit2gtk-4.1-dev`
|
||||||
|
- Fedora: `sudo dnf install gtk3-devel webkit2gtk4.1-devel`
|
||||||
|
|
||||||
|
## Frontend
|
||||||
|
|
||||||
|
The frontend is static HTML in `ui/index.html` (no npm or build step). It provides:
|
||||||
|
|
||||||
|
- **Whoami** – state path (and optional password); calls `whoami` and shows JSON (identity_key, fingerprint, etc.).
|
||||||
|
- **Health** – server address; calls `health` and shows server status and RTT JSON.
|
||||||
|
|
||||||
|
Default CA cert and server name for health are the same as the CLI (`data/server-cert.der`, `localhost`) unless overridden via optional params.
|
||||||
3
crates/quicnprotochat-gui/build.rs
Normal file
3
crates/quicnprotochat-gui/build.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
fn main() {
|
||||||
|
tauri_build::build()
|
||||||
|
}
|
||||||
11
crates/quicnprotochat-gui/capabilities/default.json
Normal file
11
crates/quicnprotochat-gui/capabilities/default.json
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.tauri.app/config/2/capability",
|
||||||
|
"identifier": "default",
|
||||||
|
"description": "Capability for the main window (custom commands whoami, health are allowed by default)",
|
||||||
|
"windows": ["main"],
|
||||||
|
"permissions": [
|
||||||
|
"core:default",
|
||||||
|
"core:window:allow-close",
|
||||||
|
"core:window:allow-set-title"
|
||||||
|
]
|
||||||
|
}
|
||||||
1
crates/quicnprotochat-gui/gen/schemas/acl-manifests.json
Normal file
1
crates/quicnprotochat-gui/gen/schemas/acl-manifests.json
Normal file
File diff suppressed because one or more lines are too long
1
crates/quicnprotochat-gui/gen/schemas/capabilities.json
Normal file
1
crates/quicnprotochat-gui/gen/schemas/capabilities.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"default":{"identifier":"default","description":"Capability for the main window (custom commands whoami, health are allowed by default)","local":true,"windows":["main"],"permissions":["core:default","core:window:allow-close","core:window:allow-set-title"]}}
|
||||||
2244
crates/quicnprotochat-gui/gen/schemas/desktop-schema.json
Normal file
2244
crates/quicnprotochat-gui/gen/schemas/desktop-schema.json
Normal file
File diff suppressed because it is too large
Load Diff
2244
crates/quicnprotochat-gui/gen/schemas/linux-schema.json
Normal file
2244
crates/quicnprotochat-gui/gen/schemas/linux-schema.json
Normal file
File diff suppressed because it is too large
Load Diff
BIN
crates/quicnprotochat-gui/icons/icon.png
Normal file
BIN
crates/quicnprotochat-gui/icons/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.1 KiB |
86
crates/quicnprotochat-gui/src/backend.rs
Normal file
86
crates/quicnprotochat-gui/src/backend.rs
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
//! Backend service running on a dedicated thread with a tokio LocalSet.
|
||||||
|
//!
|
||||||
|
//! All server-facing work (capnp-rpc, node_service::Client) is !Send and must run on this
|
||||||
|
//! single thread. The UI thread sends commands over a channel; this thread runs
|
||||||
|
//! `LocalSet::run_until` for each request and sends the result back.
|
||||||
|
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::sync::mpsc;
|
||||||
|
use std::thread;
|
||||||
|
|
||||||
|
use tokio::runtime::Builder;
|
||||||
|
use tokio::task::LocalSet;
|
||||||
|
|
||||||
|
use quicnprotochat_client::{cmd_health_json, whoami_json};
|
||||||
|
|
||||||
|
/// Commands the UI can send to the backend thread.
|
||||||
|
pub enum BackendCommand {
|
||||||
|
Whoami {
|
||||||
|
state_path: String,
|
||||||
|
password: Option<String>,
|
||||||
|
},
|
||||||
|
Health {
|
||||||
|
server: String,
|
||||||
|
ca_cert: PathBuf,
|
||||||
|
server_name: String,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Response sent back to the UI.
|
||||||
|
pub type BackendResponse = Result<String, String>;
|
||||||
|
|
||||||
|
/// Spawn the backend thread and return a sender to post commands and a join handle.
|
||||||
|
/// The backend runs a tokio LocalSet and processes one command at a time:
|
||||||
|
/// for each received command it runs `LocalSet::run_until(future)` (for async commands)
|
||||||
|
/// or runs sync code (whoami), then sends the result on the provided reply channel.
|
||||||
|
pub fn spawn_backend() -> (mpsc::Sender<(BackendCommand, mpsc::Sender<BackendResponse>)>, thread::JoinHandle<()>) {
|
||||||
|
let (tx, rx) = mpsc::channel::<(BackendCommand, mpsc::Sender<BackendResponse>)>();
|
||||||
|
|
||||||
|
let handle = thread::spawn(move || {
|
||||||
|
let rt = Builder::new_current_thread()
|
||||||
|
.enable_all()
|
||||||
|
.build()
|
||||||
|
.expect("backend tokio runtime");
|
||||||
|
let local = LocalSet::new();
|
||||||
|
|
||||||
|
while let Ok((cmd, reply_tx)) = rx.recv() {
|
||||||
|
let result = run_command(&local, &rt, cmd);
|
||||||
|
let _ = reply_tx.send(result);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
(tx, handle)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn run_command(
|
||||||
|
local: &LocalSet,
|
||||||
|
rt: &tokio::runtime::Runtime,
|
||||||
|
cmd: BackendCommand,
|
||||||
|
) -> BackendResponse {
|
||||||
|
match cmd {
|
||||||
|
BackendCommand::Whoami { state_path, password } => {
|
||||||
|
let path = PathBuf::from(&state_path);
|
||||||
|
whoami_json(&path, password.as_deref()).map_err(|e| e.to_string())
|
||||||
|
}
|
||||||
|
BackendCommand::Health {
|
||||||
|
server,
|
||||||
|
ca_cert,
|
||||||
|
server_name,
|
||||||
|
} => {
|
||||||
|
// Request-response: we run LocalSet::run_until for this single request so capnp-rpc
|
||||||
|
// and connect_node stay on this thread (!Send).
|
||||||
|
let fut = cmd_health_json(&server, &ca_cert, &server_name);
|
||||||
|
rt.block_on(local.run_until(fut)).map_err(|e| e.to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Default CA cert path (relative to cwd or absolute); same default as CLI.
|
||||||
|
pub fn default_ca_cert() -> PathBuf {
|
||||||
|
PathBuf::from("data/server-cert.der")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Default TLS server name.
|
||||||
|
pub fn default_server_name() -> String {
|
||||||
|
"localhost".to_string()
|
||||||
|
}
|
||||||
76
crates/quicnprotochat-gui/src/lib.rs
Normal file
76
crates/quicnprotochat-gui/src/lib.rs
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
//! quicnprotochat native GUI (Tauri 2).
|
||||||
|
//!
|
||||||
|
//! The backend runs on a dedicated thread with a tokio LocalSet; all server-facing
|
||||||
|
//! work (capnp-rpc, node_service::Client) is dispatched there. Tauri commands
|
||||||
|
//! block on the request-response channel until the backend returns.
|
||||||
|
|
||||||
|
mod backend;
|
||||||
|
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::sync::mpsc;
|
||||||
|
|
||||||
|
use backend::{spawn_backend, BackendCommand};
|
||||||
|
|
||||||
|
/// Shared state: sender to the backend thread.
|
||||||
|
struct BackendState {
|
||||||
|
tx: mpsc::Sender<(BackendCommand, mpsc::Sender<backend::BackendResponse>)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs whoami on the backend thread and returns JSON string (identity_key, fingerprint, etc.).
|
||||||
|
#[tauri::command]
|
||||||
|
fn whoami(
|
||||||
|
state: tauri::State<BackendState>,
|
||||||
|
state_path: String,
|
||||||
|
password: Option<String>,
|
||||||
|
) -> Result<String, String> {
|
||||||
|
let (reply_tx, reply_rx) = mpsc::channel();
|
||||||
|
state
|
||||||
|
.tx
|
||||||
|
.send((
|
||||||
|
BackendCommand::Whoami {
|
||||||
|
state_path,
|
||||||
|
password,
|
||||||
|
},
|
||||||
|
reply_tx,
|
||||||
|
))
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
reply_rx.recv().map_err(|e| e.to_string())?
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs health check on the backend thread (LocalSet::run_until) and returns status JSON.
|
||||||
|
#[tauri::command]
|
||||||
|
fn health(
|
||||||
|
state: tauri::State<BackendState>,
|
||||||
|
server: String,
|
||||||
|
ca_cert: Option<String>,
|
||||||
|
server_name: Option<String>,
|
||||||
|
) -> Result<String, String> {
|
||||||
|
let ca_cert = ca_cert
|
||||||
|
.map(PathBuf::from)
|
||||||
|
.unwrap_or_else(backend::default_ca_cert);
|
||||||
|
let server_name = server_name.unwrap_or_else(backend::default_server_name);
|
||||||
|
let (reply_tx, reply_rx) = mpsc::channel();
|
||||||
|
state
|
||||||
|
.tx
|
||||||
|
.send((
|
||||||
|
BackendCommand::Health {
|
||||||
|
server,
|
||||||
|
ca_cert,
|
||||||
|
server_name,
|
||||||
|
},
|
||||||
|
reply_tx,
|
||||||
|
))
|
||||||
|
.map_err(|e| e.to_string())?;
|
||||||
|
reply_rx.recv().map_err(|e| e.to_string())?
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg_attr(mobile, tauri::mobile_entry_point)]
|
||||||
|
pub fn run() {
|
||||||
|
let (backend_tx, _backend_handle) = spawn_backend();
|
||||||
|
|
||||||
|
tauri::Builder::default()
|
||||||
|
.manage(BackendState { tx: backend_tx })
|
||||||
|
.invoke_handler(tauri::generate_handler![whoami, health])
|
||||||
|
.run(tauri::generate_context!())
|
||||||
|
.expect("error while running tauri application");
|
||||||
|
}
|
||||||
5
crates/quicnprotochat-gui/src/main.rs
Normal file
5
crates/quicnprotochat-gui/src/main.rs
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
//! Desktop entry point for quicnprotochat-gui.
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
quicnprotochat_gui::run()
|
||||||
|
}
|
||||||
24
crates/quicnprotochat-gui/tauri.conf.json
Normal file
24
crates/quicnprotochat-gui/tauri.conf.json
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.tauri.app/config/2",
|
||||||
|
"productName": "quicnprotochat-gui",
|
||||||
|
"identifier": "chat.quicnproto.gui",
|
||||||
|
"build": {
|
||||||
|
"frontendDist": "./ui",
|
||||||
|
"beforeBuildCommand": "",
|
||||||
|
"beforeDevCommand": ""
|
||||||
|
},
|
||||||
|
"app": {
|
||||||
|
"windows": [
|
||||||
|
{
|
||||||
|
"title": "quicnprotochat",
|
||||||
|
"width": 640,
|
||||||
|
"height": 480
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"security": {
|
||||||
|
"csp": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"bundle": {},
|
||||||
|
"plugins": {}
|
||||||
|
}
|
||||||
54
crates/quicnprotochat-gui/ui/index.html
Normal file
54
crates/quicnprotochat-gui/ui/index.html
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||||
|
<title>quicnprotochat</title>
|
||||||
|
<style>
|
||||||
|
body { font-family: system-ui, sans-serif; margin: 1rem; }
|
||||||
|
button { margin: 0.25rem; padding: 0.5rem 1rem; cursor: pointer; }
|
||||||
|
#output { white-space: pre-wrap; background: #f0f0f0; padding: 0.75rem; margin-top: 1rem; min-height: 4rem; border-radius: 4px; }
|
||||||
|
.error { color: #c00; }
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>quicnprotochat</h1>
|
||||||
|
<p>
|
||||||
|
<button id="whoami">Whoami</button>
|
||||||
|
<button id="health">Health</button>
|
||||||
|
</p>
|
||||||
|
<label>State path: <input id="statePath" type="text" value="quicnprotochat-state.bin" size="32" /></label>
|
||||||
|
<br />
|
||||||
|
<label>Server: <input id="server" type="text" value="127.0.0.1:7000" size="24" /></label>
|
||||||
|
<div id="output">Click Whoami or Health. Results appear here.</div>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
const output = document.getElementById('output');
|
||||||
|
const statePath = document.getElementById('statePath');
|
||||||
|
const server = document.getElementById('server');
|
||||||
|
|
||||||
|
function show(result, isError = false) {
|
||||||
|
output.textContent = result;
|
||||||
|
output.className = isError ? 'error' : '';
|
||||||
|
}
|
||||||
|
|
||||||
|
const invoke = window.__TAURI__?.core?.invoke;
|
||||||
|
if (!invoke) {
|
||||||
|
show('Tauri API not available (not running inside Tauri?).', true);
|
||||||
|
} else {
|
||||||
|
document.getElementById('whoami').addEventListener('click', function () {
|
||||||
|
show('Running whoami…');
|
||||||
|
invoke('whoami', { statePath: statePath.value.trim(), password: null })
|
||||||
|
.then(function (s) { show(s); })
|
||||||
|
.catch(function (e) { show(String(e), true); });
|
||||||
|
});
|
||||||
|
document.getElementById('health').addEventListener('click', function () {
|
||||||
|
show('Running health…');
|
||||||
|
invoke('health', { server: server.value.trim() })
|
||||||
|
.then(function (s) { show(s); })
|
||||||
|
.catch(function (e) { show(String(e), true); });
|
||||||
|
});
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
12
crates/quicnprotochat-p2p/Cargo.toml
Normal file
12
crates/quicnprotochat-p2p/Cargo.toml
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
[package]
|
||||||
|
name = "quicnprotochat-p2p"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "P2P transport layer for quicnprotochat using iroh."
|
||||||
|
license = "MIT"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
iroh = "0.96"
|
||||||
|
tokio = { workspace = true }
|
||||||
|
tracing = { workspace = true }
|
||||||
|
anyhow = { workspace = true }
|
||||||
186
crates/quicnprotochat-p2p/src/lib.rs
Normal file
186
crates/quicnprotochat-p2p/src/lib.rs
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
//! P2P transport layer for quicnprotochat using iroh.
|
||||||
|
//!
|
||||||
|
//! Provides direct peer-to-peer QUIC connections with NAT traversal via iroh
|
||||||
|
//! relay servers. When both peers are online, messages bypass the central
|
||||||
|
//! server entirely.
|
||||||
|
//!
|
||||||
|
//! # Architecture
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! Client A ── iroh direct (QUIC) ── Client B (preferred: low latency)
|
||||||
|
//! │ │
|
||||||
|
//! └── QUIC/TLS ── Server ── QUIC/TLS ┘ (fallback: store-and-forward)
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
use iroh::{Endpoint, EndpointAddr, PublicKey, SecretKey};
|
||||||
|
|
||||||
|
/// ALPN protocol identifier for quicnprotochat P2P messaging.
|
||||||
|
const P2P_ALPN: &[u8] = b"quicnprotochat/p2p/1";
|
||||||
|
|
||||||
|
/// A P2P node backed by an iroh endpoint.
|
||||||
|
///
|
||||||
|
/// Manages direct QUIC connections to peers with automatic NAT traversal.
|
||||||
|
pub struct P2pNode {
|
||||||
|
endpoint: Endpoint,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Received P2P message with sender information.
|
||||||
|
pub struct P2pMessage {
|
||||||
|
pub sender: PublicKey,
|
||||||
|
pub payload: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl P2pNode {
|
||||||
|
/// Start a new P2P node.
|
||||||
|
///
|
||||||
|
/// Generates a fresh identity or reuses a provided secret key.
|
||||||
|
pub async fn start(secret_key: Option<SecretKey>) -> anyhow::Result<Self> {
|
||||||
|
let mut builder = Endpoint::builder();
|
||||||
|
if let Some(sk) = secret_key {
|
||||||
|
builder = builder.secret_key(sk);
|
||||||
|
}
|
||||||
|
builder = builder.alpns(vec![P2P_ALPN.to_vec()]);
|
||||||
|
|
||||||
|
let endpoint = builder.bind().await?;
|
||||||
|
|
||||||
|
tracing::info!(
|
||||||
|
node_id = %endpoint.id().fmt_short(),
|
||||||
|
"P2P node started"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(Self { endpoint })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This node's public key (used as node ID for peer discovery).
|
||||||
|
pub fn node_id(&self) -> PublicKey {
|
||||||
|
self.endpoint.id()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This node's secret key (for persistence across restarts).
|
||||||
|
pub fn secret_key(&self) -> SecretKey {
|
||||||
|
self.endpoint.secret_key().clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the node's network address information for publishing to discovery.
|
||||||
|
pub fn endpoint_addr(&self) -> EndpointAddr {
|
||||||
|
self.endpoint.addr()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a payload directly to a peer via P2P QUIC.
|
||||||
|
pub async fn send(&self, peer: impl Into<EndpointAddr>, payload: &[u8]) -> anyhow::Result<()> {
|
||||||
|
let peer = peer.into();
|
||||||
|
let conn = self.endpoint.connect(peer, P2P_ALPN).await?;
|
||||||
|
|
||||||
|
let mut send = conn.open_uni().await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
|
||||||
|
// Simple framing: 4-byte length prefix + payload.
|
||||||
|
let len = (payload.len() as u32).to_be_bytes();
|
||||||
|
send.write_all(&len)
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
send.write_all(payload)
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
send.finish().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
// Wait until the peer has consumed the stream before dropping.
|
||||||
|
send.stopped().await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
|
||||||
|
tracing::debug!(
|
||||||
|
peer = %conn.remote_id().fmt_short(),
|
||||||
|
bytes = payload.len(),
|
||||||
|
"P2P message sent"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Accept a single incoming P2P message.
|
||||||
|
///
|
||||||
|
/// Blocks until a peer connects and sends data.
|
||||||
|
pub async fn recv(&self) -> anyhow::Result<P2pMessage> {
|
||||||
|
let incoming = self
|
||||||
|
.endpoint
|
||||||
|
.accept()
|
||||||
|
.await
|
||||||
|
.ok_or_else(|| anyhow::anyhow!("no more incoming connections"))?;
|
||||||
|
|
||||||
|
let conn = incoming.await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
let sender = conn.remote_id();
|
||||||
|
|
||||||
|
let mut recv = conn
|
||||||
|
.accept_uni()
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
|
||||||
|
// Read length-prefixed payload.
|
||||||
|
let mut len_buf = [0u8; 4];
|
||||||
|
recv.read_exact(&mut len_buf)
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
let len = u32::from_be_bytes(len_buf) as usize;
|
||||||
|
|
||||||
|
if len > 5 * 1024 * 1024 {
|
||||||
|
anyhow::bail!("P2P payload too large: {len} bytes");
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut payload = vec![0u8; len];
|
||||||
|
recv.read_exact(&mut payload)
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||||
|
|
||||||
|
tracing::debug!(
|
||||||
|
peer = %sender.fmt_short(),
|
||||||
|
bytes = len,
|
||||||
|
"P2P message received"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(P2pMessage { sender, payload })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gracefully shut down the P2P node.
|
||||||
|
pub async fn close(self) {
|
||||||
|
self.endpoint.close().await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use iroh::RelayMode;
|
||||||
|
|
||||||
|
/// Create a local-only P2P node with relays disabled (for testing).
|
||||||
|
async fn local_node() -> P2pNode {
|
||||||
|
let endpoint = Endpoint::builder()
|
||||||
|
.alpns(vec![P2P_ALPN.to_vec()])
|
||||||
|
.relay_mode(RelayMode::Disabled)
|
||||||
|
.bind()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
P2pNode { endpoint }
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn p2p_round_trip() {
|
||||||
|
let sender = local_node().await;
|
||||||
|
let receiver = local_node().await;
|
||||||
|
|
||||||
|
let receiver_addr = receiver.endpoint_addr();
|
||||||
|
let sender_id = sender.node_id();
|
||||||
|
let payload = b"hello via P2P";
|
||||||
|
|
||||||
|
let recv_handle = tokio::spawn(async move {
|
||||||
|
let msg = receiver.recv().await.unwrap();
|
||||||
|
assert_eq!(msg.payload, payload.to_vec());
|
||||||
|
assert_eq!(msg.sender, sender_id);
|
||||||
|
});
|
||||||
|
|
||||||
|
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
|
||||||
|
|
||||||
|
sender.send(receiver_addr, payload).await.unwrap();
|
||||||
|
|
||||||
|
recv_handle.await.unwrap();
|
||||||
|
|
||||||
|
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||||
|
sender.close().await;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,5 +1,8 @@
|
|||||||
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
|
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
|
||||||
//!
|
//!
|
||||||
|
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
|
||||||
|
#![allow(unused_parens)]
|
||||||
|
|
||||||
//! # Design constraints
|
//! # Design constraints
|
||||||
//!
|
//!
|
||||||
//! This crate is intentionally restricted:
|
//! This crate is intentionally restricted:
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ futures = { workspace = true }
|
|||||||
|
|
||||||
# Server utilities
|
# Server utilities
|
||||||
dashmap = { workspace = true }
|
dashmap = { workspace = true }
|
||||||
|
governor = { workspace = true }
|
||||||
sha2 = { workspace = true }
|
sha2 = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
tracing-subscriber = { workspace = true }
|
tracing-subscriber = { workspace = true }
|
||||||
@@ -49,3 +50,10 @@ serde = { workspace = true }
|
|||||||
# CLI
|
# CLI
|
||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
toml = { version = "0.8" }
|
toml = { version = "0.8" }
|
||||||
|
|
||||||
|
# Metrics (Prometheus)
|
||||||
|
metrics = "0.22"
|
||||||
|
metrics-exporter-prometheus = "0.15"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
tempfile = "3"
|
||||||
|
|||||||
47
crates/quicnprotochat-server/migrations/001_initial.sql
Normal file
47
crates/quicnprotochat-server/migrations/001_initial.sql
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
CREATE TABLE IF NOT EXISTS key_packages (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
identity_key BLOB NOT NULL,
|
||||||
|
package_data BLOB NOT NULL,
|
||||||
|
created_at INTEGER DEFAULT (strftime('%s','now'))
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS deliveries (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
recipient_key BLOB NOT NULL,
|
||||||
|
channel_id BLOB NOT NULL DEFAULT X'',
|
||||||
|
payload BLOB NOT NULL,
|
||||||
|
created_at INTEGER DEFAULT (strftime('%s','now'))
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS hybrid_keys (
|
||||||
|
identity_key BLOB PRIMARY KEY,
|
||||||
|
hybrid_public_key BLOB NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_kp_identity
|
||||||
|
ON key_packages(identity_key);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_del_recipient_channel
|
||||||
|
ON deliveries(recipient_key, channel_id);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS server_setup (
|
||||||
|
id INTEGER PRIMARY KEY CHECK (id = 1),
|
||||||
|
setup_data BLOB NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS users (
|
||||||
|
username TEXT PRIMARY KEY,
|
||||||
|
opaque_record BLOB NOT NULL,
|
||||||
|
created_at INTEGER DEFAULT (strftime('%s','now'))
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS user_identity_keys (
|
||||||
|
username TEXT PRIMARY KEY,
|
||||||
|
identity_key BLOB NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS endpoints (
|
||||||
|
identity_key BLOB PRIMARY KEY,
|
||||||
|
node_addr BLOB NOT NULL,
|
||||||
|
updated_at INTEGER DEFAULT (strftime('%s','now'))
|
||||||
|
);
|
||||||
21
crates/quicnprotochat-server/migrations/002_add_seq.sql
Normal file
21
crates/quicnprotochat-server/migrations/002_add_seq.sql
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
-- Migration 002: add per-inbox delivery sequence numbers.
|
||||||
|
--
|
||||||
|
-- Adds a `seq` column to the deliveries table and a separate counter table
|
||||||
|
-- that tracks the next sequence number per (recipient_key, channel_id) inbox.
|
||||||
|
-- The counter is atomically incremented on each enqueue via an UPSERT so
|
||||||
|
-- sequence numbers are gapless even under concurrent inserts.
|
||||||
|
--
|
||||||
|
-- Requires SQLite >= 3.35 (RETURNING clause support; available on Ubuntu 22.04+).
|
||||||
|
|
||||||
|
ALTER TABLE deliveries ADD COLUMN seq INTEGER NOT NULL DEFAULT 0;
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS delivery_seq_counters (
|
||||||
|
recipient_key BLOB NOT NULL,
|
||||||
|
channel_id BLOB NOT NULL,
|
||||||
|
next_seq INTEGER NOT NULL DEFAULT 0,
|
||||||
|
PRIMARY KEY (recipient_key, channel_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Index lets ORDER BY seq queries use an index scan instead of a sort.
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_del_seq
|
||||||
|
ON deliveries (recipient_key, channel_id, seq);
|
||||||
202
crates/quicnprotochat-server/src/auth.rs
Normal file
202
crates/quicnprotochat-server/src/auth.rs
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use quicnprotochat_proto::node_capnp::auth;
|
||||||
|
use sha2::Digest;
|
||||||
|
use subtle::ConstantTimeEq;
|
||||||
|
use tokio::sync::Notify;
|
||||||
|
|
||||||
|
use crate::error_codes::*;
|
||||||
|
|
||||||
|
pub const SESSION_TTL_SECS: u64 = 24 * 60 * 60; // 24 hours
|
||||||
|
pub const PENDING_LOGIN_TTL_SECS: u64 = 300; // 5 minutes
|
||||||
|
/// Maximum enqueues per second per token before GCRA rate limiting kicks in.
|
||||||
|
pub const RATE_LIMIT_MAX_PER_SEC: std::num::NonZeroU32 =
|
||||||
|
std::num::NonZeroU32::new(100).expect("RATE_LIMIT_MAX_PER_SEC must be non-zero");
|
||||||
|
|
||||||
|
/// Keyed GCRA rate limiter backed by DashMap (one bucket per session token).
|
||||||
|
pub type RateLimiter = governor::DefaultKeyedRateLimiter<Vec<u8>>;
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct AuthConfig {
|
||||||
|
pub required_token: Option<Vec<u8>>,
|
||||||
|
/// When true, a valid bearer token (no session) is accepted and the request's identity/key is used (dev/e2e only).
|
||||||
|
pub allow_insecure_identity_from_request: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AuthConfig {
|
||||||
|
pub fn new(required_token: Option<String>, allow_insecure_identity_from_request: bool) -> Self {
|
||||||
|
let required_token = required_token
|
||||||
|
.filter(|s| !s.is_empty())
|
||||||
|
.map(|s| s.into_bytes());
|
||||||
|
Self {
|
||||||
|
required_token,
|
||||||
|
allow_insecure_identity_from_request,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct SessionInfo {
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub username: String,
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub created_at: u64,
|
||||||
|
pub expires_at: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct PendingLogin {
|
||||||
|
pub state_bytes: Vec<u8>,
|
||||||
|
pub created_at: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct AuthContext {
|
||||||
|
pub token: Vec<u8>,
|
||||||
|
pub identity_key: Option<Vec<u8>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn current_timestamp() -> u64 {
|
||||||
|
std::time::SystemTime::now()
|
||||||
|
.duration_since(std::time::UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_secs()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check the GCRA rate limit for a token. Returns an error if the token has exceeded the quota.
|
||||||
|
pub fn check_rate_limit(limiter: &RateLimiter, token: &[u8]) -> Result<(), capnp::Error> {
|
||||||
|
limiter.check_key(&token.to_vec()).map_err(|_| {
|
||||||
|
crate::error_codes::coded_error(
|
||||||
|
E014_RATE_LIMITED,
|
||||||
|
format!("rate limit exceeded: max {} enqueues/s", RATE_LIMIT_MAX_PER_SEC),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn validate_auth(
|
||||||
|
cfg: &AuthConfig,
|
||||||
|
sessions: &DashMap<Vec<u8>, SessionInfo>,
|
||||||
|
auth: Result<auth::Reader<'_>, capnp::Error>,
|
||||||
|
) -> Result<(), capnp::Error> {
|
||||||
|
validate_auth_context(cfg, sessions, auth).map(|_| ())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn validate_auth_context(
|
||||||
|
cfg: &AuthConfig,
|
||||||
|
sessions: &DashMap<Vec<u8>, SessionInfo>,
|
||||||
|
auth: Result<auth::Reader<'_>, capnp::Error>,
|
||||||
|
) -> Result<AuthContext, capnp::Error> {
|
||||||
|
let auth = auth?;
|
||||||
|
let version = auth.get_version();
|
||||||
|
|
||||||
|
if version != 1 {
|
||||||
|
return Err(crate::error_codes::coded_error(
|
||||||
|
E001_BAD_AUTH_VERSION,
|
||||||
|
format!("unsupported auth version {} (expected 1)", version),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let token = auth
|
||||||
|
.get_access_token()
|
||||||
|
.map_err(|e| crate::error_codes::coded_error(E020_BAD_PARAMS, format!("auth.accessToken: {e}")))?
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
if token.is_empty() {
|
||||||
|
return Err(crate::error_codes::coded_error(
|
||||||
|
E002_EMPTY_TOKEN,
|
||||||
|
"auth.version=1 requires non-empty accessToken",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(expected) = &cfg.required_token {
|
||||||
|
if expected.len() == token.len() && bool::from(expected.ct_eq(&token)) {
|
||||||
|
return Ok(AuthContext {
|
||||||
|
token,
|
||||||
|
identity_key: None,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(session) = sessions.get(&token) {
|
||||||
|
let now = current_timestamp();
|
||||||
|
if session.expires_at > now {
|
||||||
|
let identity = if session.identity_key.is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(session.identity_key.clone())
|
||||||
|
};
|
||||||
|
|
||||||
|
return Ok(AuthContext {
|
||||||
|
token,
|
||||||
|
identity_key: identity,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
drop(session);
|
||||||
|
sessions.remove(&token);
|
||||||
|
return Err(crate::error_codes::coded_error(
|
||||||
|
E017_SESSION_EXPIRED,
|
||||||
|
"session token has expired",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(crate::error_codes::coded_error(E003_INVALID_TOKEN, "invalid accessToken"))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn require_identity<'a>(auth_ctx: &'a AuthContext) -> Result<&'a [u8], capnp::Error> {
|
||||||
|
match auth_ctx.identity_key.as_deref() {
|
||||||
|
Some(ik) => Ok(ik),
|
||||||
|
None => Err(crate::error_codes::coded_error(
|
||||||
|
E003_INVALID_TOKEN,
|
||||||
|
"access token is not identity-bound; login required",
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn require_identity_match(auth_ctx: &AuthContext, expected: &[u8]) -> Result<(), capnp::Error> {
|
||||||
|
let ik = require_identity(auth_ctx)?;
|
||||||
|
if ik != expected {
|
||||||
|
return Err(crate::error_codes::coded_error(
|
||||||
|
E016_IDENTITY_MISMATCH,
|
||||||
|
"access token is bound to a different identity",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When the token is a valid session, require it to match `request_identity`.
|
||||||
|
/// When the token is a bearer token (no identity) and `allow_insecure_identity_from_request` is true, accept the request identity (dev/e2e).
|
||||||
|
pub fn require_identity_or_request(
|
||||||
|
auth_ctx: &AuthContext,
|
||||||
|
request_identity: &[u8],
|
||||||
|
allow_insecure: bool,
|
||||||
|
) -> Result<(), capnp::Error> {
|
||||||
|
match auth_ctx.identity_key.as_deref() {
|
||||||
|
Some(_) => require_identity_match(auth_ctx, request_identity),
|
||||||
|
None if allow_insecure => Ok(()),
|
||||||
|
None => Err(crate::error_codes::coded_error(
|
||||||
|
E003_INVALID_TOKEN,
|
||||||
|
"access token is not identity-bound; login required",
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fmt_hex(bytes: &[u8]) -> String {
|
||||||
|
let hex: String = bytes.iter().map(|b| format!("{b:02x}")).collect();
|
||||||
|
format!("{hex}…")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn waiter(waiters: &DashMap<Vec<u8>, Arc<Notify>>, recipient_key: &[u8]) -> Arc<Notify> {
|
||||||
|
waiters
|
||||||
|
.entry(recipient_key.to_vec())
|
||||||
|
.or_insert_with(|| Arc::new(Notify::new()))
|
||||||
|
.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fingerprint(data: &[u8]) -> Vec<u8> {
|
||||||
|
sha2::Sha256::digest(data).to_vec()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {
|
||||||
|
crate::error_codes::coded_error(code, msg)
|
||||||
|
}
|
||||||
187
crates/quicnprotochat-server/src/config.rs
Normal file
187
crates/quicnprotochat-server/src/config.rs
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use serde::Deserialize;
|
||||||
|
|
||||||
|
pub const DEFAULT_LISTEN: &str = "0.0.0.0:7000";
|
||||||
|
pub const DEFAULT_DATA_DIR: &str = "data";
|
||||||
|
pub const DEFAULT_TLS_CERT: &str = "data/server-cert.der";
|
||||||
|
pub const DEFAULT_TLS_KEY: &str = "data/server-key.der";
|
||||||
|
pub const DEFAULT_STORE_BACKEND: &str = "file";
|
||||||
|
pub const DEFAULT_DB_PATH: &str = "data/quicnprotochat.db";
|
||||||
|
|
||||||
|
#[derive(Debug, Default, Deserialize)]
|
||||||
|
pub struct FileConfig {
|
||||||
|
pub listen: Option<String>,
|
||||||
|
pub data_dir: Option<String>,
|
||||||
|
pub tls_cert: Option<PathBuf>,
|
||||||
|
pub tls_key: Option<PathBuf>,
|
||||||
|
pub auth_token: Option<String>,
|
||||||
|
pub allow_insecure_auth: Option<bool>,
|
||||||
|
/// When true, enqueue does not require an identity-bound session: only a valid token is required.
|
||||||
|
/// The server does not associate the request with a specific sender (Sealed Sender).
|
||||||
|
#[serde(default)]
|
||||||
|
pub sealed_sender: Option<bool>,
|
||||||
|
pub store_backend: Option<String>,
|
||||||
|
pub db_path: Option<PathBuf>,
|
||||||
|
pub db_key: Option<String>,
|
||||||
|
/// Metrics HTTP listen address (e.g. "0.0.0.0:9090"). If set, /metrics is served there.
|
||||||
|
pub metrics_listen: Option<String>,
|
||||||
|
/// When true and metrics_listen is set, start the metrics server.
|
||||||
|
#[serde(default)]
|
||||||
|
pub metrics_enabled: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct EffectiveConfig {
|
||||||
|
pub listen: String,
|
||||||
|
pub data_dir: String,
|
||||||
|
pub tls_cert: PathBuf,
|
||||||
|
pub tls_key: PathBuf,
|
||||||
|
pub auth_token: Option<String>,
|
||||||
|
pub allow_insecure_auth: bool,
|
||||||
|
/// When true, enqueue does not require identity; valid token only (Sealed Sender).
|
||||||
|
pub sealed_sender: bool,
|
||||||
|
pub store_backend: String,
|
||||||
|
pub db_path: PathBuf,
|
||||||
|
pub db_key: String,
|
||||||
|
/// If Some(addr), metrics server listens here (e.g. "0.0.0.0:9090").
|
||||||
|
pub metrics_listen: Option<String>,
|
||||||
|
/// Start metrics server only when true and metrics_listen is set.
|
||||||
|
pub metrics_enabled: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load_config(path: Option<&Path>) -> anyhow::Result<FileConfig> {
|
||||||
|
let path = match path {
|
||||||
|
Some(p) => PathBuf::from(p),
|
||||||
|
None => PathBuf::from("quicnprotochat-server.toml"),
|
||||||
|
};
|
||||||
|
|
||||||
|
if !path.exists() {
|
||||||
|
return Ok(FileConfig::default());
|
||||||
|
}
|
||||||
|
|
||||||
|
let contents =
|
||||||
|
std::fs::read_to_string(&path).with_context(|| format!("read config file {path:?}"))?;
|
||||||
|
let cfg: FileConfig =
|
||||||
|
toml::from_str(&contents).with_context(|| format!("parse config file {path:?}"))?;
|
||||||
|
Ok(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
|
||||||
|
let listen = if args.listen == DEFAULT_LISTEN {
|
||||||
|
file.listen
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| DEFAULT_LISTEN.to_string())
|
||||||
|
} else {
|
||||||
|
args.listen.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let data_dir = if args.data_dir == DEFAULT_DATA_DIR {
|
||||||
|
file.data_dir
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
|
||||||
|
} else {
|
||||||
|
args.data_dir.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) {
|
||||||
|
file.tls_cert
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
|
||||||
|
} else {
|
||||||
|
args.tls_cert.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) {
|
||||||
|
file.tls_key
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
|
||||||
|
} else {
|
||||||
|
args.tls_key.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let auth_token = if args.auth_token.is_some() {
|
||||||
|
args.auth_token.clone()
|
||||||
|
} else {
|
||||||
|
file.auth_token.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let allow_insecure_auth = if args.allow_insecure_auth {
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
file.allow_insecure_auth.unwrap_or(false)
|
||||||
|
};
|
||||||
|
|
||||||
|
let sealed_sender = args.sealed_sender || file.sealed_sender.unwrap_or(false);
|
||||||
|
|
||||||
|
let store_backend = if args.store_backend == DEFAULT_STORE_BACKEND {
|
||||||
|
file.store_backend
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| DEFAULT_STORE_BACKEND.to_string())
|
||||||
|
} else {
|
||||||
|
args.store_backend.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let db_path = if args.db_path == PathBuf::from(DEFAULT_DB_PATH) {
|
||||||
|
file.db_path
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| PathBuf::from(DEFAULT_DB_PATH))
|
||||||
|
} else {
|
||||||
|
args.db_path.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let db_key = if args.db_key.is_empty() {
|
||||||
|
file.db_key.clone().unwrap_or_else(|| args.db_key.clone())
|
||||||
|
} else {
|
||||||
|
args.db_key.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let metrics_listen = args
|
||||||
|
.metrics_listen
|
||||||
|
.clone()
|
||||||
|
.or_else(|| file.metrics_listen.clone());
|
||||||
|
let metrics_enabled = args
|
||||||
|
.metrics_enabled
|
||||||
|
.or(file.metrics_enabled)
|
||||||
|
.unwrap_or(metrics_listen.is_some());
|
||||||
|
|
||||||
|
EffectiveConfig {
|
||||||
|
listen,
|
||||||
|
data_dir,
|
||||||
|
tls_cert,
|
||||||
|
tls_key,
|
||||||
|
auth_token,
|
||||||
|
allow_insecure_auth,
|
||||||
|
sealed_sender,
|
||||||
|
store_backend,
|
||||||
|
db_path,
|
||||||
|
db_key,
|
||||||
|
metrics_listen,
|
||||||
|
metrics_enabled,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
|
||||||
|
let token = effective
|
||||||
|
.auth_token
|
||||||
|
.as_deref()
|
||||||
|
.filter(|s| !s.is_empty())
|
||||||
|
.ok_or_else(|| {
|
||||||
|
anyhow::anyhow!("production requires QUICNPROTOCHAT_AUTH_TOKEN (non-empty)")
|
||||||
|
})?;
|
||||||
|
if token == "devtoken" {
|
||||||
|
anyhow::bail!(
|
||||||
|
"production forbids auth_token 'devtoken'; set a strong QUICNPROTOCHAT_AUTH_TOKEN"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if effective.store_backend == "sql" && effective.db_key.is_empty() {
|
||||||
|
anyhow::bail!("production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY");
|
||||||
|
}
|
||||||
|
if !effective.tls_cert.exists() || !effective.tls_key.exists() {
|
||||||
|
anyhow::bail!(
|
||||||
|
"production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -23,6 +23,7 @@ pub const E017_SESSION_EXPIRED: &str = "E017";
|
|||||||
pub const E018_USER_EXISTS: &str = "E018";
|
pub const E018_USER_EXISTS: &str = "E018";
|
||||||
pub const E019_NO_PENDING_LOGIN: &str = "E019";
|
pub const E019_NO_PENDING_LOGIN: &str = "E019";
|
||||||
pub const E020_BAD_PARAMS: &str = "E020";
|
pub const E020_BAD_PARAMS: &str = "E020";
|
||||||
|
pub const E021_CIPHERSUITE_NOT_ALLOWED: &str = "E021";
|
||||||
|
|
||||||
/// Build a `capnp::Error::failed()` with the structured code prefix.
|
/// Build a `capnp::Error::failed()` with the structured code prefix.
|
||||||
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {
|
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
49
crates/quicnprotochat-server/src/metrics.rs
Normal file
49
crates/quicnprotochat-server/src/metrics.rs
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
//! Prometheus metrics for the server.
|
||||||
|
//!
|
||||||
|
//! All counters/histograms/gauges use the `metrics` crate and are exported
|
||||||
|
//! via metrics-exporter-prometheus on a configurable HTTP port (e.g. /metrics).
|
||||||
|
|
||||||
|
/// Record one enqueue (success). Call after a message is enqueued.
|
||||||
|
pub fn record_enqueue_total() {
|
||||||
|
metrics::counter!("enqueue_total").increment(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record enqueued payload size in bytes.
|
||||||
|
pub fn record_enqueue_bytes(bytes: u64) {
|
||||||
|
metrics::counter!("enqueue_bytes_total").increment(bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record one fetch (success). Call when fetch returns.
|
||||||
|
pub fn record_fetch_total() {
|
||||||
|
metrics::counter!("fetch_total").increment(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record one fetch_wait (success). Call when fetch_wait returns.
|
||||||
|
pub fn record_fetch_wait_total() {
|
||||||
|
metrics::counter!("fetch_wait_total").increment(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set the delivery queue depth gauge (sample). Updated at enqueue/fetch time.
|
||||||
|
pub fn record_delivery_queue_depth(depth: usize) {
|
||||||
|
metrics::gauge!("delivery_queue_depth").set(depth as f64);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record one KeyPackage upload (success).
|
||||||
|
pub fn record_key_package_upload_total() {
|
||||||
|
metrics::counter!("key_package_upload_total").increment(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record successful auth login (session token issued).
|
||||||
|
pub fn record_auth_login_success_total() {
|
||||||
|
metrics::counter!("auth_login_success_total").increment(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record failed auth login attempt.
|
||||||
|
pub fn record_auth_login_failure_total() {
|
||||||
|
metrics::counter!("auth_login_failure_total").increment(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record rate limit hit (enqueue rejected).
|
||||||
|
pub fn record_rate_limit_hit_total() {
|
||||||
|
metrics::counter!("rate_limit_hit_total").increment(1);
|
||||||
|
}
|
||||||
351
crates/quicnprotochat-server/src/node_service/auth_ops.rs
Normal file
351
crates/quicnprotochat-server/src/node_service/auth_ops.rs
Normal file
@@ -0,0 +1,351 @@
|
|||||||
|
use capnp::capability::Promise;
|
||||||
|
use opaque_ke::{
|
||||||
|
CredentialFinalization, CredentialRequest, RegistrationRequest, RegistrationUpload,
|
||||||
|
ServerLogin, ServerRegistration,
|
||||||
|
};
|
||||||
|
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
||||||
|
use quicnprotochat_proto::node_capnp::node_service;
|
||||||
|
|
||||||
|
use crate::auth::{coded_error, current_timestamp, PendingLogin, SESSION_TTL_SECS};
|
||||||
|
use crate::error_codes::*;
|
||||||
|
use crate::metrics;
|
||||||
|
use crate::storage::StorageError;
|
||||||
|
|
||||||
|
use super::NodeServiceImpl;
|
||||||
|
|
||||||
|
// Audit events in this module must never include secrets (no session tokens, passwords, or raw keys).
|
||||||
|
|
||||||
|
fn storage_err(err: StorageError) -> capnp::Error {
|
||||||
|
coded_error(E009_STORAGE_ERROR, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NodeServiceImpl {
|
||||||
|
    /// RPC: first half of the OPAQUE login flow.
    ///
    /// Deserializes the client's `CredentialRequest`, loads the registered
    /// password file for `username`, runs `ServerLogin::start`, stores the
    /// serialized server state in `pending_logins` (keyed by username, for
    /// `handle_opaque_login_finish` to consume), and returns the credential
    /// response bytes to the client.
    pub fn handle_opaque_login_start(
        &mut self,
        params: node_service::OpaqueLoginStartParams,
        mut results: node_service::OpaqueLoginStartResults,
    ) -> Promise<(), capnp::Error> {
        // Parameter extraction; any malformed field is reported as E020.
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let username = match p.get_username() {
            Ok(v) => v.to_string().unwrap_or_default().to_string(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let request_bytes = match p.get_request() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };

        if username.is_empty() {
            return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
        }

        let credential_request = match CredentialRequest::<OpaqueSuite>::deserialize(&request_bytes) {
            Ok(r) => r,
            Err(e) => {
                return Promise::err(coded_error(
                    E010_OPAQUE_ERROR,
                    format!("invalid credential request: {e}"),
                ))
            }
        };

        // Load the stored OPAQUE password file for this user.
        // NOTE(review): an unregistered user gets an explicit "user not
        // registered" error, which reveals account existence — confirm that
        // this disclosure is intended.
        let password_file = match self.store.get_user_record(&username) {
            Ok(Some(bytes)) => match ServerRegistration::<OpaqueSuite>::deserialize(&bytes) {
                Ok(pf) => Some(pf),
                Err(e) => {
                    return Promise::err(coded_error(
                        E010_OPAQUE_ERROR,
                        format!("corrupt user record: {e}"),
                    ))
                }
            },
            Ok(None) => {
                return Promise::err(coded_error(E010_OPAQUE_ERROR, "user not registered"))
            }
            Err(e) => return Promise::err(storage_err(e)),
        };

        let mut rng = rand::rngs::OsRng;
        let result = match ServerLogin::<OpaqueSuite>::start(
            &mut rng,
            &self.opaque_setup,
            password_file,
            credential_request,
            username.as_bytes(),
            Default::default(),
        ) {
            Ok(r) => r,
            Err(e) => {
                return Promise::err(coded_error(
                    E010_OPAQUE_ERROR,
                    format!("OPAQUE login start failed: {e}"),
                ))
            }
        };

        // Stash the server-side login state; login-finish removes (consumes)
        // it, so each start can be finished at most once.
        let state_bytes = result.state.serialize().to_vec();
        self.pending_logins.insert(
            username.clone(),
            PendingLogin {
                state_bytes,
                created_at: current_timestamp(),
            },
        );

        let response_bytes = result.message.serialize();
        results.get().set_response(&response_bytes);

        tracing::info!(user = %username, "OPAQUE login started");
        Promise::ok(())
    }
|
||||||
|
|
||||||
|
pub fn handle_opaque_register_start(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::OpaqueRegisterStartParams,
|
||||||
|
mut results: node_service::OpaqueRegisterStartResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let p = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let username = match p.get_username() {
|
||||||
|
Ok(v) => v.to_string().unwrap_or_default().to_string(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let request_bytes = match p.get_request() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
|
||||||
|
if username.is_empty() {
|
||||||
|
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Ok(true) = self.store.has_user_record(&username) {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E018_USER_EXISTS,
|
||||||
|
format!("user '{}' already registered", username),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let registration_request = match RegistrationRequest::<OpaqueSuite>::deserialize(&request_bytes) {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E010_OPAQUE_ERROR,
|
||||||
|
format!("invalid registration request: {e}"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let result = match ServerRegistration::<OpaqueSuite>::start(
|
||||||
|
&self.opaque_setup,
|
||||||
|
registration_request,
|
||||||
|
username.as_bytes(),
|
||||||
|
) {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E010_OPAQUE_ERROR,
|
||||||
|
format!("OPAQUE registration start failed: {e}"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let response_bytes = result.message.serialize();
|
||||||
|
results.get().set_response(&response_bytes);
|
||||||
|
|
||||||
|
tracing::info!(user = %username, "OPAQUE registration started");
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// RPC: second half of the OPAQUE login flow.
    ///
    /// Consumes the pending login state created by `handle_opaque_login_start`,
    /// verifies the client's `CredentialFinalization`, checks the presented
    /// identity key against the registered one (when present), then issues a
    /// random 32-byte session token with a `SESSION_TTL_SECS` lifetime.
    pub fn handle_opaque_login_finish(
        &mut self,
        params: node_service::OpaqueLoginFinishParams,
        mut results: node_service::OpaqueLoginFinishResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let username = match p.get_username() {
            Ok(v) => v.to_string().unwrap_or_default().to_string(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let finalization_bytes = match p.get_finalization() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // A missing identityKey becomes empty here and is rejected below.
        let identity_key = p.get_identity_key().unwrap_or_default().to_vec();

        if username.is_empty() {
            return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
        }

        // The pending state is removed up front, so each login-start can be
        // finished at most once.
        let pending = match self.pending_logins.remove(&username) {
            Some((_, pl)) => pl,
            None => {
                // Audit: login failure — do not log secrets (no token, no password).
                tracing::warn!(user = %username, "audit: auth login failure (no pending login)");
                metrics::record_auth_login_failure_total();
                return Promise::err(coded_error(E019_NO_PENDING_LOGIN, "no pending login for this username"))
            }
        };

        let server_login = match ServerLogin::<OpaqueSuite>::deserialize(&pending.state_bytes) {
            Ok(s) => s,
            Err(e) => {
                return Promise::err(coded_error(
                    E010_OPAQUE_ERROR,
                    format!("corrupt login state: {e}"),
                ))
            }
        };

        let finalization = match CredentialFinalization::<OpaqueSuite>::deserialize(&finalization_bytes) {
            Ok(f) => f,
            Err(e) => {
                return Promise::err(coded_error(
                    E010_OPAQUE_ERROR,
                    format!("invalid credential finalization: {e}"),
                ))
            }
        };

        // A finish failure is the wrong-password path of OPAQUE; count it as
        // an auth failure.
        let _result = match server_login.finish(finalization, Default::default()) {
            Ok(r) => r,
            Err(e) => {
                tracing::warn!(user = %username, "audit: auth login failure (OPAQUE finish failed)");
                metrics::record_auth_login_failure_total();
                return Promise::err(coded_error(
                    E010_OPAQUE_ERROR,
                    format!("OPAQUE login finish failed (bad password?): {e}"),
                ))
            }
        };

        // Sessions are bound to an identity key; refuse to issue one without.
        if identity_key.is_empty() {
            metrics::record_auth_login_failure_total();
            return Promise::err(coded_error(
                E016_IDENTITY_MISMATCH,
                "identity key required to bind session token",
            ));
        }

        // If an identity key was registered, the presented one must match.
        // NOTE(review): a storage error here is silently ignored and the
        // check skipped — confirm whether this should fail closed instead.
        if let Ok(Some(stored_ik)) = self.store.get_user_identity_key(&username) {
            if stored_ik != identity_key {
                tracing::warn!(user = %username, "audit: auth login failure (identity mismatch)");
                metrics::record_auth_login_failure_total();
                return Promise::err(coded_error(
                    E016_IDENTITY_MISMATCH,
                    "identity key does not match registered key",
                ));
            }
        }

        // Issue a fresh 32-byte session token from the OS RNG.
        let mut token = [0u8; 32];
        rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut token);
        let token_vec = token.to_vec();

        let now = current_timestamp();
        self.sessions.insert(
            token_vec.clone(),
            crate::auth::SessionInfo {
                username: username.clone(),
                identity_key,
                created_at: now,
                expires_at: now + SESSION_TTL_SECS,
            },
        );

        results.get().set_session_token(&token_vec);

        // Audit: login success — do not log session token or any secrets.
        metrics::record_auth_login_success_total();
        tracing::info!(user = %username, "audit: auth login success — session token issued");
        Promise::ok(())
    }
|
||||||
|
|
||||||
|
pub fn handle_opaque_register_finish(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::OpaqueRegisterFinishParams,
|
||||||
|
mut results: node_service::OpaqueRegisterFinishResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let p = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let username = match p.get_username() {
|
||||||
|
Ok(v) => v.to_string().unwrap_or_default().to_string(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let upload_bytes = match p.get_upload() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let identity_key = p.get_identity_key().unwrap_or_default().to_vec();
|
||||||
|
|
||||||
|
if username.is_empty() {
|
||||||
|
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let _request = match RegistrationRequest::<OpaqueSuite>::deserialize(&upload_bytes) {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E010_OPAQUE_ERROR,
|
||||||
|
format!("invalid registration upload: {e}"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match self.store.has_user_record(&username) {
|
||||||
|
Ok(true) => {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E018_USER_EXISTS,
|
||||||
|
format!("user '{}' already registered", username),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
Err(e) => return Promise::err(storage_err(e)),
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
|
||||||
|
let upload = match RegistrationUpload::<OpaqueSuite>::deserialize(&upload_bytes) {
|
||||||
|
Ok(u) => u,
|
||||||
|
Err(e) => {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E010_OPAQUE_ERROR,
|
||||||
|
format!("invalid registration upload: {e}"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let password_file = ServerRegistration::<OpaqueSuite>::finish(upload);
|
||||||
|
let record_bytes = password_file.serialize().to_vec();
|
||||||
|
|
||||||
|
if let Err(e) = self
|
||||||
|
.store
|
||||||
|
.store_user_record(&username, record_bytes)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !identity_key.is_empty() {
|
||||||
|
if let Err(e) = self
|
||||||
|
.store
|
||||||
|
.store_user_identity_key(&username, identity_key)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
results.get().set_success(true);
|
||||||
|
tracing::info!(user = %username, "OPAQUE registration complete");
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
318
crates/quicnprotochat-server/src/node_service/delivery.rs
Normal file
318
crates/quicnprotochat-server/src/node_service/delivery.rs
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use capnp::capability::Promise;
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use quicnprotochat_proto::node_capnp::node_service;
|
||||||
|
use tokio::sync::Notify;
|
||||||
|
use tokio::time::timeout;
|
||||||
|
|
||||||
|
use crate::auth::{
|
||||||
|
check_rate_limit, coded_error, fmt_hex, require_identity_or_request, validate_auth_context,
|
||||||
|
};
|
||||||
|
use crate::error_codes::*;
|
||||||
|
use crate::metrics;
|
||||||
|
use crate::storage::{StorageError, Store};
|
||||||
|
|
||||||
|
use super::{NodeServiceImpl, CURRENT_WIRE_VERSION};
|
||||||
|
|
||||||
|
// Audit events here must not include secrets: no payload content, no full recipient/token bytes (prefix only).
|
||||||
|
|
||||||
|
// Hard limits enforced by handle_enqueue.
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
const MAX_QUEUE_DEPTH: usize = 1000; // max undelivered messages per recipient/channel
|
||||||
|
|
||||||
|
fn storage_err(err: StorageError) -> capnp::Error {
|
||||||
|
coded_error(E009_STORAGE_ERROR, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fill_payloads_wait(
|
||||||
|
results: &mut node_service::FetchWaitResults,
|
||||||
|
messages: Vec<(u64, Vec<u8>)>,
|
||||||
|
) {
|
||||||
|
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||||
|
for (i, (seq, data)) in messages.iter().enumerate() {
|
||||||
|
let mut entry = list.reborrow().get(i as u32);
|
||||||
|
entry.set_seq(*seq);
|
||||||
|
entry.set_data(data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NodeServiceImpl {
|
||||||
|
    /// RPC: enqueue an encrypted payload for `recipientKey` on `channelId`.
    ///
    /// Validation order: params → auth token → key length / payload size /
    /// wire version → rate limit → (unless sealed-sender) sender identity →
    /// queue depth. On success returns the assigned sequence number and
    /// wakes any `fetchWait` waiters for this recipient.
    pub fn handle_enqueue(
        &mut self,
        params: node_service::EnqueueParams,
        mut results: node_service::EnqueueResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let recipient_key = match p.get_recipient_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let payload = match p.get_payload() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // A missing channelId is treated as the default (empty) channel.
        let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
        let version = p.get_version();
        let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        };

        if recipient_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
            ));
        }
        if payload.is_empty() {
            return Promise::err(coded_error(E005_PAYLOAD_EMPTY, "payload must not be empty"));
        }
        if payload.len() > MAX_PAYLOAD_BYTES {
            return Promise::err(coded_error(
                E006_PAYLOAD_TOO_LARGE,
                format!("payload exceeds max size ({} bytes)", MAX_PAYLOAD_BYTES),
            ));
        }
        if version != CURRENT_WIRE_VERSION {
            return Promise::err(coded_error(
                E012_WIRE_VERSION,
                format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
            ));
        }

        // Per-token rate limiting happens before any storage work.
        if let Err(e) = check_rate_limit(&self.rate_limiter, &auth_ctx.token) {
            // Audit: rate limit hit — do not log token or identity.
            tracing::warn!("rate_limit_hit");
            metrics::record_rate_limit_hit_total();
            return Promise::err(e);
        }

        // When sealed_sender is true, enqueue does not require identity; valid token only.
        if !self.sealed_sender {
            if let Err(e) = require_identity_or_request(
                &auth_ctx,
                &recipient_key,
                self.auth_cfg.allow_insecure_identity_from_request,
            ) {
                return Promise::err(e);
            }
        }

        // Backpressure: reject once the recipient/channel queue is full.
        // NOTE(review): the depth check and the enqueue below are separate
        // calls, so concurrent enqueues could briefly overshoot the cap —
        // confirm the store serializes these if that matters.
        match self.store.queue_depth(&recipient_key, &channel_id) {
            Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
                return Promise::err(coded_error(
                    E015_QUEUE_FULL,
                    format!("queue depth {} exceeds limit {}", depth, MAX_QUEUE_DEPTH),
                ));
            }
            Err(e) => return Promise::err(storage_err(e)),
            _ => {}
        }

        // Length captured before `payload` is moved into the store.
        let payload_len = payload.len();
        let seq = match self
            .store
            .enqueue(&recipient_key, &channel_id, payload)
            .map_err(storage_err)
        {
            Ok(seq) => seq,
            Err(e) => return Promise::err(e),
        };

        results.get().set_seq(seq);

        // Metrics and audit. Audit events must not include secrets (no payload, no full keys).
        metrics::record_enqueue_total();
        metrics::record_enqueue_bytes(payload_len as u64);
        if let Ok(depth) = self.store.queue_depth(&recipient_key, &channel_id) {
            metrics::record_delivery_queue_depth(depth);
        }
        tracing::info!(
            recipient_prefix = %fmt_hex(&recipient_key[..4]),
            payload_len = payload_len,
            seq = seq,
            "audit: enqueue"
        );

        // Wake any fetchWait callers blocked on this recipient.
        crate::auth::waiter(&self.waiters, &recipient_key).notify_waiters();

        Promise::ok(())
    }
|
||||||
|
|
||||||
|
pub fn handle_fetch(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::FetchParams,
|
||||||
|
mut results: node_service::FetchResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let recipient_key = match params.get() {
|
||||||
|
Ok(p) => match p.get_recipient_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
},
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let channel_id = params
|
||||||
|
.get()
|
||||||
|
.ok()
|
||||||
|
.and_then(|p| p.get_channel_id().ok())
|
||||||
|
.map(|c| c.to_vec())
|
||||||
|
.unwrap_or_default();
|
||||||
|
let version = params
|
||||||
|
.get()
|
||||||
|
.ok()
|
||||||
|
.map(|p| p.get_version())
|
||||||
|
.unwrap_or(CURRENT_WIRE_VERSION);
|
||||||
|
let limit = params.get().ok().map(|p| p.get_limit()).unwrap_or(0);
|
||||||
|
let auth_ctx = match params
|
||||||
|
.get()
|
||||||
|
.ok()
|
||||||
|
.map(|p| validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()))
|
||||||
|
.transpose()
|
||||||
|
{
|
||||||
|
Ok(ctx) => ctx,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
if recipient_key.len() != 32 {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E004_IDENTITY_KEY_LENGTH,
|
||||||
|
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if version != CURRENT_WIRE_VERSION {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E012_WIRE_VERSION,
|
||||||
|
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let auth_ctx = match auth_ctx {
|
||||||
|
Some(ctx) => ctx,
|
||||||
|
None => return Promise::err(coded_error(E003_INVALID_TOKEN, "auth required")),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = require_identity_or_request(
|
||||||
|
&auth_ctx,
|
||||||
|
&recipient_key,
|
||||||
|
self.auth_cfg.allow_insecure_identity_from_request,
|
||||||
|
) {
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
let messages = if limit > 0 {
|
||||||
|
match self
|
||||||
|
.store
|
||||||
|
.fetch_limited(&recipient_key, &channel_id, limit as usize)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
Ok(m) => m,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
match self
|
||||||
|
.store
|
||||||
|
.fetch(&recipient_key, &channel_id)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
Ok(m) => m,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Audit: fetch — do not log payload or full keys.
|
||||||
|
metrics::record_fetch_total();
|
||||||
|
tracing::info!(
|
||||||
|
recipient_prefix = %fmt_hex(&recipient_key[..4]),
|
||||||
|
count = messages.len(),
|
||||||
|
"audit: fetch"
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||||
|
for (i, (seq, data)) in messages.iter().enumerate() {
|
||||||
|
let mut entry = list.reborrow().get(i as u32);
|
||||||
|
entry.set_seq(*seq);
|
||||||
|
entry.set_data(data);
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// RPC: long-poll variant of fetch.
    ///
    /// Drains immediately if messages are queued; otherwise, when
    /// `timeoutMs > 0`, waits on the recipient's `Notify` until an enqueue
    /// wakes it or the timeout elapses, then drains once more. A timeout is
    /// not an error — the call simply returns whatever is queued (possibly
    /// nothing).
    pub fn handle_fetch_wait(
        &mut self,
        params: node_service::FetchWaitParams,
        mut results: node_service::FetchWaitResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let recipient_key = match p.get_recipient_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // A missing channelId is treated as the default (empty) channel.
        let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
        let version = p.get_version();
        let timeout_ms = p.get_timeout_ms();
        let limit = p.get_limit();
        let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        };

        if recipient_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
            ));
        }
        if version != CURRENT_WIRE_VERSION {
            return Promise::err(coded_error(
                E012_WIRE_VERSION,
                format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
            ));
        }

        // Only the recipient may drain its queue.
        if let Err(e) = require_identity_or_request(
            &auth_ctx,
            &recipient_key,
            self.auth_cfg.allow_insecure_identity_from_request,
        ) {
            return Promise::err(e);
        }

        // Clone the Arcs so the async block owns everything it needs after
        // `self` goes out of scope.
        let store = Arc::clone(&self.store);
        let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = self.waiters.clone();

        Promise::from_future(async move {
            // limit == 0 means "no limit" (plain fetch).
            let fetch_fn = |s: &Arc<dyn Store>, rk: &[u8], ch: &[u8], lim: u32| -> Result<Vec<(u64, Vec<u8>)>, capnp::Error> {
                if lim > 0 {
                    s.fetch_limited(rk, ch, lim as usize).map_err(storage_err)
                } else {
                    s.fetch(rk, ch).map_err(storage_err)
                }
            };

            let messages = fetch_fn(&store, &recipient_key, &channel_id, limit)?;

            if messages.is_empty() && timeout_ms > 0 {
                // Register (or reuse) the per-recipient waiter, then sleep
                // until an enqueue notifies it or the timeout fires. The
                // timeout result is intentionally discarded; either way we
                // re-fetch below.
                // NOTE(review): the waiter is registered after the first
                // fetch, so an enqueue landing in that gap is only observed
                // via the post-wait re-fetch — confirm that is acceptable.
                let waiter = waiters
                    .entry(recipient_key.clone())
                    .or_insert_with(|| Arc::new(Notify::new()))
                    .clone();
                let _ = timeout(Duration::from_millis(timeout_ms), waiter.notified()).await;
                let msgs = fetch_fn(&store, &recipient_key, &channel_id, limit)?;
                fill_payloads_wait(&mut results, msgs);
                metrics::record_fetch_wait_total();
                return Ok(());
            }

            fill_payloads_wait(&mut results, messages);
            metrics::record_fetch_wait_total();
            Ok(())
        })
    }
|
||||||
|
}
|
||||||
259
crates/quicnprotochat-server/src/node_service/key_ops.rs
Normal file
259
crates/quicnprotochat-server/src/node_service/key_ops.rs
Normal file
@@ -0,0 +1,259 @@
|
|||||||
|
use capnp::capability::Promise;
|
||||||
|
use quicnprotochat_proto::node_capnp::node_service;
|
||||||
|
|
||||||
|
use crate::auth::{coded_error, fmt_hex, require_identity_or_request, validate_auth_context};
|
||||||
|
use crate::error_codes::*;
|
||||||
|
use crate::metrics;
|
||||||
|
use crate::storage::StorageError;
|
||||||
|
|
||||||
|
use super::NodeServiceImpl;
|
||||||
|
|
||||||
|
fn storage_err(err: StorageError) -> capnp::Error {
|
||||||
|
coded_error(E009_STORAGE_ERROR, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Upper bound on an uploaded KeyPackage: 1 MB.
const MAX_KEYPACKAGE_BYTES: usize = 1024 * 1024;
|
|
||||||
|
impl NodeServiceImpl {
|
||||||
|
    /// RPC: store an MLS KeyPackage for `identityKey` and return its
    /// fingerprint.
    ///
    /// Check order: auth token → key length → package non-empty / size cap →
    /// identity binding → allowed ciphersuite → persist.
    pub fn handle_upload_key_package(
        &mut self,
        params: node_service::UploadKeyPackageParams,
        mut results: node_service::UploadKeyPackageResults,
    ) -> Promise<(), capnp::Error> {
        let (auth_ctx, identity_key, package) = match params.get() {
            Ok(p) => {
                let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
                    Ok(ctx) => ctx,
                    Err(e) => return Promise::err(e),
                };
                let ik = match p.get_identity_key() {
                    Ok(v) => v.to_vec(),
                    Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
                };
                let pkg = match p.get_package() {
                    Ok(v) => v.to_vec(),
                    Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
                };
                (auth_ctx, ik, pkg)
            }
            // NOTE(review): unlike the field getters above, a failed
            // top-level read is returned without the E020 wrapper — confirm
            // whether that inconsistency is intended.
            Err(e) => return Promise::err(e),
        };

        if identity_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
            ));
        }
        if package.is_empty() {
            return Promise::err(coded_error(E007_PACKAGE_EMPTY, "package must not be empty"));
        }
        if package.len() > MAX_KEYPACKAGE_BYTES {
            return Promise::err(coded_error(
                E008_PACKAGE_TOO_LARGE,
                format!("package exceeds max size ({} bytes)", MAX_KEYPACKAGE_BYTES),
            ));
        }

        // Callers may only upload packages for an identity they control.
        if let Err(e) = require_identity_or_request(
            &auth_ctx,
            &identity_key,
            self.auth_cfg.allow_insecure_identity_from_request,
        ) {
            return Promise::err(e);
        }

        // Reject KeyPackages using a ciphersuite outside the allowed set.
        if let Err(e) = quicnprotochat_core::validate_keypackage_ciphersuite(&package) {
            return Promise::err(coded_error(
                E021_CIPHERSUITE_NOT_ALLOWED,
                format!("KeyPackage ciphersuite not allowed: {e}"),
            ));
        }

        // Fingerprint computed before `package` is moved into the store.
        let fingerprint: Vec<u8> = crate::auth::fingerprint(&package);
        if let Err(e) = self
            .store
            .upload_key_package(&identity_key, package)
            .map_err(storage_err)
        {
            return Promise::err(e);
        }

        results.get().set_fingerprint(&fingerprint);

        metrics::record_key_package_upload_total();
        // Audit: KeyPackage upload — only fingerprint prefix, no secrets.
        tracing::info!(
            identity_prefix = %fmt_hex(&identity_key[..4]),
            fingerprint_prefix = %fmt_hex(&fingerprint[..4]),
            "audit: key_package_upload"
        );

        Promise::ok(())
    }
|
||||||
|
|
||||||
|
pub fn handle_fetch_key_package(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::FetchKeyPackageParams,
|
||||||
|
mut results: node_service::FetchKeyPackageResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let identity_key = match params.get() {
|
||||||
|
Ok(p) => match p.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
},
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
if let Err(e) = params
|
||||||
|
.get()
|
||||||
|
.ok()
|
||||||
|
.map(|p| crate::auth::validate_auth(&self.auth_cfg, &self.sessions, p.get_auth()))
|
||||||
|
.transpose()
|
||||||
|
{
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
if identity_key.len() != 32 {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E004_IDENTITY_KEY_LENGTH,
|
||||||
|
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let package = match self
|
||||||
|
.store
|
||||||
|
.fetch_key_package(&identity_key)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
match package {
|
||||||
|
Some(pkg) => {
|
||||||
|
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "KeyPackage fetched");
|
||||||
|
results.get().set_package(&pkg);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
tracing::debug!(
|
||||||
|
identity = %fmt_hex(&identity_key[..4]),
|
||||||
|
"no KeyPackage available for identity"
|
||||||
|
);
|
||||||
|
results.get().set_package(&[]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// RPC: store the hybrid public key for `identityKey`.
    ///
    /// Requires a valid auth token, a 32-byte identity key, a non-empty
    /// hybrid key, and an identity-binding check before persisting. Returns
    /// no result payload on success.
    pub fn handle_upload_hybrid_key(
        &mut self,
        params: node_service::UploadHybridKeyParams,
        _results: node_service::UploadHybridKeyResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let identity_key = match p.get_identity_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let hybrid_pk = match p.get_hybrid_public_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };

        let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        };

        if identity_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
            ));
        }
        if hybrid_pk.is_empty() {
            return Promise::err(coded_error(E013_HYBRID_KEY_EMPTY, "hybridPublicKey must not be empty"));
        }

        // Callers may only upload a hybrid key for an identity they control.
        if let Err(e) = require_identity_or_request(
            &auth_ctx,
            &identity_key,
            self.auth_cfg.allow_insecure_identity_from_request,
        ) {
            return Promise::err(e);
        }

        if let Err(e) = self
            .store
            .upload_hybrid_key(&identity_key, hybrid_pk)
            .map_err(storage_err)
        {
            return Promise::err(e);
        }

        tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "hybrid public key uploaded");

        Promise::ok(())
    }
|
||||||
|
|
||||||
|
pub fn handle_fetch_hybrid_key(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::FetchHybridKeyParams,
|
||||||
|
mut results: node_service::FetchHybridKeyResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let p = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let identity_key = match p.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
|
||||||
|
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||||
|
Ok(ctx) => ctx,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
if identity_key.len() != 32 {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E004_IDENTITY_KEY_LENGTH,
|
||||||
|
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(e) = require_identity_or_request(
|
||||||
|
&auth_ctx,
|
||||||
|
&identity_key,
|
||||||
|
self.auth_cfg.allow_insecure_identity_from_request,
|
||||||
|
) {
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
let hybrid_pk = match self
|
||||||
|
.store
|
||||||
|
.fetch_hybrid_key(&identity_key)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
match hybrid_pk {
|
||||||
|
Some(pk) => {
|
||||||
|
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "hybrid key fetched");
|
||||||
|
results.get().set_hybrid_public_key(&pk);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "no hybrid key for identity");
|
||||||
|
results.get().set_hybrid_public_key(&[]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
244
crates/quicnprotochat-server/src/node_service/mod.rs
Normal file
244
crates/quicnprotochat-server/src/node_service/mod.rs
Normal file
@@ -0,0 +1,244 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use capnp_rpc::RpcSystem;
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use opaque_ke::ServerSetup;
|
||||||
|
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
||||||
|
use quicnprotochat_proto::node_capnp::node_service;
|
||||||
|
use tokio::sync::Notify;
|
||||||
|
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||||
|
|
||||||
|
use crate::auth::{
|
||||||
|
current_timestamp, AuthConfig, PendingLogin, RateLimiter, SessionInfo, PENDING_LOGIN_TTL_SECS,
|
||||||
|
};
|
||||||
|
use crate::storage::Store;
|
||||||
|
|
||||||
|
mod auth_ops;
|
||||||
|
mod delivery;
|
||||||
|
mod key_ops;
|
||||||
|
mod p2p_ops;
|
||||||
|
|
||||||
|
// Thin trait adapter for the generated Cap'n Proto `node_service::Server`
// trait: every capability method delegates 1:1 to a `handle_*` method, which
// live in the split-out submodules (auth_ops, delivery, key_ops, p2p_ops).
// No logic belongs here — add behavior in the handler, not in this impl.
impl node_service::Server for NodeServiceImpl {
    // --- Key package distribution (key_ops) ---

    fn upload_key_package(
        &mut self,
        params: node_service::UploadKeyPackageParams,
        results: node_service::UploadKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_key_package(params, results)
    }

    fn fetch_key_package(
        &mut self,
        params: node_service::FetchKeyPackageParams,
        results: node_service::FetchKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_key_package(params, results)
    }

    // --- Message delivery (delivery) ---

    fn enqueue(
        &mut self,
        params: node_service::EnqueueParams,
        results: node_service::EnqueueResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_enqueue(params, results)
    }

    fn fetch(
        &mut self,
        params: node_service::FetchParams,
        results: node_service::FetchResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch(params, results)
    }

    fn fetch_wait(
        &mut self,
        params: node_service::FetchWaitParams,
        results: node_service::FetchWaitResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_wait(params, results)
    }

    // --- Health / hybrid keys (p2p_ops, key_ops) ---

    fn health(
        &mut self,
        params: node_service::HealthParams,
        results: node_service::HealthResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_health(params, results)
    }

    fn upload_hybrid_key(
        &mut self,
        params: node_service::UploadHybridKeyParams,
        results: node_service::UploadHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_hybrid_key(params, results)
    }

    fn fetch_hybrid_key(
        &mut self,
        params: node_service::FetchHybridKeyParams,
        results: node_service::FetchHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_hybrid_key(params, results)
    }

    // --- OPAQUE authentication (auth_ops) ---

    fn opaque_login_start(
        &mut self,
        params: node_service::OpaqueLoginStartParams,
        results: node_service::OpaqueLoginStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_start(params, results)
    }

    fn opaque_register_start(
        &mut self,
        params: node_service::OpaqueRegisterStartParams,
        results: node_service::OpaqueRegisterStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_start(params, results)
    }

    fn opaque_login_finish(
        &mut self,
        params: node_service::OpaqueLoginFinishParams,
        results: node_service::OpaqueLoginFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_finish(params, results)
    }

    fn opaque_register_finish(
        &mut self,
        params: node_service::OpaqueRegisterFinishParams,
        results: node_service::OpaqueRegisterFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_finish(params, results)
    }

    // --- P2P endpoint discovery (p2p_ops) ---

    fn publish_endpoint(
        &mut self,
        params: node_service::PublishEndpointParams,
        results: node_service::PublishEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_publish_endpoint(params, results)
    }

    fn resolve_endpoint(
        &mut self,
        params: node_service::ResolveEndpointParams,
        results: node_service::ResolveEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_resolve_endpoint(params, results)
    }
}
|
||||||
|
|
||||||
|
/// Current NodeService wire version advertised by this server.
pub const CURRENT_WIRE_VERSION: u16 = 1;

/// Shared state behind every NodeService RPC connection.
///
/// All fields are `Arc`-shared across connections; the RPC handlers live in
/// the auth_ops/delivery/key_ops/p2p_ops submodules.
pub struct NodeServiceImpl {
    /// Persistent backend (key packages, deliveries, hybrid keys, users,
    /// endpoints).
    pub store: Arc<dyn Store>,
    /// Per-inbox wakeup notifiers, keyed by recipient key.
    /// NOTE(review): presumably used by `fetchWait` long-polling — confirm in
    /// the delivery module.
    pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    /// Authentication policy (incl. `allow_insecure_identity_from_request`).
    pub auth_cfg: Arc<AuthConfig>,
    /// OPAQUE server key material used by the register/login handlers.
    pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    /// In-flight OPAQUE logins awaiting their finish step, keyed by username.
    pub pending_logins: Arc<DashMap<String, PendingLogin>>,
    /// Live session tokens -> session metadata.
    pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    /// Per-client request throttling.
    pub rate_limiter: Arc<RateLimiter>,
    /// When true, enqueue does not require identity-bound session (Sealed Sender).
    pub sealed_sender: bool,
}
|
||||||
|
|
||||||
|
impl NodeServiceImpl {
    /// Bundles the process-wide shared state for one RPC connection.
    ///
    /// Every `Arc` argument is a cheap clone of state owned elsewhere; this
    /// constructor allocates nothing and has no side effects.
    pub fn new(
        store: Arc<dyn Store>,
        waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
        auth_cfg: Arc<AuthConfig>,
        opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
        pending_logins: Arc<DashMap<String, PendingLogin>>,
        sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
        rate_limiter: Arc<RateLimiter>,
        sealed_sender: bool,
    ) -> Self {
        Self {
            store,
            waiters,
            auth_cfg,
            opaque_setup,
            pending_logins,
            sessions,
            rate_limiter,
            sealed_sender,
        }
    }
}
|
||||||
|
|
||||||
|
pub async fn handle_node_connection(
|
||||||
|
connecting: quinn::Connecting,
|
||||||
|
store: Arc<dyn Store>,
|
||||||
|
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
|
||||||
|
auth_cfg: Arc<AuthConfig>,
|
||||||
|
opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
|
||||||
|
pending_logins: Arc<DashMap<String, PendingLogin>>,
|
||||||
|
sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
|
||||||
|
rate_limiter: Arc<RateLimiter>,
|
||||||
|
sealed_sender: bool,
|
||||||
|
) -> Result<(), anyhow::Error> {
|
||||||
|
let connection = connecting.await?;
|
||||||
|
|
||||||
|
tracing::info!(peer = %connection.remote_address(), "QUIC connected");
|
||||||
|
|
||||||
|
let (send, recv) = connection
|
||||||
|
.accept_bi()
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("failed to accept bi stream: {e}"))?;
|
||||||
|
let (reader, writer) = (recv.compat(), send.compat_write());
|
||||||
|
|
||||||
|
let network = capnp_rpc::twoparty::VatNetwork::new(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
capnp_rpc::rpc_twoparty_capnp::Side::Server,
|
||||||
|
Default::default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let service: node_service::Client = capnp_rpc::new_client(NodeServiceImpl::new(
|
||||||
|
store,
|
||||||
|
waiters,
|
||||||
|
auth_cfg,
|
||||||
|
opaque_setup,
|
||||||
|
pending_logins,
|
||||||
|
sessions,
|
||||||
|
rate_limiter,
|
||||||
|
sealed_sender,
|
||||||
|
));
|
||||||
|
|
||||||
|
RpcSystem::new(Box::new(network), Some(service.client))
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("NodeService RPC error: {e}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
const MESSAGE_TTL_SECS: u64 = 7 * 24 * 60 * 60; // 7 days
|
||||||
|
|
||||||
|
pub fn spawn_cleanup_task(
|
||||||
|
sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
|
||||||
|
pending_logins: Arc<DashMap<String, PendingLogin>>,
|
||||||
|
store: Arc<dyn Store>,
|
||||||
|
) {
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut interval = tokio::time::interval(Duration::from_secs(60));
|
||||||
|
loop {
|
||||||
|
interval.tick().await;
|
||||||
|
let now = current_timestamp();
|
||||||
|
|
||||||
|
sessions.retain(|_, info| info.expires_at > now);
|
||||||
|
pending_logins.retain(|_, pl| now - pl.created_at < PENDING_LOGIN_TTL_SECS);
|
||||||
|
// Rate limit cleanup is handled automatically by governor's DashMapStateStore.
|
||||||
|
|
||||||
|
match store.gc_expired_messages(MESSAGE_TTL_SECS) {
|
||||||
|
Ok(n) if n > 0 => {
|
||||||
|
tracing::debug!(expired = n, "garbage collected expired messages")
|
||||||
|
}
|
||||||
|
Err(e) => tracing::warn!(error = %e, "message GC failed"),
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
118
crates/quicnprotochat-server/src/node_service/p2p_ops.rs
Normal file
118
crates/quicnprotochat-server/src/node_service/p2p_ops.rs
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
use capnp::capability::Promise;
|
||||||
|
use quicnprotochat_proto::node_capnp::node_service;
|
||||||
|
|
||||||
|
use crate::auth::{
|
||||||
|
coded_error, fmt_hex, require_identity_or_request, validate_auth, validate_auth_context,
|
||||||
|
};
|
||||||
|
use crate::error_codes::*;
|
||||||
|
use crate::storage::StorageError;
|
||||||
|
|
||||||
|
use super::NodeServiceImpl;
|
||||||
|
|
||||||
|
/// Maps a storage-layer failure onto the shared E009 coded capnp error so all
/// handlers in this module report storage problems uniformly.
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
|
||||||
|
|
||||||
|
impl NodeServiceImpl {
|
||||||
|
pub fn handle_health(
|
||||||
|
&mut self,
|
||||||
|
_params: node_service::HealthParams,
|
||||||
|
mut results: node_service::HealthResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
results.get().set_status("ok");
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn handle_publish_endpoint(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::PublishEndpointParams,
|
||||||
|
_results: node_service::PublishEndpointResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let p = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let identity_key = match p.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let node_addr = match p.get_node_addr() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||||
|
Ok(ctx) => ctx,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
if identity_key.len() != 32 {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E004_IDENTITY_KEY_LENGTH,
|
||||||
|
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(e) = require_identity_or_request(
|
||||||
|
&auth_ctx,
|
||||||
|
&identity_key,
|
||||||
|
self.auth_cfg.allow_insecure_identity_from_request,
|
||||||
|
) {
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(e) = self
|
||||||
|
.store
|
||||||
|
.publish_endpoint(&identity_key, node_addr)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "endpoint published");
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn handle_resolve_endpoint(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::ResolveEndpointParams,
|
||||||
|
mut results: node_service::ResolveEndpointResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let p = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
let identity_key = match p.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||||
|
};
|
||||||
|
if let Err(e) = validate_auth(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
if identity_key.len() != 32 {
|
||||||
|
return Promise::err(coded_error(
|
||||||
|
E004_IDENTITY_KEY_LENGTH,
|
||||||
|
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let endpoint = match self
|
||||||
|
.store
|
||||||
|
.resolve_endpoint(&identity_key)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
Ok(e) => e,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(ep) = endpoint {
|
||||||
|
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "endpoint resolved");
|
||||||
|
results.get().set_node_addr(&ep);
|
||||||
|
} else {
|
||||||
|
results.get().set_node_addr(&[]);
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -7,12 +7,45 @@ use rusqlite::{params, Connection};
|
|||||||
|
|
||||||
use crate::storage::{StorageError, Store};
|
use crate::storage::{StorageError, Store};
|
||||||
|
|
||||||
|
/// Schema version after introducing the migration runner (existing DBs had 1).
|
||||||
|
const SCHEMA_VERSION: i32 = 3;
|
||||||
|
|
||||||
|
/// Migrations: (migration_number, SQL). Files named NNN_name.sql, applied in order when N > user_version.
|
||||||
|
const MIGRATIONS: &[(i32, &str)] = &[
|
||||||
|
(1, include_str!("../migrations/001_initial.sql")),
|
||||||
|
(3, include_str!("../migrations/002_add_seq.sql")),
|
||||||
|
];
|
||||||
|
|
||||||
|
/// Runs pending migrations on an open connection: applies any migration whose number is greater
|
||||||
|
/// than the current PRAGMA user_version, then sets user_version to SCHEMA_VERSION.
|
||||||
|
fn run_migrations(conn: &Connection) -> Result<(), StorageError> {
|
||||||
|
let current_version: i32 = conn
|
||||||
|
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||||
|
.map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
|
||||||
|
|
||||||
|
for (migration_num, sql) in MIGRATIONS {
|
||||||
|
if *migration_num > current_version {
|
||||||
|
conn.execute_batch(sql).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
conn.pragma_update(None, "user_version", SCHEMA_VERSION)
|
||||||
|
.map_err(|e| StorageError::Db(format!("set user_version failed: {e}")))?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// SQLCipher-encrypted storage backend.
|
/// SQLCipher-encrypted storage backend.
|
||||||
pub struct SqlStore {
|
pub struct SqlStore {
|
||||||
conn: Mutex<Connection>,
|
conn: Mutex<Connection>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SqlStore {
|
impl SqlStore {
|
||||||
|
fn lock_conn(&self) -> Result<std::sync::MutexGuard<'_, Connection>, StorageError> {
|
||||||
|
self.conn
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| StorageError::Db(format!("lock poisoned: {e}")))
|
||||||
|
}
|
||||||
|
|
||||||
pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
|
pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
|
||||||
let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
|
let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
|
||||||
@@ -28,66 +61,21 @@ impl SqlStore {
|
|||||||
)
|
)
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
|
||||||
let store = Self {
|
let current_version: i32 = conn
|
||||||
conn: Mutex::new(conn),
|
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||||
};
|
.map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
|
||||||
store.migrate()?;
|
|
||||||
Ok(store)
|
if current_version > SCHEMA_VERSION {
|
||||||
|
return Err(StorageError::Db(format!(
|
||||||
|
"database schema version {current_version} is newer than supported {SCHEMA_VERSION}"
|
||||||
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
fn migrate(&self) -> Result<(), StorageError> {
|
run_migrations(&conn)?;
|
||||||
let conn = self.conn.lock().unwrap();
|
|
||||||
conn.execute_batch(
|
|
||||||
"CREATE TABLE IF NOT EXISTS key_packages (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
identity_key BLOB NOT NULL,
|
|
||||||
package_data BLOB NOT NULL,
|
|
||||||
created_at INTEGER DEFAULT (strftime('%s','now'))
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS deliveries (
|
Ok(Self {
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
conn: Mutex::new(conn),
|
||||||
recipient_key BLOB NOT NULL,
|
})
|
||||||
channel_id BLOB NOT NULL DEFAULT X'',
|
|
||||||
payload BLOB NOT NULL,
|
|
||||||
created_at INTEGER DEFAULT (strftime('%s','now'))
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS hybrid_keys (
|
|
||||||
identity_key BLOB PRIMARY KEY,
|
|
||||||
hybrid_public_key BLOB NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_kp_identity
|
|
||||||
ON key_packages(identity_key);
|
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_del_recipient_channel
|
|
||||||
ON deliveries(recipient_key, channel_id);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS server_setup (
|
|
||||||
id INTEGER PRIMARY KEY CHECK (id = 1),
|
|
||||||
setup_data BLOB NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS users (
|
|
||||||
username TEXT PRIMARY KEY,
|
|
||||||
opaque_record BLOB NOT NULL,
|
|
||||||
created_at INTEGER DEFAULT (strftime('%s','now'))
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS user_identity_keys (
|
|
||||||
username TEXT PRIMARY KEY,
|
|
||||||
identity_key BLOB NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS endpoints (
|
|
||||||
identity_key BLOB PRIMARY KEY,
|
|
||||||
node_addr BLOB NOT NULL,
|
|
||||||
updated_at INTEGER DEFAULT (strftime('%s','now'))
|
|
||||||
);",
|
|
||||||
)
|
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -97,7 +85,7 @@ impl Store for SqlStore {
|
|||||||
identity_key: &[u8],
|
identity_key: &[u8],
|
||||||
package: Vec<u8>,
|
package: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
conn.execute(
|
conn.execute(
|
||||||
"INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
|
"INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
|
||||||
params![identity_key, package],
|
params![identity_key, package],
|
||||||
@@ -107,7 +95,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
|
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare(
|
.prepare(
|
||||||
@@ -140,50 +128,64 @@ impl Store for SqlStore {
|
|||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
payload: Vec<u8>,
|
payload: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<u64, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
conn.execute(
|
// Atomically get-and-increment the per-inbox sequence counter.
|
||||||
"INSERT INTO deliveries (recipient_key, channel_id, payload) VALUES (?1, ?2, ?3)",
|
// RETURNING gives us the post-update next_seq; the assigned seq is next_seq - 1.
|
||||||
params![recipient_key, channel_id, payload],
|
let seq: i64 = conn
|
||||||
|
.query_row(
|
||||||
|
"INSERT INTO delivery_seq_counters (recipient_key, channel_id, next_seq)
|
||||||
|
VALUES (?1, ?2, 1)
|
||||||
|
ON CONFLICT(recipient_key, channel_id) DO UPDATE SET next_seq = next_seq + 1
|
||||||
|
RETURNING next_seq - 1",
|
||||||
|
params![recipient_key, channel_id],
|
||||||
|
|row| row.get(0),
|
||||||
)
|
)
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
Ok(())
|
conn.execute(
|
||||||
|
"INSERT INTO deliveries (recipient_key, channel_id, seq, payload) VALUES (?1, ?2, ?3, ?4)",
|
||||||
|
params![recipient_key, channel_id, seq, payload],
|
||||||
|
)
|
||||||
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
Ok(seq as u64)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn fetch(
|
fn fetch(
|
||||||
&self,
|
&self,
|
||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
|
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare(
|
.prepare(
|
||||||
"SELECT id, payload FROM deliveries
|
"SELECT id, seq, payload FROM deliveries
|
||||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||||
ORDER BY id ASC",
|
ORDER BY seq ASC",
|
||||||
)
|
)
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
|
||||||
let rows: Vec<(i64, Vec<u8>)> = stmt
|
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||||
.query_map(params![recipient_key, channel_id], |row| {
|
.query_map(params![recipient_key, channel_id], |row| {
|
||||||
Ok((row.get(0)?, row.get(1)?))
|
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||||
})
|
})
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||||
.collect::<Result<Vec<_>, _>>()
|
.collect::<Result<Vec<_>, _>>()
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
|
||||||
if !rows.is_empty() {
|
if !rows.is_empty() {
|
||||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||||
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
|
.iter()
|
||||||
|
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||||
|
.collect();
|
||||||
conn.execute(&sql, params.as_slice())
|
conn.execute(&sql, params.as_slice())
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn fetch_limited(
|
fn fetch_limited(
|
||||||
@@ -191,45 +193,43 @@ impl Store for SqlStore {
|
|||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
limit: usize,
|
limit: usize,
|
||||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
|
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare(
|
.prepare(
|
||||||
"SELECT id, payload FROM deliveries
|
"SELECT id, seq, payload FROM deliveries
|
||||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||||
ORDER BY id ASC
|
ORDER BY seq ASC
|
||||||
LIMIT ?3",
|
LIMIT ?3",
|
||||||
)
|
)
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
|
||||||
let rows: Vec<(i64, Vec<u8>)> = stmt
|
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||||
.query_map(params![recipient_key, channel_id, limit as i64], |row| {
|
.query_map(params![recipient_key, channel_id, limit as i64], |row| {
|
||||||
Ok((row.get(0)?, row.get(1)?))
|
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||||
})
|
})
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||||
.collect::<Result<Vec<_>, _>>()
|
.collect::<Result<Vec<_>, _>>()
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
|
|
||||||
if !rows.is_empty() {
|
if !rows.is_empty() {
|
||||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||||
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
|
.iter()
|
||||||
|
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||||
|
.collect();
|
||||||
conn.execute(&sql, params.as_slice())
|
conn.execute(&sql, params.as_slice())
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queue_depth(
|
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||||
&self,
|
let conn = self.lock_conn()?;
|
||||||
recipient_key: &[u8],
|
|
||||||
channel_id: &[u8],
|
|
||||||
) -> Result<usize, StorageError> {
|
|
||||||
let conn = self.conn.lock().unwrap();
|
|
||||||
let count: i64 = conn
|
let count: i64 = conn
|
||||||
.query_row(
|
.query_row(
|
||||||
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
|
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
|
||||||
@@ -241,7 +241,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
|
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
let cutoff = std::time::SystemTime::now()
|
let cutoff = std::time::SystemTime::now()
|
||||||
.duration_since(std::time::UNIX_EPOCH)
|
.duration_since(std::time::UNIX_EPOCH)
|
||||||
.unwrap_or_default()
|
.unwrap_or_default()
|
||||||
@@ -261,7 +261,7 @@ impl Store for SqlStore {
|
|||||||
identity_key: &[u8],
|
identity_key: &[u8],
|
||||||
hybrid_pk: Vec<u8>,
|
hybrid_pk: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
conn.execute(
|
conn.execute(
|
||||||
"INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
|
"INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
|
||||||
params![identity_key, hybrid_pk],
|
params![identity_key, hybrid_pk],
|
||||||
@@ -271,7 +271,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
|
.prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
@@ -282,7 +282,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
conn.execute(
|
conn.execute(
|
||||||
"INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
|
"INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
|
||||||
params![setup],
|
params![setup],
|
||||||
@@ -292,7 +292,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare("SELECT setup_data FROM server_setup WHERE id = 1")
|
.prepare("SELECT setup_data FROM server_setup WHERE id = 1")
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
@@ -303,7 +303,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
conn.execute(
|
conn.execute(
|
||||||
"INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
|
"INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
|
||||||
params![username, record],
|
params![username, record],
|
||||||
@@ -313,7 +313,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare("SELECT opaque_record FROM users WHERE username = ?1")
|
.prepare("SELECT opaque_record FROM users WHERE username = ?1")
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
@@ -324,7 +324,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
let exists: bool = conn
|
let exists: bool = conn
|
||||||
.query_row(
|
.query_row(
|
||||||
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
|
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
|
||||||
@@ -340,7 +340,7 @@ impl Store for SqlStore {
|
|||||||
username: &str,
|
username: &str,
|
||||||
identity_key: Vec<u8>,
|
identity_key: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
conn.execute(
|
conn.execute(
|
||||||
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
|
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
|
||||||
params![username, identity_key],
|
params![username, identity_key],
|
||||||
@@ -350,7 +350,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
|
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
@@ -365,7 +365,7 @@ impl Store for SqlStore {
|
|||||||
identity_key: &[u8],
|
identity_key: &[u8],
|
||||||
node_addr: Vec<u8>,
|
node_addr: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
conn.execute(
|
conn.execute(
|
||||||
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
|
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
|
||||||
params![identity_key, node_addr],
|
params![identity_key, node_addr],
|
||||||
@@ -375,7 +375,7 @@ impl Store for SqlStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let conn = self.conn.lock().unwrap();
|
let conn = self.lock_conn()?;
|
||||||
let mut stmt = conn
|
let mut stmt = conn
|
||||||
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
|
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
|
||||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||||
@@ -404,16 +404,34 @@ impl<T> OptionalExt<T> for Result<T, rusqlite::Error> {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
fn open_in_memory() -> SqlStore {
|
fn open_in_memory() -> SqlStore {
|
||||||
SqlStore::open(":memory:", "").unwrap()
|
SqlStore::open(":memory:", "").unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sets_user_version_after_migrate() {
|
||||||
|
let dir = tempfile::tempdir().expect("tempdir");
|
||||||
|
let db_path: PathBuf = dir.path().join("store.db");
|
||||||
|
|
||||||
|
{
|
||||||
|
let store = SqlStore::open(&db_path, "").expect("open store");
|
||||||
|
let _guard = store.lock_conn().unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let conn = rusqlite::Connection::open(&db_path).expect("reopen db");
|
||||||
|
let version: i32 = conn
|
||||||
|
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||||
|
.expect("read user_version");
|
||||||
|
|
||||||
|
assert_eq!(version, SCHEMA_VERSION);
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn key_package_fifo() {
|
fn key_package_fifo() {
|
||||||
let store = open_in_memory();
|
let store = open_in_memory();
|
||||||
let mut identity = [0u8; 32];
|
let identity = [1u8; 32];
|
||||||
identity[..31].copy_from_slice(b"alice_identity_key__32bytes_lon");
|
|
||||||
|
|
||||||
store
|
store
|
||||||
.upload_key_package(&identity, b"kp1".to_vec())
|
.upload_key_package(&identity, b"kp1".to_vec())
|
||||||
@@ -439,11 +457,13 @@ mod tests {
|
|||||||
let rk = [1u8; 32];
|
let rk = [1u8; 32];
|
||||||
let ch = b"channel-1";
|
let ch = b"channel-1";
|
||||||
|
|
||||||
store.enqueue(&rk, ch, b"msg1".to_vec()).unwrap();
|
let seq0 = store.enqueue(&rk, ch, b"msg1".to_vec()).unwrap();
|
||||||
store.enqueue(&rk, ch, b"msg2".to_vec()).unwrap();
|
let seq1 = store.enqueue(&rk, ch, b"msg2".to_vec()).unwrap();
|
||||||
|
assert_eq!(seq0, 0);
|
||||||
|
assert_eq!(seq1, 1);
|
||||||
|
|
||||||
let msgs = store.fetch(&rk, ch).unwrap();
|
let msgs = store.fetch(&rk, ch).unwrap();
|
||||||
assert_eq!(msgs, vec![b"msg1".to_vec(), b"msg2".to_vec()]);
|
assert_eq!(msgs, vec![(0u64, b"msg1".to_vec()), (1u64, b"msg2".to_vec())]);
|
||||||
|
|
||||||
assert!(store.fetch(&rk, ch).unwrap().is_empty());
|
assert!(store.fetch(&rk, ch).unwrap().is_empty());
|
||||||
}
|
}
|
||||||
@@ -459,10 +479,10 @@ mod tests {
|
|||||||
store.enqueue(&rk, ch, b"c".to_vec()).unwrap();
|
store.enqueue(&rk, ch, b"c".to_vec()).unwrap();
|
||||||
|
|
||||||
let msgs = store.fetch_limited(&rk, ch, 2).unwrap();
|
let msgs = store.fetch_limited(&rk, ch, 2).unwrap();
|
||||||
assert_eq!(msgs, vec![b"a".to_vec(), b"b".to_vec()]);
|
assert_eq!(msgs, vec![(0u64, b"a".to_vec()), (1u64, b"b".to_vec())]);
|
||||||
|
|
||||||
let remaining = store.fetch(&rk, ch).unwrap();
|
let remaining = store.fetch(&rk, ch).unwrap();
|
||||||
assert_eq!(remaining, vec![b"c".to_vec()]);
|
assert_eq!(remaining, vec![(2u64, b"c".to_vec())]);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -480,18 +500,25 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn has_user_record_check() {
|
fn has_user_record_check() {
|
||||||
let store = open_in_memory();
|
let store = open_in_memory();
|
||||||
assert!(!store.has_user_record("alice").unwrap());
|
assert!(!store.has_user_record("user1").unwrap());
|
||||||
store.store_user_record("alice", b"record".to_vec()).unwrap();
|
store
|
||||||
assert!(store.has_user_record("alice").unwrap());
|
.store_user_record("user1", b"record".to_vec())
|
||||||
assert!(!store.has_user_record("bob").unwrap());
|
.unwrap();
|
||||||
|
assert!(store.has_user_record("user1").unwrap());
|
||||||
|
assert!(!store.has_user_record("user2").unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn user_identity_key_round_trip() {
|
fn user_identity_key_round_trip() {
|
||||||
let store = open_in_memory();
|
let store = open_in_memory();
|
||||||
assert!(store.get_user_identity_key("alice").unwrap().is_none());
|
assert!(store.get_user_identity_key("user1").unwrap().is_none());
|
||||||
store.store_user_identity_key("alice", vec![1u8; 32]).unwrap();
|
store
|
||||||
assert_eq!(store.get_user_identity_key("alice").unwrap(), Some(vec![1u8; 32]));
|
.store_user_identity_key("user1", vec![1u8; 32])
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
store.get_user_identity_key("user1").unwrap(),
|
||||||
|
Some(vec![1u8; 32])
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -513,9 +540,9 @@ mod tests {
|
|||||||
store.enqueue(&rk, b"ch-b", b"b1".to_vec()).unwrap();
|
store.enqueue(&rk, b"ch-b", b"b1".to_vec()).unwrap();
|
||||||
|
|
||||||
let a_msgs = store.fetch(&rk, b"ch-a").unwrap();
|
let a_msgs = store.fetch(&rk, b"ch-a").unwrap();
|
||||||
assert_eq!(a_msgs, vec![b"a1".to_vec()]);
|
assert_eq!(a_msgs, vec![(0u64, b"a1".to_vec())]);
|
||||||
|
|
||||||
let b_msgs = store.fetch(&rk, b"ch-b").unwrap();
|
let b_msgs = store.fetch(&rk, b"ch-b").unwrap();
|
||||||
assert_eq!(b_msgs, vec![b"b1".to_vec()]);
|
assert_eq!(b_msgs, vec![(0u64, b"b1".to_vec())]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,45 +18,47 @@ pub enum StorageError {
|
|||||||
Db(String),
|
Db(String),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn lock<T>(m: &Mutex<T>) -> Result<std::sync::MutexGuard<'_, T>, StorageError> {
|
||||||
|
m.lock()
|
||||||
|
.map_err(|e| StorageError::Io(format!("lock poisoned: {e}")))
|
||||||
|
}
|
||||||
|
|
||||||
// ── Store trait ──────────────────────────────────────────────────────────────
|
// ── Store trait ──────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
|
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
|
||||||
pub trait Store: Send + Sync {
|
pub trait Store: Send + Sync {
|
||||||
fn upload_key_package(
|
fn upload_key_package(&self, identity_key: &[u8], package: Vec<u8>)
|
||||||
&self,
|
-> Result<(), StorageError>;
|
||||||
identity_key: &[u8],
|
|
||||||
package: Vec<u8>,
|
|
||||||
) -> Result<(), StorageError>;
|
|
||||||
|
|
||||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||||
|
|
||||||
|
/// Enqueue a payload and return the monotonically increasing per-inbox sequence number
|
||||||
|
/// assigned to this message. Clients sort by seq before MLS processing.
|
||||||
fn enqueue(
|
fn enqueue(
|
||||||
&self,
|
&self,
|
||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
payload: Vec<u8>,
|
payload: Vec<u8>,
|
||||||
) -> Result<(), StorageError>;
|
) -> Result<u64, StorageError>;
|
||||||
|
|
||||||
|
/// Fetch and drain all queued messages, returning `(seq, payload)` pairs ordered by seq.
|
||||||
fn fetch(
|
fn fetch(
|
||||||
&self,
|
&self,
|
||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
) -> Result<Vec<Vec<u8>>, StorageError>;
|
) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
|
||||||
|
|
||||||
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
|
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
|
||||||
|
/// Returns `(seq, payload)` pairs ordered by seq.
|
||||||
fn fetch_limited(
|
fn fetch_limited(
|
||||||
&self,
|
&self,
|
||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
limit: usize,
|
limit: usize,
|
||||||
) -> Result<Vec<Vec<u8>>, StorageError>;
|
) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
|
||||||
|
|
||||||
/// Return the number of queued messages for (recipient, channel) (Fix 7).
|
/// Return the number of queued messages for (recipient, channel) (Fix 7).
|
||||||
fn queue_depth(
|
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;
|
||||||
&self,
|
|
||||||
recipient_key: &[u8],
|
|
||||||
channel_id: &[u8],
|
|
||||||
) -> Result<usize, StorageError>;
|
|
||||||
|
|
||||||
/// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
|
/// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
|
||||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
|
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
|
||||||
@@ -95,11 +97,8 @@ pub trait Store: Send + Sync {
|
|||||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
|
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
|
||||||
|
|
||||||
/// Publish a P2P endpoint address for an identity key.
|
/// Publish a P2P endpoint address for an identity key.
|
||||||
fn publish_endpoint(
|
fn publish_endpoint(&self, identity_key: &[u8], node_addr: Vec<u8>)
|
||||||
&self,
|
-> Result<(), StorageError>;
|
||||||
identity_key: &[u8],
|
|
||||||
node_addr: Vec<u8>,
|
|
||||||
) -> Result<(), StorageError>;
|
|
||||||
|
|
||||||
/// Resolve a peer's P2P endpoint address.
|
/// Resolve a peer's P2P endpoint address.
|
||||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||||
@@ -132,6 +131,19 @@ struct QueueMapV2 {
|
|||||||
map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
|
map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Default, Clone)]
|
||||||
|
struct SeqEntry {
|
||||||
|
seq: u64,
|
||||||
|
data: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// V3 delivery store: each queue entry carries a monotonic per-inbox sequence number.
|
||||||
|
#[derive(Serialize, Deserialize, Default)]
|
||||||
|
struct QueueMapV3 {
|
||||||
|
map: HashMap<ChannelKey, VecDeque<SeqEntry>>,
|
||||||
|
next_seq: HashMap<ChannelKey, u64>,
|
||||||
|
}
|
||||||
|
|
||||||
/// File-backed storage for KeyPackages and delivery queues.
|
/// File-backed storage for KeyPackages and delivery queues.
|
||||||
///
|
///
|
||||||
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
|
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
|
||||||
@@ -143,7 +155,7 @@ pub struct FileBackedStore {
|
|||||||
users_path: PathBuf,
|
users_path: PathBuf,
|
||||||
identity_keys_path: PathBuf,
|
identity_keys_path: PathBuf,
|
||||||
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
|
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
|
||||||
deliveries: Mutex<HashMap<ChannelKey, VecDeque<Vec<u8>>>>,
|
deliveries: Mutex<QueueMapV3>,
|
||||||
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
|
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
|
||||||
users: Mutex<HashMap<String, Vec<u8>>>,
|
users: Mutex<HashMap<String, Vec<u8>>>,
|
||||||
identity_keys: Mutex<HashMap<String, Vec<u8>>>,
|
identity_keys: Mutex<HashMap<String, Vec<u8>>>,
|
||||||
@@ -164,7 +176,7 @@ impl FileBackedStore {
|
|||||||
let identity_keys_path = dir.join("identity_keys.bin");
|
let identity_keys_path = dir.join("identity_keys.bin");
|
||||||
|
|
||||||
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
||||||
let deliveries = Mutex::new(Self::load_delivery_map(&ds_path)?);
|
let deliveries = Mutex::new(Self::load_delivery_map_v3(&ds_path)?);
|
||||||
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
||||||
let users = Mutex::new(Self::load_users(&users_path)?);
|
let users = Mutex::new(Self::load_users(&users_path)?);
|
||||||
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
||||||
@@ -210,39 +222,38 @@ impl FileBackedStore {
|
|||||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_delivery_map(path: &Path) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
|
/// Load deliveries as V3. Falls back to V2 format (assigns seqs starting at 0).
|
||||||
|
fn load_delivery_map_v3(path: &Path) -> Result<QueueMapV3, StorageError> {
|
||||||
if !path.exists() {
|
if !path.exists() {
|
||||||
return Ok(HashMap::new());
|
return Ok(QueueMapV3::default());
|
||||||
}
|
}
|
||||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||||
if bytes.is_empty() {
|
if bytes.is_empty() {
|
||||||
return Ok(HashMap::new());
|
return Ok(QueueMapV3::default());
|
||||||
}
|
}
|
||||||
// Try v2 format (channel-aware). Fallback to legacy v1 for upgrade.
|
// Try V3 first.
|
||||||
if let Ok(map) = bincode::deserialize::<QueueMapV2>(&bytes) {
|
if let Ok(v3) = bincode::deserialize::<QueueMapV3>(&bytes) {
|
||||||
return Ok(map.map);
|
return Ok(v3);
|
||||||
}
|
}
|
||||||
let legacy: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
// Fall back to V2: assign ascending seqs starting at 0 per channel.
|
||||||
let mut upgraded = HashMap::new();
|
let v2 = bincode::deserialize::<QueueMapV2>(&bytes)
|
||||||
for (recipient_key, queue) in legacy.map.into_iter() {
|
.map_err(|_| StorageError::Io("deliveries file: unrecognised format".into()))?;
|
||||||
upgraded.insert(
|
let mut v3 = QueueMapV3::default();
|
||||||
ChannelKey {
|
for (key, queue) in v2.map {
|
||||||
channel_id: Vec::new(),
|
let entries: VecDeque<SeqEntry> = queue
|
||||||
recipient_key,
|
.into_iter()
|
||||||
},
|
.enumerate()
|
||||||
queue,
|
.map(|(i, data)| SeqEntry { seq: i as u64, data })
|
||||||
);
|
.collect();
|
||||||
|
let next = entries.len() as u64;
|
||||||
|
v3.next_seq.insert(key.clone(), next);
|
||||||
|
v3.map.insert(key, entries);
|
||||||
}
|
}
|
||||||
Ok(upgraded)
|
Ok(v3)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn flush_delivery_map(
|
fn flush_delivery_map(&self, path: &Path, map: &QueueMapV3) -> Result<(), StorageError> {
|
||||||
&self,
|
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||||
path: &Path,
|
|
||||||
map: &HashMap<ChannelKey, VecDeque<Vec<u8>>>,
|
|
||||||
) -> Result<(), StorageError> {
|
|
||||||
let payload = QueueMapV2 { map: map.clone() };
|
|
||||||
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
|
||||||
if let Some(parent) = path.parent() {
|
if let Some(parent) = path.parent() {
|
||||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||||
}
|
}
|
||||||
@@ -283,11 +294,7 @@ impl FileBackedStore {
|
|||||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn flush_users(
|
fn flush_users(&self, path: &Path, map: &HashMap<String, Vec<u8>>) -> Result<(), StorageError> {
|
||||||
&self,
|
|
||||||
path: &Path,
|
|
||||||
map: &HashMap<String, Vec<u8>>,
|
|
||||||
) -> Result<(), StorageError> {
|
|
||||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||||
if let Some(parent) = path.parent() {
|
if let Some(parent) = path.parent() {
|
||||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||||
@@ -314,7 +321,7 @@ impl Store for FileBackedStore {
|
|||||||
identity_key: &[u8],
|
identity_key: &[u8],
|
||||||
package: Vec<u8>,
|
package: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let mut map = self.key_packages.lock().unwrap();
|
let mut map = lock(&self.key_packages)?;
|
||||||
map.entry(identity_key.to_vec())
|
map.entry(identity_key.to_vec())
|
||||||
.or_default()
|
.or_default()
|
||||||
.push_back(package);
|
.push_back(package);
|
||||||
@@ -322,7 +329,7 @@ impl Store for FileBackedStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let mut map = self.key_packages.lock().unwrap();
|
let mut map = lock(&self.key_packages)?;
|
||||||
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
|
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
|
||||||
self.flush_kp_map(&self.kp_path, &*map)?;
|
self.flush_kp_map(&self.kp_path, &*map)?;
|
||||||
Ok(package)
|
Ok(package)
|
||||||
@@ -333,33 +340,39 @@ impl Store for FileBackedStore {
|
|||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
payload: Vec<u8>,
|
payload: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<u64, StorageError> {
|
||||||
let mut map = self.deliveries.lock().unwrap();
|
let mut inner = lock(&self.deliveries)?;
|
||||||
let key = ChannelKey {
|
let key = ChannelKey {
|
||||||
channel_id: channel_id.to_vec(),
|
channel_id: channel_id.to_vec(),
|
||||||
recipient_key: recipient_key.to_vec(),
|
recipient_key: recipient_key.to_vec(),
|
||||||
};
|
};
|
||||||
map.entry(key)
|
let seq = {
|
||||||
.or_default()
|
let entry = inner.next_seq.entry(key.clone()).or_insert(0);
|
||||||
.push_back(payload);
|
let s = *entry;
|
||||||
self.flush_delivery_map(&self.ds_path, &*map)
|
*entry = s + 1;
|
||||||
|
s
|
||||||
|
};
|
||||||
|
inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
|
||||||
|
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||||
|
Ok(seq)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn fetch(
|
fn fetch(
|
||||||
&self,
|
&self,
|
||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||||
let mut map = self.deliveries.lock().unwrap();
|
let mut inner = lock(&self.deliveries)?;
|
||||||
let key = ChannelKey {
|
let key = ChannelKey {
|
||||||
channel_id: channel_id.to_vec(),
|
channel_id: channel_id.to_vec(),
|
||||||
recipient_key: recipient_key.to_vec(),
|
recipient_key: recipient_key.to_vec(),
|
||||||
};
|
};
|
||||||
let messages = map
|
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||||
|
.map
|
||||||
.get_mut(&key)
|
.get_mut(&key)
|
||||||
.map(|q| q.drain(..).collect())
|
.map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
|
||||||
.unwrap_or_default();
|
.unwrap_or_default();
|
||||||
self.flush_delivery_map(&self.ds_path, &*map)?;
|
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||||
Ok(messages)
|
Ok(messages)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -368,34 +381,31 @@ impl Store for FileBackedStore {
|
|||||||
recipient_key: &[u8],
|
recipient_key: &[u8],
|
||||||
channel_id: &[u8],
|
channel_id: &[u8],
|
||||||
limit: usize,
|
limit: usize,
|
||||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||||
let mut map = self.deliveries.lock().unwrap();
|
let mut inner = lock(&self.deliveries)?;
|
||||||
let key = ChannelKey {
|
let key = ChannelKey {
|
||||||
channel_id: channel_id.to_vec(),
|
channel_id: channel_id.to_vec(),
|
||||||
recipient_key: recipient_key.to_vec(),
|
recipient_key: recipient_key.to_vec(),
|
||||||
};
|
};
|
||||||
let messages = map
|
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||||
|
.map
|
||||||
.get_mut(&key)
|
.get_mut(&key)
|
||||||
.map(|q| {
|
.map(|q| {
|
||||||
let count = limit.min(q.len());
|
let count = limit.min(q.len());
|
||||||
q.drain(..count).collect()
|
q.drain(..count).map(|e| (e.seq, e.data)).collect()
|
||||||
})
|
})
|
||||||
.unwrap_or_default();
|
.unwrap_or_default();
|
||||||
self.flush_delivery_map(&self.ds_path, &*map)?;
|
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||||
Ok(messages)
|
Ok(messages)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queue_depth(
|
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||||
&self,
|
let inner = lock(&self.deliveries)?;
|
||||||
recipient_key: &[u8],
|
|
||||||
channel_id: &[u8],
|
|
||||||
) -> Result<usize, StorageError> {
|
|
||||||
let map = self.deliveries.lock().unwrap();
|
|
||||||
let key = ChannelKey {
|
let key = ChannelKey {
|
||||||
channel_id: channel_id.to_vec(),
|
channel_id: channel_id.to_vec(),
|
||||||
recipient_key: recipient_key.to_vec(),
|
recipient_key: recipient_key.to_vec(),
|
||||||
};
|
};
|
||||||
Ok(map.get(&key).map(|q| q.len()).unwrap_or(0))
|
Ok(inner.map.get(&key).map(|q| q.len()).unwrap_or(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
|
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
|
||||||
@@ -408,13 +418,13 @@ impl Store for FileBackedStore {
|
|||||||
identity_key: &[u8],
|
identity_key: &[u8],
|
||||||
hybrid_pk: Vec<u8>,
|
hybrid_pk: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let mut map = self.hybrid_keys.lock().unwrap();
|
let mut map = lock(&self.hybrid_keys)?;
|
||||||
map.insert(identity_key.to_vec(), hybrid_pk);
|
map.insert(identity_key.to_vec(), hybrid_pk);
|
||||||
self.flush_hybrid_keys(&self.hk_path, &*map)
|
self.flush_hybrid_keys(&self.hk_path, &*map)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let map = self.hybrid_keys.lock().unwrap();
|
let map = lock(&self.hybrid_keys)?;
|
||||||
Ok(map.get(identity_key).cloned())
|
Ok(map.get(identity_key).cloned())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -437,18 +447,18 @@ impl Store for FileBackedStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||||
let mut map = self.users.lock().unwrap();
|
let mut map = lock(&self.users)?;
|
||||||
map.insert(username.to_string(), record);
|
map.insert(username.to_string(), record);
|
||||||
self.flush_users(&self.users_path, &*map)
|
self.flush_users(&self.users_path, &*map)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let map = self.users.lock().unwrap();
|
let map = lock(&self.users)?;
|
||||||
Ok(map.get(username).cloned())
|
Ok(map.get(username).cloned())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||||
let map = self.users.lock().unwrap();
|
let map = lock(&self.users)?;
|
||||||
Ok(map.contains_key(username))
|
Ok(map.contains_key(username))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -457,13 +467,13 @@ impl Store for FileBackedStore {
|
|||||||
username: &str,
|
username: &str,
|
||||||
identity_key: Vec<u8>,
|
identity_key: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let mut map = self.identity_keys.lock().unwrap();
|
let mut map = lock(&self.identity_keys)?;
|
||||||
map.insert(username.to_string(), identity_key);
|
map.insert(username.to_string(), identity_key);
|
||||||
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
|
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let map = self.identity_keys.lock().unwrap();
|
let map = lock(&self.identity_keys)?;
|
||||||
Ok(map.get(username).cloned())
|
Ok(map.get(username).cloned())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -472,13 +482,13 @@ impl Store for FileBackedStore {
|
|||||||
identity_key: &[u8],
|
identity_key: &[u8],
|
||||||
node_addr: Vec<u8>,
|
node_addr: Vec<u8>,
|
||||||
) -> Result<(), StorageError> {
|
) -> Result<(), StorageError> {
|
||||||
let mut map = self.endpoints.lock().unwrap();
|
let mut map = lock(&self.endpoints)?;
|
||||||
map.insert(identity_key.to_vec(), node_addr);
|
map.insert(identity_key.to_vec(), node_addr);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
let map = self.endpoints.lock().unwrap();
|
let map = lock(&self.endpoints)?;
|
||||||
Ok(map.get(identity_key).cloned())
|
Ok(map.get(identity_key).cloned())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
72
crates/quicnprotochat-server/src/tls.rs
Normal file
72
crates/quicnprotochat-server/src/tls.rs
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use quinn::ServerConfig;
|
||||||
|
use quinn_proto::crypto::rustls::QuicServerConfig;
|
||||||
|
use rcgen::generate_simple_self_signed;
|
||||||
|
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
|
||||||
|
use rustls::version::TLS13;
|
||||||
|
|
||||||
|
/// Ensure a self-signed certificate exists on disk and return a QUIC server config.
|
||||||
|
/// When `production` is true, cert and key must already exist (no auto-generation).
|
||||||
|
pub fn build_server_config(
|
||||||
|
cert_path: &PathBuf,
|
||||||
|
key_path: &PathBuf,
|
||||||
|
production: bool,
|
||||||
|
) -> anyhow::Result<ServerConfig> {
|
||||||
|
if !cert_path.exists() || !key_path.exists() {
|
||||||
|
if production {
|
||||||
|
anyhow::bail!(
|
||||||
|
"TLS cert or key missing at {:?} / {:?}; production mode forbids auto-generation",
|
||||||
|
cert_path,
|
||||||
|
key_path
|
||||||
|
);
|
||||||
|
}
|
||||||
|
generate_self_signed_cert(cert_path, key_path)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let cert_bytes = std::fs::read(cert_path).context("read cert")?;
|
||||||
|
let key_bytes = std::fs::read(key_path).context("read key")?;
|
||||||
|
|
||||||
|
let cert_chain = vec![CertificateDer::from(cert_bytes)];
|
||||||
|
let key = PrivateKeyDer::try_from(key_bytes).map_err(|_| anyhow::anyhow!("invalid key"))?;
|
||||||
|
|
||||||
|
let mut tls = rustls::ServerConfig::builder_with_protocol_versions(&[&TLS13])
|
||||||
|
.with_no_client_auth()
|
||||||
|
.with_single_cert(cert_chain, key)?;
|
||||||
|
tls.alpn_protocols = vec![b"capnp".to_vec()];
|
||||||
|
|
||||||
|
let crypto = QuicServerConfig::try_from(tls)
|
||||||
|
.map_err(|e| anyhow::anyhow!("invalid server TLS config: {e}"))?;
|
||||||
|
|
||||||
|
Ok(ServerConfig::with_crypto(std::sync::Arc::new(crypto)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn generate_self_signed_cert(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<()> {
|
||||||
|
if let Some(parent) = cert_path.parent() {
|
||||||
|
std::fs::create_dir_all(parent).context("create cert dir")?;
|
||||||
|
}
|
||||||
|
if let Some(parent) = key_path.parent() {
|
||||||
|
std::fs::create_dir_all(parent).context("create key dir")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let subject_alt_names = vec![
|
||||||
|
"localhost".to_string(),
|
||||||
|
"127.0.0.1".to_string(),
|
||||||
|
"::1".to_string(),
|
||||||
|
];
|
||||||
|
|
||||||
|
let issued = generate_simple_self_signed(subject_alt_names)?;
|
||||||
|
let key_der = issued.key_pair.serialize_der();
|
||||||
|
|
||||||
|
std::fs::write(cert_path, issued.cert.der()).context("write cert")?;
|
||||||
|
std::fs::write(key_path, &key_der).context("write key")?;
|
||||||
|
|
||||||
|
tracing::info!(
|
||||||
|
cert = %cert_path.display(),
|
||||||
|
key = %key_path.display(),
|
||||||
|
"generated self-signed TLS certificate"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
29
deny.toml
Normal file
29
deny.toml
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# cargo-deny configuration for license compatibility and duplicate detection.
|
||||||
|
# See https://embarkstudios.github.io/cargo-deny/
|
||||||
|
|
||||||
|
[advisories]
|
||||||
|
db-path = "~/.cargo/advisory-db"
|
||||||
|
db-urls = ["https://github.com/rustsec/advisory-db"]
|
||||||
|
vulnerability = "deny"
|
||||||
|
unmaintained = "warn"
|
||||||
|
yanked = "deny"
|
||||||
|
notice = "warn"
|
||||||
|
|
||||||
|
[bans]
|
||||||
|
multiple-versions = "warn"
|
||||||
|
wildcards = "allow"
|
||||||
|
highlight = "all"
|
||||||
|
|
||||||
|
[licenses]
|
||||||
|
unlicensed = "deny"
|
||||||
|
allow = ["MIT", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"]
|
||||||
|
deny = []
|
||||||
|
copyleft = "warn"
|
||||||
|
default = "deny"
|
||||||
|
allow-osi-fsf-free = "both"
|
||||||
|
|
||||||
|
[sources]
|
||||||
|
unknown-registry = "deny"
|
||||||
|
unknown-git = "deny"
|
||||||
|
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
|
||||||
|
allow-git = []
|
||||||
@@ -16,6 +16,7 @@ COPY crates/quicnprotochat-core/Cargo.toml crates/quicnprotochat-core/Cargo.to
|
|||||||
COPY crates/quicnprotochat-proto/Cargo.toml crates/quicnprotochat-proto/Cargo.toml
|
COPY crates/quicnprotochat-proto/Cargo.toml crates/quicnprotochat-proto/Cargo.toml
|
||||||
COPY crates/quicnprotochat-server/Cargo.toml crates/quicnprotochat-server/Cargo.toml
|
COPY crates/quicnprotochat-server/Cargo.toml crates/quicnprotochat-server/Cargo.toml
|
||||||
COPY crates/quicnprotochat-client/Cargo.toml crates/quicnprotochat-client/Cargo.toml
|
COPY crates/quicnprotochat-client/Cargo.toml crates/quicnprotochat-client/Cargo.toml
|
||||||
|
COPY crates/quicnprotochat-p2p/Cargo.toml crates/quicnprotochat-p2p/Cargo.toml
|
||||||
|
|
||||||
# Create dummy source files so `cargo build` can resolve the dependency graph
|
# Create dummy source files so `cargo build` can resolve the dependency graph
|
||||||
# and cache the compiled dependencies before copying real source.
|
# and cache the compiled dependencies before copying real source.
|
||||||
@@ -24,10 +25,12 @@ RUN mkdir -p \
|
|||||||
crates/quicnprotochat-proto/src \
|
crates/quicnprotochat-proto/src \
|
||||||
crates/quicnprotochat-server/src \
|
crates/quicnprotochat-server/src \
|
||||||
crates/quicnprotochat-client/src \
|
crates/quicnprotochat-client/src \
|
||||||
|
crates/quicnprotochat-p2p/src \
|
||||||
&& echo 'fn main() {}' > crates/quicnprotochat-server/src/main.rs \
|
&& echo 'fn main() {}' > crates/quicnprotochat-server/src/main.rs \
|
||||||
&& echo 'fn main() {}' > crates/quicnprotochat-client/src/main.rs \
|
&& echo 'fn main() {}' > crates/quicnprotochat-client/src/main.rs \
|
||||||
&& touch crates/quicnprotochat-core/src/lib.rs \
|
&& touch crates/quicnprotochat-core/src/lib.rs \
|
||||||
&& touch crates/quicnprotochat-proto/src/lib.rs
|
&& touch crates/quicnprotochat-proto/src/lib.rs \
|
||||||
|
&& touch crates/quicnprotochat-p2p/src/lib.rs
|
||||||
|
|
||||||
# Schemas must exist before the proto crate's build.rs runs.
|
# Schemas must exist before the proto crate's build.rs runs.
|
||||||
COPY schemas/ schemas/
|
COPY schemas/ schemas/
|
||||||
@@ -38,10 +41,11 @@ RUN cargo build --release --bin quicnprotochat-server 2>/dev/null || true
|
|||||||
# Copy real source and build for real.
|
# Copy real source and build for real.
|
||||||
COPY crates/ crates/
|
COPY crates/ crates/
|
||||||
|
|
||||||
# Touch main.rs files to force re-compilation of the binary crates.
|
# Touch source to force re-compilation after copying real crates.
|
||||||
RUN touch \
|
RUN touch \
|
||||||
crates/quicnprotochat-core/src/lib.rs \
|
crates/quicnprotochat-core/src/lib.rs \
|
||||||
crates/quicnprotochat-proto/src/lib.rs \
|
crates/quicnprotochat-proto/src/lib.rs \
|
||||||
|
crates/quicnprotochat-p2p/src/lib.rs \
|
||||||
crates/quicnprotochat-server/src/main.rs \
|
crates/quicnprotochat-server/src/main.rs \
|
||||||
crates/quicnprotochat-client/src/main.rs
|
crates/quicnprotochat-client/src/main.rs
|
||||||
|
|
||||||
|
|||||||
127
docs/PRODUCTION-READINESS-AUDIT.md
Normal file
127
docs/PRODUCTION-READINESS-AUDIT.md
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
# Production Readiness Audit
|
||||||
|
|
||||||
|
This document summarizes issues and fixes needed to get quicnprotochat production-ready, based on a codebase review. It aligns with the existing [Production Readiness WBS](src/roadmap/production-readiness.md) and [Coding Standards](src/contributing/coding-standards.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Critical (fix before production)
|
||||||
|
|
||||||
|
### 1. **Auth token and dev defaults**
|
||||||
|
|
||||||
|
- **README and example config** use `auth_token = "devtoken"` and `db_key = ""`.
|
||||||
|
- **Risk:** Deploying with default/example config allows weak or no auth and unencrypted DB.
|
||||||
|
- **Fix:** Require explicit `QUICNPROTOCHAT_AUTH_TOKEN` (or config) in production; reject empty or `"devtoken"` when a production mode/env is set. Document that `db_key` empty disables SQLCipher and is not acceptable for production.
|
||||||
|
|
||||||
|
### 2. **Database encryption optional**
|
||||||
|
|
||||||
|
- **`sql_store.rs`:** If `db_key` is empty, SQLCipher is not applied; DB is plaintext on disk.
|
||||||
|
- **Fix:** In production, require non-empty `db_key` (or fail startup with a clear error). Document in README and deployment docs.
|
||||||
|
|
||||||
|
### 3. **Secrets and generated files not ignored**
|
||||||
|
|
||||||
|
- **`.gitignore`** does not include `data/`, so `data/server-cert.der`, `data/server-key.der`, and `data/quicnprotochat.db` could be committed.
|
||||||
|
- **Fix:** Add `data/` (and any other dirs that hold certs, keys, or DBs) to `.gitignore`. Consider adding `*.der` and `*.db` if used only for local/dev.
|
||||||
|
|
||||||
|
### 4. **Dockerfile out of sync with workspace**
|
||||||
|
|
||||||
|
- **Workspace** has 5 members including `crates/quicnprotochat-p2p`.
|
||||||
|
- **Dockerfile** only copies 4 crate manifests and creates stub dirs for those 4; it never copies `quicnprotochat-p2p`.
|
||||||
|
- **Result:** `cargo build --release --bin quicnprotochat-server` can fail (missing workspace member) or behave inconsistently.
|
||||||
|
- **Fix:** Add `COPY crates/quicnprotochat-p2p/Cargo.toml` and a stub `crates/quicnprotochat-p2p/src` (or equivalent) in the dependency-cache layer so the workspace resolves. Ensure the final `COPY crates/ crates/` still brings in real p2p source.
|
||||||
|
|
||||||
|
### 5. **E2E test failing (rustls CryptoProvider)**
|
||||||
|
|
||||||
|
- **Symptom:** `e2e_happy_path_register_invite_join_send_recv` panics: *"Could not automatically determine the process-level CryptoProvider"*.
|
||||||
|
- **Cause:** rustls 0.23 requires a default `CryptoProvider` (e.g. `ring` or `aws-lc-rs`). In the test process, nothing calls `CryptoProvider::install_default()` before the client uses QUIC/rustls.
|
||||||
|
- **Fix:** In the E2E test (or in a shared test harness), call `rustls::crypto::ring::default_provider().install_default().ok()` (or the chosen provider) once at process start before any QUIC/rustls usage. Ensure the crate has exactly one of the `ring` / `aws-lc-rs` features so the default is unambiguous.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## High (security and reliability)
|
||||||
|
|
||||||
|
### 6. **Panic risk in client RPC path**
|
||||||
|
|
||||||
|
- **`quicnprotochat-client/src/lib.rs`:** `set_auth()` uses `.expect("init_auth must be called with a non-empty token before RPCs")`. If RPC is called without `init_auth`, the process panics.
|
||||||
|
- **Fix:** Replace with a `Result` or an error return (e.g. a dedicated error type) so callers get a recoverable error instead of a panic. Document that `init_auth` must be called before RPCs.
|
||||||
|
|
||||||
|
### 7. **Mutex `.unwrap()` in production paths**
|
||||||
|
|
||||||
|
- **`sql_store.rs`:** All `self.conn.lock().unwrap()` calls can panic if the mutex is poisoned.
|
||||||
|
- **`storage.rs` (file backend):** Same pattern with `.lock().unwrap()` on shared maps.
|
||||||
|
- **Coding standards:** Prefer handling `Result` from `lock()` (e.g. `lock().map_err(...)?`) or use a type that encapsulates poisoning so production paths don’t panic on contention/poison.
|
||||||
|
|
||||||
|
### 8. **`unwrap()` in client library**
|
||||||
|
|
||||||
|
- **`lib.rs`:** `"0.0.0.0:0".parse().unwrap()` for the client endpoint. If parsing ever changed or failed, this would panic.
|
||||||
|
- **Fix:** Use `.context("parse client bind address")?` (or equivalent) so this is a proper error path.
|
||||||
|
|
||||||
|
### 9. **TLS certificate generation is silent on first run**
|
||||||
|
|
||||||
|
- **Server** auto-generates a self-signed cert if files are missing. Production readiness WBS says: *"Self-signed certificates acceptable for development; production deployments must use a CA-signed certificate or certificate pinning."*
|
||||||
|
- **Fix:** Add a startup check (e.g. env or config flag) that in production rejects auto-generation and requires existing cert/key paths. Log clearly when running with self-signed certs so operators know they’re in dev mode.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Medium (hygiene and ops)
|
||||||
|
|
||||||
|
### 10. **No CI pipeline**
|
||||||
|
|
||||||
|
- **Production Readiness WBS** expects: GitHub Actions with `cargo test --workspace`, `cargo clippy`, `cargo fmt --check`, `cargo deny check`.
|
||||||
|
- **Current state:** No `.github/workflows` (or equivalent) found.
|
||||||
|
- **Fix:** Add a CI workflow that runs tests, clippy, fmt, and deny so every PR is validated.
|
||||||
|
|
||||||
|
### 11. **No CODEOWNERS**
|
||||||
|
|
||||||
|
- WBS requires CODEOWNERS for review ownership and security-sensitive changes.
|
||||||
|
- **Fix:** Add `.github/CODEOWNERS` mapping crates to owners.
|
||||||
|
|
||||||
|
### 12. **No dependency audit in CI**
|
||||||
|
|
||||||
|
- WBS mentions `cargo audit` in CI.
|
||||||
|
- **Fix:** Add a CI job that runs `cargo audit` (and optionally `cargo deny check`) and fails on known vulnerabilities or policy violations.
|
||||||
|
|
||||||
|
### 13. **No `deny.toml` / `deny.toml` config**
|
||||||
|
|
||||||
|
- Coding standards reference `cargo deny check`; no config file was found.
|
||||||
|
- **Fix:** Add `deny.toml` (or equivalent) and run `cargo deny check` in CI.
|
||||||
|
|
||||||
|
### 14. **Warnings in build**
|
||||||
|
|
||||||
|
- **Cap'n Proto generated code:** `unused_parens` in generated `.rs` files. Standards allow `#[allow(...)]` on generated code; consider suppressing in the codegen output or in the crate that includes it.
|
||||||
|
- **Server:** `SessionInfo` has `username` and `identity_key` never read (dead code). Either use them (e.g. audit logging) or remove/allow with a short comment.
|
||||||
|
- **E2E test:** Deprecated `cargo_bin`, `unused_mut`; trivial to fix.
|
||||||
|
- **openmls:** Future-incompat warning; track upstream and plan upgrade.
|
||||||
|
|
||||||
|
### 15. **Docker image runs as `nobody`**
|
||||||
|
|
||||||
|
- **Dockerfile** uses `USER nobody`. Good for not running as root, but `nobody` may not have a writable home or data dir.
|
||||||
|
- **Fix:** Ensure `QUICNPROTOCHAT_DATA_DIR` (and cert paths) point to a directory writable by `nobody`, or create a dedicated user/group with a known UID and use that in the Dockerfile and docs.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Already in good shape
|
||||||
|
|
||||||
|
- **Auth token comparison:** Uses `subtle::ConstantTimeEq` (`ct_eq`) for the static token — good.
|
||||||
|
- **Input validation:** Recipient key length (32), payload size (5 MB), wire version, rate limiting, queue depth — present and consistent.
|
||||||
|
- **Structured logging:** `tracing` with env filter; no secret material in log messages in the reviewed paths.
|
||||||
|
- **Error handling:** RPC handlers return coded errors; no `unwrap()` on crypto in server RPC paths.
|
||||||
|
- **Health endpoint:** Server exposes a health RPC used by E2E and can be used for readiness probes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary checklist
|
||||||
|
|
||||||
|
| Area | Status | Action |
|
||||||
|
|-------------------|----------|--------|
|
||||||
|
| Auth / tokens | Fix | Require strong auth in prod; document devtoken / empty db_key |
|
||||||
|
| DB encryption | Fix | Require non-empty db_key in production |
|
||||||
|
| .gitignore | Fix | Add `data/` (and cert/DB patterns as needed) |
|
||||||
|
| Dockerfile | Fix | Include p2p crate in workspace build |
|
||||||
|
| E2E test | Fix | Set rustls CryptoProvider in test harness |
|
||||||
|
| Client panic | Improve | Replace expect with Result in set_auth |
|
||||||
|
| Mutex unwrap | Improve | Handle poison or use non-panicking API |
|
||||||
|
| TLS in production| Improve | Reject auto-generated cert in prod mode |
|
||||||
|
| CI / CODEOWNERS | Add | GitHub Actions, deny, audit, CODEOWNERS |
|
||||||
|
| Warnings | Clean up | Dead code, deprecated APIs, generated allows |
|
||||||
|
|
||||||
|
This audit should be revisited after implementing Phase 1–2 of the [Production Readiness WBS](src/roadmap/production-readiness.md) and before any production deployment.
|
||||||
@@ -4,6 +4,13 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
# Why quicnprotochat?
|
||||||
|
|
||||||
|
- [Comparison with Classical Chat Protocols](design-rationale/protocol-comparison.md)
|
||||||
|
- [Why This Design, Not Signal/Matrix/...](design-rationale/why-not-signal.md)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
# Getting Started
|
# Getting Started
|
||||||
|
|
||||||
- [Prerequisites](getting-started/prerequisites.md)
|
- [Prerequisites](getting-started/prerequisites.md)
|
||||||
@@ -60,7 +67,6 @@
|
|||||||
# Design Rationale
|
# Design Rationale
|
||||||
|
|
||||||
- [Design Decisions Overview](design-rationale/overview.md)
|
- [Design Decisions Overview](design-rationale/overview.md)
|
||||||
- [Why This Design, Not Signal/Matrix/...](design-rationale/why-not-signal.md)
|
|
||||||
- [ADR-002: Cap'n Proto over MessagePack](design-rationale/adr-002-capnproto.md)
|
- [ADR-002: Cap'n Proto over MessagePack](design-rationale/adr-002-capnproto.md)
|
||||||
- [ADR-004: MLS-Unaware Delivery Service](design-rationale/adr-004-mls-unaware-ds.md)
|
- [ADR-004: MLS-Unaware Delivery Service](design-rationale/adr-004-mls-unaware-ds.md)
|
||||||
- [ADR-005: Single-Use KeyPackages](design-rationale/adr-005-single-use-keypackages.md)
|
- [ADR-005: Single-Use KeyPackages](design-rationale/adr-005-single-use-keypackages.md)
|
||||||
@@ -80,6 +86,7 @@
|
|||||||
# Roadmap and Research
|
# Roadmap and Research
|
||||||
|
|
||||||
- [Milestone Tracker](roadmap/milestones.md)
|
- [Milestone Tracker](roadmap/milestones.md)
|
||||||
|
- [Phase 2 + M4–M6 Roadmap](roadmap/phase2-and-m4-m6.md)
|
||||||
- [Production Readiness WBS](roadmap/production-readiness.md)
|
- [Production Readiness WBS](roadmap/production-readiness.md)
|
||||||
- [Auth, Devices, and Tokens](roadmap/authz-plan.md)
|
- [Auth, Devices, and Tokens](roadmap/authz-plan.md)
|
||||||
- [1:1 Channel Design](roadmap/dm-channels.md)
|
- [1:1 Channel Design](roadmap/dm-channels.md)
|
||||||
|
|||||||
524
docs/src/design-rationale/protocol-comparison.md
Normal file
524
docs/src/design-rationale/protocol-comparison.md
Normal file
@@ -0,0 +1,524 @@
|
|||||||
|
# Comparison with Classical Chat Protocols
|
||||||
|
|
||||||
|
This page compares quicnprotochat against **classical and legacy chat protocols** -- IRC+SSL, XMPP (with and without OMEMO), Telegram's MTProto, and plain TCP/TLS chat systems -- to demonstrate what a modern, cryptographically rigorous design provides over protocols that were designed before end-to-end encryption, post-compromise security, and post-quantum readiness were practical concerns.
|
||||||
|
|
||||||
|
For a comparison against modern E2E-encrypted protocols (Signal, Matrix/Olm/Megolm), see [Why This Design, Not Signal/Matrix/...](why-not-signal.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## At a glance
|
||||||
|
|
||||||
|
```
|
||||||
|
Classical IRC+SSL quicnprotochat
|
||||||
|
───────────────── ──────────────
|
||||||
|
|
||||||
|
You ──TLS──▶ Server ──TLS──▶ Bob You ──QUIC/TLS──▶ Server ──QUIC/TLS──▶ Bob
|
||||||
|
│ │
|
||||||
|
reads your sees only opaque
|
||||||
|
plaintext MLS ciphertext
|
||||||
|
messages (cannot decrypt)
|
||||||
|
```
|
||||||
|
|
||||||
|
The fundamental difference: **classical protocols trust the server with your plaintext**. quicnprotochat's server is cryptographically excluded from reading message content.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Protocol comparison matrix
|
||||||
|
|
||||||
|
| Property | IRC+SSL | XMPP+TLS | XMPP+OMEMO | Telegram (MTProto) | quicnprotochat |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| **Transport encryption** | TLS (server-to-server optional) | STARTTLS / direct TLS | STARTTLS / direct TLS | MTProto 2.0 (custom) | QUIC + TLS 1.3 |
|
||||||
|
| **End-to-end encryption** | None | None | Double Ratchet (1:1) | "Secret chats" only (1:1) | MLS RFC 9420 (groups native) |
|
||||||
|
| **Group E2E encryption** | None | None | Partial (OMEMO group) | None (cloud chats) | MLS ratchet tree |
|
||||||
|
| **Forward secrecy** | TLS session only | TLS session only | Yes (Double Ratchet) | Secret chats only | Yes (MLS epoch ratchet + TLS) |
|
||||||
|
| **Post-compromise security** | None | None | None (groups) | None | Yes (MLS Update proposals) |
|
||||||
|
| **Server sees plaintext** | Yes | Yes | No (1:1); partial (groups) | Yes (cloud chats) | Never |
|
||||||
|
| **Post-quantum readiness** | None | None | None | None | Hybrid KEM (X25519 + ML-KEM-768) |
|
||||||
|
| **Group operation cost** | N/A (no E2E) | N/A (no E2E) | O(n) per member | N/A (no group E2E) | O(log n) via ratchet tree |
|
||||||
|
| **Wire format** | Text (RFC 1459) | XML | XML + Protobuf | TL (Type Language) | Cap'n Proto (zero-copy) |
|
||||||
|
| **Standardization** | RFC 1459 / RFC 2812 | RFC 6120 / 6121 | XEP-0384 | Proprietary | IETF RFC 9420 (MLS) |
|
||||||
|
| **Authentication** | SASL / NickServ | SASL / TLS client certs | SASL + device fingerprints | Phone number + SMS | OPAQUE PAKE (password never leaves client) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Deep dive: IRC+SSL vs. quicnprotochat
|
||||||
|
|
||||||
|
IRC (Internet Relay Chat) is the archetypal chat protocol, designed in 1988. Adding SSL/TLS wraps the TCP connection in transport encryption, but the protocol's security model remains fundamentally unchanged.
|
||||||
|
|
||||||
|
### What happens when Alice sends a message on IRC+SSL
|
||||||
|
|
||||||
|
```
|
||||||
|
┌───────┐ ┌──────────┐ ┌──────────┐ ┌─────┐
|
||||||
|
│ Alice │──TLS───▶│ Server A │──plain──▶│ Server B │──TLS───▶│ Bob │
|
||||||
|
└───────┘ └──────────┘ └──────────┘ └─────┘
|
||||||
|
│ │
|
||||||
|
Sees: "PRIVMSG Sees: "PRIVMSG
|
||||||
|
#secret :hey Bob, #secret :hey Bob,
|
||||||
|
the password is the password is
|
||||||
|
hunter2" hunter2"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Problems:**
|
||||||
|
|
||||||
|
1. **Server reads all plaintext.** The IRC server receives, parses, and forwards every message in cleartext. TLS only protects the client-to-server hop.
|
||||||
|
2. **Server-to-server links may be unencrypted.** IRC federation uses inter-server links that historically lack TLS. Even with modern IRCd configurations, each server in the network sees every message.
|
||||||
|
3. **No forward secrecy beyond TLS session.** If a server's TLS private key is compromised, a passive attacker who recorded past traffic can decrypt all historical sessions (unless ECDHE was negotiated).
|
||||||
|
4. **No post-compromise security.** There is no mechanism to recover from a key compromise. If a server is breached, all messages flowing through it are exposed indefinitely.
|
||||||
|
5. **No identity binding.** NickServ password authentication is plaintext over the IRC protocol (inside TLS, but visible to the server). There is no cryptographic binding between a user's identity and their messages.
|
||||||
|
|
||||||
|
### What happens when Alice sends a message on quicnprotochat
|
||||||
|
|
||||||
|
```
|
||||||
|
┌───────┐ ┌────────┐ ┌─────┐
|
||||||
|
│ Alice │──QUIC/TLS 1.3─────▶│ Server │──QUIC/TLS 1.3─────▶│ Bob │
|
||||||
|
└───────┘ └────────┘ └─────┘
|
||||||
|
│ │ │
|
||||||
|
│ MLS encrypt( │ Sees only: │ MLS decrypt(
|
||||||
|
│ epoch_key, │ 0x8a3f...c7b2 │ epoch_key,
|
||||||
|
│ "hey Bob, │ (opaque blob, │ ciphertext
|
||||||
|
│ the password │ cannot decrypt) │ ) → "hey Bob,
|
||||||
|
│ is hunter2" │ │ the password
|
||||||
|
│ ) → 0x8a3f...c7b2 │ │ is hunter2"
|
||||||
|
│ │ │
|
||||||
|
│ ◄── epoch advances ──► │ │
|
||||||
|
│ old keys deleted │ │ old keys deleted
|
||||||
|
│ (forward secrecy) │ │ (forward secrecy)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key differences:**
|
||||||
|
|
||||||
|
- The server handles only **opaque ciphertext**. It cannot decrypt, modify, or selectively censor messages.
|
||||||
|
- Each MLS epoch derives fresh keys. Past epoch keys are **deleted** -- even if the server is fully compromised, historical messages remain encrypted.
|
||||||
|
- If Alice's device is compromised at epoch *n*, a single Update proposal heals the ratchet tree. Messages after epoch *n+1* are protected (**post-compromise security**).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Deep dive: XMPP+OMEMO vs. quicnprotochat
|
||||||
|
|
||||||
|
XMPP with OMEMO (XEP-0384) adds end-to-end encryption via the Signal Double Ratchet protocol. This is a significant improvement over plain XMPP, but OMEMO inherits the limitations of the Signal Protocol for group messaging.
|
||||||
|
|
||||||
|
### Group messaging comparison
|
||||||
|
|
||||||
|
```
|
||||||
|
XMPP + OMEMO group (4 members)
|
||||||
|
|
||||||
|
Alice encrypts separately for each member:
|
||||||
|
┌───────┐ ── encrypt(Bob_key) ──────▶ Bob
|
||||||
|
│ Alice │ ── encrypt(Carol_key) ────▶ Carol
|
||||||
|
└───────┘ ── encrypt(Dave_key) ─────▶ Dave
|
||||||
|
3 encryptions per message
|
||||||
|
O(n) cost per send
|
||||||
|
|
||||||
|
quicnprotochat MLS group (4 members)
|
||||||
|
|
||||||
|
Alice encrypts once with group epoch key:
|
||||||
|
┌───────┐ ── MLS_encrypt(epoch_key) ──▶ Server
|
||||||
|
│ Alice │ 1 encryption per message │
|
||||||
|
└───────┘ O(1) cost per send ├──▶ Bob
|
||||||
|
├──▶ Carol
|
||||||
|
└──▶ Dave
|
||||||
|
(all decrypt with same epoch key)
|
||||||
|
```
|
||||||
|
|
||||||
|
| Property | XMPP+OMEMO groups | quicnprotochat MLS groups |
|
||||||
|
|---|---|---|
|
||||||
|
| **Encryption per message** | O(n) -- encrypt once per recipient | O(1) -- single MLS application message |
|
||||||
|
| **Add member** | O(n) -- distribute sender keys to all | O(log n) -- single MLS Commit |
|
||||||
|
| **Remove member** | O(n) -- rotate all sender keys | O(log n) -- single MLS Commit |
|
||||||
|
| **Post-compromise security** | No (sender keys have no PCS) | Yes (any member can issue Update) |
|
||||||
|
| **Group state consistency** | No formal guarantee | MLS transcript hash ensures all members see identical state |
|
||||||
|
| **Max practical group size** | ~100 (pairwise overhead) | Thousands (log-scaling ratchet tree) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Deep dive: Telegram (MTProto) vs. quicnprotochat
|
||||||
|
|
||||||
|
Telegram is often perceived as a "secure" messenger, but its default mode provides **no end-to-end encryption**. Only "Secret Chats" (1:1 only, not available on desktop) use E2E encryption.
|
||||||
|
|
||||||
|
### Telegram's two modes
|
||||||
|
|
||||||
|
```
|
||||||
|
┌──────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Telegram Cloud Chats │
|
||||||
|
│ (default, all platforms) │
|
||||||
|
│ │
|
||||||
|
│ You ──MTProto──▶ Telegram Server ──MTProto──▶ Recipient │
|
||||||
|
│ │ │
|
||||||
|
│ Server decrypts, │
|
||||||
|
│ stores plaintext, │
|
||||||
|
│ indexes for search, │
|
||||||
|
│ processes for features │
|
||||||
|
│ (synced across devices) │
|
||||||
|
└──────────────────────────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
┌──────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Telegram Secret Chats │
|
||||||
|
│ (1:1 only, mobile only, opt-in) │
|
||||||
|
│ │
|
||||||
|
│ You ──DH key exchange──▶ Recipient │
|
||||||
|
│ (no PCS, no FS beyond initial DH, │
|
||||||
|
│ no group support, proprietary crypto) │
|
||||||
|
└──────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Comparison
|
||||||
|
|
||||||
|
| Property | Telegram Cloud Chats | Telegram Secret Chats | quicnprotochat |
|
||||||
|
|---|---|---|---|
|
||||||
|
| **Server reads plaintext** | Yes | No | No |
|
||||||
|
| **Group E2E** | No | N/A (1:1 only) | Yes (MLS) |
|
||||||
|
| **Forward secrecy** | None | Limited (no ratchet) | Full (MLS epoch ratchet) |
|
||||||
|
| **Post-compromise security** | None | None | Yes |
|
||||||
|
| **Cryptographic standard** | MTProto 2.0 (proprietary, custom) | MTProto 2.0 | IETF RFC 9420 (peer-reviewed) |
|
||||||
|
| **Open source server** | No | No | Yes (MIT license) |
|
||||||
|
| **Post-quantum** | None | None | Hybrid KEM (X25519 + ML-KEM-768) |
|
||||||
|
|
||||||
|
**Critical concern with Telegram:** MTProto is a custom, proprietary cryptographic protocol that has not undergone the same level of independent cryptographic review as standard protocols (TLS, MLS, Signal Protocol). Multiple academic papers have identified weaknesses in earlier versions. quicnprotochat exclusively uses IETF-standardized protocols (TLS 1.3, MLS RFC 9420) and widely reviewed cryptographic primitives.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Practical attack scenarios
|
||||||
|
|
||||||
|
The following scenarios illustrate how the same attack plays out differently across protocol designs.
|
||||||
|
|
||||||
|
### Scenario 1: Server compromise
|
||||||
|
|
||||||
|
An attacker gains root access to the chat server.
|
||||||
|
|
||||||
|
```
|
||||||
|
Attacker
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
┌──────────────────────────────────────────────────┐
|
||||||
|
│ Chat Server │
|
||||||
|
├──────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ IRC+SSL: Full access to all messages. │
|
||||||
|
│ Read history, impersonate users, │
|
||||||
|
│ inject messages. │
|
||||||
|
│ │
|
||||||
|
│ XMPP+TLS: Full access to all messages. │
|
||||||
|
│ Same as IRC. │
|
||||||
|
│ │
|
||||||
|
│ Telegram: Full access to cloud chat │
|
||||||
|
│ plaintext. User photos, contacts, │
|
||||||
|
│ message history all exposed. │
|
||||||
|
│ │
|
||||||
|
│ XMPP+OMEMO: Cannot read E2E messages, but │
|
||||||
|
│ sees metadata (who talks to whom, │
|
||||||
|
│ when, message sizes). │
|
||||||
|
│ │
|
||||||
|
│ quicnprotochat: │
|
||||||
|
│ Cannot read messages (MLS E2E). │
|
||||||
|
│ Sees metadata (recipient keys, │
|
||||||
|
│ timing, sizes). │
|
||||||
|
│ Cannot inject valid messages │
|
||||||
|
│ (lacks MLS group keys). │
|
||||||
|
│ Cannot impersonate users │
|
||||||
|
│ (lacks Ed25519 private keys). │
|
||||||
|
│ Past messages remain encrypted │
|
||||||
|
│ (forward secrecy). │
|
||||||
|
│ Future messages protected after │
|
||||||
|
│ any member issues MLS Update │
|
||||||
|
│ (post-compromise security). │
|
||||||
|
│ │
|
||||||
|
└──────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Scenario 2: Harvest-now, decrypt-later (quantum threat)
|
||||||
|
|
||||||
|
A state-level adversary records all encrypted traffic today, planning to decrypt it with a future quantum computer.
|
||||||
|
|
||||||
|
```
|
||||||
|
2025: Adversary passively records all ciphertext
|
||||||
|
─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
IRC+SSL (RSA/ECDHE):
|
||||||
|
└── Quantum computer breaks ECDHE → all recorded sessions decrypted
|
||||||
|
(and plaintext was already visible on the server anyway)
|
||||||
|
|
||||||
|
XMPP+OMEMO (X25519):
|
||||||
|
└── Quantum computer breaks X25519 → all recorded E2E messages decrypted
|
||||||
|
|
||||||
|
Telegram (MTProto / custom DH):
|
||||||
|
└── Quantum computer breaks DH → all recorded messages decrypted
|
||||||
|
|
||||||
|
quicnprotochat (Hybrid KEM):
|
||||||
|
└── Transport: QUIC/TLS with ECDHE → quantum computer breaks this layer
|
||||||
|
└── Inner layer: MLS content encrypted with group epoch keys
|
||||||
|
└── Hybrid KEM envelope: X25519 + ML-KEM-768
|
||||||
|
└── Quantum computer breaks X25519 ✓
|
||||||
|
└── Quantum computer breaks ML-KEM-768 ✗ (NIST Level 3, ~192-bit PQ)
|
||||||
|
└── Combined key: STILL SECURE (both must be broken)
|
||||||
|
```
|
||||||
|
|
||||||
|
quicnprotochat's hybrid "belt and suspenders" design means that **even if X25519 falls to a quantum computer, ML-KEM-768 protects the content**. The adversary's recorded ciphertext remains useless.
|
||||||
|
|
||||||
|
### Scenario 3: Device theft / compromise
|
||||||
|
|
||||||
|
An attacker steals Alice's unlocked device and extracts her key material.
|
||||||
|
|
||||||
|
```
|
||||||
|
After device compromise at time T:
|
||||||
|
────────────────────────────────────
|
||||||
|
|
||||||
|
IRC+SSL:
|
||||||
|
Messages before T: visible on server (no E2E)
|
||||||
|
Messages after T: visible on server (no E2E)
|
||||||
|
Recovery: change NickServ password (server-side only)
|
||||||
|
|
||||||
|
XMPP+OMEMO:
|
||||||
|
Messages before T: protected (forward secrecy via Double Ratchet)
|
||||||
|
Messages after T: exposed until sender key is rotated
|
||||||
|
Group messages: no PCS -- attacker reads all future group messages
|
||||||
|
until manual re-keying
|
||||||
|
Recovery: manual device revocation + new sender keys
|
||||||
|
|
||||||
|
Telegram (cloud):
|
||||||
|
Messages before T: all accessible (stored on server in plaintext)
|
||||||
|
Messages after T: all accessible (cloud sync)
|
||||||
|
Recovery: terminate session from another device
|
||||||
|
|
||||||
|
quicnprotochat:
|
||||||
|
Messages before T: protected (MLS forward secrecy, past epoch keys deleted)
|
||||||
|
Messages after T: exposed only until next MLS epoch advance
|
||||||
|
Recovery: ANY group member issues an MLS Update proposal →
|
||||||
|
new epoch key derived → attacker locked out
|
||||||
|
(post-compromise security heals automatically)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Transport layer comparison
|
||||||
|
|
||||||
|
### Why QUIC over TCP
|
||||||
|
|
||||||
|
Classical protocols (IRC, XMPP) use TCP, which suffers from head-of-line (HOL) blocking. quicnprotochat uses QUIC, which provides independent streams over UDP.
|
||||||
|
|
||||||
|
```
|
||||||
|
TCP (IRC/XMPP): all streams share one ordered byte stream
|
||||||
|
─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
Stream A: ████████░░░░████████████ (blocked waiting for
|
||||||
|
Stream B: ░░░░░░░░░░░░████████████ lost packet in A)
|
||||||
|
Stream C: ░░░░░░░░░░░░████████████
|
||||||
|
|
||||||
|
Lost packet ──▲
|
||||||
|
in Stream A │
|
||||||
|
└── ALL streams blocked until retransmit
|
||||||
|
|
||||||
|
|
||||||
|
QUIC (quicnprotochat): each stream is independent
|
||||||
|
──────────────────────────────────────────────────
|
||||||
|
|
||||||
|
Stream A: ████████░░██████████████ (only A waits)
|
||||||
|
Stream B: ████████████████████████ (unaffected)
|
||||||
|
Stream C: ████████████████████████ (unaffected)
|
||||||
|
|
||||||
|
Lost packet ──▲
|
||||||
|
in Stream A │
|
||||||
|
└── Only Stream A waits; B and C continue
|
||||||
|
```
|
||||||
|
|
||||||
|
### Connection establishment
|
||||||
|
|
||||||
|
```
|
||||||
|
IRC+SSL: TCP handshake (1 RTT) + TLS handshake (1-2 RTT) = 2-3 RTT
|
||||||
|
──────────────────────────────────────────────────────────────────────
|
||||||
|
Client ──SYN──▶ Server │
|
||||||
|
Client ◀──SYN-ACK── Server │ TCP: 1 RTT
|
||||||
|
Client ──ACK──▶ Server │
|
||||||
|
Client ──ClientHello──▶ Server │
|
||||||
|
Client ◀──ServerHello+Cert── Server │ TLS: 1-2 RTT
|
||||||
|
Client ──Finished──▶ Server │
|
||||||
|
════════════════════════════════════════════════════
|
||||||
|
Total: 2-3 round trips before first message
|
||||||
|
|
||||||
|
quicnprotochat: QUIC integrates crypto into handshake = 1 RTT (or 0-RTT)
|
||||||
|
──────────────────────────────────────────────────────────────────────────
|
||||||
|
Client ──Initial(ClientHello)──▶ Server │
|
||||||
|
Client ◀──Initial(ServerHello)── Server │ 1 RTT total
|
||||||
|
Client ──Handshake(Finished)──▶ Server │
|
||||||
|
════════════════════════════════════════════════════
|
||||||
|
Total: 1 round trip (0-RTT with session resumption)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Authentication comparison
|
||||||
|
|
||||||
|
### How users prove identity
|
||||||
|
|
||||||
|
```
|
||||||
|
IRC:
|
||||||
|
NICK alice
|
||||||
|
PASS hunter2 ← password sent in plaintext (inside TLS)
|
||||||
|
(NickServ sees password) ← server stores/verifies password hash
|
||||||
|
|
||||||
|
XMPP:
|
||||||
|
SASL PLAIN: base64(alice:hunter2) ← password sent to server
|
||||||
|
(server verifies against stored hash)
|
||||||
|
|
||||||
|
Telegram:
|
||||||
|
Phone number + SMS OTP ← carrier and Telegram see phone number
|
||||||
|
(identity = phone number) ← no cryptographic identity
|
||||||
|
|
||||||
|
quicnprotochat (OPAQUE PAKE):
|
||||||
|
Client ──blinded_element──▶ Server │ Server never sees password
|
||||||
|
Client ◀──evaluated_element── Server │ Mutual authentication
|
||||||
|
Client ──finalization──▶ Server │ Session key derived
|
||||||
|
│ │
|
||||||
|
└── password never leaves the client │
|
||||||
|
server stores only an opaque │
|
||||||
|
cryptographic record │
|
||||||
|
(Argon2id + Ristretto255) │
|
||||||
|
```
|
||||||
|
|
||||||
|
**OPAQUE** (an asymmetric PAKE that combines an Oblivious Pseudo-Random Function (OPRF) with an Authenticated Key Exchange) ensures that:
|
||||||
|
|
||||||
|
1. The server **never sees the plaintext password** -- not during registration, not during login.
|
||||||
|
2. The server stores only a cryptographic record that cannot be used for offline dictionary attacks without the client's cooperation.
|
||||||
|
3. **Argon2id** key stretching makes brute-force attacks memory-hard.
|
||||||
|
4. The login protocol produces a mutually authenticated session key, not just a server-verified credential.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Wire format efficiency
|
||||||
|
|
||||||
|
```
|
||||||
|
IRC message (RFC 1459):
|
||||||
|
┌──────────────────────────────────────────────────────────┐
|
||||||
|
│ :alice!alice@host PRIVMSG #channel :Hello everyone\r\n │
|
||||||
|
│ │
|
||||||
|
│ 56 bytes for 14 bytes of payload ("Hello everyone") │
|
||||||
|
│ Text parsing required. No schema. No type safety. │
|
||||||
|
│ Ambiguous parsing rules (RFC 1459 vs RFC 2812 conflicts) │
|
||||||
|
└──────────────────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
XMPP message (XML):
|
||||||
|
┌──────────────────────────────────────────────────────────┐
|
||||||
|
│ <message to='bob@example.com' type='chat'> │
|
||||||
|
│ <body>Hello everyone</body> │
|
||||||
|
│ </message> │
|
||||||
|
│ │
|
||||||
|
│ ~120 bytes for 14 bytes of payload │
|
||||||
|
│ XML parsing required (expensive). Verbose. │
|
||||||
|
│ Schema via XSD exists but rarely enforced at runtime. │
|
||||||
|
└──────────────────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
Cap'n Proto (quicnprotochat):
|
||||||
|
┌──────────────────────────────────────────────────────────┐
|
||||||
|
│ [8-byte aligned struct with pointers] │
|
||||||
|
│ │
|
||||||
|
│ ~40 bytes for 14 bytes of payload │
|
||||||
|
│ Zero-copy: wire bytes = memory layout. No parsing step. │
|
||||||
|
│ Schema enforced at compile time via capnpc codegen. │
|
||||||
|
│ Canonical form: deterministic bytes for signing. │
|
||||||
|
│ Built-in async RPC (no separate HTTP/gRPC layer). │
|
||||||
|
└──────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Security properties summary
|
||||||
|
|
||||||
|
The following diagram maps each protocol against the security properties it provides:
|
||||||
|
|
||||||
|
```
|
||||||
|
FS PCS E2E E2E PQ Zero Server IETF
|
||||||
|
(1:1) (grp) (1:1) (grp) ready trust excluded std
|
||||||
|
│ │ │ │ │ │ │ │
|
||||||
|
IRC+SSL · · · · · · · ·
|
||||||
|
XMPP+TLS · · · · · · · ·
|
||||||
|
XMPP+OMEMO ● · ● △ · ● · ·
|
||||||
|
Telegram Cloud · · · · · · · ·
|
||||||
|
Telegram Secret △ · ● · · ● · ·
|
||||||
|
Signal ● · ● ● △ ● · ·
|
||||||
|
quicnprotochat ● ● ● ● ● ● ● ●
|
||||||
|
|
||||||
|
Legend: ● = yes △ = partial · = no
|
||||||
|
FS = forward secrecy PCS = post-compromise security
|
||||||
|
E2E = end-to-end encryption PQ = post-quantum readiness
|
||||||
|
Zero trust = server excluded from crypto
|
||||||
|
Server excluded = server cannot read, modify, or forge messages
|
||||||
|
IETF std = based on IETF-standardized protocol (RFC)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## The quicnprotochat advantage: a layered defense
|
||||||
|
|
||||||
|
Classical protocols rely on a **single layer** of security (transport TLS). quicnprotochat applies defense in depth with **three independent layers**, each of which must be broken separately:
|
||||||
|
|
||||||
|
```
|
||||||
|
IRC+SSL security layers: quicnprotochat security layers:
|
||||||
|
|
||||||
|
┌─────────────────────────┐ ┌─────────────────────────────────┐
|
||||||
|
│ TLS (transport) │ │ Layer 3: Hybrid KEM envelope │
|
||||||
|
│ • server sees plain │ │ • X25519 + ML-KEM-768 │
|
||||||
|
│ • single point of │ │ • post-quantum resistant │
|
||||||
|
│ failure │ │ • both must be broken │
|
||||||
|
└─────────────────────────┘ ├─────────────────────────────────┤
|
||||||
|
│ Layer 2: MLS (RFC 9420) │
|
||||||
|
│ • end-to-end group encryption │
|
||||||
|
│ • forward secrecy per epoch │
|
||||||
|
│ • post-compromise security │
|
||||||
|
│ • ratchet tree (O(log n)) │
|
||||||
|
├─────────────────────────────────┤
|
||||||
|
│ Layer 1: QUIC + TLS 1.3 │
|
||||||
|
│ • transport confidentiality │
|
||||||
|
│ • 0-RTT resumption │
|
||||||
|
│ • no head-of-line blocking │
|
||||||
|
│ • multiplexed streams │
|
||||||
|
└─────────────────────────────────┘
|
||||||
|
|
||||||
|
To read a message, attacker must break:
|
||||||
|
IRC+SSL: TLS (1 layer)
|
||||||
|
quicnprotochat: TLS + MLS + Hybrid KEM (3 layers)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## When would you still choose IRC?
|
||||||
|
|
||||||
|
Fairness demands acknowledging where classical protocols genuinely excel:
|
||||||
|
|
||||||
|
| Advantage | IRC | quicnprotochat |
|
||||||
|
|---|---|---|
|
||||||
|
| **Simplicity** | Telnet-compatible text protocol | Binary protocol requiring client implementation |
|
||||||
|
| **Maturity** | 35+ years of production use | Early-stage research project |
|
||||||
|
| **Federation** | Built-in multi-server mesh | Single server per deployment |
|
||||||
|
| **Client ecosystem** | Hundreds of clients on every platform | CLI only (currently) |
|
||||||
|
| **Low resource usage** | Runs on minimal hardware | Requires modern TLS/QUIC stack |
|
||||||
|
| **Public channels** | Designed for open, unencrypted discussion | Designed for private, encrypted communication |
|
||||||
|
| **Anonymity** | No identity required | Requires Ed25519 identity keypair |
|
||||||
|
|
||||||
|
IRC remains an excellent choice for **public, open discussion** where encryption is not needed and simplicity is valued. quicnprotochat is designed for a different threat model: private communication where **confidentiality, forward secrecy, and post-compromise security** are requirements, not luxuries.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Migration path: what changes for users
|
||||||
|
|
||||||
|
For users and operators coming from classical chat systems, here is what changes practically:
|
||||||
|
|
||||||
|
| Concern | Classical (IRC/XMPP) | quicnprotochat |
|
||||||
|
|---|---|---|
|
||||||
|
| **Server setup** | Install IRCd, configure TLS cert | `cargo build && ./quicnprotochat-server` (auto-generates TLS cert) |
|
||||||
|
| **Client setup** | Install any IRC client | `./quicnprotochat-client register-user` (generates Ed25519 identity) |
|
||||||
|
| **Joining a group** | `/join #channel` | Receive MLS Welcome message from group creator |
|
||||||
|
| **Sending a message** | Type and press enter | Same -- client handles MLS encryption transparently |
|
||||||
|
| **Server admin sees messages** | Yes (always) | No (never -- server sees only ciphertext) |
|
||||||
|
| **Key management** | None (password only) | Automatic -- MLS handles key rotation, epoch advancement |
|
||||||
|
| **Device compromise recovery** | Change password | Any group member issues Update -- automatic PCS recovery |
|
||||||
|
| **Logging / compliance** | Server-side logging trivial | Requires client-side export (server has no plaintext) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Further reading
|
||||||
|
|
||||||
|
- [Why This Design, Not Signal/Matrix/...](why-not-signal.md) -- comparison with modern E2E-encrypted protocols
|
||||||
|
- [Protocol Layers Overview](../protocol-layers/overview.md) -- detailed protocol stack documentation
|
||||||
|
- [Threat Model](../cryptography/threat-model.md) -- what quicnprotochat does and does not protect against
|
||||||
|
- [Post-Quantum Readiness](../cryptography/post-quantum-readiness.md) -- hybrid KEM design and rationale
|
||||||
|
- [MLS (RFC 9420)](../protocol-layers/mls.md) -- deep dive into the group key agreement protocol
|
||||||
|
- [Architecture Overview](../architecture/overview.md) -- system-level architecture
|
||||||
@@ -83,7 +83,7 @@ KeyPackage fetched successfully.
|
|||||||
No KeyPackage available for this identity.
|
No KeyPackage available for this identity.
|
||||||
```
|
```
|
||||||
|
|
||||||
KeyPackages are single-use: fetching a KeyPackage atomically removes it from the server. If the peer needs to be added to another group, they must upload a new KeyPackage.
|
KeyPackages are single-use: fetching a KeyPackage atomically removes it from the server. The server may also enforce a TTL (e.g. 24 hours) on stored KeyPackages. If the peer needs to be added to another group, or their KeyPackage expired, they must upload a new one (see `refresh-keypackage` below).
|
||||||
|
|
||||||
### `demo-group`
|
### `demo-group`
|
||||||
|
|
||||||
@@ -125,7 +125,7 @@ cargo run -p quicnprotochat-client -- register-state \
|
|||||||
--server 127.0.0.1:7000
|
--server 127.0.0.1:7000
|
||||||
```
|
```
|
||||||
|
|
||||||
If `alice.bin` does not exist, a new identity is generated and saved. If it already exists, the existing identity is loaded and a new KeyPackage is generated from it.
|
If `alice.bin` does not exist, a new identity is generated and saved. If it already exists, the existing identity is loaded and a new KeyPackage is generated from it. You can run `register-state` again at any time to upload a fresh KeyPackage (e.g. after the previous one was consumed or expired). For refresh-only (no new identity), use `refresh-keypackage` instead.
|
||||||
|
|
||||||
**Output:**
|
**Output:**
|
||||||
```
|
```
|
||||||
@@ -134,6 +134,30 @@ fingerprint : 9f8e7d6c5b4a...
|
|||||||
KeyPackage uploaded successfully.
|
KeyPackage uploaded successfully.
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `refresh-keypackage`
|
||||||
|
|
||||||
|
Refresh the KeyPackage on the server using your **existing** state file. Does not create a new identity. Use this when:
|
||||||
|
|
||||||
|
- Your KeyPackage has expired (server TTL, e.g. 24h).
|
||||||
|
- Your KeyPackage was consumed (someone invited you) and you want to be invitable again.
|
||||||
|
|
||||||
|
Run with the same `--access-token` (or `QUICNPROTOCHAT_ACCESS_TOKEN`) as for other commands.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo run -p quicnprotochat-client -- refresh-keypackage \
|
||||||
|
--state alice.bin \
|
||||||
|
--server 127.0.0.1:7000
|
||||||
|
```
|
||||||
|
|
||||||
|
**Output:**
|
||||||
|
```
|
||||||
|
identity_key : a1b2c3d4e5f6...
|
||||||
|
fingerprint : 9f8e7d6c5b4a...
|
||||||
|
KeyPackage uploaded successfully.
|
||||||
|
```
|
||||||
|
|
||||||
|
If an inviter is told "no key" when trying to invite you, run `refresh-keypackage` on your side to upload a fresh KeyPackage, then have them retry the invite.
|
||||||
|
|
||||||
### `create-group`
|
### `create-group`
|
||||||
|
|
||||||
Create a new MLS group. The caller becomes the sole member at epoch 0.
|
Create a new MLS group. The caller becomes the sole member at epoch 0.
|
||||||
@@ -269,7 +293,8 @@ In ephemeral mode (`register` and `demo-group`), the key is held in process memo
|
|||||||
| `register` | No | Generate ephemeral identity + KeyPackage, upload to AS |
|
| `register` | No | Generate ephemeral identity + KeyPackage, upload to AS |
|
||||||
| `fetch-key <hex>` | No | Fetch a peer's KeyPackage from AS |
|
| `fetch-key <hex>` | No | Fetch a peer's KeyPackage from AS |
|
||||||
| `demo-group` | No | Automated Alice-and-Bob round-trip |
|
| `demo-group` | No | Automated Alice-and-Bob round-trip |
|
||||||
| `register-state` | Yes | Upload KeyPackage for persistent identity |
|
| `register-state` | Yes | Upload KeyPackage for persistent identity (creates identity if needed) |
|
||||||
|
| `refresh-keypackage` | Yes | Upload a fresh KeyPackage from existing state (no new identity) |
|
||||||
| `create-group` | Yes | Create MLS group (sole member, epoch 0) |
|
| `create-group` | Yes | Create MLS group (sole member, epoch 0) |
|
||||||
| `invite` | Yes | Add peer to group, deliver Welcome via DS |
|
| `invite` | Yes | Add peer to group, deliver Welcome via DS |
|
||||||
| `join` | Yes | Consume Welcome from DS, join group |
|
| `join` | Yes | Consume Welcome from DS, join group |
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ credential (Ed25519 public key), and a signature proving ownership. The
|
|||||||
quicnprotochat Authentication Service (AS) provides a simple upload/fetch
|
quicnprotochat Authentication Service (AS) provides a simple upload/fetch
|
||||||
interface for distributing KeyPackages between clients.
|
interface for distributing KeyPackages between clients.
|
||||||
|
|
||||||
|
**Expiry and refresh:** KeyPackages are consumed on fetch (single-use). The server may also enforce a TTL (e.g. 24h). Clients should upload a fresh KeyPackage periodically or on demand so they remain invitable. The CLI provides `refresh-keypackage`: load existing state, generate a new KeyPackage, upload to the AS. See [Running the Client](../getting-started/running-the-client.md#refresh-keypackage).
|
||||||
|
|
||||||
This page describes the end-to-end flow: from client-side generation through
|
This page describes the end-to-end flow: from client-side generation through
|
||||||
server-side storage to peer-side retrieval and consumption.
|
server-side storage to peer-side retrieval and consumption.
|
||||||
|
|
||||||
|
|||||||
@@ -64,6 +64,7 @@ For a deeper discussion of the cryptographic guarantees, threat model, and known
|
|||||||
|
|
||||||
| Section | What you will find |
|
| Section | What you will find |
|
||||||
|---|---|
|
|---|---|
|
||||||
|
| **[Comparison with Classical Protocols](design-rationale/protocol-comparison.md)** | **Why quicnprotochat? IRC+SSL, XMPP, Telegram vs. our design** |
|
||||||
| [Prerequisites](getting-started/prerequisites.md) | Toolchain and system dependencies |
|
| [Prerequisites](getting-started/prerequisites.md) | Toolchain and system dependencies |
|
||||||
| [Building from Source](getting-started/building.md) | `cargo build`, Cap'n Proto codegen, troubleshooting |
|
| [Building from Source](getting-started/building.md) | `cargo build`, Cap'n Proto codegen, troubleshooting |
|
||||||
| [Running the Server](getting-started/running-the-server.md) | Server startup, configuration, TLS cert generation |
|
| [Running the Server](getting-started/running-the-server.md) | Server startup, configuration, TLS cert generation |
|
||||||
@@ -74,7 +75,7 @@ For a deeper discussion of the cryptographic guarantees, threat model, and known
|
|||||||
| [Protocol Layers](protocol-layers/overview.md) | Deep dives into QUIC/TLS, Cap'n Proto, MLS, Hybrid KEM |
|
| [Protocol Layers](protocol-layers/overview.md) | Deep dives into QUIC/TLS, Cap'n Proto, MLS, Hybrid KEM |
|
||||||
| [Wire Format Reference](wire-format/overview.md) | Cap'n Proto schema documentation |
|
| [Wire Format Reference](wire-format/overview.md) | Cap'n Proto schema documentation |
|
||||||
| [Cryptography](cryptography/overview.md) | Identity keys, key lifecycle, forward secrecy, PCS, threat model |
|
| [Cryptography](cryptography/overview.md) | Identity keys, key lifecycle, forward secrecy, PCS, threat model |
|
||||||
| [Design Rationale](design-rationale/overview.md) | ADRs and "why not Signal/Matrix" comparison |
|
| [Design Rationale](design-rationale/overview.md) | ADRs and protocol design decisions |
|
||||||
| [Roadmap](roadmap/milestones.md) | Milestone tracker and future research directions |
|
| [Roadmap](roadmap/milestones.md) | Milestone tracker and future research directions |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|||||||
135
docs/src/roadmap/fully-operational-checklist.md
Normal file
135
docs/src/roadmap/fully-operational-checklist.md
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# Features Needed to Be Fully Operational
|
||||||
|
|
||||||
|
This checklist reflects the current state after M1–M3, M4-style CLI, M6 migrations, rich messaging, Sealed Sender, and GUI scaffold. It lists what is **done**, what is **partially done**, and what still **must be implemented** for a fully operational chat system.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary Table
|
||||||
|
|
||||||
|
| Area | Status | Notes |
|
||||||
|
|------|--------|--------|
|
||||||
|
| Transport (QUIC/TLS) | Done | M1 |
|
||||||
|
| Auth service (KeyPackage, OPAQUE) | Done | M2 + register-user, login |
|
||||||
|
| Delivery + MLS groups (2-party) | Done | M3 |
|
||||||
|
| Group CLI (create, invite, join, send, recv, chat) | Done | M4-style |
|
||||||
|
| Server persistence (SQL + migrations) | Done | M6 migrations + runner |
|
||||||
|
| Client state persistence | Done | State file, DiskKeyStore, encrypted (QPCE) |
|
||||||
|
| Rich messaging (app payload schema) | Done | Chat, Reply, Reaction, ReadReceipt, Typing + sender |
|
||||||
|
| Sealed Sender | Done | Server config; enqueue without identity |
|
||||||
|
| Native GUI scaffold | Done | Tauri, whoami, health |
|
||||||
|
| **Multi-party groups (N > 2)** | Done | M5: Commit fan-out, send --all, epoch sync, three-party E2E |
|
||||||
|
| **KeyPackage rotation** | **To do** | Client upload before TTL (24h) |
|
||||||
|
| **Observability** | **To do** | Metrics (Prometheus), tracing (OpenTelemetry), health |
|
||||||
|
| **Client resilience** | **To do** | Retry/backoff, idempotent message IDs, gap detection |
|
||||||
|
| **1:1 channel semantics** | Partial | channelId in DS; per-channel authz/TTL not formalized |
|
||||||
|
| **Production hardening** | **To do** | CI, CODEOWNERS, SBOM, backup/restore, rate-limit tuning |
|
||||||
|
| **Post-quantum (M7)** | Next | Custom OpenMlsCryptoProvider with hybrid KEM |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. Must-Have for “Fully Operational”
|
||||||
|
|
||||||
|
These are the features that, if missing, prevent the system from being considered fully operational for real use (multi-user groups, reliability, and operations).
|
||||||
|
|
||||||
|
### 1.1 Multi-party groups (M5)
|
||||||
|
|
||||||
|
**Current:** Core supports `add_member` and `merge_staged_commit`; client/server only exercise 2-party (creator + one joiner).
|
||||||
|
|
||||||
|
**To implement:**
|
||||||
|
|
||||||
|
- **Commit fan-out:** When creator invites a new member, the Commit must be delivered to **all existing members** (not just the creator). Client flow: after `add_member`, enqueue the Commit to each existing member’s queue (by identity / recipient_key) in addition to sending the Welcome to the new member.
|
||||||
|
- **Proposal handling:** Ensure all members process Commits and Proposals (Add/Remove/Update) so epoch advancement is consistent; already partially in core (`merge_staged_commit`, `store_pending_proposal`).
|
||||||
|
- **CLI/API:** Extend `invite` so that after adding a member, the client fetches the list of existing members (e.g. from local group state) and enqueues the Commit to each. Optional: `recv` processes incoming Commits and updates local group state before returning application messages.
|
||||||
|
- **Tests:** E2E with 3+ members: create group, invite B, invite C, send from A, B, C; all receive and decrypt.
|
||||||
|
|
||||||
|
### 1.2 KeyPackage rotation
|
||||||
|
|
||||||
|
**Current:** KeyPackages are single-use (consume-on-fetch). Server TTL (e.g. 24h) and client upload are in place, but there is no **scheduled client-side rotation**.
|
||||||
|
|
||||||
|
**To implement:**
|
||||||
|
|
||||||
|
- **Timer or on-demand:** Before KeyPackage TTL expires (e.g. 24h), client uploads a fresh KeyPackage (and optionally removes or replaces the old one). Can be a background task in the client (CLI daemon or GUI backend) or triggered when a “fetch key” fails with “no key”.
|
||||||
|
- **Documentation:** Document TTL and rotation in user/ops docs.
|
||||||
|
|
||||||
|
### 1.3 Observability
|
||||||
|
|
||||||
|
**Current:** Health RPC and basic tracing exist; no structured metrics or distributed tracing.
|
||||||
|
|
||||||
|
**To implement:**
|
||||||
|
|
||||||
|
- **Metrics:** Prometheus (or equivalent) export for: enqueue/fetch rate, RPC latency histograms, queue depth per recipient, KeyPackage store size, active connections. See [Future Research](future-research.md).
|
||||||
|
- **Health:** Existing `health` RPC is sufficient; optionally add a simple HTTP health endpoint for load balancers (e.g. on a separate port).
|
||||||
|
- **Structured logging:** Ensure sensitive data is never logged; audit events (auth, enqueue, rate limit) as in [Production Readiness](production-readiness.md).
|
||||||
|
|
||||||
|
### 1.4 Client resilience
|
||||||
|
|
||||||
|
**Current:** Single attempt for send/recv; no retry, no idempotent message IDs, no gap detection.
|
||||||
|
|
||||||
|
**To implement:**
|
||||||
|
|
||||||
|
- **Retry with backoff:** On transient failures (network, server busy), retry with exponential backoff + jitter for enqueue, fetch, fetchWait.
|
||||||
|
- **Idempotent message IDs:** Client-generated message IDs (already in rich messaging); server-side deduplication by (recipient_key, channel_id, message_id) if desired, to avoid duplicate delivery on retry.
|
||||||
|
- **Gap detection (optional):** Per-channel sequence numbers or epoch checks so the client can detect missing Commits or messages and re-sync (e.g. re-fetch or rejoin).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Important for Production Readiness
|
||||||
|
|
||||||
|
Not strictly required for “operational” but expected for production deployments.
|
||||||
|
|
||||||
|
### 2.1 1:1 channel semantics (Phase 4)
|
||||||
|
|
||||||
|
**Current:** Delivery is per `(recipient_key, channel_id)`; channelId is used in enqueue/fetch. No formal per-channel authz or TTL.
|
||||||
|
|
||||||
|
**To implement:**
|
||||||
|
|
||||||
|
- **Per-channel authz:** Ensure fetch/fetchWait only return messages for channels the authenticated identity is allowed to read (e.g. identity bound to recipient_key or to a channel membership list).
|
||||||
|
- **TTL eviction:** Server already has message TTL (e.g. 7 days) and GC; document and optionally make TTL configurable per channel type.
|
||||||
|
|
||||||
|
### 2.2 Wire versioning and protocol hardening (Phase 2)
|
||||||
|
|
||||||
|
**Current:** Wire version is checked on enqueue/fetch (e.g. `CURRENT_WIRE_VERSION`). Ciphersuite allowlist and ALPN are partially in place.
|
||||||
|
|
||||||
|
**To implement:**
|
||||||
|
|
||||||
|
- **Ciphersuite allowlist:** Server rejects KeyPackages with unknown ciphersuites.
|
||||||
|
- **Downgrade guards:** Reject Commits with weaker ciphersuites once a group has advanced.
|
||||||
|
- **Connection draining:** Graceful QUIC `CONNECTION_CLOSE` on server shutdown.
|
||||||
|
|
||||||
|
### 2.3 Production hardening (Phase 1 + 6)
|
||||||
|
|
||||||
|
- **CODEOWNERS:** Map crates to reviewers.
|
||||||
|
- **CI:** `cargo test --workspace`, `cargo clippy`, `cargo fmt --check`, `cargo audit`, optional `cargo deny`.
|
||||||
|
- **SBOM:** e.g. `cargo-cyclonedx` or `cargo-about` in CI.
|
||||||
|
- **Backup/restore:** SQLite/SQLCipher backup and integrity verification for server DB.
|
||||||
|
- **Rate limiting:** Already per-token; optionally add per-IP and per-account limits and document.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Roadmap and Documentation Updates
|
||||||
|
|
||||||
|
- **Milestones doc:** Mark M4 as **Complete** (CLI subcommands exist). Mark M5 as **Complete** (Commit fan-out, `send --all`, epoch sync). Mark M6 as **Complete** (migrations + runner; server and client persistence in place). Move M7 to **Next**.
|
||||||
|
- **README:** Update milestone table to reflect M4 and M6 complete; add one line on migrations (e.g. “Server supports SQL migrations under `quicnprotochat-server/migrations/`”).
|
||||||
|
- **Migration convention:** Document in README or a dev doc: add new migrations as `NNN_name.sql`, add to `MIGRATIONS` in `sql_store.rs`, bump `SCHEMA_VERSION`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Optional / Later
|
||||||
|
|
||||||
|
- **Post-quantum (M7):** Custom `OpenMlsCryptoProvider` with hybrid X25519 + ML-KEM-768 for MLS HPKE; all M3–M5 tests pass with PQ backend.
|
||||||
|
- **GUI completion:** Full flows (login, conversation list, chat view with send/recv, settings); long-lived connection and streaming recv.
|
||||||
|
- **WebTransport + WASM:** Browser client.
|
||||||
|
- **iroh / P2P:** NAT traversal and optional direct peer-to-peer delivery.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Priority Order for “Fully Operational”
|
||||||
|
|
||||||
|
1. **M5 Multi-party groups** — Commit fan-out and client flow for N > 2.
|
||||||
|
2. **KeyPackage rotation** — Client upload before TTL.
|
||||||
|
3. **Observability** — Metrics + health + safe logging.
|
||||||
|
4. **Client resilience** — Retry, backoff, idempotent message IDs.
|
||||||
|
5. **Docs** — Update milestones and README (M4, M6, migrations).
|
||||||
|
6. **Production hardening** — CI, CODEOWNERS, SBOM, backup, rate-limit docs.
|
||||||
|
|
||||||
|
Once 1–5 are in place, the system can be considered **fully operational** for multi-user group chat with durable state and observable, resilient clients. Item 6 and the optional items bring it to **production-ready** and beyond.
|
||||||
@@ -14,10 +14,10 @@ for what that means in practice.
|
|||||||
| M1 | QUIC/TLS Transport | **Complete** | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
|
| M1 | QUIC/TLS Transport | **Complete** | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
|
||||||
| M2 | Authentication Service | **Complete** | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
| M2 | Authentication Service | **Complete** | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
||||||
| M3 | Delivery Service + MLS Groups | **Complete** | DS relay, GroupMember create/join/add/send/recv |
|
| M3 | Delivery Service + MLS Groups | **Complete** | DS relay, GroupMember create/join/add/send/recv |
|
||||||
| M4 | Group CLI Subcommands | **Next** | Persistent CLI (create-group, invite, join, send, recv); `demo-group` already available |
|
| M4 | Group CLI Subcommands | **Complete** | Persistent CLI (create-group, invite, join, send, recv), OPAQUE login |
|
||||||
| M5 | Multi-party Groups | Planned | N > 2 members, Commit fan-out, Proposal handling |
|
| M5 | Multi-party Groups | **Complete** | N > 2 members, Commit fan-out, send --all, epoch sync |
|
||||||
| M6 | Persistence | Planned | SQLite key store, durable group state |
|
| M6 | Persistence | **Complete** | SQLite/SQLCipher, migrations, durable server + client state |
|
||||||
| M7 | Post-quantum | Planned | PQ hybrid for MLS/HPKE (X25519 + ML-KEM-768) |
|
| M7 | Post-quantum | **Next** | PQ hybrid for MLS/HPKE (X25519 + ML-KEM-768) |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -103,65 +103,54 @@ group\_id lifecycle, MLS integration.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## M4 -- Group CLI Subcommands (Next)
|
## M4 -- Group CLI Subcommands (Complete)
|
||||||
|
|
||||||
**Goal:** Persistent, composable CLI subcommands for group operations, replacing
|
**Goal:** Persistent, composable CLI subcommands for group operations, replacing
|
||||||
the monolithic `demo-group` proof-of-concept.
|
the monolithic `demo-group` proof-of-concept.
|
||||||
|
|
||||||
**Planned deliverables:**
|
**Deliverables:** `create-group`, `invite`, `join`, `send`, `recv`, `chat`;
|
||||||
|
OPAQUE `register-user` and `login`; `demo-group` remains for single-command demo.
|
||||||
- `create-group` -- creates a new MLS group, stores state locally
|
|
||||||
- `invite <identity>` -- adds a member by fetching their KeyPackage from the AS
|
|
||||||
- `join` -- processes a Welcome message and joins an existing group
|
|
||||||
- `send <message>` -- encrypts and enqueues an application message
|
|
||||||
- `recv` -- fetches and decrypts pending messages (or long-polls with `fetchWait`)
|
|
||||||
|
|
||||||
The `demo-group` subcommand remains available as a single-command demonstration
|
|
||||||
of the full flow.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## M5 -- Multi-party Groups (Planned)
|
## M5 -- Multi-party Groups (Complete)
|
||||||
|
|
||||||
**Goal:** Support groups with N > 2 members, including Commit fan-out and
|
**Goal:** Support groups with N > 2 members, including Commit fan-out and
|
||||||
Proposal handling.
|
epoch synchronisation.
|
||||||
|
|
||||||
**Planned deliverables:**
|
**Deliverables:** Commit fan-out to existing members on invite; `send --all`;
|
||||||
|
`cmd_join` processes all queued payloads (Welcome + Commits); three-party E2E
|
||||||
- Commit fan-out through the DS to all group members
|
passing. Proposal handling (Remove, Update) and Criterion benchmarks are
|
||||||
- Proposal handling (Add, Remove, Update)
|
optional follow-ups.
|
||||||
- Epoch synchronisation across N members
|
|
||||||
- Criterion benchmarks: key generation, encap/decap, group-add latency
|
|
||||||
(10/100/1000 members)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## M6 -- Persistence (Planned)
|
## M6 -- Persistence (Complete)
|
||||||
|
|
||||||
**Goal:** Server survives restart. Client state persists across sessions.
|
**Goal:** Server survives restart. Client state persists across sessions.
|
||||||
|
|
||||||
**Planned deliverables:**
|
**Deliverables:** SQLite/SQLCipher via rusqlite, `migrations/` directory and
|
||||||
|
migration runner; client state file and DiskKeyStore (encrypted QPCE optional).
|
||||||
- `quicnprotochat-server`: SQLite via `sqlx` for AS key store and DS message log,
|
|
||||||
`migrations/` directory
|
|
||||||
- `docker/Dockerfile`: multi-stage build (`rust:bookworm` builder, `debian:bookworm-slim` runtime)
|
|
||||||
- `docker-compose.yml`: server + SQLite volume, healthcheck
|
|
||||||
- Client reconnect with session resume (re-handshake + rejoin group epoch from
|
|
||||||
DS log)
|
|
||||||
|
|
||||||
See [Future Research: SQLCipher](future-research.md#storage--persistence) for
|
See [Future Research: SQLCipher](future-research.md#storage--persistence) for
|
||||||
encrypted-at-rest options.
|
encrypted-at-rest options.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## M7 -- Post-quantum (Planned)
|
## M7 -- Post-quantum (Next)
|
||||||
|
|
||||||
**Goal:** Replace the MLS crypto backend with a hybrid X25519 + ML-KEM-768 KEM,
|
**Goal:** Replace the MLS crypto backend with a hybrid X25519 + ML-KEM-768 KEM,
|
||||||
providing post-quantum confidentiality for all group key material.
|
providing post-quantum confidentiality for all group key material.
|
||||||
|
|
||||||
**Planned deliverables:**
|
**Status:** PoC complete. `HybridCryptoProvider` and `HybridCrypto` implement
|
||||||
|
`OpenMlsCryptoProvider` / HPKE with hybrid KEM; `GroupMember<HybridCryptoProvider>`
|
||||||
|
via `new_with_hybrid()` runs full two-party MLS (create, add, join, send, recv).
|
||||||
|
Unit test `two_party_mls_round_trip_hybrid` passes. Remaining: optional CLI/client
|
||||||
|
flag to use hybrid provider for new groups; interoperability note (hybrid init
|
||||||
|
keys are non-standard until MLS adopts PQ).
|
||||||
|
|
||||||
- Custom `OpenMlsCryptoProvider` with hybrid KEM in `quicnprotochat-core`
|
**Deliverables:**
|
||||||
|
|
||||||
|
- Custom `OpenMlsCryptoProvider` with hybrid KEM in `quicnprotochat-core` (**done**)
|
||||||
- Hybrid shared secret derivation:
|
- Hybrid shared secret derivation:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
80
docs/src/roadmap/phase2-and-m4-m6.md
Normal file
80
docs/src/roadmap/phase2-and-m4-m6.md
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
# Phase 2 (Protocol Hardening) + M4–M6 Roadmap
|
||||||
|
|
||||||
|
This page tracks implementation of **Phase 2** (protocol hardening) from the
|
||||||
|
[Production Readiness WBS](production-readiness.md), followed by **M4** (Group CLI),
|
||||||
|
**M5** (Multi-party groups), and **M6** (Persistence).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Legacy code removed
|
||||||
|
|
||||||
|
The following legacy behaviour has been removed; only current behaviour is supported:
|
||||||
|
|
||||||
|
- **Auth:** Server no longer accepts "any non-empty token" when no static token is configured. Either a static `auth_token` or a valid OPAQUE session token is required (auth version 1 only).
|
||||||
|
- **Wire version:** Only wire version `1` is accepted on `enqueue`, `fetch`, `fetchWait`. Version `0` is rejected.
|
||||||
|
- **Delivery storage:** Server only loads the channel-aware delivery map format (v2). Old v1 `deliveries.bin` files will not load; delete or migrate the file.
|
||||||
|
- **Client:** Hybrid decryption is required for Welcome and application payloads. No fallback to plaintext MLS; missing or failed hybrid decrypt returns an error.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2 — Protocols and Core Hardening
|
||||||
|
|
||||||
|
| Task | Status | Notes |
|
||||||
|
|------|--------|-------|
|
||||||
|
| **Ciphersuite allowlist** | **Done** | Server rejects KeyPackages whose ciphersuite is not `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519`. See `quicnprotochat_core::validate_keypackage_ciphersuite` and `upload_key_package` (E021). |
|
||||||
|
| **ALPN enforcement** | **Done** | Server TLS config sets `alpn_protocols = [b"capnp"]`; handshake completes only if client offers `capnp`. |
|
||||||
|
| **Connection draining** | **Done** | On `Ctrl+C`, server calls `endpoint.close(0, b"server shutdown")` and exits the accept loop. |
|
||||||
|
| **Wire versioning** | **Done** | `enqueue`, `fetch`, `fetchWait` require `version == CURRENT_WIRE_VERSION` (1). Other RPCs use auth version. |
|
||||||
|
| **Downgrade guards** | **Deferred** | MLS epoch/ciphersuite consistency is enforced by openmls when processing commits. Explicit epoch-rollback checks can be added in M5. |
|
||||||
|
| **KeyPackage rotation** | **Doc** | Clients should upload a fresh KeyPackage before the 24h TTL. Helper or background task can be added in M4. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## M4 — Group CLI Subcommands
|
||||||
|
|
||||||
|
**Goal:** Persistent, composable CLI for group operations (replace monolithic `demo-group`).
|
||||||
|
|
||||||
|
| Deliverable | Status |
|
||||||
|
|-------------|--------|
|
||||||
|
| `create-group` | Planned |
|
||||||
|
| `invite <identity>` | Planned |
|
||||||
|
| `join` | Planned |
|
||||||
|
| `send <message>` | Planned |
|
||||||
|
| `recv` | Planned |
|
||||||
|
| Keep `demo-group` | Existing |
|
||||||
|
|
||||||
|
See [Milestones](milestones.md#m4--group-cli-subcommands-complete).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## M5 — Multi-party Groups
|
||||||
|
|
||||||
|
**Goal:** N > 2 members, commit fan-out, proposal handling.
|
||||||
|
|
||||||
|
| Deliverable | Status |
|
||||||
|
|-------------|--------|
|
||||||
|
| Commit fan-out via DS | Planned |
|
||||||
|
| Proposal handling (Add, Remove, Update) | Planned |
|
||||||
|
| Epoch sync across N members | Planned |
|
||||||
|
| Benchmarks | Planned |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## M6 — Persistence
|
||||||
|
|
||||||
|
**Goal:** Server survives restart; client state persists across sessions.
|
||||||
|
|
||||||
|
| Deliverable | Status |
|
||||||
|
|-------------|--------|
|
||||||
|
| SQLite/SQLCipher (AS + DS) | Partial (SqlStore exists) |
|
||||||
|
| `migrations/` | Planned |
|
||||||
|
| Client reconnect + session resume | Planned |
|
||||||
|
| Docker + healthcheck | Partial (Dockerfile exists) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Cross-references
|
||||||
|
|
||||||
|
- [Production Readiness WBS](production-readiness.md) — Phase 2 definition
|
||||||
|
- [Milestones](milestones.md) — M4, M5, M6 details
|
||||||
|
- [Auth, Devices, and Tokens](authz-plan.md) — Phase 3
|
||||||
@@ -21,8 +21,7 @@ interface NodeService {
|
|||||||
# Upload a single-use KeyPackage for later retrieval by peers.
|
# Upload a single-use KeyPackage for later retrieval by peers.
|
||||||
# identityKey : Ed25519 public key bytes (32 bytes)
|
# identityKey : Ed25519 public key bytes (32 bytes)
|
||||||
# package : TLS-encoded openmls KeyPackage
|
# package : TLS-encoded openmls KeyPackage
|
||||||
# auth : Auth context (versioned). For legacy clients, pass an empty
|
# auth : Auth context (version=1, non-empty accessToken required).
|
||||||
# struct or version=0.
|
|
||||||
uploadKeyPackage @0 (identityKey :Data, package :Data, auth :Auth)
|
uploadKeyPackage @0 (identityKey :Data, package :Data, auth :Auth)
|
||||||
-> (fingerprint :Data);
|
-> (fingerprint :Data);
|
||||||
|
|
||||||
@@ -33,7 +32,7 @@ interface NodeService {
|
|||||||
# Enqueue an opaque payload for delivery to a recipient.
|
# Enqueue an opaque payload for delivery to a recipient.
|
||||||
# channelId : Optional channel identifier (empty for legacy). A 16-byte UUID
|
# channelId : Optional channel identifier (empty for legacy). A 16-byte UUID
|
||||||
# is recommended for 1:1 channels.
|
# is recommended for 1:1 channels.
|
||||||
# version : Schema/wire version. Must be 0 (legacy) or 1 (this spec).
|
# version : Schema/wire version. Must be 1.
|
||||||
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data,
|
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data,
|
||||||
version :UInt16, auth :Auth) -> ();
|
version :UInt16, auth :Auth) -> ();
|
||||||
|
|
||||||
@@ -57,7 +56,7 @@ interface NodeService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct Auth {
|
struct Auth {
|
||||||
version @0 :UInt16; # 0 = legacy/none, 1 = token-based auth
|
version @0 :UInt16; # 1 = token-based auth (required)
|
||||||
accessToken @1 :Data; # opaque bearer token issued at login
|
accessToken @1 :Data; # opaque bearer token issued at login
|
||||||
deviceId @2 :Data; # optional UUID bytes for auditing/rate limiting
|
deviceId @2 :Data; # optional UUID bytes for auditing/rate limiting
|
||||||
}
|
}
|
||||||
@@ -108,7 +107,7 @@ Enqueues an opaque payload for delivery. Identical semantics to the standalone [
|
|||||||
| `recipientKey` | `Data` | 32 bytes | Recipient's raw Ed25519 public key |
|
| `recipientKey` | `Data` | 32 bytes | Recipient's raw Ed25519 public key |
|
||||||
| `payload` | `Data` | Variable | Opaque byte string (typically MLS ciphertext) |
|
| `payload` | `Data` | Variable | Opaque byte string (typically MLS ciphertext) |
|
||||||
| `channelId` | `Data` | 0 or 16 bytes | Channel identifier (empty for legacy, UUID recommended) |
|
| `channelId` | `Data` | 0 or 16 bytes | Channel identifier (empty for legacy, UUID recommended) |
|
||||||
| `version` | `UInt16` | 2 bytes | Wire version: `0` = legacy, `1` = current |
|
| `version` | `UInt16` | 2 bytes | Wire version: `1` = current (required) |
|
||||||
| `auth` | `Auth` | Struct | Authentication context |
|
| `auth` | `Auth` | Struct | Authentication context |
|
||||||
|
|
||||||
#### `fetch @3`
|
#### `fetch @3`
|
||||||
@@ -204,18 +203,9 @@ The `Auth` struct is attached to every mutating or per-user method call. It prov
|
|||||||
|
|
||||||
| Version | Behavior |
|
| Version | Behavior |
|
||||||
|---|---|
|
|---|---|
|
||||||
| `0` | **Legacy / no authentication.** The server ignores `accessToken` and `deviceId`. All requests are accepted unconditionally. This is the default for M1-M3 development. |
|
| `1` | **Token-based authentication (required).** The server validates `accessToken` (static token or OPAQUE session) and rejects requests with missing or invalid tokens. `deviceId` is used for audit logging. |
|
||||||
| `1` | **Token-based authentication.** The server validates `accessToken` and rejects requests with missing or invalid tokens. `deviceId` is used for audit logging. |
|
|
||||||
|
|
||||||
### Backward compatibility
|
Auth version `0` is no longer supported; clients must send `version=1` and a valid token.
|
||||||
|
|
||||||
The `version` field enables a clean migration path:
|
|
||||||
|
|
||||||
1. **Existing clients** that do not set the `Auth` struct (or set `version=0`) continue to work with servers running in legacy mode.
|
|
||||||
2. **New clients** set `version=1` and provide a valid `accessToken`.
|
|
||||||
3. **The server** inspects `version` to decide which validation path to use. When the migration is complete, the server can reject `version=0` requests.
|
|
||||||
|
|
||||||
This pattern avoids the need for a breaking schema change when authentication is introduced.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -20,9 +20,9 @@ interface DeliveryService {
|
|||||||
# recipientKey : Ed25519 public key of the intended recipient (exactly 32 bytes).
|
# recipientKey : Ed25519 public key of the intended recipient (exactly 32 bytes).
|
||||||
# payload : Opaque byte string — a TLS-encoded MlsMessageOut blob or any
|
# payload : Opaque byte string — a TLS-encoded MlsMessageOut blob or any
|
||||||
# other framed data the application layer wants to deliver.
|
# other framed data the application layer wants to deliver.
|
||||||
# channelId : Optional channel identifier (empty for legacy). A 16-byte UUID
|
# channelId : Optional channel identifier (empty for default). A 16-byte UUID
|
||||||
# is recommended for 1:1 channels.
|
# is recommended for 1:1 channels.
|
||||||
# version : Schema/wire version. Must be 0 (legacy) or 1 (this spec).
|
# version : Schema/wire version. Must be 1.
|
||||||
#
|
#
|
||||||
# The payload is appended to the recipient's FIFO queue. Returns immediately;
|
# The payload is appended to the recipient's FIFO queue. Returns immediately;
|
||||||
# the recipient retrieves it via `fetch`.
|
# the recipient retrieves it via `fetch`.
|
||||||
@@ -31,8 +31,8 @@ interface DeliveryService {
|
|||||||
# Fetch and atomically drain all queued payloads for a given recipient.
|
# Fetch and atomically drain all queued payloads for a given recipient.
|
||||||
#
|
#
|
||||||
# recipientKey : Ed25519 public key of the caller (exactly 32 bytes).
|
# recipientKey : Ed25519 public key of the caller (exactly 32 bytes).
|
||||||
# channelId : Optional channel identifier (empty for legacy).
|
# channelId : Optional channel identifier (empty for default).
|
||||||
# version : Schema/wire version. Must be 0 (legacy) or 1 (this spec).
|
# version : Schema/wire version. Must be 1.
|
||||||
#
|
#
|
||||||
# Returns the complete queue in FIFO order and clears it. Returns an empty
|
# Returns the complete queue in FIFO order and clears it. Returns an empty
|
||||||
# list if there are no pending messages.
|
# list if there are no pending messages.
|
||||||
|
|||||||
@@ -9,8 +9,7 @@ interface NodeService {
|
|||||||
# Upload a single-use KeyPackage for later retrieval by peers.
|
# Upload a single-use KeyPackage for later retrieval by peers.
|
||||||
# identityKey : Ed25519 public key bytes (32 bytes)
|
# identityKey : Ed25519 public key bytes (32 bytes)
|
||||||
# package : TLS-encoded openmls KeyPackage
|
# package : TLS-encoded openmls KeyPackage
|
||||||
# auth : Auth context (versioned). For legacy clients, pass an empty
|
# auth : Auth context (version=1, non-empty accessToken required).
|
||||||
# struct or version=0.
|
|
||||||
uploadKeyPackage @0 (identityKey :Data, package :Data, auth :Auth) -> (fingerprint :Data);
|
uploadKeyPackage @0 (identityKey :Data, package :Data, auth :Auth) -> (fingerprint :Data);
|
||||||
|
|
||||||
# Fetch and atomically remove one KeyPackage for a given identity key.
|
# Fetch and atomically remove one KeyPackage for a given identity key.
|
||||||
@@ -18,18 +17,21 @@ interface NodeService {
|
|||||||
fetchKeyPackage @1 (identityKey :Data, auth :Auth) -> (package :Data);
|
fetchKeyPackage @1 (identityKey :Data, auth :Auth) -> (package :Data);
|
||||||
|
|
||||||
# Enqueue an opaque payload for delivery to a recipient.
|
# Enqueue an opaque payload for delivery to a recipient.
|
||||||
# channelId : Optional channel identifier (empty for legacy). A 16-byte UUID
|
# channelId : Optional channel identifier (empty for default). A 16-byte UUID
|
||||||
# is recommended for 1:1 channels.
|
# is recommended for 1:1 channels.
|
||||||
# version : Schema/wire version. Must be 0 (legacy) or 1 (this spec).
|
# version : Schema/wire version. Must be 1.
|
||||||
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data, version :UInt16, auth :Auth) -> ();
|
# Returns the monotonically increasing per-inbox sequence number assigned to this message.
|
||||||
|
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data, version :UInt16, auth :Auth) -> (seq :UInt64);
|
||||||
|
|
||||||
# Fetch and drain all queued payloads for the recipient.
|
# Fetch and drain all queued payloads for the recipient.
|
||||||
# limit: max number of messages to return (0 = fetch all).
|
# limit: max number of messages to return (0 = fetch all).
|
||||||
fetch @3 (recipientKey :Data, channelId :Data, version :UInt16, auth :Auth, limit :UInt32) -> (payloads :List(Data));
|
# Returns envelopes with per-inbox sequence numbers for ordered MLS processing.
|
||||||
|
fetch @3 (recipientKey :Data, channelId :Data, version :UInt16, auth :Auth, limit :UInt32) -> (payloads :List(Envelope));
|
||||||
|
|
||||||
# Long-poll: wait up to timeoutMs for new payloads, then drain queue.
|
# Long-poll: wait up to timeoutMs for new payloads, then drain queue.
|
||||||
# limit: max number of messages to return (0 = fetch all).
|
# limit: max number of messages to return (0 = fetch all).
|
||||||
fetchWait @4 (recipientKey :Data, channelId :Data, version :UInt16, timeoutMs :UInt64, auth :Auth, limit :UInt32) -> (payloads :List(Data));
|
# Returns envelopes with per-inbox sequence numbers for ordered MLS processing.
|
||||||
|
fetchWait @4 (recipientKey :Data, channelId :Data, version :UInt16, timeoutMs :UInt64, auth :Auth, limit :UInt32) -> (payloads :List(Envelope));
|
||||||
|
|
||||||
# Health probe for readiness/liveness.
|
# Health probe for readiness/liveness.
|
||||||
health @5 () -> (status :Text);
|
health @5 () -> (status :Text);
|
||||||
@@ -67,7 +69,14 @@ interface NodeService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct Auth {
|
struct Auth {
|
||||||
version @0 :UInt16; # 0 = legacy/none, 1 = token-based auth
|
version @0 :UInt16; # 1 = token-based auth (required)
|
||||||
accessToken @1 :Data; # opaque bearer token issued at login
|
accessToken @1 :Data; # opaque bearer token issued at login
|
||||||
deviceId @2 :Data; # optional UUID bytes for auditing/rate limiting
|
deviceId @2 :Data; # optional UUID bytes for auditing/rate limiting
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# A delivery envelope pairing a per-inbox sequence number with an opaque payload.
|
||||||
|
# Clients sort by `seq` before processing to guarantee MLS commit ordering.
|
||||||
|
struct Envelope {
|
||||||
|
seq @0 :UInt64; # monotonically increasing per-inbox counter (assigned by server)
|
||||||
|
data @1 :Data; # opaque payload (hybrid-encrypted MLS message)
|
||||||
|
}
|
||||||
|
|||||||
38
scripts/check_rust_file_sizes.sh
Normal file
38
scripts/check_rust_file_sizes.sh
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
#!/usr/bin/env bash
# Enforce the Rust file-size policy from .github/INSTRUCTIONS.md:
#   - soft cap (default 400 lines): print WARN
#   - hard cap (default 650 lines): print FAIL and exit non-zero, unless allowlisted
#   - main.rs  (default 350 lines): stricter cap for entrypoint files, same FAIL semantics
# Caps are overridable via the SOFT_CAP / HARD_CAP / MAIN_CAP environment
# variables; the allowlist file (one repo-relative path per line) via SIZE_ALLOWLIST.
set -euo pipefail

SOFT_CAP=${SOFT_CAP:-400}
HARD_CAP=${HARD_CAP:-650}
MAIN_CAP=${MAIN_CAP:-350}   # INSTRUCTIONS.md: "main.rs should stay <350 lines"
warn=0
fail=0
ALLOW_FILE=${SIZE_ALLOWLIST:-.size-limits.allow}

# True iff $1 appears verbatim as a whole line in the allowlist file.
# -F: fixed string, -x: whole-line match, -q: quiet (exit status only).
is_allowed() {
  local file=$1
  [[ -f "$ALLOW_FILE" ]] && grep -Fxq "$file" "$ALLOW_FILE"
}

while IFS= read -r file; do
  # git ls-files can report paths that are staged but deleted from the working
  # tree; skip them instead of aborting the whole run under `set -e`.
  [[ -f "$file" ]] || continue
  lines=$(wc -l <"$file")

  # main.rs entrypoints get the stricter documented cap; everything else
  # gets the general hard cap.
  cap=$HARD_CAP
  label='hard cap'
  if [[ ${file##*/} == main.rs ]] && (( MAIN_CAP < HARD_CAP )); then
    cap=$MAIN_CAP
    label='main.rs cap'
  fi

  if (( lines > cap )); then
    if is_allowed "$file"; then
      printf 'ALLOW (%s): %s has %d lines (%s %d)\n' "$label" "$file" "$lines" "$label" "$cap"
      warn=1
      continue
    fi
    printf 'FAIL: %s has %d lines (%s %d)\n' "$file" "$lines" "$label" "$cap"
    fail=1
  elif (( lines > SOFT_CAP )); then
    printf 'WARN: %s has %d lines (soft cap %d)\n' "$file" "$lines" "$SOFT_CAP"
    warn=1
  fi
done < <(git ls-files '*.rs')

if (( fail == 1 )); then
  echo "One or more Rust files exceed the hard cap. Please split them before merging."
  exit 1
fi

if (( warn == 1 )); then
  echo "Warnings emitted for files exceeding the soft cap. Consider splitting them."
fi
|
||||||
Reference in New Issue
Block a user