Rename project to quicnprotochat
This commit is contained in:
890
Cargo.lock
generated
890
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
15
Cargo.toml
15
Cargo.toml
@@ -1,10 +1,10 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
resolver = "2"
|
resolver = "2"
|
||||||
members = [
|
members = [
|
||||||
"crates/noiseml-core",
|
"crates/quicnprotochat-core",
|
||||||
"crates/noiseml-proto",
|
"crates/quicnprotochat-proto",
|
||||||
"crates/noiseml-server",
|
"crates/quicnprotochat-server",
|
||||||
"crates/noiseml-client",
|
"crates/quicnprotochat-client",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Shared dependency versions — bump here to affect the whole workspace.
|
# Shared dependency versions — bump here to affect the whole workspace.
|
||||||
@@ -27,6 +27,9 @@ sha2 = { version = "0.10" }
|
|||||||
hkdf = { version = "0.12" }
|
hkdf = { version = "0.12" }
|
||||||
zeroize = { version = "1", features = ["derive"] }
|
zeroize = { version = "1", features = ["derive"] }
|
||||||
rand = { version = "0.8" }
|
rand = { version = "0.8" }
|
||||||
|
serde = { version = "1", features = ["derive"] }
|
||||||
|
serde_json = { version = "1" }
|
||||||
|
bincode = { version = "1" }
|
||||||
|
|
||||||
# ── Serialisation + RPC ───────────────────────────────────────────────────────
|
# ── Serialisation + RPC ───────────────────────────────────────────────────────
|
||||||
capnp = { version = "0.19" }
|
capnp = { version = "0.19" }
|
||||||
@@ -36,6 +39,10 @@ capnp-rpc = { version = "0.19" }
|
|||||||
tokio = { version = "1", features = ["full"] }
|
tokio = { version = "1", features = ["full"] }
|
||||||
tokio-util = { version = "0.7", features = ["codec", "compat"] }
|
tokio-util = { version = "0.7", features = ["codec", "compat"] }
|
||||||
futures = { version = "0.3" }
|
futures = { version = "0.3" }
|
||||||
|
quinn = { version = "0.11" }
|
||||||
|
quinn-proto = { version = "0.11" }
|
||||||
|
rustls = { version = "0.23", default-features = false, features = ["std"] }
|
||||||
|
rcgen = { version = "0.13" }
|
||||||
|
|
||||||
# ── Server utilities ──────────────────────────────────────────────────────────
|
# ── Server utilities ──────────────────────────────────────────────────────────
|
||||||
dashmap = { version = "5" }
|
dashmap = { version = "5" }
|
||||||
|
|||||||
81
M3_STATUS.md
81
M3_STATUS.md
@@ -1,6 +1,6 @@
|
|||||||
# M3 Implementation Status
|
# M3 Implementation Status
|
||||||
|
|
||||||
**Last updated:** 2026-02-19
|
**Last updated:** 2026-02-20
|
||||||
**Branch:** feat/m1-noise-transport (all milestones on this branch so far)
|
**Branch:** feat/m1-noise-transport (all milestones on this branch so far)
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -18,13 +18,13 @@ M3 adds:
|
|||||||
### `schemas/delivery.capnp` ✅
|
### `schemas/delivery.capnp` ✅
|
||||||
Simple DS schema: `enqueue(recipientKey, payload)` + `fetch(recipientKey) → List(Data)`.
|
Simple DS schema: `enqueue(recipientKey, payload)` + `fetch(recipientKey) → List(Data)`.
|
||||||
|
|
||||||
### `noiseml-proto/build.rs` ✅
|
### `quicnprotochat-proto/build.rs` ✅
|
||||||
Compiles `delivery.capnp` alongside `envelope.capnp` and `auth.capnp`.
|
Compiles `delivery.capnp` alongside `envelope.capnp` and `auth.capnp`.
|
||||||
|
|
||||||
### `noiseml-proto/src/lib.rs` ✅
|
### `quicnprotochat-proto/src/lib.rs` ✅
|
||||||
Exposes `pub mod delivery_capnp`.
|
Exposes `pub mod delivery_capnp`.
|
||||||
|
|
||||||
### `noiseml-core/src/group.rs` ✅ (FULLY FIXED, ALL TESTS PASS)
|
### `quicnprotochat-core/src/group.rs` ✅ (FULLY FIXED, ALL TESTS PASS)
|
||||||
`GroupMember` struct with methods:
|
`GroupMember` struct with methods:
|
||||||
- `new(identity: Arc<IdentityKeypair>) -> Self`
|
- `new(identity: Arc<IdentityKeypair>) -> Self`
|
||||||
- `generate_key_package() -> Result<Vec<u8>, CoreError>` — TLS-encoded KeyPackage bytes
|
- `generate_key_package() -> Result<Vec<u8>, CoreError>` — TLS-encoded KeyPackage bytes
|
||||||
@@ -43,70 +43,27 @@ Exposes `pub mod delivery_capnp`.
|
|||||||
- `From<MlsMessageIn> for ProtocolMessage` is also feature-gated
|
- `From<MlsMessageIn> for ProtocolMessage` is also feature-gated
|
||||||
- Must use `OpenMlsCryptoProvider` trait in scope for `backend.crypto()`
|
- Must use `OpenMlsCryptoProvider` trait in scope for `backend.crypto()`
|
||||||
|
|
||||||
### `noiseml-core/src/lib.rs` ✅
|
### `quicnprotochat-core/src/lib.rs` ✅
|
||||||
Exposes `pub use group::GroupMember`.
|
Exposes `pub use group::GroupMember`.
|
||||||
|
|
||||||
### `noiseml-server/src/main.rs` ✅
|
### `quicnprotochat-server/src/main.rs` ✅
|
||||||
Two listeners on one `LocalSet`:
|
Two listeners on one `LocalSet`:
|
||||||
- Port 7000 (AS): `AuthServiceImpl` — unchanged from M2
|
- Port 7000 (AS): `AuthServiceImpl` — unchanged from M2
|
||||||
- Port 7001 (DS): `DeliveryServiceImpl` — new; uses `DashMap<Vec<u8>, VecDeque<Vec<u8>>>` keyed by Ed25519 public key
|
- Port 7001 (DS): `DeliveryServiceImpl` — new; uses `DashMap<Vec<u8>, VecDeque<Vec<u8>>>` keyed by Ed25519 public key
|
||||||
|
|
||||||
New CLI flag: `--ds-listen` (default `0.0.0.0:7001`, env `NOISEML_DS_LISTEN`).
|
New CLI flag: `--ds-listen` (default `0.0.0.0:7001`, env `QUICNPROTOCHAT_DS_LISTEN`).
|
||||||
|
|
||||||
|
### `quicnprotochat-client/src/main.rs` ✅
|
||||||
|
Added `demo-group` subcommand to exercise the full Alice↔Bob MLS flow against live AS (7000) and DS (7001): uploads both KeyPackages, delivers Welcome via DS, and exchanges application messages.
|
||||||
|
|
||||||
|
### `quicnprotochat-client/tests` ✅
|
||||||
|
`cargo test -p quicnprotochat-client --tests` passes, including the MLS round-trip integration test.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## NOT YET DONE (continue tomorrow)
|
## Notes
|
||||||
|
|
||||||
### 1. `noiseml-client/src/main.rs` — Group subcommands
|
Open question (future work): if we need persistent groups instead of ephemeral demo runs, enable openmls `serde` feature and add statefile-backed subcommands (`create-group`, `invite`, `join`, `send`, `recv`). For M3, the demo path is sufficient.
|
||||||
|
|
||||||
Add these subcommands (note: need state persistence or a `demo` command approach):
|
|
||||||
|
|
||||||
**Recommended approach:** Add a `demo-group` subcommand that runs the full Alice-Bob MLS round-trip in a single process invocation against a live server. This avoids the `MlsGroup` serialization problem (openmls 0.5 MlsGroup state is hard to persist without the `serde` feature).
|
|
||||||
|
|
||||||
**Alternatively (with state file):** Enable `serde` feature on openmls in `Cargo.toml` and store `MlsGroup` state to disk. The workspace Cargo.toml uses `features = ["crypto-subtle"]` for openmls — add `"serde"` to that list.
|
|
||||||
|
|
||||||
Subcommands needed:
|
|
||||||
- `create-group --as-server --ds-server --group-id <NAME>` — creates group, saves state
|
|
||||||
- `invite --as-server --ds-server --peer-key <HEX>` — fetches peer KP from AS, creates Welcome, enqueues to DS
|
|
||||||
- `join --ds-server` — fetches Welcome from DS, joins group, saves state
|
|
||||||
- `send --ds-server --peer-key <HEX> --msg <TEXT>` — sends application message to DS
|
|
||||||
- `recv --ds-server` — fetches and decrypts messages from DS
|
|
||||||
|
|
||||||
OR: just add `demo-group --server --ds-server` that does the whole flow.
|
|
||||||
|
|
||||||
### 2. `noiseml-client/tests/mls_group.rs` — Integration test
|
|
||||||
|
|
||||||
This is the PRIORITY for testing. The integration test should:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// 1. Spawn server (AS on port X, DS on port Y) with tokio::process::Command
|
|
||||||
// or by directly calling the server's accept loop in a LocalSet
|
|
||||||
// 2. Alice: GroupMember::new, generate_key_package, upload to AS
|
|
||||||
// 3. Bob: GroupMember::new, generate_key_package, upload to AS
|
|
||||||
// 4. Alice: create_group, fetch Bob's KP from AS, add_member → (commit, welcome)
|
|
||||||
// Alice: enqueue welcome for Bob via DS (recipient = bob's identity.public_key_bytes())
|
|
||||||
// 5. Bob: fetch from DS, join_group(welcome)
|
|
||||||
// 6. Alice: send_message(b"hello bob"), enqueue to DS
|
|
||||||
// 7. Bob: fetch from DS, receive_message → assert plaintext == b"hello bob"
|
|
||||||
// 8. Bob: send_message(b"hello alice"), enqueue to DS
|
|
||||||
// 9. Alice: fetch from DS, receive_message → assert plaintext == b"hello alice"
|
|
||||||
```
|
|
||||||
|
|
||||||
**Important:** For the integration test, you can bypass the CLI and use `GroupMember` + capnp-rpc client helpers directly.
|
|
||||||
|
|
||||||
Connect to DS (port 7001):
|
|
||||||
```rust
|
|
||||||
async fn connect_ds(server: &str, keypair: &NoiseKeypair) -> anyhow::Result<delivery_service::Client> {
|
|
||||||
let stream = TcpStream::connect(server).await?;
|
|
||||||
let transport = handshake_initiator(stream, keypair).await?;
|
|
||||||
let (reader, writer) = transport.into_capnp_io();
|
|
||||||
let network = twoparty::VatNetwork::new(reader.compat(), writer.compat_write(), Side::Client, Default::default());
|
|
||||||
let mut rpc = RpcSystem::new(Box::new(network), None);
|
|
||||||
let ds: delivery_service::Client = rpc.bootstrap(Side::Server);
|
|
||||||
tokio::task::spawn_local(rpc);
|
|
||||||
Ok(ds)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -141,13 +98,13 @@ test group::tests::group_id_lifecycle ... ok
|
|||||||
```bash
|
```bash
|
||||||
cd /home/c/projects/poc-mes
|
cd /home/c/projects/poc-mes
|
||||||
git log --oneline -5 # see where we are
|
git log --oneline -5 # see where we are
|
||||||
cargo test -p noiseml-core # verify green
|
cargo test -p quicnprotochat-core # verify green
|
||||||
```
|
```
|
||||||
|
|
||||||
Then:
|
Then:
|
||||||
1. Write `crates/noiseml-client/tests/mls_group.rs` (integration test) — highest priority
|
1. Write `crates/quicnprotochat-client/tests/mls_group.rs` (integration test) — highest priority
|
||||||
2. Add group subcommands to `crates/noiseml-client/src/main.rs`
|
2. Add group subcommands to `crates/quicnprotochat-client/src/main.rs`
|
||||||
|
|
||||||
The integration test is the most important piece — it proves the full M3 stack works end-to-end.
|
The integration test is the most important piece — it proves the full M3 stack works end-to-end.
|
||||||
|
|
||||||
For the test, see the pattern in `crates/noiseml-client/tests/auth_service.rs` (M2 test) for how to spin up the server and connect clients.
|
For the test, see the pattern in `crates/quicnprotochat-client/tests/auth_service.rs` (M2 test) for how to spin up the server and connect clients.
|
||||||
|
|||||||
45
README.md
45
README.md
@@ -1,4 +1,4 @@
|
|||||||
# noiseml
|
# quicnprotochat
|
||||||
|
|
||||||
> End-to-end encrypted group messaging over **Noise_XX + MLS** (RFC 9420), written in Rust.
|
> End-to-end encrypted group messaging over **Noise_XX + MLS** (RFC 9420), written in Rust.
|
||||||
|
|
||||||
@@ -38,9 +38,9 @@ key agreement across any number of participants. Messages are framed with
|
|||||||
## Repository layout
|
## Repository layout
|
||||||
|
|
||||||
```
|
```
|
||||||
noiseml/
|
quicnprotochat/
|
||||||
├── crates/
|
├── crates/
|
||||||
│ ├── noiseml-core/ # Crypto primitives, Noise transport, MLS group state machine
|
│ ├── quicnprotochat-core/ # Crypto primitives, Noise transport, MLS group state machine
|
||||||
│ │ ├── src/codec.rs # LengthPrefixedCodec — Tokio Encoder + Decoder
|
│ │ ├── src/codec.rs # LengthPrefixedCodec — Tokio Encoder + Decoder
|
||||||
│ │ ├── src/keypair.rs # NoiseKeypair — X25519 static key, zeroize-on-drop
|
│ │ ├── src/keypair.rs # NoiseKeypair — X25519 static key, zeroize-on-drop
|
||||||
│ │ ├── src/identity.rs # IdentityKeypair — Ed25519 identity + MLS Signer
|
│ │ ├── src/identity.rs # IdentityKeypair — Ed25519 identity + MLS Signer
|
||||||
@@ -48,11 +48,11 @@ noiseml/
|
|||||||
│ │ ├── src/noise.rs # handshake_initiator / handshake_responder / NoiseTransport
|
│ │ ├── src/noise.rs # handshake_initiator / handshake_responder / NoiseTransport
|
||||||
│ │ └── src/group.rs # GroupMember — full MLS group lifecycle
|
│ │ └── src/group.rs # GroupMember — full MLS group lifecycle
|
||||||
│ │
|
│ │
|
||||||
│ ├── noiseml-proto/ # Cap'n Proto schemas + generated types + serde helpers
|
│ ├── quicnprotochat-proto/ # Cap'n Proto schemas + generated types + serde helpers
|
||||||
│ │ └── schemas/ → # (symlinked to workspace root schemas/)
|
│ │ └── schemas/ → # (symlinked to workspace root schemas/)
|
||||||
│ │
|
│ │
|
||||||
│ ├── noiseml-server/ # Authentication Service (AS) + Delivery Service (DS) binary
|
│ ├── quicnprotochat-server/ # Authentication Service (AS) + Delivery Service (DS) binary
|
||||||
│ └── noiseml-client/ # CLI client (ping, register, fetch-key, …)
|
│ └── quicnprotochat-client/ # CLI client (ping, register, fetch-key, …)
|
||||||
│
|
│
|
||||||
└── schemas/
|
└── schemas/
|
||||||
├── envelope.capnp # Top-level wire envelope (MsgType discriminant + payload)
|
├── envelope.capnp # Top-level wire envelope (MsgType discriminant + payload)
|
||||||
@@ -147,27 +147,46 @@ cargo test --workspace
|
|||||||
**Start the server** (AS on :7000, DS on :7001):
|
**Start the server** (AS on :7000, DS on :7001):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cargo run -p noiseml-server
|
cargo run -p quicnprotochat-server
|
||||||
# or with custom ports:
|
# or with custom ports:
|
||||||
cargo run -p noiseml-server -- --listen 0.0.0.0:7000 --ds-listen 0.0.0.0:7001
|
cargo run -p quicnprotochat-server -- --listen 0.0.0.0:7000 --ds-listen 0.0.0.0:7001
|
||||||
```
|
```
|
||||||
|
|
||||||
**Client commands:**
|
**Client commands:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Check connectivity
|
# Check connectivity
|
||||||
cargo run -p noiseml-client -- ping
|
cargo run -p quicnprotochat-client -- ping
|
||||||
|
|
||||||
# Generate a fresh identity + KeyPackage, upload to AS
|
# Generate a fresh identity + KeyPackage, upload to AS
|
||||||
# Prints your identity_key (hex) — share this with peers
|
# Prints your identity_key (hex) — share this with peers
|
||||||
cargo run -p noiseml-client -- register
|
cargo run -p quicnprotochat-client -- register
|
||||||
|
|
||||||
# Fetch a peer's KeyPackage (they must have registered first)
|
# Fetch a peer's KeyPackage (they must have registered first)
|
||||||
cargo run -p noiseml-client -- fetch-key <64-hex-char identity key>
|
cargo run -p quicnprotochat-client -- fetch-key <64-hex-char identity key>
|
||||||
|
|
||||||
|
# Run an end-to-end Alice↔Bob demo against live AS + DS
|
||||||
|
cargo run -p quicnprotochat-client -- demo-group \
|
||||||
|
--server 127.0.0.1:7000 \
|
||||||
|
--ds-server 127.0.0.1:7001
|
||||||
|
|
||||||
|
# Persistent group CLI (stateful)
|
||||||
|
cargo run -p quicnprotochat-client -- register-state --state state.bin --server 127.0.0.1:7000
|
||||||
|
cargo run -p quicnprotochat-client -- create-group --state state.bin --group-id my-group
|
||||||
|
cargo run -p quicnprotochat-client -- invite --state state.bin --peer-key <peer hex> --server 127.0.0.1:7000 --ds-server 127.0.0.1:7001
|
||||||
|
cargo run -p quicnprotochat-client -- join --state state.bin --ds-server 127.0.0.1:7001
|
||||||
|
cargo run -p quicnprotochat-client -- send --state state.bin --peer-key <peer hex> --msg "hello" --ds-server 127.0.0.1:7001
|
||||||
|
cargo run -p quicnprotochat-client -- recv --state state.bin --ds-server 127.0.0.1:7001
|
||||||
```
|
```
|
||||||
|
|
||||||
Server address defaults to `127.0.0.1:7000`; override with `--server` or
|
Server address defaults to `127.0.0.1:7000`; override with `--server` or
|
||||||
`NOISEML_SERVER`.
|
`QUICNPROTOCHAT_SERVER`. Delivery Service defaults to `127.0.0.1:7001`; override with
|
||||||
|
`--ds-server` or `QUICNPROTOCHAT_DS_SERVER`.
|
||||||
|
|
||||||
|
State file notes: the persisted state stores your identity and MLS group state
|
||||||
|
after you have joined. If you generate a KeyPackage (`register-state`) and then
|
||||||
|
restart before consuming the Welcome, the join may fail because the HPKE init
|
||||||
|
key is not retained; run join in the same session you register.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -178,7 +197,7 @@ Server address defaults to `127.0.0.1:7000`; override with `--server` or
|
|||||||
| M1 | Noise transport | ✅ | Noise_XX handshake, length-prefixed framing, Ping/Pong |
|
| M1 | Noise transport | ✅ | Noise_XX handshake, length-prefixed framing, Ping/Pong |
|
||||||
| M2 | Authentication Service | ✅ | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
| M2 | Authentication Service | ✅ | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
||||||
| M3 | Delivery Service + MLS groups | ✅ | DS relay, `GroupMember` create/join/add/send/recv |
|
| M3 | Delivery Service + MLS groups | ✅ | DS relay, `GroupMember` create/join/add/send/recv |
|
||||||
| M4 | Group CLI subcommands | 🔜 | `create-group`, `invite`, `join`, `send`, `recv` |
|
| M4 | Group CLI subcommands | 🔜 | Persistent CLI (`create-group`, `invite`, `join`, `send`, `recv`); demo-group already available |
|
||||||
| M5 | Multi-party groups | 🔜 | N > 2 members, Commit fan-out, Proposal handling |
|
| M5 | Multi-party groups | 🔜 | N > 2 members, Commit fan-out, Proposal handling |
|
||||||
| M6 | Persistence | 🔜 | SQLite key store, durable group state |
|
| M6 | Persistence | 🔜 | SQLite key store, durable group state |
|
||||||
| M7 | Post-quantum | 🔜 | ML-KEM-768 hybrid in Noise layer |
|
| M7 | Post-quantum | 🔜 | ML-KEM-768 hybrid in Noise layer |
|
||||||
|
|||||||
@@ -1,313 +0,0 @@
|
|||||||
//! noiseml CLI client.
|
|
||||||
//!
|
|
||||||
//! # Subcommands
|
|
||||||
//!
|
|
||||||
//! | Subcommand | Description |
|
|
||||||
//! |--------------|----------------------------------------------------------|
|
|
||||||
//! | `ping` | Send a Ping to the server, print RTT |
|
|
||||||
//! | `register` | Generate a KeyPackage and upload it to the AS |
|
|
||||||
//! | `fetch-key` | Fetch a peer's KeyPackage from the AS by identity key |
|
|
||||||
//!
|
|
||||||
//! # Configuration
|
|
||||||
//!
|
|
||||||
//! | Env var | CLI flag | Default |
|
|
||||||
//! |-----------------|--------------|---------------------|
|
|
||||||
//! | `NOISEML_SERVER`| `--server` | `127.0.0.1:7000` |
|
|
||||||
//! | `RUST_LOG` | — | `warn` |
|
|
||||||
|
|
||||||
use anyhow::Context;
|
|
||||||
use capnp_rpc::{RpcSystem, rpc_twoparty_capnp::Side, twoparty};
|
|
||||||
use clap::{Parser, Subcommand};
|
|
||||||
use tokio::net::TcpStream;
|
|
||||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
|
||||||
|
|
||||||
use noiseml_core::{IdentityKeypair, NoiseKeypair, generate_key_package, handshake_initiator};
|
|
||||||
use noiseml_proto::{MsgType, ParsedEnvelope, auth_capnp::authentication_service};
|
|
||||||
|
|
||||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
#[derive(Debug, Parser)]
|
|
||||||
#[command(name = "noiseml", about = "noiseml CLI client", version)]
|
|
||||||
struct Args {
|
|
||||||
#[command(subcommand)]
|
|
||||||
command: Command,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
|
||||||
enum Command {
|
|
||||||
/// Send a Ping to the server and print the round-trip time.
|
|
||||||
Ping {
|
|
||||||
/// Server address (host:port).
|
|
||||||
#[arg(long, default_value = "127.0.0.1:7000", env = "NOISEML_SERVER")]
|
|
||||||
server: String,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
|
|
||||||
///
|
|
||||||
/// Prints the SHA-256 fingerprint of the uploaded package and the raw
|
|
||||||
/// Ed25519 identity public key bytes (hex), which peers need to fetch it.
|
|
||||||
Register {
|
|
||||||
/// Server address (host:port).
|
|
||||||
#[arg(long, default_value = "127.0.0.1:7000", env = "NOISEML_SERVER")]
|
|
||||||
server: String,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Fetch a peer's KeyPackage from the Authentication Service.
|
|
||||||
///
|
|
||||||
/// IDENTITY_KEY is the peer's Ed25519 public key encoded as 64 lowercase
|
|
||||||
/// hex characters (32 bytes).
|
|
||||||
FetchKey {
|
|
||||||
/// Server address (host:port).
|
|
||||||
#[arg(long, default_value = "127.0.0.1:7000", env = "NOISEML_SERVER")]
|
|
||||||
server: String,
|
|
||||||
|
|
||||||
/// Target peer's Ed25519 identity public key (64 hex chars = 32 bytes).
|
|
||||||
identity_key: String,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() -> anyhow::Result<()> {
|
|
||||||
tracing_subscriber::fmt()
|
|
||||||
.with_env_filter(
|
|
||||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
|
||||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
|
|
||||||
)
|
|
||||||
.init();
|
|
||||||
|
|
||||||
let args = Args::parse();
|
|
||||||
|
|
||||||
match args.command {
|
|
||||||
Command::Ping { server } => cmd_ping(&server).await,
|
|
||||||
Command::Register { server } => {
|
|
||||||
let local = tokio::task::LocalSet::new();
|
|
||||||
local.run_until(cmd_register(&server)).await
|
|
||||||
}
|
|
||||||
Command::FetchKey {
|
|
||||||
server,
|
|
||||||
identity_key,
|
|
||||||
} => {
|
|
||||||
let local = tokio::task::LocalSet::new();
|
|
||||||
local.run_until(cmd_fetch_key(&server, &identity_key)).await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Subcommand implementations ────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Connect to `server`, complete Noise_XX, send a Ping, and print RTT.
|
|
||||||
async fn cmd_ping(server: &str) -> anyhow::Result<()> {
|
|
||||||
let keypair = NoiseKeypair::generate();
|
|
||||||
|
|
||||||
let stream = TcpStream::connect(server)
|
|
||||||
.await
|
|
||||||
.with_context(|| format!("could not connect to {server}"))?;
|
|
||||||
|
|
||||||
tracing::debug!(server = %server, "TCP connection established");
|
|
||||||
|
|
||||||
let mut transport = handshake_initiator(stream, &keypair)
|
|
||||||
.await
|
|
||||||
.context("Noise_XX handshake failed")?;
|
|
||||||
|
|
||||||
{
|
|
||||||
let remote = transport
|
|
||||||
.remote_static_public_key()
|
|
||||||
.map(|k| fmt_hex(&k[..4]))
|
|
||||||
.unwrap_or_else(|| "unknown".into());
|
|
||||||
tracing::debug!(server_key = %remote, "handshake complete");
|
|
||||||
}
|
|
||||||
|
|
||||||
let sent_at = current_timestamp_ms();
|
|
||||||
|
|
||||||
transport
|
|
||||||
.send_envelope(&ParsedEnvelope {
|
|
||||||
msg_type: MsgType::Ping,
|
|
||||||
group_id: vec![],
|
|
||||||
sender_id: vec![],
|
|
||||||
payload: vec![],
|
|
||||||
timestamp_ms: sent_at,
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.context("failed to send Ping")?;
|
|
||||||
|
|
||||||
tracing::debug!("Ping sent");
|
|
||||||
|
|
||||||
let response = transport
|
|
||||||
.recv_envelope()
|
|
||||||
.await
|
|
||||||
.context("failed to receive Pong")?;
|
|
||||||
|
|
||||||
match response.msg_type {
|
|
||||||
MsgType::Pong => {
|
|
||||||
let rtt_ms = current_timestamp_ms().saturating_sub(sent_at);
|
|
||||||
println!("Pong from {server} rtt={rtt_ms}ms");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
_ => anyhow::bail!(
|
|
||||||
"protocol error: expected Pong from {server}, got unexpected message type"
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Generate a KeyPackage for a fresh identity and upload it to the AS.
|
|
||||||
///
|
|
||||||
/// Must run on a `LocalSet` because capnp-rpc is `!Send`.
|
|
||||||
async fn cmd_register(server: &str) -> anyhow::Result<()> {
|
|
||||||
let noise_keypair = NoiseKeypair::generate();
|
|
||||||
let identity = IdentityKeypair::generate();
|
|
||||||
|
|
||||||
let (tls_bytes, fingerprint) =
|
|
||||||
generate_key_package(&identity).context("KeyPackage generation failed")?;
|
|
||||||
|
|
||||||
let as_client = connect_as(server, &noise_keypair).await?;
|
|
||||||
|
|
||||||
let mut req = as_client.upload_key_package_request();
|
|
||||||
req.get().set_identity_key(&identity.public_key_bytes());
|
|
||||||
req.get().set_package(&tls_bytes);
|
|
||||||
|
|
||||||
let response = req
|
|
||||||
.send()
|
|
||||||
.promise
|
|
||||||
.await
|
|
||||||
.context("upload_key_package RPC failed")?;
|
|
||||||
|
|
||||||
let server_fp = response
|
|
||||||
.get()
|
|
||||||
.context("upload_key_package: bad response")?
|
|
||||||
.get_fingerprint()
|
|
||||||
.context("upload_key_package: missing fingerprint")?
|
|
||||||
.to_vec();
|
|
||||||
|
|
||||||
// Verify the server echoed the same fingerprint.
|
|
||||||
anyhow::ensure!(
|
|
||||||
server_fp == fingerprint,
|
|
||||||
"fingerprint mismatch: local={} server={}",
|
|
||||||
hex::encode(&fingerprint),
|
|
||||||
hex::encode(&server_fp),
|
|
||||||
);
|
|
||||||
|
|
||||||
println!("identity_key : {}", hex::encode(identity.public_key_bytes()));
|
|
||||||
println!("fingerprint : {}", hex::encode(&fingerprint));
|
|
||||||
println!("KeyPackage uploaded successfully.");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetch a peer's KeyPackage from the AS by their hex-encoded identity key.
|
|
||||||
///
|
|
||||||
/// Must run on a `LocalSet` because capnp-rpc is `!Send`.
|
|
||||||
async fn cmd_fetch_key(server: &str, identity_key_hex: &str) -> anyhow::Result<()> {
|
|
||||||
let identity_key = hex::decode(identity_key_hex)
|
|
||||||
.map_err(|e| anyhow::anyhow!(e))
|
|
||||||
.context("identity_key must be 64 hex characters (32 bytes)")?;
|
|
||||||
anyhow::ensure!(
|
|
||||||
identity_key.len() == 32,
|
|
||||||
"identity_key must be exactly 32 bytes, got {}",
|
|
||||||
identity_key.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
let noise_keypair = NoiseKeypair::generate();
|
|
||||||
let as_client = connect_as(server, &noise_keypair).await?;
|
|
||||||
|
|
||||||
let mut req = as_client.fetch_key_package_request();
|
|
||||||
req.get().set_identity_key(&identity_key);
|
|
||||||
|
|
||||||
let response = req
|
|
||||||
.send()
|
|
||||||
.promise
|
|
||||||
.await
|
|
||||||
.context("fetch_key_package RPC failed")?;
|
|
||||||
|
|
||||||
let package = response
|
|
||||||
.get()
|
|
||||||
.context("fetch_key_package: bad response")?
|
|
||||||
.get_package()
|
|
||||||
.context("fetch_key_package: missing package field")?
|
|
||||||
.to_vec();
|
|
||||||
|
|
||||||
if package.is_empty() {
|
|
||||||
println!("No KeyPackage available for this identity.");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
use sha2::{Digest, Sha256};
|
|
||||||
let fingerprint = Sha256::digest(&package);
|
|
||||||
|
|
||||||
println!("fingerprint : {}", hex::encode(fingerprint));
|
|
||||||
println!("package_len : {} bytes", package.len());
|
|
||||||
println!("KeyPackage fetched successfully.");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Shared helpers ────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Establish a Noise_XX connection and return an `AuthenticationService` client.
|
|
||||||
///
|
|
||||||
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
|
|
||||||
async fn connect_as(
|
|
||||||
server: &str,
|
|
||||||
noise_keypair: &NoiseKeypair,
|
|
||||||
) -> anyhow::Result<authentication_service::Client> {
|
|
||||||
let stream = TcpStream::connect(server)
|
|
||||||
.await
|
|
||||||
.with_context(|| format!("could not connect to {server}"))?;
|
|
||||||
|
|
||||||
let transport = handshake_initiator(stream, noise_keypair)
|
|
||||||
.await
|
|
||||||
.context("Noise_XX handshake failed")?;
|
|
||||||
|
|
||||||
let (reader, writer) = transport.into_capnp_io();
|
|
||||||
|
|
||||||
let network = twoparty::VatNetwork::new(
|
|
||||||
reader.compat(),
|
|
||||||
writer.compat_write(),
|
|
||||||
Side::Client,
|
|
||||||
Default::default(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut rpc_system = RpcSystem::new(Box::new(network), None);
|
|
||||||
let as_client: authentication_service::Client =
|
|
||||||
rpc_system.bootstrap(Side::Server);
|
|
||||||
|
|
||||||
// Drive the RPC system on the local set.
|
|
||||||
tokio::task::spawn_local(rpc_system);
|
|
||||||
|
|
||||||
Ok(as_client)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Format the first `n` bytes as lowercase hex with a trailing `…`.
|
|
||||||
fn fmt_hex(bytes: &[u8]) -> String {
|
|
||||||
let hex: String = bytes.iter().map(|b| format!("{b:02x}")).collect();
|
|
||||||
format!("{hex}…")
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the current Unix timestamp in milliseconds.
|
|
||||||
fn current_timestamp_ms() -> u64 {
|
|
||||||
std::time::SystemTime::now()
|
|
||||||
.duration_since(std::time::UNIX_EPOCH)
|
|
||||||
.unwrap_or_default()
|
|
||||||
.as_millis() as u64
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Hex encoding helper ───────────────────────────────────────────────────────
|
|
||||||
//
|
|
||||||
// We use a tiny inline module rather than adding `hex` as a dependency.
|
|
||||||
|
|
||||||
mod hex {
|
|
||||||
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
|
|
||||||
bytes.as_ref().iter().map(|b| format!("{b:02x}")).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
|
|
||||||
if s.len() % 2 != 0 {
|
|
||||||
return Err("odd-length hex string");
|
|
||||||
}
|
|
||||||
(0..s.len())
|
|
||||||
.step_by(2)
|
|
||||||
.map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(|_| "invalid hex character"))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,460 +0,0 @@
|
|||||||
//! noiseml-server — Delivery Service + Authentication Service binary.
|
|
||||||
//!
|
|
||||||
//! # M3 scope
|
|
||||||
//!
|
|
||||||
//! The server exposes two Noise_XX-protected Cap'n Proto RPC endpoints:
|
|
||||||
//!
|
|
||||||
//! * **AS** (`--listen`, default `0.0.0.0:7000`) — `AuthenticationService`:
|
|
||||||
//! upload and fetch single-use MLS KeyPackages.
|
|
||||||
//! * **DS** (`--ds-listen`, default `0.0.0.0:7001`) — `DeliveryService`:
|
|
||||||
//! enqueue and fetch opaque payloads (Welcome messages, Commits, Application
|
|
||||||
//! messages) keyed by recipient Ed25519 public key.
|
|
||||||
//!
|
|
||||||
//! # Architecture
|
|
||||||
//!
|
|
||||||
//! ```text
|
|
||||||
//! TcpListener (AS, 7000) TcpListener (DS, 7001)
|
|
||||||
//! └─ Noise_XX handshake └─ Noise_XX handshake
|
|
||||||
//! └─ capnp-rpc VatNetwork (LocalSet, !Send)
|
|
||||||
//! ├─ AuthServiceImpl (shares KeyPackageStore via Arc)
|
|
||||||
//! └─ DeliveryServiceImpl (shares DeliveryStore via Arc)
|
|
||||||
//! ```
|
|
||||||
//!
|
|
||||||
//! Because `capnp-rpc` uses `Rc<RefCell<>>` internally it is `!Send`.
|
|
||||||
//! The entire RPC stack lives on a `tokio::task::LocalSet` spawned per
|
|
||||||
//! connection.
|
|
||||||
//!
|
|
||||||
//! # Configuration
|
|
||||||
//!
|
|
||||||
//! | Env var | CLI flag | Default |
|
|
||||||
//! |---------------------|----------------|-----------------|
|
|
||||||
//! | `NOISEML_LISTEN` | `--listen` | `0.0.0.0:7000` |
|
|
||||||
//! | `NOISEML_DS_LISTEN` | `--ds-listen` | `0.0.0.0:7001` |
|
|
||||||
//! | `RUST_LOG` | — | `info` |
|
|
||||||
|
|
||||||
use std::{collections::VecDeque, sync::Arc};
|
|
||||||
|
|
||||||
use anyhow::Context;
|
|
||||||
use capnp::capability::Promise;
|
|
||||||
use capnp_rpc::{RpcSystem, rpc_twoparty_capnp::Side, twoparty};
|
|
||||||
use clap::Parser;
|
|
||||||
use dashmap::DashMap;
|
|
||||||
use noiseml_core::{NoiseKeypair, handshake_responder};
|
|
||||||
use noiseml_proto::{
|
|
||||||
auth_capnp::authentication_service,
|
|
||||||
delivery_capnp::delivery_service,
|
|
||||||
};
|
|
||||||
use sha2::{Digest, Sha256};
|
|
||||||
use tokio::net::{TcpListener, TcpStream};
|
|
||||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
|
||||||
use tracing::Instrument;
|
|
||||||
|
|
||||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Command-line / environment configuration for the server binary.
///
/// Parsed once at startup in `main` via `Args::parse()`. The doc comments
/// on the fields double as the `--help` text that clap generates, so keep
/// them short and user-facing.
#[derive(Debug, Parser)]
#[command(
    name = "noiseml-server",
    about = "noiseml Delivery Service + Authentication Service",
    version
)]
struct Args {
    /// TCP address for the Authentication Service.
    // Also settable through the NOISEML_LISTEN environment variable;
    // the CLI flag wins when both are present (clap precedence).
    #[arg(long, default_value = "0.0.0.0:7000", env = "NOISEML_LISTEN")]
    listen: String,

    /// TCP address for the Delivery Service.
    // Also settable through NOISEML_DS_LISTEN.
    #[arg(long, default_value = "0.0.0.0:7001", env = "NOISEML_DS_LISTEN")]
    ds_listen: String,
}
|
|
||||||
|
|
||||||
// ── Shared store types ────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Thread-safe map from Ed25519 identity public key bytes (32 B) to a queue
/// of serialised MLS KeyPackage blobs.
///
/// Each KeyPackage is single-use per RFC 9420: `fetch_key_package` removes
/// and returns exactly one entry.
///
/// Wrapped in `Arc` so every connection handler shares the same map; the
/// 32-byte key length is enforced by the RPC handlers, not by the type.
type KeyPackageStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
|
|
||||||
|
|
||||||
/// Thread-safe message queue for the Delivery Service.
///
/// Maps recipient Ed25519 public key (32 bytes) to a FIFO queue of opaque
/// payload bytes (TLS-encoded MLS messages or other framed data).
///
/// Structurally identical to `KeyPackageStore`; kept as a separate alias so
/// the two stores read distinctly at call sites (type aliases are not
/// distinct types in Rust, so this is readability only).
type DeliveryStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
|
|
||||||
|
|
||||||
// ── Authentication Service implementation ─────────────────────────────────────
|
|
||||||
|
|
||||||
/// Cap'n Proto RPC server implementation for `AuthenticationService`.
///
/// One instance is constructed per accepted connection (see
/// `handle_as_connection`); all instances share the same underlying map
/// through the `Arc` inside `KeyPackageStore`.
struct AuthServiceImpl {
    // Shared KeyPackage queues, keyed by uploader identity public key.
    store: KeyPackageStore,
}
|
|
||||||
|
|
||||||
impl authentication_service::Server for AuthServiceImpl {
|
|
||||||
/// Upload a single-use KeyPackage and return its SHA-256 fingerprint.
|
|
||||||
fn upload_key_package(
|
|
||||||
&mut self,
|
|
||||||
params: authentication_service::UploadKeyPackageParams,
|
|
||||||
mut results: authentication_service::UploadKeyPackageResults,
|
|
||||||
) -> Promise<(), capnp::Error> {
|
|
||||||
let params = params.get().map_err(|e| {
|
|
||||||
capnp::Error::failed(format!("upload_key_package: bad params: {e}"))
|
|
||||||
});
|
|
||||||
|
|
||||||
let (identity_key, package) = match params {
|
|
||||||
Ok(p) => {
|
|
||||||
let ik = match p.get_identity_key() {
|
|
||||||
Ok(v) => v.to_vec(),
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
};
|
|
||||||
let pkg = match p.get_package() {
|
|
||||||
Ok(v) => v.to_vec(),
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
};
|
|
||||||
(ik, pkg)
|
|
||||||
}
|
|
||||||
Err(e) => return Promise::err(e),
|
|
||||||
};
|
|
||||||
|
|
||||||
if identity_key.len() != 32 {
|
|
||||||
return Promise::err(capnp::Error::failed(format!(
|
|
||||||
"identityKey must be exactly 32 bytes, got {}",
|
|
||||||
identity_key.len()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
if package.is_empty() {
|
|
||||||
return Promise::err(capnp::Error::failed(
|
|
||||||
"package must not be empty".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let fingerprint: Vec<u8> = Sha256::digest(&package).to_vec();
|
|
||||||
|
|
||||||
self.store
|
|
||||||
.entry(identity_key)
|
|
||||||
.or_default()
|
|
||||||
.push_back(package);
|
|
||||||
|
|
||||||
results
|
|
||||||
.get()
|
|
||||||
.set_fingerprint(&fingerprint);
|
|
||||||
|
|
||||||
tracing::debug!(
|
|
||||||
fingerprint = %fmt_hex(&fingerprint[..4]),
|
|
||||||
"KeyPackage uploaded"
|
|
||||||
);
|
|
||||||
|
|
||||||
Promise::ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Atomically remove and return one KeyPackage for the given identity key.
|
|
||||||
fn fetch_key_package(
|
|
||||||
&mut self,
|
|
||||||
params: authentication_service::FetchKeyPackageParams,
|
|
||||||
mut results: authentication_service::FetchKeyPackageResults,
|
|
||||||
) -> Promise<(), capnp::Error> {
|
|
||||||
let identity_key = match params.get() {
|
|
||||||
Ok(p) => match p.get_identity_key() {
|
|
||||||
Ok(v) => v.to_vec(),
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
},
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
};
|
|
||||||
|
|
||||||
if identity_key.len() != 32 {
|
|
||||||
return Promise::err(capnp::Error::failed(format!(
|
|
||||||
"identityKey must be exactly 32 bytes, got {}",
|
|
||||||
identity_key.len()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Atomically pop one package from the front of the queue.
|
|
||||||
let package = self
|
|
||||||
.store
|
|
||||||
.get_mut(&identity_key)
|
|
||||||
.and_then(|mut q| q.pop_front());
|
|
||||||
|
|
||||||
match package {
|
|
||||||
Some(pkg) => {
|
|
||||||
tracing::debug!(
|
|
||||||
identity = %fmt_hex(&identity_key[..4]),
|
|
||||||
"KeyPackage fetched"
|
|
||||||
);
|
|
||||||
results.get().set_package(&pkg);
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
tracing::debug!(
|
|
||||||
identity = %fmt_hex(&identity_key[..4]),
|
|
||||||
"no KeyPackage available for identity"
|
|
||||||
);
|
|
||||||
// Return empty Data — schema specifies this as the "no package" sentinel.
|
|
||||||
results.get().set_package(&[]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Promise::ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Delivery Service implementation ───────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Cap'n Proto RPC server implementation for `DeliveryService`.
///
/// Provides a simple store-and-forward relay for MLS messages:
/// * `enqueue` appends an opaque payload to the recipient's FIFO queue.
/// * `fetch` atomically drains and returns the entire queue.
///
/// One instance is constructed per accepted connection (see
/// `handle_ds_connection`); the queues themselves live in the shared
/// `DeliveryStore`.
struct DeliveryServiceImpl {
    // Shared per-recipient FIFO queues.
    store: DeliveryStore,
}
|
|
||||||
|
|
||||||
impl delivery_service::Server for DeliveryServiceImpl {
|
|
||||||
/// Append `payload` to the queue for `recipient_key`.
|
|
||||||
fn enqueue(
|
|
||||||
&mut self,
|
|
||||||
params: delivery_service::EnqueueParams,
|
|
||||||
_results: delivery_service::EnqueueResults,
|
|
||||||
) -> Promise<(), capnp::Error> {
|
|
||||||
let p = match params.get() {
|
|
||||||
Ok(p) => p,
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
};
|
|
||||||
let recipient_key = match p.get_recipient_key() {
|
|
||||||
Ok(v) => v.to_vec(),
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
};
|
|
||||||
let payload = match p.get_payload() {
|
|
||||||
Ok(v) => v.to_vec(),
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
};
|
|
||||||
|
|
||||||
if recipient_key.len() != 32 {
|
|
||||||
return Promise::err(capnp::Error::failed(format!(
|
|
||||||
"recipientKey must be exactly 32 bytes, got {}",
|
|
||||||
recipient_key.len()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
if payload.is_empty() {
|
|
||||||
return Promise::err(capnp::Error::failed(
|
|
||||||
"payload must not be empty".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
self.store
|
|
||||||
.entry(recipient_key.clone())
|
|
||||||
.or_default()
|
|
||||||
.push_back(payload);
|
|
||||||
|
|
||||||
tracing::debug!(
|
|
||||||
recipient = %fmt_hex(&recipient_key[..4]),
|
|
||||||
"message enqueued"
|
|
||||||
);
|
|
||||||
|
|
||||||
Promise::ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Atomically drain and return all queued payloads for `recipient_key`.
|
|
||||||
fn fetch(
|
|
||||||
&mut self,
|
|
||||||
params: delivery_service::FetchParams,
|
|
||||||
mut results: delivery_service::FetchResults,
|
|
||||||
) -> Promise<(), capnp::Error> {
|
|
||||||
let recipient_key = match params.get() {
|
|
||||||
Ok(p) => match p.get_recipient_key() {
|
|
||||||
Ok(v) => v.to_vec(),
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
},
|
|
||||||
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
|
||||||
};
|
|
||||||
|
|
||||||
if recipient_key.len() != 32 {
|
|
||||||
return Promise::err(capnp::Error::failed(format!(
|
|
||||||
"recipientKey must be exactly 32 bytes, got {}",
|
|
||||||
recipient_key.len()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Atomically drain the entire queue.
|
|
||||||
let messages: Vec<Vec<u8>> = self
|
|
||||||
.store
|
|
||||||
.get_mut(&recipient_key)
|
|
||||||
.map(|mut q| q.drain(..).collect())
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
tracing::debug!(
|
|
||||||
recipient = %fmt_hex(&recipient_key[..4]),
|
|
||||||
count = messages.len(),
|
|
||||||
"messages fetched"
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut list = results.get().init_payloads(messages.len() as u32);
|
|
||||||
for (i, msg) in messages.iter().enumerate() {
|
|
||||||
list.set(i as u32, msg);
|
|
||||||
}
|
|
||||||
|
|
||||||
Promise::ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Server entry point: configure logging, generate an ephemeral Noise
/// keypair, bind the AS and DS listeners, then accept connections forever.
///
/// Only returns `Err` if a bind or accept fails; the accept loop itself
/// never terminates normally.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Honour RUST_LOG when set; otherwise default the filter to `info`.
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();

    let args = Args::parse();

    // Generate a fresh static Noise keypair for this server instance.
    // M6 replaces this with persistent key loading from SQLite.
    let keypair = Arc::new(NoiseKeypair::generate());

    {
        let pub_bytes = keypair.public_bytes();
        tracing::info!(
            listen = %args.listen,
            ds_listen = %args.ds_listen,
            public_key = %fmt_hex(&pub_bytes[..4]),
            "noiseml-server starting (M3) — keypair is ephemeral"
        );
    }

    // Shared stores — all connections share the same in-memory maps.
    let kp_store: KeyPackageStore = Arc::new(DashMap::new());
    let ds_store: DeliveryStore = Arc::new(DashMap::new());

    let as_listener = TcpListener::bind(&args.listen)
        .await
        .with_context(|| format!("failed to bind AS to {}", args.listen))?;

    let ds_listener = TcpListener::bind(&args.ds_listen)
        .await
        .with_context(|| format!("failed to bind DS to {}", args.ds_listen))?;

    tracing::info!(
        as_addr = %args.listen,
        ds_addr = %args.ds_listen,
        "accepting connections"
    );

    // capnp-rpc is !Send (Rc internals), so all RPC tasks must stay on a
    // LocalSet. Both accept loops share one LocalSet.
    let local = tokio::task::LocalSet::new();
    local
        .run_until(async move {
            loop {
                // Whichever listener is ready first wins this iteration; the
                // other is polled again on the next pass. An accept error
                // propagates out via `?` and ends the whole server.
                tokio::select! {
                    result = as_listener.accept() => {
                        let (stream, peer_addr) = result.context("AS accept failed")?;
                        // Per-connection clones — the handler task owns them.
                        let keypair = Arc::clone(&keypair);
                        let store = Arc::clone(&kp_store);
                        tokio::task::spawn_local(
                            async move {
                                match handle_as_connection(stream, keypair, store).await {
                                    Ok(()) => tracing::debug!("AS connection closed"),
                                    Err(e) => tracing::warn!(error = %e, "AS connection error"),
                                }
                            }
                            .instrument(tracing::info_span!("as_conn", peer = %peer_addr)),
                        );
                    }
                    result = ds_listener.accept() => {
                        let (stream, peer_addr) = result.context("DS accept failed")?;
                        let keypair = Arc::clone(&keypair);
                        let store = Arc::clone(&ds_store);
                        tokio::task::spawn_local(
                            async move {
                                match handle_ds_connection(stream, keypair, store).await {
                                    Ok(()) => tracing::debug!("DS connection closed"),
                                    Err(e) => tracing::warn!(error = %e, "DS connection error"),
                                }
                            }
                            .instrument(tracing::info_span!("ds_conn", peer = %peer_addr)),
                        );
                    }
                }
            }
            // Unreachable: the loop only exits through `?` above. This Ok
            // pins the async block's return type for inference.
            #[allow(unreachable_code)]
            Ok::<(), anyhow::Error>(())
        })
        .await
}
|
|
||||||
|
|
||||||
// ── Per-connection handlers ───────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Handle one Authentication Service connection.
|
|
||||||
async fn handle_as_connection(
|
|
||||||
stream: TcpStream,
|
|
||||||
keypair: Arc<NoiseKeypair>,
|
|
||||||
store: KeyPackageStore,
|
|
||||||
) -> Result<(), anyhow::Error> {
|
|
||||||
let transport = noise_handshake(stream, &keypair, "AS").await?;
|
|
||||||
let (reader, writer) = transport.into_capnp_io();
|
|
||||||
|
|
||||||
let network = twoparty::VatNetwork::new(
|
|
||||||
reader.compat(),
|
|
||||||
writer.compat_write(),
|
|
||||||
Side::Server,
|
|
||||||
Default::default(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let service: authentication_service::Client =
|
|
||||||
capnp_rpc::new_client(AuthServiceImpl { store });
|
|
||||||
|
|
||||||
RpcSystem::new(Box::new(network), Some(service.client))
|
|
||||||
.await
|
|
||||||
.map_err(|e| anyhow::anyhow!("AS RPC error: {e}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Handle one Delivery Service connection.
|
|
||||||
async fn handle_ds_connection(
|
|
||||||
stream: TcpStream,
|
|
||||||
keypair: Arc<NoiseKeypair>,
|
|
||||||
store: DeliveryStore,
|
|
||||||
) -> Result<(), anyhow::Error> {
|
|
||||||
let transport = noise_handshake(stream, &keypair, "DS").await?;
|
|
||||||
let (reader, writer) = transport.into_capnp_io();
|
|
||||||
|
|
||||||
let network = twoparty::VatNetwork::new(
|
|
||||||
reader.compat(),
|
|
||||||
writer.compat_write(),
|
|
||||||
Side::Server,
|
|
||||||
Default::default(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let service: delivery_service::Client =
|
|
||||||
capnp_rpc::new_client(DeliveryServiceImpl { store });
|
|
||||||
|
|
||||||
RpcSystem::new(Box::new(network), Some(service.client))
|
|
||||||
.await
|
|
||||||
.map_err(|e| anyhow::anyhow!("DS RPC error: {e}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Perform the Noise_XX handshake and log the remote key.
|
|
||||||
async fn noise_handshake(
|
|
||||||
stream: TcpStream,
|
|
||||||
keypair: &NoiseKeypair,
|
|
||||||
label: &str,
|
|
||||||
) -> anyhow::Result<noiseml_core::NoiseTransport> {
|
|
||||||
let transport = handshake_responder(stream, keypair)
|
|
||||||
.await
|
|
||||||
.map_err(|e| anyhow::anyhow!("{label} Noise handshake failed: {e}"))?;
|
|
||||||
|
|
||||||
let remote = transport
|
|
||||||
.remote_static_public_key()
|
|
||||||
.map(|k| fmt_hex(&k[..4]))
|
|
||||||
.unwrap_or_else(|| "unknown".into());
|
|
||||||
tracing::info!(remote_key = %remote, "{label} Noise_XX handshake complete");
|
|
||||||
|
|
||||||
Ok(transport)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Helpers ───────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Render `bytes` as lowercase hex followed by a trailing `…` marker.
///
/// Callers pass a short prefix slice (typically 4 bytes), so the ellipsis
/// signals that the value shown is truncated.
fn fmt_hex(bytes: &[u8]) -> String {
    use std::fmt::Write;

    let mut out = String::with_capacity(bytes.len() * 2 + 3);
    for byte in bytes {
        // Infallible for String, so the result can be ignored.
        let _ = write!(out, "{byte:02x}");
    }
    out.push('…');
    out
}
|
|
||||||
@@ -1,17 +1,18 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "noiseml-client"
|
name = "quicnprotochat-client"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "CLI client for noiseml."
|
description = "CLI client for quicnprotochat."
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "noiseml"
|
name = "quicnprotochat"
|
||||||
path = "src/main.rs"
|
path = "src/main.rs"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
noiseml-core = { path = "../noiseml-core" }
|
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||||
noiseml-proto = { path = "../noiseml-proto" }
|
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||||
|
openmls_rust_crypto = { workspace = true }
|
||||||
|
|
||||||
# Serialisation + RPC
|
# Serialisation + RPC
|
||||||
capnp = { workspace = true }
|
capnp = { workspace = true }
|
||||||
@@ -21,6 +22,9 @@ capnp-rpc = { workspace = true }
|
|||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
tokio-util = { workspace = true }
|
tokio-util = { workspace = true }
|
||||||
futures = { workspace = true }
|
futures = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
bincode = { workspace = true }
|
||||||
|
|
||||||
# Error handling
|
# Error handling
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
@@ -28,6 +32,9 @@ thiserror = { workspace = true }
|
|||||||
|
|
||||||
# Crypto — for fingerprint verification in fetch-key subcommand
|
# Crypto — for fingerprint verification in fetch-key subcommand
|
||||||
sha2 = { workspace = true }
|
sha2 = { workspace = true }
|
||||||
|
quinn = { workspace = true }
|
||||||
|
quinn-proto = { workspace = true }
|
||||||
|
rustls = { workspace = true }
|
||||||
|
|
||||||
# Logging
|
# Logging
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
@@ -37,5 +44,5 @@ tracing-subscriber = { workspace = true }
|
|||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
# Integration tests use noiseml-core, noiseml-proto, and capnp-rpc directly.
|
# Integration tests use quicnprotochat-core, quicnprotochat-proto, and capnp-rpc directly.
|
||||||
dashmap = { workspace = true }
|
dashmap = { workspace = true }
|
||||||
1019
crates/quicnprotochat-client/src/main.rs
Normal file
1019
crates/quicnprotochat-client/src/main.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -6,13 +6,12 @@
|
|||||||
use std::{collections::VecDeque, sync::Arc};
|
use std::{collections::VecDeque, sync::Arc};
|
||||||
|
|
||||||
use capnp::capability::Promise;
|
use capnp::capability::Promise;
|
||||||
use capnp_rpc::{RpcSystem, rpc_twoparty_capnp::Side, twoparty};
|
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||||
use dashmap::DashMap;
|
use dashmap::DashMap;
|
||||||
use noiseml_core::{
|
use quicnprotochat_core::{
|
||||||
IdentityKeypair, NoiseKeypair, generate_key_package, handshake_initiator,
|
generate_key_package, handshake_initiator, handshake_responder, IdentityKeypair, NoiseKeypair,
|
||||||
handshake_responder,
|
|
||||||
};
|
};
|
||||||
use noiseml_proto::auth_capnp::authentication_service;
|
use quicnprotochat_proto::auth_capnp::authentication_service;
|
||||||
use sha2::{Digest, Sha256};
|
use sha2::{Digest, Sha256};
|
||||||
use tokio::net::{TcpListener, TcpStream};
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||||
@@ -111,8 +110,7 @@ async fn serve_one(stream: TcpStream, keypair: Arc<NoiseKeypair>, store: Store)
|
|||||||
Side::Server,
|
Side::Server,
|
||||||
Default::default(),
|
Default::default(),
|
||||||
);
|
);
|
||||||
let svc: authentication_service::Client =
|
let svc: authentication_service::Client = capnp_rpc::new_client(TestAuthService { store });
|
||||||
capnp_rpc::new_client(TestAuthService { store });
|
|
||||||
let rpc = RpcSystem::new(Box::new(network), Some(svc.client));
|
let rpc = RpcSystem::new(Box::new(network), Some(svc.client));
|
||||||
tokio::task::spawn_local(rpc).await.ok();
|
tokio::task::spawn_local(rpc).await.ok();
|
||||||
}
|
}
|
||||||
@@ -156,7 +154,8 @@ async fn upload_then_fetch_fingerprints_match() {
|
|||||||
|
|
||||||
let alice = connect_client(addr).await;
|
let alice = connect_client(addr).await;
|
||||||
let mut req = alice.upload_key_package_request();
|
let mut req = alice.upload_key_package_request();
|
||||||
req.get().set_identity_key(&alice_identity.public_key_bytes());
|
req.get()
|
||||||
|
.set_identity_key(&alice_identity.public_key_bytes());
|
||||||
req.get().set_package(&tls_bytes);
|
req.get().set_package(&tls_bytes);
|
||||||
let resp = req.send().promise.await.unwrap();
|
let resp = req.send().promise.await.unwrap();
|
||||||
let server_fp = resp.get().unwrap().get_fingerprint().unwrap().to_vec();
|
let server_fp = resp.get().unwrap().get_fingerprint().unwrap().to_vec();
|
||||||
@@ -166,15 +165,22 @@ async fn upload_then_fetch_fingerprints_match() {
|
|||||||
// Bob: fetch Alice's package by her identity key.
|
// Bob: fetch Alice's package by her identity key.
|
||||||
let bob = connect_client(addr).await;
|
let bob = connect_client(addr).await;
|
||||||
let mut req2 = bob.fetch_key_package_request();
|
let mut req2 = bob.fetch_key_package_request();
|
||||||
req2.get().set_identity_key(&alice_identity.public_key_bytes());
|
req2.get()
|
||||||
|
.set_identity_key(&alice_identity.public_key_bytes());
|
||||||
let resp2 = req2.send().promise.await.unwrap();
|
let resp2 = req2.send().promise.await.unwrap();
|
||||||
let fetched = resp2.get().unwrap().get_package().unwrap().to_vec();
|
let fetched = resp2.get().unwrap().get_package().unwrap().to_vec();
|
||||||
|
|
||||||
assert!(!fetched.is_empty(), "fetched package must not be empty");
|
assert!(!fetched.is_empty(), "fetched package must not be empty");
|
||||||
assert_eq!(fetched, tls_bytes, "fetched bytes must match uploaded bytes");
|
assert_eq!(
|
||||||
|
fetched, tls_bytes,
|
||||||
|
"fetched bytes must match uploaded bytes"
|
||||||
|
);
|
||||||
|
|
||||||
let fetched_fp: Vec<u8> = Sha256::digest(&fetched).to_vec();
|
let fetched_fp: Vec<u8> = Sha256::digest(&fetched).to_vec();
|
||||||
assert_eq!(fetched_fp, local_fp, "fetched fingerprint must match uploaded");
|
assert_eq!(
|
||||||
|
fetched_fp, local_fp,
|
||||||
|
"fetched fingerprint must match uploaded"
|
||||||
|
);
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
@@ -234,7 +240,11 @@ async fn packages_consumed_in_fifo_order() {
|
|||||||
.get_package()
|
.get_package()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.to_vec();
|
.to_vec();
|
||||||
assert_eq!(pkg1, vec![1u8, 2, 3], "first fetch must return first package");
|
assert_eq!(
|
||||||
|
pkg1,
|
||||||
|
vec![1u8, 2, 3],
|
||||||
|
"first fetch must return first package"
|
||||||
|
);
|
||||||
|
|
||||||
let client2 = connect_client(addr).await;
|
let client2 = connect_client(addr).await;
|
||||||
let mut req2 = client2.fetch_key_package_request();
|
let mut req2 = client2.fetch_key_package_request();
|
||||||
@@ -249,7 +259,11 @@ async fn packages_consumed_in_fifo_order() {
|
|||||||
.get_package()
|
.get_package()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.to_vec();
|
.to_vec();
|
||||||
assert_eq!(pkg2, vec![4u8, 5, 6], "second fetch must return second package");
|
assert_eq!(
|
||||||
|
pkg2,
|
||||||
|
vec![4u8, 5, 6],
|
||||||
|
"second fetch must return second package"
|
||||||
|
);
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
433
crates/quicnprotochat-client/tests/mls_group.rs
Normal file
433
crates/quicnprotochat-client/tests/mls_group.rs
Normal file
@@ -0,0 +1,433 @@
|
|||||||
|
//! Integration test: full MLS group flow via Authentication Service + Delivery Service.
|
||||||
|
//!
|
||||||
|
//! Steps:
|
||||||
|
//! - Start in-process AS and DS (Noise_XX + capnp-rpc) on a LocalSet.
|
||||||
|
//! - Alice and Bob generate KeyPackages and upload to AS.
|
||||||
|
//! - Alice fetches Bob's KeyPackage, creates a group, and invites Bob.
|
||||||
|
//! - Welcome + application messages traverse the Delivery Service.
|
||||||
|
//! - Both sides decrypt and confirm plaintext payloads.
|
||||||
|
|
||||||
|
use std::{collections::VecDeque, sync::Arc, time::Duration};
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use capnp::capability::Promise;
|
||||||
|
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use quicnprotochat_core::{
|
||||||
|
handshake_initiator, handshake_responder, GroupMember, IdentityKeypair, NoiseKeypair,
|
||||||
|
};
|
||||||
|
use quicnprotochat_proto::{auth_capnp::authentication_service, delivery_capnp::delivery_service};
|
||||||
|
use sha2::{Digest, Sha256};
|
||||||
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||||
|
|
||||||
|
type KeyPackageStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
|
||||||
|
type DeliveryStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
|
||||||
|
|
||||||
|
/// Full Alice↔Bob MLS round-trip against live AS + DS.
///
/// Everything runs on one `LocalSet` because the capnp-rpc stack is !Send.
/// The in-process servers are told to accept exactly 2 connections each
/// (one per participant).
#[tokio::test]
async fn mls_group_end_to_end_round_trip() -> anyhow::Result<()> {
    let local = tokio::task::LocalSet::new();

    local
        .run_until(async move {
            // One server-side Noise identity shared by AS and DS.
            let server_keypair = Arc::new(NoiseKeypair::generate());
            let kp_store: KeyPackageStore = Arc::new(DashMap::new());
            let ds_store: DeliveryStore = Arc::new(DashMap::new());

            let as_addr =
                spawn_as_server(2, Arc::clone(&server_keypair), Arc::clone(&kp_store)).await;
            let ds_addr =
                spawn_ds_server(2, Arc::clone(&server_keypair), Arc::clone(&ds_store)).await;

            // Give the accept loops a moment to start polling.
            tokio::time::sleep(Duration::from_millis(10)).await;

            // Two MLS participants with fresh Ed25519 identities.
            let alice_id = Arc::new(IdentityKeypair::generate());
            let bob_id = Arc::new(IdentityKeypair::generate());

            let mut alice = GroupMember::new(Arc::clone(&alice_id));
            let mut bob = GroupMember::new(Arc::clone(&bob_id));

            let alice_kp = alice.generate_key_package()?;
            let bob_kp = bob.generate_key_package()?;

            // Each participant uses its own throwaway Noise keypair to dial.
            let alice_as = connect_as(as_addr, &NoiseKeypair::generate()).await?;
            let bob_as = connect_as(as_addr, &NoiseKeypair::generate()).await?;

            upload_key_package(&alice_as, &alice_id.public_key_bytes(), &alice_kp).await?;
            upload_key_package(&bob_as, &bob_id.public_key_bytes(), &bob_kp).await?;

            // Alice discovers Bob through the AS.
            let fetched_bob_kp = fetch_key_package(&alice_as, &bob_id.public_key_bytes()).await?;
            anyhow::ensure!(
                !fetched_bob_kp.is_empty(),
                "AS must return Bob's KeyPackage"
            );

            alice.create_group(b"m3-integration")?;
            let (_commit, welcome) = alice.add_member(&fetched_bob_kp)?;

            let alice_ds = connect_ds(ds_addr, &NoiseKeypair::generate()).await?;
            let bob_ds = connect_ds(ds_addr, &NoiseKeypair::generate()).await?;

            // The Welcome travels through the DS like any other payload.
            enqueue(&alice_ds, &bob_id.public_key_bytes(), &welcome).await?;

            let welcome_payloads = fetch_all(&bob_ds, &bob_id.public_key_bytes()).await?;
            let welcome_bytes = welcome_payloads
                .first()
                .cloned()
                .context("welcome must be present")?;
            bob.join_group(&welcome_bytes)?;

            // Alice → Bob application message.
            let ct_ab = alice.send_message(b"hello bob")?;
            enqueue(&alice_ds, &bob_id.public_key_bytes(), &ct_ab).await?;
            let bob_msgs = fetch_all(&bob_ds, &bob_id.public_key_bytes()).await?;
            let ab_plaintext = bob
                .receive_message(bob_msgs.first().context("missing alice→bob payload")?)?
                .context("alice→bob must be application message")?;
            assert_eq!(ab_plaintext, b"hello bob");

            // Bob → Alice application message.
            let ct_ba = bob.send_message(b"hello alice")?;
            enqueue(&bob_ds, &alice_id.public_key_bytes(), &ct_ba).await?;
            let alice_msgs = fetch_all(&alice_ds, &alice_id.public_key_bytes()).await?;
            let ba_plaintext = alice
                .receive_message(alice_msgs.first().context("missing bob→alice payload")?)?
                .context("bob→alice must be application message")?;
            assert_eq!(ba_plaintext, b"hello alice");

            Ok(())
        })
        .await
}
|
||||||
|
|
||||||
|
// ── Test helpers ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
async fn spawn_as_server(
|
||||||
|
n_connections: usize,
|
||||||
|
keypair: Arc<NoiseKeypair>,
|
||||||
|
store: KeyPackageStore,
|
||||||
|
) -> std::net::SocketAddr {
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
tokio::task::spawn_local(async move {
|
||||||
|
for _ in 0..n_connections {
|
||||||
|
let (stream, _) = listener.accept().await.unwrap();
|
||||||
|
let kp = Arc::clone(&keypair);
|
||||||
|
let st = Arc::clone(&store);
|
||||||
|
tokio::task::spawn_local(async move {
|
||||||
|
serve_as_connection(stream, kp, st).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
addr
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn serve_as_connection(
|
||||||
|
stream: TcpStream,
|
||||||
|
keypair: Arc<NoiseKeypair>,
|
||||||
|
store: KeyPackageStore,
|
||||||
|
) {
|
||||||
|
let transport = handshake_responder(stream, &keypair).await.unwrap();
|
||||||
|
let (reader, writer) = transport.into_capnp_io();
|
||||||
|
let network = twoparty::VatNetwork::new(
|
||||||
|
reader.compat(),
|
||||||
|
writer.compat_write(),
|
||||||
|
Side::Server,
|
||||||
|
Default::default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let service: authentication_service::Client = capnp_rpc::new_client(AuthService { store });
|
||||||
|
|
||||||
|
RpcSystem::new(Box::new(network), Some(service.client))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn spawn_ds_server(
|
||||||
|
n_connections: usize,
|
||||||
|
keypair: Arc<NoiseKeypair>,
|
||||||
|
store: DeliveryStore,
|
||||||
|
) -> std::net::SocketAddr {
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
tokio::task::spawn_local(async move {
|
||||||
|
for _ in 0..n_connections {
|
||||||
|
let (stream, _) = listener.accept().await.unwrap();
|
||||||
|
let kp = Arc::clone(&keypair);
|
||||||
|
let st = Arc::clone(&store);
|
||||||
|
tokio::task::spawn_local(async move {
|
||||||
|
serve_ds_connection(stream, kp, st).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
addr
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn serve_ds_connection(stream: TcpStream, keypair: Arc<NoiseKeypair>, store: DeliveryStore) {
|
||||||
|
let transport = handshake_responder(stream, &keypair).await.unwrap();
|
||||||
|
let (reader, writer) = transport.into_capnp_io();
|
||||||
|
let network = twoparty::VatNetwork::new(
|
||||||
|
reader.compat(),
|
||||||
|
writer.compat_write(),
|
||||||
|
Side::Server,
|
||||||
|
Default::default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let service: delivery_service::Client = capnp_rpc::new_client(DeliveryService { store });
|
||||||
|
|
||||||
|
RpcSystem::new(Box::new(network), Some(service.client))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn connect_as(
|
||||||
|
addr: std::net::SocketAddr,
|
||||||
|
noise_keypair: &NoiseKeypair,
|
||||||
|
) -> anyhow::Result<authentication_service::Client> {
|
||||||
|
let stream = TcpStream::connect(addr)
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("could not connect to AS at {addr}"))?;
|
||||||
|
|
||||||
|
let transport = handshake_initiator(stream, noise_keypair)
|
||||||
|
.await
|
||||||
|
.context("Noise handshake to AS failed")?;
|
||||||
|
let (reader, writer) = transport.into_capnp_io();
|
||||||
|
|
||||||
|
let network = twoparty::VatNetwork::new(
|
||||||
|
reader.compat(),
|
||||||
|
writer.compat_write(),
|
||||||
|
Side::Client,
|
||||||
|
Default::default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut rpc = RpcSystem::new(Box::new(network), None);
|
||||||
|
let client: authentication_service::Client = rpc.bootstrap(Side::Server);
|
||||||
|
tokio::task::spawn_local(rpc);
|
||||||
|
Ok(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn connect_ds(
|
||||||
|
addr: std::net::SocketAddr,
|
||||||
|
noise_keypair: &NoiseKeypair,
|
||||||
|
) -> anyhow::Result<delivery_service::Client> {
|
||||||
|
let stream = TcpStream::connect(addr)
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("could not connect to DS at {addr}"))?;
|
||||||
|
|
||||||
|
let transport = handshake_initiator(stream, noise_keypair)
|
||||||
|
.await
|
||||||
|
.context("Noise handshake to DS failed")?;
|
||||||
|
let (reader, writer) = transport.into_capnp_io();
|
||||||
|
|
||||||
|
let network = twoparty::VatNetwork::new(
|
||||||
|
reader.compat(),
|
||||||
|
writer.compat_write(),
|
||||||
|
Side::Client,
|
||||||
|
Default::default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut rpc = RpcSystem::new(Box::new(network), None);
|
||||||
|
let client: delivery_service::Client = rpc.bootstrap(Side::Server);
|
||||||
|
tokio::task::spawn_local(rpc);
|
||||||
|
Ok(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn upload_key_package(
|
||||||
|
as_client: &authentication_service::Client,
|
||||||
|
identity_key: &[u8],
|
||||||
|
package: &[u8],
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let mut req = as_client.upload_key_package_request();
|
||||||
|
req.get().set_identity_key(identity_key);
|
||||||
|
req.get().set_package(package);
|
||||||
|
|
||||||
|
let resp = req
|
||||||
|
.send()
|
||||||
|
.promise
|
||||||
|
.await
|
||||||
|
.context("upload_key_package RPC failed")?;
|
||||||
|
|
||||||
|
let server_fp = resp
|
||||||
|
.get()
|
||||||
|
.context("upload_key_package: bad response")?
|
||||||
|
.get_fingerprint()
|
||||||
|
.context("upload_key_package: missing fingerprint")?
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
let local_fp: Vec<u8> = Sha256::digest(package).to_vec();
|
||||||
|
anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn fetch_key_package(
|
||||||
|
as_client: &authentication_service::Client,
|
||||||
|
identity_key: &[u8],
|
||||||
|
) -> anyhow::Result<Vec<u8>> {
|
||||||
|
let mut req = as_client.fetch_key_package_request();
|
||||||
|
req.get().set_identity_key(identity_key);
|
||||||
|
|
||||||
|
let resp = req
|
||||||
|
.send()
|
||||||
|
.promise
|
||||||
|
.await
|
||||||
|
.context("fetch_key_package RPC failed")?;
|
||||||
|
|
||||||
|
let pkg = resp
|
||||||
|
.get()
|
||||||
|
.context("fetch_key_package: bad response")?
|
||||||
|
.get_package()
|
||||||
|
.context("fetch_key_package: missing package")?
|
||||||
|
.to_vec();
|
||||||
|
|
||||||
|
Ok(pkg)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn enqueue(
|
||||||
|
ds_client: &delivery_service::Client,
|
||||||
|
recipient_key: &[u8],
|
||||||
|
payload: &[u8],
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let mut req = ds_client.enqueue_request();
|
||||||
|
req.get().set_recipient_key(recipient_key);
|
||||||
|
req.get().set_payload(payload);
|
||||||
|
req.send().promise.await.context("enqueue RPC failed")?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn fetch_all(
|
||||||
|
ds_client: &delivery_service::Client,
|
||||||
|
recipient_key: &[u8],
|
||||||
|
) -> anyhow::Result<Vec<Vec<u8>>> {
|
||||||
|
let mut req = ds_client.fetch_request();
|
||||||
|
req.get().set_recipient_key(recipient_key);
|
||||||
|
|
||||||
|
let resp = req.send().promise.await.context("fetch RPC failed")?;
|
||||||
|
|
||||||
|
let list = resp
|
||||||
|
.get()
|
||||||
|
.context("fetch: bad response")?
|
||||||
|
.get_payloads()
|
||||||
|
.context("fetch: missing payloads")?;
|
||||||
|
|
||||||
|
let mut payloads = Vec::with_capacity(list.len() as usize);
|
||||||
|
for i in 0..list.len() {
|
||||||
|
payloads.push(list.get(i).context("fetch: payload read failed")?.to_vec());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(payloads)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Inline service implementations ─────────────────────────────────────────-
|
||||||
|
|
||||||
|
struct AuthService {
|
||||||
|
store: KeyPackageStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl authentication_service::Server for AuthService {
|
||||||
|
fn upload_key_package(
|
||||||
|
&mut self,
|
||||||
|
params: authentication_service::UploadKeyPackageParams,
|
||||||
|
mut results: authentication_service::UploadKeyPackageResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let params = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let ik = match params.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
let pkg = match params.get_package() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let fp: Vec<u8> = Sha256::digest(&pkg).to_vec();
|
||||||
|
self.store.entry(ik).or_default().push_back(pkg);
|
||||||
|
results.get().set_fingerprint(&fp);
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fetch_key_package(
|
||||||
|
&mut self,
|
||||||
|
params: authentication_service::FetchKeyPackageParams,
|
||||||
|
mut results: authentication_service::FetchKeyPackageResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let ik = match params.get() {
|
||||||
|
Ok(p) => match p.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
},
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let pkg = self
|
||||||
|
.store
|
||||||
|
.get_mut(&ik)
|
||||||
|
.and_then(|mut q| q.pop_front())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
results.get().set_package(&pkg);
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct DeliveryService {
|
||||||
|
store: DeliveryStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl delivery_service::Server for DeliveryService {
|
||||||
|
fn enqueue(
|
||||||
|
&mut self,
|
||||||
|
params: delivery_service::EnqueueParams,
|
||||||
|
_results: delivery_service::EnqueueResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let params = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
let recipient = match params.get_recipient_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
let payload = match params.get_payload() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
self.store.entry(recipient).or_default().push_back(payload);
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fetch(
|
||||||
|
&mut self,
|
||||||
|
params: delivery_service::FetchParams,
|
||||||
|
mut results: delivery_service::FetchResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let recipient = match params.get() {
|
||||||
|
Ok(p) => match p.get_recipient_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
},
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let messages: Vec<Vec<u8>> = self
|
||||||
|
.store
|
||||||
|
.get_mut(&recipient)
|
||||||
|
.map(|mut q| q.drain(..).collect())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||||
|
for (i, msg) in messages.iter().enumerate() {
|
||||||
|
list.set(i as u32, msg);
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -13,8 +13,8 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use tokio::net::TcpListener;
|
use tokio::net::TcpListener;
|
||||||
|
|
||||||
use noiseml_core::{NoiseKeypair, handshake_initiator, handshake_responder};
|
use quicnprotochat_core::{handshake_initiator, handshake_responder, NoiseKeypair};
|
||||||
use noiseml_proto::{MsgType, ParsedEnvelope};
|
use quicnprotochat_proto::{MsgType, ParsedEnvelope};
|
||||||
|
|
||||||
/// Completes a full Noise_XX handshake and Ping/Pong exchange, then verifies
|
/// Completes a full Noise_XX handshake and Ping/Pong exchange, then verifies
|
||||||
/// mutual authentication by comparing observed vs. actual static public keys.
|
/// mutual authentication by comparing observed vs. actual static public keys.
|
||||||
@@ -96,10 +96,7 @@ async fn noise_xx_ping_pong_round_trip() {
|
|||||||
.await
|
.await
|
||||||
.expect("client send_envelope failed");
|
.expect("client send_envelope failed");
|
||||||
|
|
||||||
let pong = tokio::time::timeout(
|
let pong = tokio::time::timeout(std::time::Duration::from_secs(5), transport.recv_envelope())
|
||||||
std::time::Duration::from_secs(5),
|
|
||||||
transport.recv_envelope(),
|
|
||||||
)
|
|
||||||
.await
|
.await
|
||||||
.expect("timed out waiting for Pong — server task likely panicked")
|
.expect("timed out waiting for Pong — server task likely panicked")
|
||||||
.expect("client recv_envelope failed");
|
.expect("client recv_envelope failed");
|
||||||
@@ -135,9 +132,7 @@ async fn noise_xx_ping_pong_round_trip() {
|
|||||||
async fn two_sequential_connections_both_authenticate() {
|
async fn two_sequential_connections_both_authenticate() {
|
||||||
let server_keypair = Arc::new(NoiseKeypair::generate());
|
let server_keypair = Arc::new(NoiseKeypair::generate());
|
||||||
|
|
||||||
let listener = TcpListener::bind("127.0.0.1:0")
|
let listener = TcpListener::bind("127.0.0.1:0").await.expect("bind failed");
|
||||||
.await
|
|
||||||
.expect("bind failed");
|
|
||||||
let server_addr = listener.local_addr().expect("local_addr failed");
|
let server_addr = listener.local_addr().expect("local_addr failed");
|
||||||
|
|
||||||
let server_kp = Arc::clone(&server_keypair);
|
let server_kp = Arc::clone(&server_keypair);
|
||||||
@@ -186,10 +181,7 @@ async fn two_sequential_connections_both_authenticate() {
|
|||||||
.await
|
.await
|
||||||
.expect("client send failed");
|
.expect("client send failed");
|
||||||
|
|
||||||
let pong = tokio::time::timeout(
|
let pong = tokio::time::timeout(std::time::Duration::from_secs(5), t.recv_envelope())
|
||||||
std::time::Duration::from_secs(5),
|
|
||||||
t.recv_envelope(),
|
|
||||||
)
|
|
||||||
.await
|
.await
|
||||||
.expect("timeout")
|
.expect("timeout")
|
||||||
.expect("recv failed");
|
.expect("recv failed");
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "noiseml-core"
|
name = "quicnprotochat-core"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "Crypto primitives, Noise_XX transport, MLS state machine, and Cap'n Proto frame codec for noiseml."
|
description = "Crypto primitives, TLS/QUIC transport, MLS state machine, and Cap'n Proto frame codec for quicnprotochat."
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
@@ -20,10 +20,13 @@ openmls = { workspace = true }
|
|||||||
openmls_rust_crypto = { workspace = true }
|
openmls_rust_crypto = { workspace = true }
|
||||||
openmls_traits = { workspace = true }
|
openmls_traits = { workspace = true }
|
||||||
tls_codec = { workspace = true }
|
tls_codec = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
bincode = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
|
||||||
# Serialisation
|
# Serialisation
|
||||||
capnp = { workspace = true }
|
capnp = { workspace = true }
|
||||||
noiseml-proto = { path = "../noiseml-proto" }
|
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||||
|
|
||||||
# Async runtime + codec
|
# Async runtime + codec
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
@@ -92,8 +92,7 @@ impl Decoder for LengthPrefixedCodec {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Peek at the length without advancing — avoid mutating state on None.
|
// Peek at the length without advancing — avoid mutating state on None.
|
||||||
let frame_len =
|
let frame_len = u32::from_le_bytes([src[0], src[1], src[2], src[3]]) as usize;
|
||||||
u32::from_le_bytes([src[0], src[1], src[2], src[3]]) as usize;
|
|
||||||
|
|
||||||
if frame_len > NOISE_MAX_MSG {
|
if frame_len > NOISE_MAX_MSG {
|
||||||
return Err(CodecError::FrameTooLarge {
|
return Err(CodecError::FrameTooLarge {
|
||||||
@@ -139,7 +138,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn round_trip_small_payload() {
|
fn round_trip_small_payload() {
|
||||||
let payload = b"hello noiseml";
|
let payload = b"hello quicnprotochat";
|
||||||
let result = encode_then_decode(payload);
|
let result = encode_then_decode(payload);
|
||||||
assert_eq!(&result[..], payload);
|
assert_eq!(&result[..], payload);
|
||||||
}
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//! Error types for `noiseml-core`.
|
//! Error types for `quicnprotochat-core`.
|
||||||
//!
|
//!
|
||||||
//! Two separate error types are used to preserve type-level separation of concerns:
|
//! Two separate error types are used to preserve type-level separation of concerns:
|
||||||
//!
|
//!
|
||||||
@@ -3,7 +3,7 @@
|
|||||||
//! # Design
|
//! # Design
|
||||||
//!
|
//!
|
||||||
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus the per-client
|
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus the per-client
|
||||||
//! [`OpenMlsRustCrypto`] backend. The backend is **persistent** — it holds the
|
//! [`StoreCrypto`] backend. The backend is **persistent** — it holds the
|
||||||
//! in-memory key store that maps init-key references to HPKE private keys.
|
//! in-memory key store that maps init-key references to HPKE private keys.
|
||||||
//! openmls's `new_from_welcome` reads those private keys from the key store to
|
//! openmls's `new_from_welcome` reads those private keys from the key store to
|
||||||
//! decrypt the Welcome, so the same backend instance must be used from
|
//! decrypt the Welcome, so the same backend instance must be used from
|
||||||
@@ -28,20 +28,22 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use openmls::prelude::{
|
use openmls::prelude::{
|
||||||
Ciphersuite, CryptoConfig, Credential, CredentialType, CredentialWithKey,
|
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
||||||
GroupId, KeyPackage, KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody,
|
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
|
||||||
MlsMessageOut, ProcessedMessageContent, ProtocolMessage, ProtocolVersion,
|
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
|
||||||
TlsDeserializeTrait, TlsSerializeTrait,
|
TlsSerializeTrait,
|
||||||
};
|
};
|
||||||
use openmls_rust_crypto::OpenMlsRustCrypto;
|
|
||||||
use openmls_traits::OpenMlsCryptoProvider;
|
use openmls_traits::OpenMlsCryptoProvider;
|
||||||
|
|
||||||
use crate::{error::CoreError, identity::IdentityKeypair};
|
use crate::{
|
||||||
|
error::CoreError,
|
||||||
|
identity::IdentityKeypair,
|
||||||
|
keystore::{DiskKeyStore, StoreCrypto},
|
||||||
|
};
|
||||||
|
|
||||||
// ── Constants ─────────────────────────────────────────────────────────────────
|
// ── Constants ─────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
const CIPHERSUITE: Ciphersuite =
|
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||||
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
|
||||||
|
|
||||||
// ── GroupMember ───────────────────────────────────────────────────────────────
|
// ── GroupMember ───────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
@@ -61,7 +63,7 @@ const CIPHERSUITE: Ciphersuite =
|
|||||||
pub struct GroupMember {
|
pub struct GroupMember {
|
||||||
/// Persistent crypto backend. Holds the in-memory key store with HPKE
|
/// Persistent crypto backend. Holds the in-memory key store with HPKE
|
||||||
/// private keys created during `generate_key_package`.
|
/// private keys created during `generate_key_package`.
|
||||||
backend: OpenMlsRustCrypto,
|
backend: StoreCrypto,
|
||||||
/// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
|
/// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
|
||||||
identity: Arc<IdentityKeypair>,
|
identity: Arc<IdentityKeypair>,
|
||||||
/// Active MLS group, if any.
|
/// Active MLS group, if any.
|
||||||
@@ -73,16 +75,23 @@ pub struct GroupMember {
|
|||||||
impl GroupMember {
|
impl GroupMember {
|
||||||
/// Create a new `GroupMember` with a fresh crypto backend.
|
/// Create a new `GroupMember` with a fresh crypto backend.
|
||||||
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
|
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
|
||||||
|
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
|
||||||
|
pub fn new_with_state(
|
||||||
|
identity: Arc<IdentityKeypair>,
|
||||||
|
key_store: DiskKeyStore,
|
||||||
|
group: Option<MlsGroup>,
|
||||||
|
) -> Self {
|
||||||
let config = MlsGroupConfig::builder()
|
let config = MlsGroupConfig::builder()
|
||||||
// Embed the ratchet tree in Welcome messages so joinees do not
|
|
||||||
// need an out-of-band tree delivery mechanism.
|
|
||||||
.use_ratchet_tree_extension(true)
|
.use_ratchet_tree_extension(true)
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
backend: OpenMlsRustCrypto::default(),
|
backend: StoreCrypto::new(key_store),
|
||||||
identity,
|
identity,
|
||||||
group: None,
|
group,
|
||||||
config,
|
config,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -195,11 +204,7 @@ impl GroupMember {
|
|||||||
// Create the Commit + Welcome. The third return value (GroupInfo) is for
|
// Create the Commit + Welcome. The third return value (GroupInfo) is for
|
||||||
// external commits and is not needed here.
|
// external commits and is not needed here.
|
||||||
let (commit_out, welcome_out, _group_info) = group
|
let (commit_out, welcome_out, _group_info) = group
|
||||||
.add_members(
|
.add_members(&self.backend, self.identity.as_ref(), &[key_package])
|
||||||
&self.backend,
|
|
||||||
self.identity.as_ref(),
|
|
||||||
&[key_package],
|
|
||||||
)
|
|
||||||
.map_err(|e| CoreError::Mls(format!("add_members: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("add_members: {e:?}")))?;
|
||||||
|
|
||||||
// Merge the pending Commit into our own state, advancing the epoch.
|
// Merge the pending Commit into our own state, advancing the epoch.
|
||||||
@@ -231,8 +236,7 @@ impl GroupMember {
|
|||||||
/// [`generate_key_package`]: Self::generate_key_package
|
/// [`generate_key_package`]: Self::generate_key_package
|
||||||
pub fn join_group(&mut self, welcome_bytes: &[u8]) -> Result<(), CoreError> {
|
pub fn join_group(&mut self, welcome_bytes: &[u8]) -> Result<(), CoreError> {
|
||||||
// Deserialise MlsMessageIn, then extract the inner Welcome.
|
// Deserialise MlsMessageIn, then extract the inner Welcome.
|
||||||
let msg_in =
|
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
|
||||||
openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
|
|
||||||
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
|
||||||
|
|
||||||
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
|
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
|
||||||
@@ -243,12 +247,7 @@ impl GroupMember {
|
|||||||
|
|
||||||
// ratchet_tree = None because use_ratchet_tree_extension = true embeds
|
// ratchet_tree = None because use_ratchet_tree_extension = true embeds
|
||||||
// the tree inside the Welcome's GroupInfo extension.
|
// the tree inside the Welcome's GroupInfo extension.
|
||||||
let group = MlsGroup::new_from_welcome(
|
let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
|
||||||
&self.backend,
|
|
||||||
&self.config,
|
|
||||||
welcome,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
|
||||||
|
|
||||||
self.group = Some(group);
|
self.group = Some(group);
|
||||||
@@ -298,8 +297,7 @@ impl GroupMember {
|
|||||||
.as_mut()
|
.as_mut()
|
||||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||||
|
|
||||||
let msg_in =
|
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
|
||||||
openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
|
|
||||||
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
||||||
|
|
||||||
// into_protocol_message() is feature-gated; extract() + manual construction is not.
|
// into_protocol_message() is feature-gated; extract() + manual construction is not.
|
||||||
@@ -314,9 +312,7 @@ impl GroupMember {
|
|||||||
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
|
||||||
|
|
||||||
match processed.into_content() {
|
match processed.into_content() {
|
||||||
ProcessedMessageContent::ApplicationMessage(app) => {
|
ProcessedMessageContent::ApplicationMessage(app) => Ok(Some(app.into_bytes())),
|
||||||
Ok(Some(app.into_bytes()))
|
|
||||||
}
|
|
||||||
ProcessedMessageContent::StagedCommitMessage(staged) => {
|
ProcessedMessageContent::StagedCommitMessage(staged) => {
|
||||||
// Merge the Commit into the local state (epoch advances).
|
// Merge the Commit into the local state (epoch advances).
|
||||||
group
|
group
|
||||||
@@ -350,6 +346,21 @@ impl GroupMember {
|
|||||||
&self.identity
|
&self.identity
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Return the private seed of the identity (for persistence).
|
||||||
|
pub fn identity_seed(&self) -> [u8; 32] {
|
||||||
|
self.identity.seed_bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return a reference to the underlying crypto backend.
|
||||||
|
pub fn backend(&self) -> &StoreCrypto {
|
||||||
|
&self.backend
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return a reference to the MLS group, if active.
|
||||||
|
pub fn group_ref(&self) -> Option<&MlsGroup> {
|
||||||
|
self.group.as_ref()
|
||||||
|
}
|
||||||
|
|
||||||
// ── Private helpers ───────────────────────────────────────────────────────
|
// ── Private helpers ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
|
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
|
||||||
@@ -385,7 +396,9 @@ mod tests {
|
|||||||
let bob_kp = bob.generate_key_package().expect("Bob KeyPackage");
|
let bob_kp = bob.generate_key_package().expect("Bob KeyPackage");
|
||||||
|
|
||||||
// Alice creates the group.
|
// Alice creates the group.
|
||||||
alice.create_group(b"test-group-m3").expect("Alice create group");
|
alice
|
||||||
|
.create_group(b"test-group-m3")
|
||||||
|
.expect("Alice create group");
|
||||||
|
|
||||||
// Alice adds Bob → (commit, welcome).
|
// Alice adds Bob → (commit, welcome).
|
||||||
// Alice is the sole existing member, so she merges the commit herself.
|
// Alice is the sole existing member, so she merges the commit herself.
|
||||||
@@ -22,6 +22,7 @@
|
|||||||
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
|
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
|
||||||
use openmls_traits::signatures::Signer;
|
use openmls_traits::signatures::Signer;
|
||||||
use openmls_traits::types::{Error as MlsError, SignatureScheme};
|
use openmls_traits::types::{Error as MlsError, SignatureScheme};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
use sha2::{Digest, Sha256};
|
use sha2::{Digest, Sha256};
|
||||||
use zeroize::Zeroizing;
|
use zeroize::Zeroizing;
|
||||||
|
|
||||||
@@ -39,6 +40,23 @@ pub struct IdentityKeypair {
|
|||||||
verifying: VerifyingKey,
|
verifying: VerifyingKey,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl IdentityKeypair {
|
||||||
|
/// Recreate an identity keypair from a 32-byte seed.
|
||||||
|
pub fn from_seed(seed: [u8; 32]) -> Self {
|
||||||
|
let signing = SigningKey::from_bytes(&seed);
|
||||||
|
let verifying = signing.verifying_key();
|
||||||
|
Self {
|
||||||
|
seed: Zeroizing::new(seed),
|
||||||
|
verifying,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the raw 32-byte private seed (for persistence).
|
||||||
|
pub fn seed_bytes(&self) -> [u8; 32] {
|
||||||
|
*self.seed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl IdentityKeypair {
|
impl IdentityKeypair {
|
||||||
/// Generate a fresh random Ed25519 identity keypair.
|
/// Generate a fresh random Ed25519 identity keypair.
|
||||||
pub fn generate() -> Self {
|
pub fn generate() -> Self {
|
||||||
@@ -84,6 +102,29 @@ impl Signer for IdentityKeypair {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Serialize for IdentityKeypair {
|
||||||
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: serde::Serializer,
|
||||||
|
{
|
||||||
|
serializer.serialize_bytes(&self.seed[..])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'de> Deserialize<'de> for IdentityKeypair {
|
||||||
|
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||||
|
where
|
||||||
|
D: serde::Deserializer<'de>,
|
||||||
|
{
|
||||||
|
let bytes: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
|
||||||
|
let seed: [u8; 32] = bytes
|
||||||
|
.as_slice()
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
|
||||||
|
Ok(IdentityKeypair::from_seed(seed))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl std::fmt::Debug for IdentityKeypair {
|
impl std::fmt::Debug for IdentityKeypair {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
let fp = self.fingerprint();
|
let fp = self.fingerprint();
|
||||||
@@ -14,7 +14,7 @@
|
|||||||
//! # Wire format
|
//! # Wire format
|
||||||
//!
|
//!
|
||||||
//! KeyPackages are TLS-encoded using `tls_codec` (same version as openmls).
|
//! KeyPackages are TLS-encoded using `tls_codec` (same version as openmls).
|
||||||
//! The resulting bytes are opaque to the noiseml transport layer.
|
//! The resulting bytes are opaque to the quicnprotochat transport layer.
|
||||||
|
|
||||||
use openmls::prelude::{
|
use openmls::prelude::{
|
||||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||||
@@ -25,9 +25,8 @@ use sha2::{Digest, Sha256};
|
|||||||
|
|
||||||
use crate::{error::CoreError, identity::IdentityKeypair};
|
use crate::{error::CoreError, identity::IdentityKeypair};
|
||||||
|
|
||||||
/// The MLS ciphersuite used throughout noiseml.
|
/// The MLS ciphersuite used throughout quicnprotochat.
|
||||||
const CIPHERSUITE: Ciphersuite =
|
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||||
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
|
||||||
|
|
||||||
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
|
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
|
||||||
///
|
///
|
||||||
@@ -41,17 +40,12 @@ const CIPHERSUITE: Ciphersuite =
|
|||||||
///
|
///
|
||||||
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage or if
|
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage or if
|
||||||
/// TLS serialisation fails.
|
/// TLS serialisation fails.
|
||||||
pub fn generate_key_package(
|
pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
|
||||||
identity: &IdentityKeypair,
|
|
||||||
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
|
|
||||||
let backend = OpenMlsRustCrypto::default();
|
let backend = OpenMlsRustCrypto::default();
|
||||||
|
|
||||||
// Build a BasicCredential using the raw Ed25519 public key bytes as the
|
// Build a BasicCredential using the raw Ed25519 public key bytes as the
|
||||||
// MLS identity. Per RFC 9420, any byte string may serve as the identity.
|
// MLS identity. Per RFC 9420, any byte string may serve as the identity.
|
||||||
let credential = Credential::new(
|
let credential = Credential::new(identity.public_key_bytes().to_vec(), CredentialType::Basic)
|
||||||
identity.public_key_bytes().to_vec(),
|
|
||||||
CredentialType::Basic,
|
|
||||||
)
|
|
||||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||||
|
|
||||||
// The `signature_key` in CredentialWithKey is the Ed25519 public key that
|
// The `signature_key` in CredentialWithKey is the Ed25519 public key that
|
||||||
@@ -113,7 +113,9 @@ mod tests {
|
|||||||
let private = kp.private_bytes();
|
let private = kp.private_bytes();
|
||||||
// We cannot observe zeroization after drop in a test without unsafe,
|
// We cannot observe zeroization after drop in a test without unsafe,
|
||||||
// but we can confirm the wrapper type is returned and is non-zero.
|
// but we can confirm the wrapper type is returned and is non-zero.
|
||||||
assert!(private.iter().any(|&b| b != 0),
|
assert!(
|
||||||
"freshly generated private key should not be all zeros");
|
private.iter().any(|&b| b != 0),
|
||||||
|
"freshly generated private key should not be all zeros"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
144
crates/quicnprotochat-core/src/keystore.rs
Normal file
144
crates/quicnprotochat-core/src/keystore.rs
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
fs,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
sync::RwLock,
|
||||||
|
};
|
||||||
|
|
||||||
|
use openmls_rust_crypto::RustCrypto;
|
||||||
|
use openmls_traits::{
|
||||||
|
key_store::{MlsEntity, OpenMlsKeyStore},
|
||||||
|
OpenMlsCryptoProvider,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// A disk-backed key store implementing `OpenMlsKeyStore`.
|
||||||
|
///
|
||||||
|
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on
|
||||||
|
/// every store/delete so HPKE init keys survive process restarts.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct DiskKeyStore {
|
||||||
|
path: Option<PathBuf>,
|
||||||
|
values: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
|
||||||
|
pub enum DiskKeyStoreError {
|
||||||
|
#[error("serialization error")]
|
||||||
|
Serialization,
|
||||||
|
#[error("io error: {0}")]
|
||||||
|
Io(String),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DiskKeyStore {
|
||||||
|
/// In-memory keystore (no persistence).
|
||||||
|
pub fn ephemeral() -> Self {
|
||||||
|
Self {
|
||||||
|
path: None,
|
||||||
|
values: RwLock::new(HashMap::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Persistent keystore backed by `path`. Creates an empty store if missing.
|
||||||
|
pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
|
||||||
|
let path = path.as_ref().to_path_buf();
|
||||||
|
let values = if path.exists() {
|
||||||
|
let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||||
|
if bytes.is_empty() {
|
||||||
|
HashMap::new()
|
||||||
|
} else {
|
||||||
|
bincode::deserialize(&bytes).map_err(|_| DiskKeyStoreError::Serialization)?
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
HashMap::new()
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
path: Some(path),
|
||||||
|
values: RwLock::new(values),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn flush(&self) -> Result<(), DiskKeyStoreError> {
|
||||||
|
let Some(path) = &self.path else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
let values = self.values.read().unwrap();
|
||||||
|
let bytes = bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||||
|
if let Some(parent) = path.parent() {
|
||||||
|
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||||
|
}
|
||||||
|
fs::write(path, bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for DiskKeyStore {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::ephemeral()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OpenMlsKeyStore for DiskKeyStore {
|
||||||
|
type Error = DiskKeyStoreError;
|
||||||
|
|
||||||
|
fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> {
|
||||||
|
let value = serde_json::to_vec(v).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||||
|
let mut values = self.values.write().unwrap();
|
||||||
|
values.insert(k.to_vec(), value);
|
||||||
|
drop(values);
|
||||||
|
self.flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> {
|
||||||
|
let values = self.values.read().unwrap();
|
||||||
|
values
|
||||||
|
.get(k)
|
||||||
|
.and_then(|bytes| serde_json::from_slice(bytes).ok())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> {
|
||||||
|
let mut values = self.values.write().unwrap();
|
||||||
|
values.remove(k);
|
||||||
|
drop(values);
|
||||||
|
self.flush()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Crypto provider that couples RustCrypto with a disk-backed key store.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct StoreCrypto {
|
||||||
|
crypto: RustCrypto,
|
||||||
|
key_store: DiskKeyStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl StoreCrypto {
|
||||||
|
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||||
|
Self {
|
||||||
|
crypto: RustCrypto::default(),
|
||||||
|
key_store,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for StoreCrypto {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new(DiskKeyStore::ephemeral())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OpenMlsCryptoProvider for StoreCrypto {
|
||||||
|
type CryptoProvider = RustCrypto;
|
||||||
|
type RandProvider = RustCrypto;
|
||||||
|
type KeyStoreProvider = DiskKeyStore;
|
||||||
|
|
||||||
|
fn crypto(&self) -> &Self::CryptoProvider {
|
||||||
|
&self.crypto
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rand(&self) -> &Self::RandProvider {
|
||||||
|
&self.crypto
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key_store(&self) -> &Self::KeyStoreProvider {
|
||||||
|
&self.key_store
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
//! Core cryptographic primitives, Noise_XX transport, MLS group state machine,
|
//! Core cryptographic primitives, Noise_XX transport, MLS group state machine,
|
||||||
//! and frame codec for noiseml.
|
//! and frame codec for quicnprotochat.
|
||||||
//!
|
//!
|
||||||
//! # Module layout
|
//! # Module layout
|
||||||
//!
|
//!
|
||||||
@@ -17,8 +17,9 @@ mod codec;
|
|||||||
mod error;
|
mod error;
|
||||||
mod group;
|
mod group;
|
||||||
mod identity;
|
mod identity;
|
||||||
mod keypair;
|
|
||||||
mod keypackage;
|
mod keypackage;
|
||||||
|
mod keypair;
|
||||||
|
mod keystore;
|
||||||
mod noise;
|
mod noise;
|
||||||
|
|
||||||
// ── Public API ────────────────────────────────────────────────────────────────
|
// ── Public API ────────────────────────────────────────────────────────────────
|
||||||
@@ -27,6 +28,7 @@ pub use codec::{LengthPrefixedCodec, NOISE_MAX_MSG};
|
|||||||
pub use error::{CodecError, CoreError, MAX_PLAINTEXT_LEN};
|
pub use error::{CodecError, CoreError, MAX_PLAINTEXT_LEN};
|
||||||
pub use group::GroupMember;
|
pub use group::GroupMember;
|
||||||
pub use identity::IdentityKeypair;
|
pub use identity::IdentityKeypair;
|
||||||
pub use keypair::NoiseKeypair;
|
|
||||||
pub use keypackage::generate_key_package;
|
pub use keypackage::generate_key_package;
|
||||||
|
pub use keypair::NoiseKeypair;
|
||||||
|
pub use keystore::DiskKeyStore;
|
||||||
pub use noise::{handshake_initiator, handshake_responder, NoiseTransport};
|
pub use noise::{handshake_initiator, handshake_responder, NoiseTransport};
|
||||||
@@ -32,7 +32,7 @@
|
|||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use futures::{SinkExt, StreamExt};
|
use futures::{SinkExt, StreamExt};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
io::{AsyncReadExt, AsyncWriteExt, DuplexStream, ReadHalf, WriteHalf, duplex},
|
io::{duplex, AsyncReadExt, AsyncWriteExt, DuplexStream, ReadHalf, WriteHalf},
|
||||||
net::TcpStream,
|
net::TcpStream,
|
||||||
};
|
};
|
||||||
use tokio_util::codec::Framed;
|
use tokio_util::codec::Framed;
|
||||||
@@ -42,9 +42,9 @@ use crate::{
|
|||||||
error::{CoreError, MAX_PLAINTEXT_LEN},
|
error::{CoreError, MAX_PLAINTEXT_LEN},
|
||||||
keypair::NoiseKeypair,
|
keypair::NoiseKeypair,
|
||||||
};
|
};
|
||||||
use noiseml_proto::{parse_envelope, build_envelope, ParsedEnvelope};
|
use quicnprotochat_proto::{build_envelope, parse_envelope, ParsedEnvelope};
|
||||||
|
|
||||||
/// Noise parameters used throughout noiseml.
|
/// Noise parameters used throughout quicnprotochat.
|
||||||
///
|
///
|
||||||
/// `Noise_XX_25519_ChaChaPoly_BLAKE2s` — both parties authenticate each
|
/// `Noise_XX_25519_ChaChaPoly_BLAKE2s` — both parties authenticate each
|
||||||
/// other's static X25519 keys; ChaCha20-Poly1305 for AEAD; BLAKE2s as PRF.
|
/// other's static X25519 keys; ChaCha20-Poly1305 for AEAD; BLAKE2s as PRF.
|
||||||
@@ -144,7 +144,7 @@ impl NoiseTransport {
|
|||||||
/// Serialise and encrypt a [`ParsedEnvelope`], then send it.
|
/// Serialise and encrypt a [`ParsedEnvelope`], then send it.
|
||||||
///
|
///
|
||||||
/// This is the primary application-level send method. The Cap'n Proto
|
/// This is the primary application-level send method. The Cap'n Proto
|
||||||
/// encoding is done by [`noiseml_proto::build_envelope`] before encryption.
|
/// encoding is done by [`quicnprotochat_proto::build_envelope`] before encryption.
|
||||||
pub async fn send_envelope(&mut self, env: &ParsedEnvelope) -> Result<(), CoreError> {
|
pub async fn send_envelope(&mut self, env: &ParsedEnvelope) -> Result<(), CoreError> {
|
||||||
let bytes = build_envelope(env).map_err(CoreError::Capnp)?;
|
let bytes = build_envelope(env).map_err(CoreError::Capnp)?;
|
||||||
self.send_frame(&bytes).await
|
self.send_frame(&bytes).await
|
||||||
@@ -244,9 +244,10 @@ impl NoiseTransport {
|
|||||||
|
|
||||||
impl std::fmt::Debug for NoiseTransport {
|
impl std::fmt::Debug for NoiseTransport {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
let remote = self.remote_static.as_deref().map(|k| {
|
let remote = self
|
||||||
format!("{:02x}{:02x}{:02x}{:02x}…", k[0], k[1], k[2], k[3])
|
.remote_static
|
||||||
});
|
.as_deref()
|
||||||
|
.map(|k| format!("{:02x}{:02x}{:02x}{:02x}…", k[0], k[1], k[2], k[3]));
|
||||||
f.debug_struct("NoiseTransport")
|
f.debug_struct("NoiseTransport")
|
||||||
.field("remote_static", &remote)
|
.field("remote_static", &remote)
|
||||||
.finish_non_exhaustive()
|
.finish_non_exhaustive()
|
||||||
@@ -270,9 +271,9 @@ pub async fn handshake_initiator(
|
|||||||
stream: TcpStream,
|
stream: TcpStream,
|
||||||
keypair: &NoiseKeypair,
|
keypair: &NoiseKeypair,
|
||||||
) -> Result<NoiseTransport, CoreError> {
|
) -> Result<NoiseTransport, CoreError> {
|
||||||
let params: snow::params::NoiseParams = NOISE_PARAMS.parse().expect(
|
let params: snow::params::NoiseParams = NOISE_PARAMS
|
||||||
"NOISE_PARAMS is a compile-time constant and must parse successfully",
|
.parse()
|
||||||
);
|
.expect("NOISE_PARAMS is a compile-time constant and must parse successfully");
|
||||||
|
|
||||||
// The private key bytes are held in a Zeroizing wrapper and cleared after
|
// The private key bytes are held in a Zeroizing wrapper and cleared after
|
||||||
// snow clones them internally during build_initiator().
|
// snow clones them internally during build_initiator().
|
||||||
@@ -337,9 +338,9 @@ pub async fn handshake_responder(
|
|||||||
stream: TcpStream,
|
stream: TcpStream,
|
||||||
keypair: &NoiseKeypair,
|
keypair: &NoiseKeypair,
|
||||||
) -> Result<NoiseTransport, CoreError> {
|
) -> Result<NoiseTransport, CoreError> {
|
||||||
let params: snow::params::NoiseParams = NOISE_PARAMS.parse().expect(
|
let params: snow::params::NoiseParams = NOISE_PARAMS
|
||||||
"NOISE_PARAMS is a compile-time constant and must parse successfully",
|
.parse()
|
||||||
);
|
.expect("NOISE_PARAMS is a compile-time constant and must parse successfully");
|
||||||
|
|
||||||
let private = keypair.private_bytes();
|
let private = keypair.private_bytes();
|
||||||
let mut session = snow::Builder::new(params)
|
let mut session = snow::Builder::new(params)
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "noiseml-proto"
|
name = "quicnprotochat-proto"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "Cap'n Proto schemas, generated types, and serialisation helpers for noiseml. No crypto, no I/O."
|
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat. No crypto, no I/O."
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
|
|
||||||
# build.rs invokes capnpc to generate Rust source from .capnp schemas.
|
# build.rs invokes capnpc to generate Rust source from .capnp schemas.
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//! Build script for noiseml-proto.
|
//! Build script for quicnprotochat-proto.
|
||||||
//!
|
//!
|
||||||
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
|
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
|
||||||
//! located in the workspace-root `schemas/` directory.
|
//! located in the workspace-root `schemas/` directory.
|
||||||
@@ -14,11 +14,10 @@
|
|||||||
use std::{env, path::PathBuf};
|
use std::{env, path::PathBuf};
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
let manifest_dir = PathBuf::from(
|
let manifest_dir =
|
||||||
env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"),
|
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
|
||||||
);
|
|
||||||
|
|
||||||
// Workspace root is two levels above this crate (noiseml/crates/noiseml-proto).
|
// Workspace root is two levels above this crate (quicnprotochat/crates/quicnprotochat-proto).
|
||||||
let workspace_root = manifest_dir
|
let workspace_root = manifest_dir
|
||||||
.join("../..")
|
.join("../..")
|
||||||
.canonicalize()
|
.canonicalize()
|
||||||
@@ -39,6 +38,10 @@ fn main() {
|
|||||||
"cargo:rerun-if-changed={}",
|
"cargo:rerun-if-changed={}",
|
||||||
schemas_dir.join("delivery.capnp").display()
|
schemas_dir.join("delivery.capnp").display()
|
||||||
);
|
);
|
||||||
|
println!(
|
||||||
|
"cargo:rerun-if-changed={}",
|
||||||
|
schemas_dir.join("node.capnp").display()
|
||||||
|
);
|
||||||
|
|
||||||
capnpc::CompilerCommand::new()
|
capnpc::CompilerCommand::new()
|
||||||
// Treat `schemas/` as the include root so that inter-schema imports
|
// Treat `schemas/` as the include root so that inter-schema imports
|
||||||
@@ -47,6 +50,7 @@ fn main() {
|
|||||||
.file(schemas_dir.join("envelope.capnp"))
|
.file(schemas_dir.join("envelope.capnp"))
|
||||||
.file(schemas_dir.join("auth.capnp"))
|
.file(schemas_dir.join("auth.capnp"))
|
||||||
.file(schemas_dir.join("delivery.capnp"))
|
.file(schemas_dir.join("delivery.capnp"))
|
||||||
|
.file(schemas_dir.join("node.capnp"))
|
||||||
.run()
|
.run()
|
||||||
.expect(
|
.expect(
|
||||||
"Cap'n Proto schema compilation failed. \
|
"Cap'n Proto schema compilation failed. \
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//! Cap'n Proto schemas, generated types, and serialisation helpers for noiseml.
|
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
|
||||||
//!
|
//!
|
||||||
//! # Design constraints
|
//! # Design constraints
|
||||||
//!
|
//!
|
||||||
@@ -41,11 +41,18 @@ pub mod delivery_capnp {
|
|||||||
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
|
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Cap'n Proto generated types for `schemas/node.capnp`.
|
||||||
|
///
|
||||||
|
/// Do not edit this module by hand — it is entirely machine-generated.
|
||||||
|
pub mod node_capnp {
|
||||||
|
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
|
||||||
|
}
|
||||||
|
|
||||||
// ── Re-exports ────────────────────────────────────────────────────────────────
|
// ── Re-exports ────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
/// The message-type discriminant from the `Envelope` schema.
|
/// The message-type discriminant from the `Envelope` schema.
|
||||||
///
|
///
|
||||||
/// Re-exported here so callers can `use noiseml_proto::MsgType` without
|
/// Re-exported here so callers can `use quicnprotochat_proto::MsgType` without
|
||||||
/// spelling out the full generated module path.
|
/// spelling out the full generated module path.
|
||||||
pub use envelope_capnp::envelope::MsgType;
|
pub use envelope_capnp::envelope::MsgType;
|
||||||
|
|
||||||
@@ -80,7 +87,7 @@ pub struct ParsedEnvelope {
|
|||||||
///
|
///
|
||||||
/// The returned bytes include the Cap'n Proto segment table header followed by
|
/// The returned bytes include the Cap'n Proto segment table header followed by
|
||||||
/// the message data. They are suitable for use as the body of a length-prefixed
|
/// the message data. They are suitable for use as the body of a length-prefixed
|
||||||
/// noiseml frame (the frame codec in `noiseml-core` prepends the 4-byte length).
|
/// quicnprotochat frame (the frame codec in `quicnprotochat-core` prepends the 4-byte length).
|
||||||
///
|
///
|
||||||
/// # Errors
|
/// # Errors
|
||||||
///
|
///
|
||||||
@@ -135,7 +142,7 @@ pub fn parse_envelope(bytes: &[u8]) -> Result<ParsedEnvelope, capnp::Error> {
|
|||||||
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
|
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
|
||||||
///
|
///
|
||||||
/// The output includes the segment table header. For transport, the
|
/// The output includes the segment table header. For transport, the
|
||||||
/// `noiseml-core` frame codec prepends a 4-byte little-endian length field.
|
/// `quicnprotochat-core` frame codec prepends a 4-byte little-endian length field.
|
||||||
pub fn to_bytes<A: capnp::message::Allocator>(
|
pub fn to_bytes<A: capnp::message::Allocator>(
|
||||||
msg: &capnp::message::Builder<A>,
|
msg: &capnp::message::Builder<A>,
|
||||||
) -> Result<Vec<u8>, capnp::Error> {
|
) -> Result<Vec<u8>, capnp::Error> {
|
||||||
@@ -1,17 +1,17 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "noiseml-server"
|
name = "quicnprotochat-server"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "Delivery Service and Authentication Service for noiseml."
|
description = "Delivery Service and Authentication Service for quicnprotochat."
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "noiseml-server"
|
name = "quicnprotochat-server"
|
||||||
path = "src/main.rs"
|
path = "src/main.rs"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
noiseml-core = { path = "../noiseml-core" }
|
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||||
noiseml-proto = { path = "../noiseml-proto" }
|
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||||
|
|
||||||
# Serialisation + RPC
|
# Serialisation + RPC
|
||||||
capnp = { workspace = true }
|
capnp = { workspace = true }
|
||||||
@@ -27,10 +27,16 @@ dashmap = { workspace = true }
|
|||||||
sha2 = { workspace = true }
|
sha2 = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
tracing-subscriber = { workspace = true }
|
tracing-subscriber = { workspace = true }
|
||||||
|
quinn = { workspace = true }
|
||||||
|
quinn-proto = { workspace = true }
|
||||||
|
rustls = { workspace = true }
|
||||||
|
rcgen = { workspace = true }
|
||||||
|
|
||||||
# Error handling
|
# Error handling
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
|
bincode = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
|
||||||
# CLI
|
# CLI
|
||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
508
crates/quicnprotochat-server/src/main.rs
Normal file
508
crates/quicnprotochat-server/src/main.rs
Normal file
@@ -0,0 +1,508 @@
|
|||||||
|
//! quicnprotochat-server — unified Authentication + Delivery service.
|
||||||
|
//!
|
||||||
|
//! # M3 scope
|
||||||
|
//!
|
||||||
|
//! The server exposes a single QUIC + TLS 1.3 Cap'n Proto RPC endpoint
|
||||||
|
//! (`NodeService`) combining Authentication and Delivery operations.
|
||||||
|
//!
|
||||||
|
//! # Architecture
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! QUIC endpoint (7000)
|
||||||
|
//! └─ TLS 1.3 handshake (self-signed by default)
|
||||||
|
//! └─ capnp-rpc VatNetwork (LocalSet, !Send)
|
||||||
|
//! └─ NodeServiceImpl (KeyPackage + Delivery queues)
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Because `capnp-rpc` uses `Rc<RefCell<>>` internally it is `!Send`.
|
||||||
|
//! The entire RPC stack lives on a `tokio::task::LocalSet` spawned per
|
||||||
|
//! connection.
|
||||||
|
//!
|
||||||
|
//! # Configuration
|
||||||
|
//!
|
||||||
|
//! | Env var | CLI flag | Default |
|
||||||
|
//! |---------------------|----------------|-----------------|
|
||||||
|
//! | `QUICNPROTOCHAT_LISTEN` | `--listen` | `0.0.0.0:7000` |
|
||||||
|
//! | `RUST_LOG` | — | `info` |
|
||||||
|
|
||||||
|
use std::{fs, net::SocketAddr, path::PathBuf, sync::Arc, time::Duration};
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use capnp::capability::Promise;
|
||||||
|
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||||
|
use clap::Parser;
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use quicnprotochat_proto::node_capnp::node_service;
|
||||||
|
use quinn::{Endpoint, ServerConfig};
|
||||||
|
use quinn_proto::crypto::rustls::QuicServerConfig;
|
||||||
|
use rcgen::generate_simple_self_signed;
|
||||||
|
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
|
||||||
|
use sha2::{Digest, Sha256};
|
||||||
|
use tokio::sync::Notify;
|
||||||
|
use tokio::time::timeout;
|
||||||
|
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||||
|
|
||||||
|
mod storage;
|
||||||
|
use storage::{FileBackedStore, StorageError};
|
||||||
|
|
||||||
|
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[derive(Debug, Parser)]
|
||||||
|
#[command(
|
||||||
|
name = "quicnprotochat-server",
|
||||||
|
about = "quicnprotochat Delivery Service + Authentication Service",
|
||||||
|
version
|
||||||
|
)]
|
||||||
|
struct Args {
|
||||||
|
/// QUIC listen address (host:port).
|
||||||
|
#[arg(long, default_value = "0.0.0.0:7000", env = "QUICNPROTOCHAT_LISTEN")]
|
||||||
|
listen: String,
|
||||||
|
|
||||||
|
/// Directory for persisted server data (KeyPackages + delivery queues).
|
||||||
|
#[arg(long, default_value = "data", env = "QUICNPROTOCHAT_DATA_DIR")]
|
||||||
|
data_dir: String,
|
||||||
|
|
||||||
|
/// TLS certificate path (generated automatically if missing).
|
||||||
|
#[arg(
|
||||||
|
long,
|
||||||
|
default_value = "data/server-cert.der",
|
||||||
|
env = "QUICNPROTOCHAT_TLS_CERT"
|
||||||
|
)]
|
||||||
|
tls_cert: PathBuf,
|
||||||
|
|
||||||
|
/// TLS private key path (generated automatically if missing).
|
||||||
|
#[arg(
|
||||||
|
long,
|
||||||
|
default_value = "data/server-key.der",
|
||||||
|
env = "QUICNPROTOCHAT_TLS_KEY"
|
||||||
|
)]
|
||||||
|
tls_key: PathBuf,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Node service implementation ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Cap'n Proto RPC server implementation for `NodeService` (Auth + Delivery).
|
||||||
|
struct NodeServiceImpl {
|
||||||
|
store: Arc<FileBackedStore>,
|
||||||
|
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NodeServiceImpl {
|
||||||
|
fn waiter(&self, recipient_key: &[u8]) -> Arc<Notify> {
|
||||||
|
self.waiters
|
||||||
|
.entry(recipient_key.to_vec())
|
||||||
|
.or_insert_with(|| Arc::new(Notify::new()))
|
||||||
|
.clone()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl node_service::Server for NodeServiceImpl {
|
||||||
|
/// Upload a single-use KeyPackage and return its SHA-256 fingerprint.
|
||||||
|
fn upload_key_package(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::UploadKeyPackageParams,
|
||||||
|
mut results: node_service::UploadKeyPackageResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let params = params
|
||||||
|
.get()
|
||||||
|
.map_err(|e| capnp::Error::failed(format!("upload_key_package: bad params: {e}")));
|
||||||
|
|
||||||
|
let (identity_key, package) = match params {
|
||||||
|
Ok(p) => {
|
||||||
|
let ik = match p.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
let pkg = match p.get_package() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
(ik, pkg)
|
||||||
|
}
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
if identity_key.len() != 32 {
|
||||||
|
return Promise::err(capnp::Error::failed(format!(
|
||||||
|
"identityKey must be exactly 32 bytes, got {}",
|
||||||
|
identity_key.len()
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
if package.is_empty() {
|
||||||
|
return Promise::err(capnp::Error::failed(
|
||||||
|
"package must not be empty".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let fingerprint: Vec<u8> = Sha256::digest(&package).to_vec();
|
||||||
|
if let Err(e) = self
|
||||||
|
.store
|
||||||
|
.upload_key_package(&identity_key, package)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
results.get().set_fingerprint(&fingerprint);
|
||||||
|
|
||||||
|
tracing::debug!(
|
||||||
|
fingerprint = %fmt_hex(&fingerprint[..4]),
|
||||||
|
"KeyPackage uploaded"
|
||||||
|
);
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Atomically remove and return one KeyPackage for the given identity key.
|
||||||
|
fn fetch_key_package(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::FetchKeyPackageParams,
|
||||||
|
mut results: node_service::FetchKeyPackageResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let identity_key = match params.get() {
|
||||||
|
Ok(p) => match p.get_identity_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
},
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
if identity_key.len() != 32 {
|
||||||
|
return Promise::err(capnp::Error::failed(format!(
|
||||||
|
"identityKey must be exactly 32 bytes, got {}",
|
||||||
|
identity_key.len()
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let package = match self
|
||||||
|
.store
|
||||||
|
.fetch_key_package(&identity_key)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
match package {
|
||||||
|
Some(pkg) => {
|
||||||
|
tracing::debug!(
|
||||||
|
identity = %fmt_hex(&identity_key[..4]),
|
||||||
|
"KeyPackage fetched"
|
||||||
|
);
|
||||||
|
results.get().set_package(&pkg);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
tracing::debug!(
|
||||||
|
identity = %fmt_hex(&identity_key[..4]),
|
||||||
|
"no KeyPackage available for identity"
|
||||||
|
);
|
||||||
|
results.get().set_package(&[]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Append `payload` to the queue for `recipient_key`.
|
||||||
|
fn enqueue(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::EnqueueParams,
|
||||||
|
_results: node_service::EnqueueResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let p = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
let recipient_key = match p.get_recipient_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
let payload = match p.get_payload() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
if recipient_key.len() != 32 {
|
||||||
|
return Promise::err(capnp::Error::failed(format!(
|
||||||
|
"recipientKey must be exactly 32 bytes, got {}",
|
||||||
|
recipient_key.len()
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
if payload.is_empty() {
|
||||||
|
return Promise::err(capnp::Error::failed(
|
||||||
|
"payload must not be empty".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(e) = self
|
||||||
|
.store
|
||||||
|
.enqueue(&recipient_key, payload)
|
||||||
|
.map_err(storage_err)
|
||||||
|
{
|
||||||
|
return Promise::err(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.waiter(&recipient_key).notify_waiters();
|
||||||
|
|
||||||
|
tracing::debug!(
|
||||||
|
recipient = %fmt_hex(&recipient_key[..4]),
|
||||||
|
"message enqueued"
|
||||||
|
);
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Atomically drain and return all queued payloads for `recipient_key`.
|
||||||
|
fn fetch(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::FetchParams,
|
||||||
|
mut results: node_service::FetchResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let recipient_key = match params.get() {
|
||||||
|
Ok(p) => match p.get_recipient_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
},
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
|
if recipient_key.len() != 32 {
|
||||||
|
return Promise::err(capnp::Error::failed(format!(
|
||||||
|
"recipientKey must be exactly 32 bytes, got {}",
|
||||||
|
recipient_key.len()
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let messages = match self.store.fetch(&recipient_key).map_err(storage_err) {
|
||||||
|
Ok(m) => m,
|
||||||
|
Err(e) => return Promise::err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing::debug!(
|
||||||
|
recipient = %fmt_hex(&recipient_key[..4]),
|
||||||
|
count = messages.len(),
|
||||||
|
"messages fetched"
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||||
|
for (i, msg) in messages.iter().enumerate() {
|
||||||
|
list.set(i as u32, msg);
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Long-polling fetch with timeout (ms).
|
||||||
|
fn fetch_wait(
|
||||||
|
&mut self,
|
||||||
|
params: node_service::FetchWaitParams,
|
||||||
|
mut results: node_service::FetchWaitResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
let p = match params.get() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
let recipient_key = match p.get_recipient_key() {
|
||||||
|
Ok(v) => v.to_vec(),
|
||||||
|
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
|
||||||
|
};
|
||||||
|
let timeout_ms = p.get_timeout_ms();
|
||||||
|
|
||||||
|
if recipient_key.len() != 32 {
|
||||||
|
return Promise::err(capnp::Error::failed(format!(
|
||||||
|
"recipientKey must be exactly 32 bytes, got {}",
|
||||||
|
recipient_key.len()
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let store = Arc::clone(&self.store);
|
||||||
|
let waiters = self.waiters.clone();
|
||||||
|
|
||||||
|
Promise::from_future(async move {
|
||||||
|
let messages = store.fetch(&recipient_key).map_err(storage_err)?;
|
||||||
|
|
||||||
|
if messages.is_empty() && timeout_ms > 0 {
|
||||||
|
let waiter = waiters
|
||||||
|
.entry(recipient_key.clone())
|
||||||
|
.or_insert_with(|| Arc::new(Notify::new()))
|
||||||
|
.clone();
|
||||||
|
let _ = timeout(Duration::from_millis(timeout_ms), waiter.notified()).await;
|
||||||
|
let msgs = store.fetch(&recipient_key).map_err(storage_err)?;
|
||||||
|
fill_payloads_wait(&mut results, msgs);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
fill_payloads_wait(&mut results, messages);
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn health(
|
||||||
|
&mut self,
|
||||||
|
_params: node_service::HealthParams,
|
||||||
|
mut results: node_service::HealthResults,
|
||||||
|
) -> Promise<(), capnp::Error> {
|
||||||
|
results.get().set_status("ok");
|
||||||
|
Promise::ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fill_payloads_wait(results: &mut node_service::FetchWaitResults, messages: Vec<Vec<u8>>) {
|
||||||
|
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||||
|
for (i, msg) in messages.iter().enumerate() {
|
||||||
|
list.set(i as u32, msg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn storage_err(err: StorageError) -> capnp::Error {
|
||||||
|
capnp::Error::failed(format!("{err}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
tracing_subscriber::fmt()
|
||||||
|
.with_env_filter(
|
||||||
|
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||||
|
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
|
||||||
|
)
|
||||||
|
.init();
|
||||||
|
|
||||||
|
let args = Args::parse();
|
||||||
|
|
||||||
|
let listen: SocketAddr = args.listen.parse().context("--listen must be host:port")?;
|
||||||
|
|
||||||
|
let server_config = build_server_config(&args.tls_cert, &args.tls_key)
|
||||||
|
.context("failed to build TLS/QUIC server config")?;
|
||||||
|
|
||||||
|
// Shared storage — persisted to disk for restart safety.
|
||||||
|
let store = Arc::new(FileBackedStore::open(&args.data_dir)?);
|
||||||
|
let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = Arc::new(DashMap::new());
|
||||||
|
|
||||||
|
let endpoint = Endpoint::server(server_config, listen)?;
|
||||||
|
|
||||||
|
tracing::info!(
|
||||||
|
addr = %args.listen,
|
||||||
|
"accepting QUIC connections"
|
||||||
|
);
|
||||||
|
|
||||||
|
// capnp-rpc is !Send (Rc internals), so all RPC tasks must stay on a
|
||||||
|
// LocalSet. Both accept loops share one LocalSet.
|
||||||
|
let local = tokio::task::LocalSet::new();
|
||||||
|
local
|
||||||
|
.run_until(async move {
|
||||||
|
loop {
|
||||||
|
let incoming = match endpoint.accept().await {
|
||||||
|
Some(i) => i,
|
||||||
|
None => break,
|
||||||
|
};
|
||||||
|
|
||||||
|
let connecting = match incoming.accept() {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(error = %e, "failed to accept incoming connection");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let store = Arc::clone(&store);
|
||||||
|
let waiters = Arc::clone(&waiters);
|
||||||
|
tokio::task::spawn_local(async move {
|
||||||
|
if let Err(e) = handle_node_connection(connecting, store, waiters).await {
|
||||||
|
tracing::warn!(error = %e, "connection error");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok::<(), anyhow::Error>(())
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Per-connection handlers ───────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Handle one NodeService connection.
|
||||||
|
async fn handle_node_connection(
|
||||||
|
connecting: quinn::Connecting,
|
||||||
|
store: Arc<FileBackedStore>,
|
||||||
|
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
|
||||||
|
) -> Result<(), anyhow::Error> {
|
||||||
|
let connection = connecting.await?;
|
||||||
|
|
||||||
|
tracing::info!(peer = %connection.remote_address(), "QUIC connected");
|
||||||
|
|
||||||
|
let (send, recv) = connection
|
||||||
|
.accept_bi()
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("failed to accept bi stream: {e}"))?;
|
||||||
|
let (reader, writer) = (recv.compat(), send.compat_write());
|
||||||
|
|
||||||
|
let network = twoparty::VatNetwork::new(reader, writer, Side::Server, Default::default());
|
||||||
|
|
||||||
|
let service: node_service::Client = capnp_rpc::new_client(NodeServiceImpl { store, waiters });
|
||||||
|
|
||||||
|
RpcSystem::new(Box::new(network), Some(service.client))
|
||||||
|
.await
|
||||||
|
.map_err(|e| anyhow::anyhow!("NodeService RPC error: {e}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Helpers ───────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Format ALL bytes of `bytes` as lowercase hex, with a trailing `…`.
///
/// NOTE(review): the previous doc claimed "the first `n` bytes", but there is
/// no `n` parameter — any truncation must happen at the call site. The `…`
/// only marks the printed value as an abbreviation.
fn fmt_hex(bytes: &[u8]) -> String {
    let hex: String = bytes.iter().map(|b| format!("{b:02x}")).collect();
    format!("{hex}…")
}
|
||||||
|
|
||||||
|
/// Ensure a self-signed certificate exists on disk and return a QUIC server config.
|
||||||
|
fn build_server_config(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<ServerConfig> {
|
||||||
|
if !cert_path.exists() || !key_path.exists() {
|
||||||
|
generate_self_signed(cert_path, key_path)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let cert_bytes = fs::read(cert_path).context("read cert")?;
|
||||||
|
let key_bytes = fs::read(key_path).context("read key")?;
|
||||||
|
|
||||||
|
let cert_chain = vec![CertificateDer::from(cert_bytes)];
|
||||||
|
let key = PrivateKeyDer::try_from(key_bytes).map_err(|_| anyhow::anyhow!("invalid key"))?;
|
||||||
|
|
||||||
|
let mut tls = rustls::ServerConfig::builder()
|
||||||
|
.with_no_client_auth()
|
||||||
|
.with_single_cert(cert_chain, key)?;
|
||||||
|
tls.alpn_protocols = vec![b"capnp".to_vec()];
|
||||||
|
|
||||||
|
let crypto = QuicServerConfig::try_from(tls)
|
||||||
|
.map_err(|e| anyhow::anyhow!("invalid server TLS config: {e}"))?;
|
||||||
|
|
||||||
|
Ok(ServerConfig::with_crypto(Arc::new(crypto)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn generate_self_signed(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<()> {
|
||||||
|
if let Some(parent) = cert_path.parent() {
|
||||||
|
fs::create_dir_all(parent).context("create cert dir")?;
|
||||||
|
}
|
||||||
|
if let Some(parent) = key_path.parent() {
|
||||||
|
fs::create_dir_all(parent).context("create key dir")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let subject_alt_names = vec![
|
||||||
|
"localhost".to_string(),
|
||||||
|
"127.0.0.1".to_string(),
|
||||||
|
"::1".to_string(),
|
||||||
|
];
|
||||||
|
|
||||||
|
let issued = generate_simple_self_signed(subject_alt_names)?;
|
||||||
|
let key_der = issued.key_pair.serialize_der();
|
||||||
|
|
||||||
|
fs::write(cert_path, issued.cert.der()).context("write cert")?;
|
||||||
|
fs::write(key_path, &key_der).context("write key")?;
|
||||||
|
|
||||||
|
tracing::info!(
|
||||||
|
cert = %cert_path.display(),
|
||||||
|
key = %key_path.display(),
|
||||||
|
"generated self-signed TLS certificate"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
114
crates/quicnprotochat-server/src/storage.rs
Normal file
114
crates/quicnprotochat-server/src/storage.rs
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
use std::{
|
||||||
|
collections::{HashMap, VecDeque},
|
||||||
|
fs,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
sync::Mutex,
|
||||||
|
};
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
/// Errors surfaced by the file-backed storage layer.
#[derive(thiserror::Error, Debug)]
pub enum StorageError {
    /// Underlying filesystem failure; carries the `std::io::Error` message.
    #[error("io error: {0}")]
    Io(String),
    /// bincode failed to encode or decode a persisted queue map.
    #[error("serialization error")]
    Serde,
}
|
||||||
|
|
||||||
|
/// On-disk representation: each persisted file is one bincode-encoded `QueueMap`.
#[derive(Serialize, Deserialize, Default)]
struct QueueMap {
    // Keyed by raw public-key bytes; values are FIFO queues of opaque blobs.
    map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}
|
||||||
|
|
||||||
|
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
pub struct FileBackedStore {
    // Path of the persisted key-package queues ("keypackages.bin").
    kp_path: PathBuf,
    // Path of the persisted delivery queues ("deliveries.bin").
    ds_path: PathBuf,
    // identity key -> FIFO of uploaded, not-yet-fetched KeyPackages.
    key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
    // recipient key -> FIFO of queued opaque payloads awaiting fetch.
    deliveries: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
}
|
||||||
|
|
||||||
|
impl FileBackedStore {
|
||||||
|
pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
|
||||||
|
let dir = dir.as_ref();
|
||||||
|
if !dir.exists() {
|
||||||
|
fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||||
|
}
|
||||||
|
let kp_path = dir.join("keypackages.bin");
|
||||||
|
let ds_path = dir.join("deliveries.bin");
|
||||||
|
|
||||||
|
let key_packages = Mutex::new(Self::load_map(&kp_path)?);
|
||||||
|
let deliveries = Mutex::new(Self::load_map(&ds_path)?);
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
kp_path,
|
||||||
|
ds_path,
|
||||||
|
key_packages,
|
||||||
|
deliveries,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn upload_key_package(
|
||||||
|
&self,
|
||||||
|
identity_key: &[u8],
|
||||||
|
package: Vec<u8>,
|
||||||
|
) -> Result<(), StorageError> {
|
||||||
|
let mut map = self.key_packages.lock().unwrap();
|
||||||
|
map.entry(identity_key.to_vec())
|
||||||
|
.or_default()
|
||||||
|
.push_back(package);
|
||||||
|
self.flush_map(&self.kp_path, &*map)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||||
|
let mut map = self.key_packages.lock().unwrap();
|
||||||
|
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
|
||||||
|
self.flush_map(&self.kp_path, &*map)?;
|
||||||
|
Ok(package)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn enqueue(&self, recipient_key: &[u8], payload: Vec<u8>) -> Result<(), StorageError> {
|
||||||
|
let mut map = self.deliveries.lock().unwrap();
|
||||||
|
map.entry(recipient_key.to_vec())
|
||||||
|
.or_default()
|
||||||
|
.push_back(payload);
|
||||||
|
self.flush_map(&self.ds_path, &*map)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fetch(&self, recipient_key: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||||
|
let mut map = self.deliveries.lock().unwrap();
|
||||||
|
let messages = map
|
||||||
|
.get_mut(recipient_key)
|
||||||
|
.map(|q| q.drain(..).collect())
|
||||||
|
.unwrap_or_default();
|
||||||
|
self.flush_map(&self.ds_path, &*map)?;
|
||||||
|
Ok(messages)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn load_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
|
||||||
|
if !path.exists() {
|
||||||
|
return Ok(HashMap::new());
|
||||||
|
}
|
||||||
|
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||||
|
if bytes.is_empty() {
|
||||||
|
return Ok(HashMap::new());
|
||||||
|
}
|
||||||
|
let map: QueueMap = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
||||||
|
Ok(map.map)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn flush_map(
|
||||||
|
&self,
|
||||||
|
path: &Path,
|
||||||
|
map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
|
||||||
|
) -> Result<(), StorageError> {
|
||||||
|
let payload = QueueMap { map: map.clone() };
|
||||||
|
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
||||||
|
if let Some(parent) = path.parent() {
|
||||||
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||||
|
}
|
||||||
|
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -7,7 +7,7 @@ services:
|
|||||||
- "7000:7000"
|
- "7000:7000"
|
||||||
environment:
|
environment:
|
||||||
RUST_LOG: "info"
|
RUST_LOG: "info"
|
||||||
NOISEML_LISTEN: "0.0.0.0:7000"
|
QUICNPROTOCHAT_LISTEN: "0.0.0.0:7000"
|
||||||
# Healthcheck: attempt a TCP connection to port 7000.
|
# Healthcheck: attempt a TCP connection to port 7000.
|
||||||
# Uses bash /dev/tcp — available in debian:bookworm-slim without extra packages.
|
# Uses bash /dev/tcp — available in debian:bookworm-slim without extra packages.
|
||||||
healthcheck:
|
healthcheck:
|
||||||
|
|||||||
@@ -12,40 +12,40 @@ WORKDIR /build
|
|||||||
|
|
||||||
# Copy manifests first so dependency layers are cached independently of source.
|
# Copy manifests first so dependency layers are cached independently of source.
|
||||||
COPY Cargo.toml Cargo.lock ./
|
COPY Cargo.toml Cargo.lock ./
|
||||||
COPY crates/noiseml-core/Cargo.toml crates/noiseml-core/Cargo.toml
|
COPY crates/quicnprotochat-core/Cargo.toml crates/quicnprotochat-core/Cargo.toml
|
||||||
COPY crates/noiseml-proto/Cargo.toml crates/noiseml-proto/Cargo.toml
|
COPY crates/quicnprotochat-proto/Cargo.toml crates/quicnprotochat-proto/Cargo.toml
|
||||||
COPY crates/noiseml-server/Cargo.toml crates/noiseml-server/Cargo.toml
|
COPY crates/quicnprotochat-server/Cargo.toml crates/quicnprotochat-server/Cargo.toml
|
||||||
COPY crates/noiseml-client/Cargo.toml crates/noiseml-client/Cargo.toml
|
COPY crates/quicnprotochat-client/Cargo.toml crates/quicnprotochat-client/Cargo.toml
|
||||||
|
|
||||||
# Create dummy source files so `cargo build` can resolve the dependency graph
|
# Create dummy source files so `cargo build` can resolve the dependency graph
|
||||||
# and cache the compiled dependencies before copying real source.
|
# and cache the compiled dependencies before copying real source.
|
||||||
RUN mkdir -p \
|
RUN mkdir -p \
|
||||||
crates/noiseml-core/src \
|
crates/quicnprotochat-core/src \
|
||||||
crates/noiseml-proto/src \
|
crates/quicnprotochat-proto/src \
|
||||||
crates/noiseml-server/src \
|
crates/quicnprotochat-server/src \
|
||||||
crates/noiseml-client/src \
|
crates/quicnprotochat-client/src \
|
||||||
&& echo 'fn main() {}' > crates/noiseml-server/src/main.rs \
|
&& echo 'fn main() {}' > crates/quicnprotochat-server/src/main.rs \
|
||||||
&& echo 'fn main() {}' > crates/noiseml-client/src/main.rs \
|
&& echo 'fn main() {}' > crates/quicnprotochat-client/src/main.rs \
|
||||||
&& touch crates/noiseml-core/src/lib.rs \
|
&& touch crates/quicnprotochat-core/src/lib.rs \
|
||||||
&& touch crates/noiseml-proto/src/lib.rs
|
&& touch crates/quicnprotochat-proto/src/lib.rs
|
||||||
|
|
||||||
# Schemas must exist before the proto crate's build.rs runs.
|
# Schemas must exist before the proto crate's build.rs runs.
|
||||||
COPY schemas/ schemas/
|
COPY schemas/ schemas/
|
||||||
|
|
||||||
# Build dependencies only (source stubs mean this layer is cache-friendly).
|
# Build dependencies only (source stubs mean this layer is cache-friendly).
|
||||||
RUN cargo build --release --bin noiseml-server 2>/dev/null || true
|
RUN cargo build --release --bin quicnprotochat-server 2>/dev/null || true
|
||||||
|
|
||||||
# Copy real source and build for real.
|
# Copy real source and build for real.
|
||||||
COPY crates/ crates/
|
COPY crates/ crates/
|
||||||
|
|
||||||
# Touch main.rs files to force re-compilation of the binary crates.
|
# Touch main.rs files to force re-compilation of the binary crates.
|
||||||
RUN touch \
|
RUN touch \
|
||||||
crates/noiseml-core/src/lib.rs \
|
crates/quicnprotochat-core/src/lib.rs \
|
||||||
crates/noiseml-proto/src/lib.rs \
|
crates/quicnprotochat-proto/src/lib.rs \
|
||||||
crates/noiseml-server/src/main.rs \
|
crates/quicnprotochat-server/src/main.rs \
|
||||||
crates/noiseml-client/src/main.rs
|
crates/quicnprotochat-client/src/main.rs
|
||||||
|
|
||||||
RUN cargo build --release --bin noiseml-server
|
RUN cargo build --release --bin quicnprotochat-server
|
||||||
|
|
||||||
# ── Stage 2: Runtime ──────────────────────────────────────────────────────────
|
# ── Stage 2: Runtime ──────────────────────────────────────────────────────────
|
||||||
#
|
#
|
||||||
@@ -58,14 +58,14 @@ RUN apt-get update \
|
|||||||
&& apt-get install -y --no-install-recommends ca-certificates \
|
&& apt-get install -y --no-install-recommends ca-certificates \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
COPY --from=builder /build/target/release/noiseml-server /usr/local/bin/noiseml-server
|
COPY --from=builder /build/target/release/quicnprotochat-server /usr/local/bin/quicnprotochat-server
|
||||||
|
|
||||||
EXPOSE 7000
|
EXPOSE 7000
|
||||||
|
|
||||||
ENV RUST_LOG=info \
|
ENV RUST_LOG=info \
|
||||||
NOISEML_LISTEN=0.0.0.0:7000
|
QUICNPROTOCHAT_LISTEN=0.0.0.0:7000
|
||||||
|
|
||||||
# Run as a non-root user.
|
# Run as a non-root user.
|
||||||
USER nobody
|
USER nobody
|
||||||
|
|
||||||
CMD ["noiseml-server"]
|
CMD ["quicnprotochat-server"]
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
# noiseml — Master Project Prompt
|
# quicnprotochat — Master Project Prompt
|
||||||
|
|
||||||
## Project Identity
|
## Project Identity
|
||||||
|
|
||||||
You are building **noiseml**, a production-grade end-to-end encrypted group messenger in Rust. It uses the MLS protocol (RFC 9420) for group key agreement, ML-KEM-768 (NIST FIPS 203) hybrid post-quantum key exchange, the Noise Protocol Framework (Noise_XX pattern) over raw TCP as the transport layer, and Cap'n Proto for wire serialisation and RPC. There is no TLS, no HTTP, no WebSocket, no MessagePack.
|
You are building **quicnprotochat**, a production-grade end-to-end encrypted group messenger in Rust. It uses the MLS protocol (RFC 9420) for group key agreement, ML-KEM-768 (NIST FIPS 203) hybrid post-quantum key exchange, the Noise Protocol Framework (Noise_XX pattern) over raw TCP as the transport layer, and Cap'n Proto for wire serialisation and RPC. There is no TLS, no HTTP, no WebSocket, no MessagePack.
|
||||||
|
|
||||||
This is not a prototype. Every milestone produces production-ready, tested, deployable code.
|
This is not a prototype. Every milestone produces production-ready, tested, deployable code.
|
||||||
|
|
||||||
@@ -35,13 +35,13 @@ This is not a prototype. Every milestone produces production-ready, tested, depl
|
|||||||
### Workspace Layout
|
### Workspace Layout
|
||||||
|
|
||||||
```
|
```
|
||||||
noiseml/
|
quicnprotochat/
|
||||||
├── Cargo.toml # workspace root
|
├── Cargo.toml # workspace root
|
||||||
├── crates/
|
├── crates/
|
||||||
│ ├── noiseml-core/ # crypto primitives, MLS wrapper, Noise framing codec
|
│ ├── quicnprotochat-core/ # crypto primitives, MLS wrapper, Noise framing codec
|
||||||
│ ├── noiseml-proto/ # Cap'n Proto schemas + generated types, no crypto, no I/O
|
│ ├── quicnprotochat-proto/ # Cap'n Proto schemas + generated types, no crypto, no I/O
|
||||||
│ ├── noiseml-server/ # Delivery Service (DS) + Authentication Service (AS)
|
│ ├── quicnprotochat-server/ # Delivery Service (DS) + Authentication Service (AS)
|
||||||
│ └── noiseml-client/ # CLI client
|
│ └── quicnprotochat-client/ # CLI client
|
||||||
├── schemas/ # .capnp schema files (canonical source of truth)
|
├── schemas/ # .capnp schema files (canonical source of truth)
|
||||||
│ ├── envelope.capnp
|
│ ├── envelope.capnp
|
||||||
│ ├── auth.capnp
|
│ ├── auth.capnp
|
||||||
@@ -55,31 +55,31 @@ noiseml/
|
|||||||
|
|
||||||
### Crate Responsibilities
|
### Crate Responsibilities
|
||||||
|
|
||||||
**noiseml-core**
|
**quicnprotochat-core**
|
||||||
- Noise_XX handshake initiator and responder (via `snow`)
|
- Noise_XX handshake initiator and responder (via `snow`)
|
||||||
- Length-prefixed Cap'n Proto frame codec (Tokio `Encoder`/`Decoder` traits)
|
- Length-prefixed Cap'n Proto frame codec (Tokio `Encoder`/`Decoder` traits)
|
||||||
- MLS group state machine wrapper around `openmls`
|
- MLS group state machine wrapper around `openmls`
|
||||||
- Hybrid PQ ciphersuite (X25519 + ML-KEM-768)
|
- Hybrid PQ ciphersuite (X25519 + ML-KEM-768)
|
||||||
- Key generation and zeroize-on-drop key types
|
- Key generation and zeroize-on-drop key types
|
||||||
|
|
||||||
**noiseml-proto**
|
**quicnprotochat-proto**
|
||||||
- Cap'n Proto `.capnp` schemas in `schemas/` (workspace root, shared)
|
- Cap'n Proto `.capnp` schemas in `schemas/` (workspace root, shared)
|
||||||
- `build.rs` invokes `capnpc` to generate Rust types into `src/generated/`
|
- `build.rs` invokes `capnpc` to generate Rust types into `src/generated/`
|
||||||
- Re-exports generated types with ergonomic builder/reader helpers
|
- Re-exports generated types with ergonomic builder/reader helpers
|
||||||
- Canonical serialisation helpers for signing (uses `capnp::message::Builder::canonicalize()`)
|
- Canonical serialisation helpers for signing (uses `capnp::message::Builder::canonicalize()`)
|
||||||
- No crypto, no I/O, no async
|
- No crypto, no I/O, no async
|
||||||
|
|
||||||
**noiseml-server**
|
**quicnprotochat-server**
|
||||||
- Authentication Service: KeyPackage store (DashMap → SQLite at M6)
|
- Authentication Service: KeyPackage store (DashMap → SQLite at M6)
|
||||||
- Delivery Service: Cap'n Proto RPC interface, fan-out router, per-group append-only message log
|
- Delivery Service: Cap'n Proto RPC interface, fan-out router, per-group append-only message log
|
||||||
- Tokio TCP listener, Noise handshake per connection, then Cap'n Proto RPC over the encrypted channel
|
- Tokio TCP listener, Noise handshake per connection, then Cap'n Proto RPC over the encrypted channel
|
||||||
- Structured logging (tracing)
|
- Structured logging (tracing)
|
||||||
|
|
||||||
**noiseml-client**
|
**quicnprotochat-client**
|
||||||
- Tokio TCP connection to server
|
- Tokio TCP connection to server
|
||||||
- Noise handshake, then Cap'n Proto RPC client stub
|
- Noise handshake, then Cap'n Proto RPC client stub
|
||||||
- CLI interface (clap)
|
- CLI interface (clap)
|
||||||
- Drives noiseml-core for all crypto operations
|
- Drives quicnprotochat-core for all crypto operations
|
||||||
- Displays received messages to stdout
|
- Displays received messages to stdout
|
||||||
|
|
||||||
### Transport Stack
|
### Transport Stack
|
||||||
@@ -174,11 +174,11 @@ Hybrid KEM construction:
|
|||||||
```
|
```
|
||||||
SharedSecret = HKDF-SHA256(
|
SharedSecret = HKDF-SHA256(
|
||||||
ikm = X25519_ss || ML-KEM-768_ss,
|
ikm = X25519_ss || ML-KEM-768_ss,
|
||||||
info = "noiseml-hybrid-v1",
|
info = "quicnprotochat-hybrid-v1",
|
||||||
len = 32
|
len = 32
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
Follows the combiner approach from draft-ietf-tls-hybrid-design. Implemented as a custom `openmls` `OpenMlsCryptoProvider` trait implementation in `noiseml-core`.
|
Follows the combiner approach from draft-ietf-tls-hybrid-design. Implemented as a custom `openmls` `OpenMlsCryptoProvider` trait implementation in `quicnprotochat-core`.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -189,10 +189,10 @@ Follows the combiner approach from draft-ietf-tls-hybrid-design. Implemented as
|
|||||||
|
|
||||||
Deliverables:
|
Deliverables:
|
||||||
- `schemas/envelope.capnp`: `Envelope` + `MsgType` (Ping/Pong only needed at this stage)
|
- `schemas/envelope.capnp`: `Envelope` + `MsgType` (Ping/Pong only needed at this stage)
|
||||||
- `noiseml-proto`: `build.rs` with `capnpc`, generated type re-exports, canonical helper
|
- `quicnprotochat-proto`: `build.rs` with `capnpc`, generated type re-exports, canonical helper
|
||||||
- `noiseml-core`: static X25519 keypair generation, Noise_XX initiator + responder, length-prefixed Cap'n Proto frame codec
|
- `quicnprotochat-core`: static X25519 keypair generation, Noise_XX initiator + responder, length-prefixed Cap'n Proto frame codec
|
||||||
- `noiseml-server`: TCP listener, Noise handshake, Ping→Pong handler, one tokio task per connection
|
- `quicnprotochat-server`: TCP listener, Noise handshake, Ping→Pong handler, one tokio task per connection
|
||||||
- `noiseml-client`: connects, Noise handshake, sends Ping, receives Pong, exits 0
|
- `quicnprotochat-client`: connects, Noise handshake, sends Ping, receives Pong, exits 0
|
||||||
- Integration test: server and client in same test binary using `tokio::spawn`
|
- Integration test: server and client in same test binary using `tokio::spawn`
|
||||||
- `docker-compose.yml` running the server
|
- `docker-compose.yml` running the server
|
||||||
|
|
||||||
@@ -201,10 +201,10 @@ Deliverables:
|
|||||||
|
|
||||||
Deliverables:
|
Deliverables:
|
||||||
- `schemas/auth.capnp`: `AuthenticationService` interface
|
- `schemas/auth.capnp`: `AuthenticationService` interface
|
||||||
- `noiseml-proto`: generated RPC stubs + client/server bootstrap helpers
|
- `quicnprotochat-proto`: generated RPC stubs + client/server bootstrap helpers
|
||||||
- `noiseml-core`: MLS KeyPackage generation (openmls)
|
- `quicnprotochat-core`: MLS KeyPackage generation (openmls)
|
||||||
- `noiseml-server`: AS RPC server implementation with DashMap store
|
- `quicnprotochat-server`: AS RPC server implementation with DashMap store
|
||||||
- `noiseml-client`: `register` and `fetch-key` CLI subcommands
|
- `quicnprotochat-client`: `register` and `fetch-key` CLI subcommands
|
||||||
- Test: Alice uploads KeyPackage, Bob fetches it, fingerprints match
|
- Test: Alice uploads KeyPackage, Bob fetches it, fingerprints match
|
||||||
|
|
||||||
### M3 — MLS Group Create + Welcome
|
### M3 — MLS Group Create + Welcome
|
||||||
@@ -212,25 +212,25 @@ Deliverables:
|
|||||||
|
|
||||||
Deliverables:
|
Deliverables:
|
||||||
- `schemas/delivery.capnp`: `DeliveryService` + `MessageStream` interfaces
|
- `schemas/delivery.capnp`: `DeliveryService` + `MessageStream` interfaces
|
||||||
- `noiseml-core`: group create, add member, process Welcome
|
- `quicnprotochat-core`: group create, add member, process Welcome
|
||||||
- `noiseml-server`: DS RPC server, Welcome routing by identity
|
- `quicnprotochat-server`: DS RPC server, Welcome routing by identity
|
||||||
- `noiseml-client`: `create-group` and `join` CLI subcommands
|
- `quicnprotochat-client`: `create-group` and `join` CLI subcommands
|
||||||
- Test: two clients reach identical epoch 1 group state, verified by comparing group context hashes
|
- Test: two clients reach identical epoch 1 group state, verified by comparing group context hashes
|
||||||
|
|
||||||
### M4 — Encrypted Group Messaging
|
### M4 — Encrypted Group Messaging
|
||||||
**Goal:** Alice and Bob exchange MLS Application messages through the DS.
|
**Goal:** Alice and Bob exchange MLS Application messages through the DS.
|
||||||
|
|
||||||
Deliverables:
|
Deliverables:
|
||||||
- `noiseml-core`: send/receive application message, epoch rotation on Commit
|
- `quicnprotochat-core`: send/receive application message, epoch rotation on Commit
|
||||||
- `noiseml-server`: DS fan-out via `MessageStream` capability stream, per-group ordered log (in-memory)
|
- `quicnprotochat-server`: DS fan-out via `MessageStream` capability stream, per-group ordered log (in-memory)
|
||||||
- `noiseml-client`: `send` subcommand, live receive loop via `MessageStream.next()`
|
- `quicnprotochat-client`: `send` subcommand, live receive loop via `MessageStream.next()`
|
||||||
- Test: round-trip message integrity, forward secrecy verified by confirming distinct key material across epochs
|
- Test: round-trip message integrity, forward secrecy verified by confirming distinct key material across epochs
|
||||||
|
|
||||||
### M5 — Hybrid PQ Ciphersuite
|
### M5 — Hybrid PQ Ciphersuite
|
||||||
**Goal:** Replace MLS crypto backend with X25519 + ML-KEM-768 hybrid.
|
**Goal:** Replace MLS crypto backend with X25519 + ML-KEM-768 hybrid.
|
||||||
|
|
||||||
Deliverables:
|
Deliverables:
|
||||||
- `noiseml-core`: custom `OpenMlsCryptoProvider` with hybrid KEM
|
- `quicnprotochat-core`: custom `OpenMlsCryptoProvider` with hybrid KEM
|
||||||
- All M3/M4 tests pass unchanged with new ciphersuite
|
- All M3/M4 tests pass unchanged with new ciphersuite
|
||||||
- Criterion benchmarks: key generation, encap/decap, group-add latency (10/100/1000 members)
|
- Criterion benchmarks: key generation, encap/decap, group-add latency (10/100/1000 members)
|
||||||
|
|
||||||
@@ -238,7 +238,7 @@ Deliverables:
|
|||||||
**Goal:** Server survives restart. Full containerised deployment.
|
**Goal:** Server survives restart. Full containerised deployment.
|
||||||
|
|
||||||
Deliverables:
|
Deliverables:
|
||||||
- `noiseml-server`: SQLite via `sqlx` for AS key store and DS message log, `migrations/` directory
|
- `quicnprotochat-server`: SQLite via `sqlx` for AS key store and DS message log, `migrations/` directory
|
||||||
- `docker/Dockerfile`: multi-stage build (rust:bookworm builder → debian:bookworm-slim runtime)
|
- `docker/Dockerfile`: multi-stage build (rust:bookworm builder → debian:bookworm-slim runtime)
|
||||||
- `docker-compose.yml`: server + SQLite volume, healthcheck
|
- `docker-compose.yml`: server + SQLite volume, healthcheck
|
||||||
- Client reconnect with session resume (re-handshake + rejoin group epoch from DS log)
|
- Client reconnect with session resume (re-handshake + rejoin group epoch from DS log)
|
||||||
@@ -266,7 +266,7 @@ capnp = "0.19"
|
|||||||
capnp-rpc = "0.19"
|
capnp-rpc = "0.19"
|
||||||
|
|
||||||
# Build-time only
|
# Build-time only
|
||||||
capnpc = "0.19" # build-dependency in noiseml-proto
|
capnpc = "0.19" # build-dependency in quicnprotochat-proto
|
||||||
|
|
||||||
# Async / networking
|
# Async / networking
|
||||||
tokio = { version = "1", features = ["full"] }
|
tokio = { version = "1", features = ["full"] }
|
||||||
@@ -310,7 +310,7 @@ The MLS content layer is PQ-protected from M5. The Noise transport (X25519) rema
|
|||||||
|
|
||||||
## How to Use This Prompt
|
## How to Use This Prompt
|
||||||
|
|
||||||
Paste this document at the start of any session working on noiseml. Then state which milestone you are working on and what specific task you need. The assistant will:
|
Paste this document at the start of any session working on quicnprotochat. Then state which milestone you are working on and what specific task you need. The assistant will:
|
||||||
|
|
||||||
1. Confirm the current milestone and task.
|
1. Confirm the current milestone and task.
|
||||||
2. State any design decisions being made (ADR format if significant).
|
2. State any design decisions being made (ADR format if significant).
|
||||||
@@ -325,5 +325,5 @@ When asking for code, always specify:
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
*noiseml — MLS + Post-Quantum + Noise/TCP + Cap'n Proto messenger in Rust*
|
*quicnprotochat — MLS + Post-Quantum + Noise/TCP + Cap'n Proto messenger in Rust*
|
||||||
*Architecture version: 1.1 | Last updated: 2026-02-19*
|
*Architecture version: 1.1 | Last updated: 2026-02-19*
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# envelope.capnp — top-level wire message for all noiseml traffic.
|
# envelope.capnp — top-level wire message for all quicnprotochat traffic.
|
||||||
#
|
#
|
||||||
# Every frame exchanged over the Noise channel is serialised as an Envelope.
|
# Every frame exchanged over the Noise channel is serialised as an Envelope.
|
||||||
# The Delivery Service routes by (groupId, msgType) without inspecting payload.
|
# The Delivery Service routes by (groupId, msgType) without inspecting payload.
|
||||||
|
|||||||
29
schemas/node.capnp
Normal file
29
schemas/node.capnp
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# node.capnp — Unified quicnprotochat node RPC interface.
#
# Combines Authentication and Delivery operations into a single service.
#
# ID generated with: capnp id
@0xd5ca5648a9cc1c28;

interface NodeService {
  # Upload a single-use KeyPackage for later retrieval by peers.
  # identityKey : Ed25519 public key bytes (32 bytes)
  # package     : TLS-encoded openmls KeyPackage
  # Returns a fingerprint of the stored package.
  uploadKeyPackage @0 (identityKey :Data, package :Data) -> (fingerprint :Data);

  # Fetch and atomically remove one KeyPackage for a given identity key.
  # Returns empty Data if none are stored.
  fetchKeyPackage @1 (identityKey :Data) -> (package :Data);

  # Enqueue an opaque payload for delivery to a recipient.
  # The server stores and routes the payload without inspecting its contents.
  enqueue @2 (recipientKey :Data, payload :Data) -> ();

  # Fetch and drain all queued payloads for the recipient.
  fetch @3 (recipientKey :Data) -> (payloads :List(Data));

  # Long-poll: wait up to timeoutMs for new payloads, then drain the queue.
  # May return an empty list if the timeout elapses with nothing queued.
  fetchWait @4 (recipientKey :Data, timeoutMs :UInt64) -> (payloads :List(Data));

  # Health probe for readiness/liveness.
  health @5 () -> (status :Text);
}
|
||||||
Reference in New Issue
Block a user