1 Commits

Author SHA1 Message Date
41c57a1181 Cursor: Apply local changes for cloud agent 2026-02-22 22:29:52 +01:00
489 changed files with 17365 additions and 94694 deletions

1
.cursor/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
plans/

View File

@@ -1,20 +0,0 @@
# quicprochat Production Environment Variables
# Copy this file to .env and fill in the values.
# Server auth token (required, >= 16 characters)
QPC_AUTH_TOKEN=
# SQLCipher database encryption key (required for store_backend=sql)
QPC_DB_KEY=
# Ports (defaults shown)
QPC_LISTEN_PORT=7000
QPC_WS_PORT=9000
# Optional features
QPC_SEALED_SENDER=false
QPC_REDACT_LOGS=true
QPC_WS_LISTEN=
# Grafana admin password (required — must be strong, no default)
GRAFANA_ADMIN_PASSWORD=

View File

@@ -1,134 +0,0 @@
name: Claude Code Assistant
on:
issues:
types: [opened, labeled]
issue_comment:
types: [created]
concurrency:
group: claude-${{ github.event.issue.number }}
cancel-in-progress: true
jobs:
claude-code:
if: >-
(github.event_name == 'issues' &&
contains(toJSON(github.event.issue.labels), 'claude')) ||
(github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '@claude') &&
github.event.comment.user.login != 'admin')
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Run Claude on Issue
env:
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
GIT_TOKEN: ${{ secrets.GIT_TOKEN }}
run: |
set +e
# Configure git
git config user.name "Claude Bot"
git config user.email "claude@localhost"
git remote set-url origin "http://admin:${GIT_TOKEN}@localhost:3000/${{ github.repository }}.git"
ISSUE_NUMBER="${{ github.event.issue.number }}"
ISSUE_TITLE="${{ github.event.issue.title }}"
REPO="${{ github.repository }}"
LABELS_JSON='${{ toJSON(github.event.issue.labels) }}'
# Determine model + cost limits from issue labels
# Default: haiku (cheap). Add claude:sonnet or claude:opus for harder tasks.
CLAUDE_MODEL="haiku"
MAX_TURNS=15
MAX_BUDGET="0.50"
EFFORT="low"
if echo "$LABELS_JSON" | grep -q '"claude:opus"'; then
CLAUDE_MODEL="claude-opus-4-6"
MAX_TURNS=40
MAX_BUDGET="5.00"
EFFORT="high"
elif echo "$LABELS_JSON" | grep -q '"claude:sonnet"'; then
CLAUDE_MODEL="claude-sonnet-4-6"
MAX_TURNS=25
MAX_BUDGET="2.00"
EFFORT="medium"
fi
ISSUE_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}" \
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
COMMENT_BODY=""
if [ "${{ github.event_name }}" = "issue_comment" ]; then
COMMENT_ID="${{ github.event.comment.id }}"
COMMENT_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/comments/${COMMENT_ID}" \
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
fi
BRANCH="claude/issue-${ISSUE_NUMBER}"
git checkout -b "${BRANCH}"
# Run Claude Code with cost controls
claude -p "You are working on the repository ${REPO} (Gitea instance at http://localhost:3000).
A Gitea issue needs your attention:
Issue #${ISSUE_NUMBER}: ${ISSUE_TITLE}
Description: ${ISSUE_BODY}
Additional context: ${COMMENT_BODY}
IMPORTANT RULES:
- Do NOT retry failed commands more than once. If something fails twice, stop and report the error.
- Do NOT loop on failing tests. Fix the obvious issue or report it. Never run the same failing command 3+ times.
- If you cannot complete the task, push what you have, create the PR as draft, and explain what is blocked.
- Be efficient: read only files you need, make targeted edits, avoid unnecessary exploration.
Steps:
1. Read and understand the relevant parts of the codebase
2. Implement the requested changes
3. Commit your changes with a descriptive message
4. Push branch ${BRANCH} to origin
5. Create a pull request targeting main that references issue #${ISSUE_NUMBER}
6. Post a comment on issue #${ISSUE_NUMBER} summarizing what you did
Git is configured. You are on branch ${BRANCH}. Work in the current directory.
Use git commands to push, and curl to the Gitea API for PR creation and comments.
Gitea API token is available as env var GIT_TOKEN." \
--allowedTools "Bash,Read,Edit,Write,Glob,Grep" \
--model "${CLAUDE_MODEL}" \
--max-turns "${MAX_TURNS}" \
--max-budget-usd "${MAX_BUDGET}" \
--effort "${EFFORT}" \
--permission-mode bypassPermissions \
--output-format json 2>&1 > /tmp/claude-result.json
CLAUDE_EXIT=$?
# Extract cost from JSON output
COST=$(python3 -c "
import json
with open('/tmp/claude-result.json') as f:
data = json.load(f)
cost = data.get('total_cost_usd', 0)
print(f'\${cost:.4f}')
" 2>/dev/null || echo "unknown")
# Amend the last commit to include cost and model
if git log --oneline main..HEAD 2>/dev/null | head -1 | grep -q .; then
LAST_MSG=$(git log -1 --format=%B)
git commit --amend -m "${LAST_MSG}
Claude model: ${CLAUDE_MODEL} | API cost: ${COST}" --no-verify
git push origin "${BRANCH}" --force
fi
# Post cost as comment
curl -s -X POST "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}/comments" \
-H "Authorization: token ${GIT_TOKEN}" \
-H "Content-Type: application/json" \
-d "{\"body\": \"Done (model: **${CLAUDE_MODEL}**, effort: ${EFFORT}, budget cap: \$${MAX_BUDGET}). API cost: **${COST}**\"}" > /dev/null
exit ${CLAUDE_EXIT}

40
.github/CODEOWNERS vendored
View File

@@ -1,37 +1,15 @@
# Code owners for quicprochat. PRs require review from owners.
# Code owners for quicnprotochat. PRs require review from owners.
# See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
# Replace 'maintainers' with your GitHub user/team handle.
# Default owners for everything in the repo
* @maintainers
# Security-critical: crypto primitives, MLS, hybrid KEM
/crates/quicprochat-core/ @maintainers
# Wire format: protobuf definitions, Cap'n Proto schemas
/crates/quicprochat-proto/ @maintainers
/proto/ @maintainers
# Auth and server-side domain logic
/crates/quicprochat-server/ @maintainers
# Client SDK: auth, conversation store, messaging pipeline
/crates/quicprochat-sdk/ @maintainers
# CLI/TUI client
/crates/quicprochat-client/ @maintainers
# RPC framework: framing, middleware, QUIC transport
/crates/quicprochat-rpc/ @maintainers
# Key transparency
/crates/quicprochat-kt/ @maintainers
# Plugin ABI (no_std C-ABI boundary)
/crates/quicprochat-plugin-api/ @maintainers
# P2P transport
/crates/quicprochat-p2p/ @maintainers
# CI and infrastructure
/.github/ @maintainers
# Crate-specific (uncomment and add handles when you have designated owners)
# /crates/quicnprotochat-core/ @owner1
# /crates/quicnprotochat-proto/ @owner1
# /crates/quicnprotochat-server/ @owner1
# /crates/quicnprotochat-client/ @owner1
# /crates/quicnprotochat-p2p/ @owner1
# /schemas/ @owner1
# /docs/ @owner1

View File

@@ -1,54 +0,0 @@
name: Benchmarks
on:
push:
branches: [main, v2]
pull_request:
branches: [main, v2]
workflow_dispatch:
env:
CARGO_TERM_COLOR: always
jobs:
bench:
name: Run Criterion benchmarks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-action@stable
- name: Install capnp
run: sudo apt-get update && sudo apt-get install -y capnproto
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-bench-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-bench-
- name: Run benchmarks
run: cargo bench --package quicprochat-core -- --output-format=bencher 2>&1 | tee bench-output.txt
- name: Upload HTML reports
uses: actions/upload-artifact@v4
if: always()
with:
name: criterion-reports
path: target/criterion/
retention-days: 30
- name: Upload raw output
uses: actions/upload-artifact@v4
if: always()
with:
name: bench-output
path: bench-output.txt
retention-days: 30

View File

@@ -2,9 +2,9 @@ name: CI
on:
push:
branches: [main, master, v2]
branches: [main, master]
pull_request:
branches: [main, master, v2]
branches: [main, master]
env:
CARGO_TERM_COLOR: always
@@ -45,7 +45,7 @@ jobs:
run: cargo test --workspace
- name: Clippy
run: cargo clippy --workspace --all-targets -- -D warnings
run: cargo clippy --workspace --all-targets --
deny:
name: cargo-deny
@@ -72,103 +72,3 @@ jobs:
run: |
cargo install cargo-audit --locked
cargo audit
coverage:
name: Coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-action@stable
- name: Install capnp
run: sudo apt-get update && sudo apt-get install -y capnproto
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-coverage-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-coverage-
- name: Install cargo-tarpaulin
run: cargo install cargo-tarpaulin
- name: Run coverage
run: |
cargo tarpaulin --workspace \
--exclude quicprochat-p2p \
--out xml \
--output-dir coverage/ \
-- --test-threads 1
- name: Upload coverage report
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: coverage/cobertura.xml
msrv:
name: MSRV Check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install MSRV Rust (1.75)
uses: dtolnay/rust-action@1.75
with:
components: clippy
- name: Install capnp
run: sudo apt-get update && sudo apt-get install -y capnproto
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-msrv-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-msrv-
- name: Check MSRV
run: cargo check --workspace
macos:
name: macOS Build Check
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-action@stable
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
- name: Check build
run: cargo check --workspace
docker:
name: Docker Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build Docker image
run: docker build -f docker/Dockerfile .

View File

@@ -1,65 +0,0 @@
name: OpenWrt Cross-Compile
on:
push:
tags:
- 'v*'
workflow_dispatch:
env:
CARGO_TERM_COLOR: always
MAX_SIZE_MB: 5
jobs:
cross-compile:
name: Cross-compile (${{ matrix.target }})
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
target:
- x86_64-unknown-linux-musl
- armv7-unknown-linux-musleabihf
- aarch64-unknown-linux-musl
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Install cargo-zigbuild and zig
run: |
pip3 install ziglang
cargo install cargo-zigbuild
- name: Add target
run: rustup target add ${{ matrix.target }}
- name: Build (size-optimised)
env:
CARGO_PROFILE_RELEASE_OPT_LEVEL: s
CARGO_PROFILE_RELEASE_LTO: 'true'
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: '1'
CARGO_PROFILE_RELEASE_STRIP: symbols
run: |
cargo zigbuild --release --target ${{ matrix.target }} --bin qpc-server
- name: Check binary size
run: |
BINARY="target/${{ matrix.target }}/release/qpc-server"
SIZE=$(stat -c%s "$BINARY")
SIZE_MB=$(echo "scale=2; $SIZE / 1048576" | bc)
echo "Binary size: ${SIZE_MB} MB"
MAX_BYTES=$(( ${{ env.MAX_SIZE_MB }} * 1048576 ))
if [ "$SIZE" -gt "$MAX_BYTES" ]; then
echo "::error::Binary exceeds ${MAX_SIZE_MB} MB limit (${SIZE_MB} MB)"
exit 1
fi
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: qpc-server-${{ matrix.target }}
path: target/${{ matrix.target }}/release/qpc-server
retention-days: 30

21
.gitignore vendored
View File

@@ -1,6 +1,4 @@
/target
**/target/
node_modules/
**/*.rs.bk
.vscode/
gitea-mcp.json
@@ -9,21 +7,4 @@ docs/book/
# Server/client runtime data — do not commit certs, keys, or DBs
data/
*.der
*.pem
*.db
*.bin
*.ks
*.session
*.convdb
*.convdb-shm
*.convdb-wal
*.pending.ks
qpc-server.toml
# Internal planning docs (not for public distribution)
docs/internal/
# AI development workflow files
CLAUDE.md
master-prompt.md
scripts/ai_team.py
quicnprotochat-server.toml

View File

@@ -1,40 +0,0 @@
# Contributing to quicprochat
## Prerequisites
- **Rust toolchain** (stable) via [rustup](https://rustup.rs/)
- **protoc** is vendored via the `protobuf-src` crate -- no system installation needed
- Git with GPG signing configured
## Building and Testing
```sh
cargo build --workspace
cargo test --workspace
```
A `justfile` is also available for common tasks (`just build`, `just test`, `just proto`, etc.).
## Code Standards
### Commits
- **Conventional commits**: `feat:`, `fix:`, `docs:`, `chore:`, `test:`, `refactor:`
- Commits must be **GPG-signed**
- Commit messages describe *why*, not just *what*
- No `Co-authored-by` trailers
### Rust
- No `.unwrap()` on crypto or I/O operations outside of tests
- Secrets must be zeroized on drop and never logged
- No stubs, `todo!()`, or `unimplemented!()` in production code
- Prefer clarity over cleverness; avoid unnecessary abstractions
## Security Vulnerabilities
Do not open public issues for security bugs. See [SECURITY.md](SECURITY.md) for responsible disclosure instructions.
## Licensing
The server crate (`quicprochat-server`) is licensed under **AGPL-3.0**. All other crates are dual-licensed under **Apache-2.0 / MIT**. By submitting a contribution, you agree to license your work under the applicable license(s).

4532
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,46 +1,31 @@
[workspace]
resolver = "2"
members = [
"crates/quicprochat-core",
"crates/quicprochat-proto",
"crates/quicprochat-plugin-api",
"crates/quicprochat-kt",
"crates/quicprochat-rpc",
"crates/quicprochat-sdk",
"crates/quicprochat-server",
"crates/quicprochat-client",
# P2P crate uses iroh (~90 extra deps). Only compiled when the `mesh`
# feature is enabled on quicprochat-client.
"crates/quicprochat-p2p",
"crates/quicnprotochat-core",
"crates/quicnprotochat-proto",
"crates/quicnprotochat-server",
"crates/quicnprotochat-client",
"crates/quicnprotochat-p2p",
"crates/quicnprotochat-gui",
]
[workspace.package]
edition = "2021"
rust-version = "1.75"
repository = "https://github.com/quicprochat/quicprochat"
description = "End-to-end encrypted group messaging over QUIC"
keywords = ["encryption", "messaging", "quic", "mls", "post-quantum"]
categories = ["cryptography", "network-programming"]
# Shared dependency versions — bump here to affect the whole workspace.
[workspace.dependencies]
# ── Crypto ────────────────────────────────────────────────────────────────────
openmls = { version = "0.8" }
openmls_rust_crypto = { version = "0.5" }
openmls_traits = { version = "0.5" }
openmls_memory_storage = { version = "0.5" }
# tls_codec must match the version used by openmls 0.8 (which uses 0.4) to avoid
openmls = { version = "0.5", default-features = false, features = ["crypto-subtle"] }
openmls_rust_crypto = { version = "0.2" }
openmls_traits = { version = "0.2" }
# tls_codec must match the version used by openmls 0.5 (which uses 0.3) to avoid
# duplicate Serialize trait versions in the dependency graph.
tls_codec = { version = "0.4", features = ["derive"] }
tls_codec = { version = "0.3", features = ["derive"] }
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
# All three parameter sets (512/768/1024) are compiled in by default — no feature flag needed.
ml-kem = { version = "0.2" }
x25519-dalek = { version = "2", features = ["static_secrets"] }
ed25519-dalek = { version = "2", features = ["rand_core"] }
sha2 = { version = "0.10" }
hmac = { version = "0.12" }
hkdf = { version = "0.12" }
ciborium = { version = "0.2" }
chacha20poly1305 = { version = "0.10" }
opaque-ke = { version = "4", features = ["ristretto255", "argon2"] }
zeroize = { version = "1", features = ["derive", "serde"] }
@@ -51,17 +36,12 @@ serde = { version = "1", features = ["derive"] }
serde_json = { version = "1" }
bincode = { version = "1" }
# ── Serialisation (v2: protobuf) ─────────────────────────────────────────────
prost = { version = "0.13" }
prost-types = { version = "0.13" }
prost-build = { version = "0.13" }
# ── Serialisation (v1 legacy — capnp, used by existing server/client) ────────
# ── Serialisation + RPC ───────────────────────────────────────────────────────
capnp = { version = "0.19" }
capnp-rpc = { version = "0.19" }
# ── Async / networking ────────────────────────────────────────────────────────
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "signal", "io-util", "io-std"] }
tokio = { version = "1", features = ["full"] }
tokio-util = { version = "0.7", features = ["codec", "compat"] }
futures = { version = "0.3" }
quinn = { version = "0.11" }
@@ -69,18 +49,12 @@ quinn-proto = { version = "0.11" }
rustls = { version = "0.23", default-features = false, features = ["std", "ring"] }
rcgen = { version = "0.13" }
# ── Middleware ────────────────────────────────────────────────────────────────
tower = { version = "0.5", features = ["util", "limit", "timeout"] }
# ── Database ─────────────────────────────────────────────────────────────
rusqlite = { version = "0.31", features = ["bundled-sqlcipher"] }
# ── Encoding ─────────────────────────────────────────────────────────────────
hex = { version = "0.4" }
bytes = { version = "1" }
# ── Server utilities ──────────────────────────────────────────────────────────
dashmap = { version = "5" }
governor = { version = "0.6" }
tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
@@ -88,23 +62,14 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
anyhow = { version = "1" }
thiserror = { version = "1" }
# ── Config / CLI ──────────────────────────────────────────────────────────────
toml = { version = "0.8" }
# ── CLI ───────────────────────────────────────────────────────────────────────
clap = { version = "4", features = ["derive", "env"] }
rustyline = { version = "14" }
# ── Certificate parsing ──────────────────────────────────────────────────────
x509-parser = { version = "0.16", default-features = false }
clap_complete = { version = "4" }
indicatif = { version = "0.17" }
# ── Build-time ────────────────────────────────────────────────────────────────
capnpc = { version = "0.19" }
[workspace.lints.rust]
unsafe_code = "warn"
[workspace.lints.clippy]
unwrap_used = "deny"
[profile.release]
opt-level = 3
lto = "thin"

30
LICENSE
View File

@@ -1,30 +0,0 @@
quicproquo — Split Licensing
============================
This project uses a split license model similar to Signal:
Server (quicproquo-server)
--------------------------
Licensed under the GNU Affero General Public License v3.0 only.
See LICENSE-AGPL-3.0 for the full text.
SPDX-License-Identifier: AGPL-3.0-only
Libraries and SDKs (all other crates)
--------------------------------------
Licensed under either of
* Apache License, Version 2.0 (LICENSE-APACHE)
* MIT License (LICENSE-MIT)
at your option.
SPDX-License-Identifier: Apache-2.0 OR MIT
Contribution
------------
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in this project by you, as defined in the
Apache-2.0 license, shall be dual licensed as above (for library crates)
or AGPL-3.0-only (for the server crate), without any additional terms or
conditions.

View File

@@ -1,661 +0,0 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

View File

@@ -1,199 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to the Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by the Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding any notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. Please also get an
OpenPGP-compatible signature on any file you distribute.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) quicproquo contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

354
README.md
View File

@@ -1,274 +1,124 @@
<p align="center">
<img src="assets/logo.png" alt="quicprochat" width="160">
</p>
# quicnprotochat
<h1 align="center">quicprochat</h1>
> End-to-end encrypted group messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust.
<p align="center">
<strong>End-to-end encrypted group messaging over QUIC, powered by MLS and post-quantum cryptography.</strong>
</p>
<p align="center">
<a href="docs/src/design-rationale/messenger-comparison.md">Why quicprochat?</a> &middot;
<a href="ROADMAP.md">Roadmap</a> &middot;
<a href="docs/sdk/index.md">SDK Docs</a> &middot;
<a href="docs/operations/monitoring.md">Operations</a> &middot;
<a href="#quick-start">Quick Start</a>
</p>
---
quicprochat is a production-grade messenger where the server **never sees plaintext**. All traffic flows over QUIC/TLS 1.3, group keys are negotiated with the [MLS protocol](https://www.rfc-editor.org/rfc/rfc9420) (RFC 9420), and a hybrid X25519 + ML-KEM-768 KEM provides post-quantum confidentiality. Written in Rust. 45,000 lines of code. 301 tests.
Every byte on the wire is protected by a QUIC transport secured with TLS 1.3
(`quinn` + `rustls`). The inner **MLS** layer provides post-compromise security
and ratcheted group key agreement across any number of participants. Messages
are framed with **Cap'n Proto**, keeping serialisation zero-copy and
schema-versioned.
```
┌─────────────────────────────────────────────────
│ Application / MLS ciphertext group key ratchet (RFC 9420)
├─────────────────────────────────────────────────
Protobuf RPC / Cap'n Proto (legacy) typed, schema-versioned framing
├─────────────────────────────────────────────────
QUIC + TLS 1.3 (quinn/rustls) mutual auth + transport secrecy
└─────────────────────────────────────────────────
┌─────────────────────────────────────────────┐
Application / MLS ciphertext │ <- group key ratchet (RFC 9420)
├─────────────────────────────────────────────┤
Cap'n Proto RPC <- typed, schema-versioned framing
├─────────────────────────────────────────────┤
│ QUIC + TLS 1.3 (quinn/rustls) │ <- mutual auth + transport secrecy
└─────────────────────────────────────────────┘
```
## Highlights
| | |
|---|---|
| **Zero-knowledge server** | Routes opaque MLS ciphertexts by recipient key — never decrypts |
| **Post-quantum ready** | Hybrid X25519 + ML-KEM-768 KEM on both MLS and Noise layers |
| **Password auth** | OPAQUE — password never leaves the client, not even as a hash |
| **Forward secrecy** | MLS epoch ratchet: compromise today can't decrypt yesterday |
| **Multi-device** | Per-device keys, delivery fan-out, up to 5 devices per account |
| **Federation** | Server-to-server relay over QUIC with mTLS |
| **Offline-first** | Client-side outbox with idempotent retry and gap detection |
| **Sealed sender** | Optional anonymous enqueue — server can't see who sent a message |
| **7 SDKs** | Rust, Go, Python, TypeScript, Swift, Kotlin/Java, Ruby |
## Quick Start
```bash
# Build (no system dependencies — protoc is vendored)
cargo build --workspace
# Run tests
cargo test --workspace
# Start the server (auto-generates self-signed TLS cert)
cargo run --bin qpc-server -- --allow-insecure-auth
# Interactive REPL (registers + logs in automatically)
cargo run --bin qpc -- repl --username alice --password secret
```
**Two-terminal demo:**
```bash
# Terminal 1 # Terminal 2
qpc repl -u alice -p secretA qpc repl -u bob -p secretB
# Alice: # Bob sees:
/dm bob [alice] Hello, Bob!
Hello, Bob!
```
## Architecture
```
quicprochat/
├── crates/
│ ├── quicprochat-core # MLS, hybrid KEM, PQ Noise, OPAQUE, recovery, padding
│ ├── quicprochat-proto # Protobuf (prost) + Cap'n Proto generated types
│ ├── quicprochat-rpc # QUIC RPC framework (framing, dispatch, middleware)
│ ├── quicprochat-sdk # Client SDK (QpqClient, conversation store, outbox)
│ ├── quicprochat-server # QUIC server, 33 RPC methods, domain services, plugins
│ ├── quicprochat-client # CLI + REPL + TUI (Ratatui)
│ ├── quicprochat-kt # Key transparency (Merkle-log, revocation)
│ ├── quicprochat-p2p # iroh P2P, mesh identity, store-and-forward
│ ├── quicprochat-ffi # C FFI (libquicprochat_ffi.so)
│ └── quicprochat-plugin-api # Dynamic plugin hooks (C ABI)
├── proto/qpc/v1/ # 15 .proto schema files
├── sdks/ # Go, Python, TypeScript, Swift, Kotlin, Java, Ruby
├── docs/ # mdBook docs, SDK guides, operational runbooks
└── packaging/ # OpenWrt, Docker, cross-compilation
```
### Security Properties
| Property | Mechanism |
|---|---|
| Transport confidentiality | TLS 1.3 over QUIC (rustls) |
| Transport authentication | TLS 1.3 server cert (self-signed by default) |
| Group key agreement | MLS `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` |
| Post-quantum confidentiality | X25519 + ML-KEM-768 hybrid KEM (MLS + Noise layers) |
| Forward secrecy | MLS epoch ratchet + per-epoch key schedule |
| Post-compromise security | MLS Update proposals rotate leaf material |
| Identity | Ed25519 long-term keys (MLS credential + leaf signature) |
| Password authentication | OPAQUE-ke (augmented PAKE, no password on wire) |
| Local storage | SQLCipher + Argon2id + ChaCha20-Poly1305 |
| Key transparency | Append-only Merkle log with inclusion proofs + revocation |
| Traffic analysis resistance | Uniform padding + configurable decoy traffic |
| Post-compromise security | MLS epoch ratchet |
| Identity | Ed25519 (MLS credential + leaf node signature) |
| Message framing | Cap'n Proto (unpacked wire format) |
## Features
### Messaging
- **1:1 DMs** and **N-party groups** with full MLS lifecycle (add, remove, key rotation)
- **Rich messaging** — reactions, read receipts, typing indicators, edit, delete
- **File transfer** — chunked upload/download, SHA-256 content addressing, 50 MB limit
- **Disappearing messages** — per-conversation TTL with server-side GC
- **Offline queue** — messages queued locally when disconnected, flushed on reconnect
- **Delivery proofs** — server-signed Ed25519 receipts for cryptographic send confirmation
- **Transcript export** — encrypted, tamper-evident archives with Merkle chain verification
### Infrastructure
- **Multi-device** — per-device keys and delivery fan-out (up to 5 devices)
- **Account recovery** — 8 recovery codes, encrypted bundles, zero-knowledge server
- **Federation** — server-to-server relay with mTLS and cross-server user resolution
- **Abuse prevention** — user blocking, message reporting, ban enforcement, admin tools
- **Graceful shutdown** — SIGTERM drain with configurable timeout, health endpoint awareness
- **Rate limiting** — sliding window algorithm, trait-based for Redis swap
- **Observability** — request correlation IDs, per-endpoint latency histograms, structured audit log
- **Dynamic plugins** — load `.so`/`.dylib` at runtime via `--plugin-dir` (6 hook points)
- **Mesh networking** — iroh P2P, mDNS discovery, store-and-forward, broadcast channels
### Mesh & P2P Features
The `quicprochat-p2p` crate provides a full **serverless mesh networking stack**:
| Feature | Module | Description |
|---------|--------|-------------|
| **P2P Transport** | `P2pNode` | Direct QUIC connections via iroh with NAT traversal |
| **Mesh Identity** | `MeshIdentity` | Ed25519 keypairs with 16-byte truncated addresses |
| **Mesh Envelope** | `MeshEnvelope` | Encrypted, signed, TTL-aware message containers |
| **Store-and-Forward** | `MeshStore` | Queue messages for offline recipients |
| **Multi-Hop Routing** | `MeshRouter` | Distributed routing table, forward through intermediaries |
| **Announce Protocol** | `MeshAnnounce` | Signed peer discovery with capability flags |
| **Broadcast Channels** | `BroadcastManager` | Pub/sub with symmetric key encryption |
| **Transport Abstraction** | `TransportManager` | Iroh, TCP, LoRa — route by address type |
| **LoRa Transport** | `transport_lora` | Duty-cycle aware, fragmentation, SF12 support |
| **MLS-Lite** | `mls_lite` | Lightweight symmetric mode for constrained links |
| **FAPP** | `fapp` + `fapp_router` | Free Appointment Propagation Protocol (see below) |
#### FAPP — Decentralized Appointment Discovery
**Problem:** In Germany, finding a psychotherapist takes 3–6 months due to artificial slot visibility limits.
**Solution:** FAPP lets licensed therapists announce free slots into the mesh. Patients discover and reserve slots anonymously — no central registry.
```rust
// Therapist publishes slots
let announce = SlotAnnounce::new(
&therapist_identity,
vec![Fachrichtung::Verhaltenstherapie],
vec![Modalitaet::Praxis, Modalitaet::Video],
vec![Kostentraeger::GKV],
"80331", // PLZ only, never exact address
slots,
approbation_hash,
sequence,
);
fapp_router.broadcast_announce(announce)?;
// Patient queries anonymously
let query = SlotQuery {
fachrichtung: Some(Fachrichtung::Verhaltenstherapie),
plz_prefix: Some("803".into()),
kostentraeger: Some(Kostentraeger::GKV),
..Default::default()
};
fapp_router.send_query(query)?;
```
**Privacy model:**
- Therapist identity is **public** (bound to Approbation hash)
- Patient queries are **anonymous** (no identifying information)
- Reservations use **E2E encryption** to therapist's key
See [`docs/specs/fapp-protocol.md`](docs/specs/fapp-protocol.md) for the full protocol spec.
### Client SDKs
| Language | Location | Transport | Notes |
|---|---|---|---|
| **Rust** | `crates/quicprochat-sdk` | QUIC (quinn) | Reference implementation |
| **Go** | `sdks/go/` | QUIC (quic-go) | Cap'n Proto RPC, full API |
| **Python** | `sdks/python/` | QUIC (aioquic) + FFI | Async client, PyPI-ready |
| **TypeScript** | `sdks/typescript/` | WebSocket + WASM crypto | 175 KB WASM bundle, browser demo |
| **Swift** | `sdks/swift/` | C FFI wrapper | iOS 15+ / macOS 13+ |
| **Kotlin/Java** | `sdks/kotlin/`, `sdks/java/` | JNI + C FFI | Android + JVM |
| **Ruby** | `sdks/ruby/` | C FFI gem | Block-form auto-disconnect |
### REPL Commands
40+ slash commands including:
```
/dm <user> Start a 1:1 DM
/create-group <name> Create a group
/invite <user> Add member to group
/remove <user> Remove member
/send-file <path> Upload and send a file
/verify <user> Compare safety numbers
/rotate-keys Rotate MLS key material
/disappear <duration> Set message TTL
/export <path> Export encrypted transcript
/devices list|add|rm Manage devices
/block <user> Block a user
/recovery setup Generate recovery codes
/help Full command reference
```
## Deployment
### Docker
```bash
docker build -t quicprochat -f docker/Dockerfile .
docker run -p 7000:7000 -v qpc-data:/data quicprochat
```
### Production (Docker Compose)
```bash
# Includes server + Prometheus + Grafana with pre-configured dashboards
docker compose -f docker-compose.prod.yml up -d
```
### OpenWrt
Cross-compiled static binaries for mesh/embedded deployments:
```bash
./scripts/cross-compile.sh # builds for x86_64, armv7, aarch64 (musl)
```
See [docs/openwrt.md](docs/openwrt.md) for `opkg` packaging and `procd` init scripts.
### Configuration
```bash
# Environment variables (see .env.example for full list)
QPC_LISTEN=0.0.0.0:7000
QPC_AUTH_TOKEN=your-strong-token
QPC_DB_KEY=your-db-encryption-key
QPC_STORE_BACKEND=sql
QPC_METRICS_LISTEN=0.0.0.0:9090
QPC_DRAIN_TIMEOUT=30
QPC_RPC_TIMEOUT=30
```
---
## Documentation
Full documentation is available as an **mdBook** wiki in [`docs/`](docs/):
```bash
mdbook serve docs # http://localhost:3000
# Install mdBook (once)
cargo install mdbook
# Build and serve locally
mdbook serve docs
# Open http://localhost:3000
```
- [SDK Integration Guide](docs/sdk/index.md) — wire format, per-language guides, "build your own SDK"
- [Operational Runbook](docs/operations/backup-restore.md) — backup/restore, key rotation, incident response
- [Scaling Guide](docs/operations/scaling-guide.md) — resource sizing, horizontal scaling, capacity planning
- [Monitoring](docs/operations/monitoring.md) — Prometheus metrics, Grafana dashboards, alert rules
### Highlights
## Security Status
- **[Architecture Overview](docs/src/architecture/overview.md)** — Two-service model, dual-key design, crate layout
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS 1.3, Cap'n Proto, MLS, Hybrid KEM
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — Forward secrecy, post-compromise security, PQ readiness, threat model
- **[Design Rationale](docs/src/design-rationale/overview.md)** — Why MLS over Signal/Matrix, ADRs for all key decisions
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — Annotated Cap'n Proto schemas
- **[Getting Started](docs/src/getting-started/prerequisites.md)** — Build, run, demo walkthrough
- **[Roadmap](docs/src/roadmap/milestones.md)** — Milestones, production readiness, future research
> **This software has not undergone an independent security audit.** While it implements cryptographic best practices (MLS RFC 9420, OPAQUE, zeroization, constant-time comparisons), no third-party firm has reviewed the implementation. Do not rely on it for high-risk communications until an audit is completed. See [SECURITY.md](SECURITY.md) for our vulnerability disclosure policy.
---
## Quick start
```bash
# Prerequisites: Rust 1.77+, capnp CLI
brew install capnp # macOS
# apt-get install capnproto # Debian/Ubuntu
# GUI prerequisites (Linux only) — WebKitGTK + GTK3 for Tauri 2
# sudo apt install -y libwebkit2gtk-4.1-dev libgtk-3-dev libglib2.0-dev libssl-dev libayatana-appindicator3-dev librsvg2-dev patchelf
# Build and test
cargo build --workspace
cargo test --workspace
# Start the server (port 7000 by default)
cargo run -p quicnprotochat-server
# Or via a config file (TOML)
cat > quicnprotochat-server.toml <<'EOF'
listen = "0.0.0.0:7000"
data_dir = "data"
tls_cert = "data/server-cert.der"
tls_key = "data/server-key.der"
auth_token = "devtoken"
store_backend = "file" # or "sql"
db_path = "data/quicnprotochat.db"
db_key = ""
EOF
cargo run -p quicnprotochat-server -- --config quicnprotochat-server.toml
# Run the two-party demo
cargo run -p quicnprotochat-client -- demo-group \
--server 127.0.0.1:7000
# Interactive 1:1 chat (after creating a group and inviting a peer)
# Terminal 1: quicnprotochat chat --peer-key <other_identity_hex>
# Terminal 2: quicnprotochat chat --peer-key <first_identity_hex>
# Type messages and press Enter; incoming messages appear as [peer] <msg>. Ctrl+D to exit.
```
See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) for a step-by-step guide.
---
## Milestones
| # | Name | Status | What it adds |
|---|------|--------|--------------|
| M1 | QUIC/TLS transport | Done | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
| M2 | Authentication Service | Done | Ed25519 identity, KeyPackage generation, AS upload/fetch |
| M3 | Delivery Service + MLS groups | Done | DS relay, `GroupMember` create/join/add/send/recv |
| M4 | Group CLI subcommands | Done | Persistent CLI (`create-group`, `invite`, `join`, `send`, `recv`), OPAQUE login |
| M5 | Multi-party groups | Done | N > 2 members, Commit fan-out, send --all, epoch sync |
| M6 | Persistence | Done | SQLite/SQLCipher, migrations, durable server + client state |
| M7 | Post-quantum | Next | PQ hybrid for MLS/HPKE (X25519 + ML-KEM-768) |
---
## Security notes
This is a **proof-of-concept research project**. It has not been audited.
See the [threat model](docs/src/cryptography/threat-model.md) for a detailed
analysis of what is and isn't protected.
---
## License

View File

@@ -1,891 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="navy sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Full Roadmap (Phases 1-8) - quicproquo</title>
<!-- Custom HTML head -->
<meta name="description" content="End-to-end encrypted group messaging over QUIC + TLS 1.3 + MLS (RFC 9420)">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">
<link rel="icon" href="favicon-de23e50b.svg">
<link rel="shortcut icon" href="favicon-8114d1fc.png">
<link rel="stylesheet" href="css/variables-8adf115d.css">
<link rel="stylesheet" href="css/general-2459343d.css">
<link rel="stylesheet" href="css/chrome-ae938929.css">
<link rel="stylesheet" href="css/print-9e4910d8.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="fonts/fonts-9644e21d.css">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" id="mdbook-highlight-css" href="highlight-493f70e1.css">
<link rel="stylesheet" id="mdbook-tomorrow-night-css" href="tomorrow-night-4c0ae647.css">
<link rel="stylesheet" id="mdbook-ayu-highlight-css" href="ayu-highlight-3fdfc3ac.css">
<!-- Custom theme stylesheets -->
<!-- Provide site root and default themes to javascript -->
<script>
// Globals read by the later inline scripts and by mdBook's bundled JS.
const path_to_root = "";
// Both defaults resolve to "navy": this book pins one theme regardless of OS preference.
const default_light_theme = "navy";
const default_dark_theme = "navy";
// Hashed search-index bundle; presumably fetched on demand by the search widget — confirm against searcher.js.
window.path_to_searchindex_js = "searchindex-1e4ee6e2.js";
</script>
<!-- Start loading toc.js asap -->
<script src="toc-69b0eb95.js"></script>
</head>
<body>
<div id="mdbook-help-container">
<div id="mdbook-help-popup">
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
<div>
<p>Press <kbd>←</kbd> or <kbd>→</kbd> to navigate between chapters</p>
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
<p>Press <kbd>?</kbd> to show this help</p>
<p>Press <kbd>Esc</kbd> to hide this help</p>
</div>
</div>
</div>
<div id="mdbook-body-container">
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script>
// Work around theme/sidebar values historically stored in localStorage wrapped
// in quotes: strip the surrounding quotes in place so later readers see the
// bare value.
//
// Fix: getItem() returns null for an absent key. The original code called
// theme.startsWith() unguarded, so a null theme threw inside the shared
// try-block and skipped the sidebar un-quoting entirely. Guard each value
// independently so one missing key cannot prevent fixing the other.
try {
    let theme = localStorage.getItem('mdbook-theme');
    let sidebar = localStorage.getItem('mdbook-sidebar');
    if (theme !== null && theme.startsWith('"') && theme.endsWith('"')) {
        localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
    }
    if (sidebar !== null && sidebar.startsWith('"') && sidebar.endsWith('"')) {
        localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
    }
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script>
// Apply the stored theme (or the OS-preference default) before any content
// renders, preventing a flash of the wrong theme.
// NOTE: `default_theme`, `theme` and `html` stay top-level bindings — `html`
// is reused by the sidebar script further down the page.
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches
    ? default_dark_theme
    : default_light_theme;
let theme;
try {
    theme = localStorage.getItem('mdbook-theme');
} catch (e) { }
if (theme == null) {
    // == null covers both null (key absent) and undefined (getItem threw).
    theme = default_theme;
}
const html = document.documentElement;
// Replace the hard-coded fallback class from the <html> tag with the
// resolved theme, and flag that JavaScript is available.
html.classList.remove('navy');
html.classList.add(theme);
html.classList.add("js");
</script>
<input type="checkbox" id="mdbook-sidebar-toggle-anchor" class="hidden">
<!-- Hide / unhide sidebar before it is displayed -->
<script>
// Decide sidebar visibility before it is first painted. Wide viewports
// (>= 1080px) restore the persisted preference, defaulting to visible;
// narrow viewports always start hidden.
// NOTE: `sidebar` and `sidebar_toggle` stay top-level bindings — `sidebar`
// is read again by the ARIA-sync script later in the page.
const sidebar_toggle = document.getElementById("mdbook-sidebar-toggle-anchor");
let sidebar = 'hidden';
if (document.body.clientWidth >= 1080) {
    let saved = null;
    try {
        saved = localStorage.getItem('mdbook-sidebar');
    } catch (e) { }
    sidebar = saved || 'visible';
} else {
    sidebar_toggle.checked = false;
}
if (sidebar === 'visible') {
    sidebar_toggle.checked = true;
} else {
    // `html` was bound by the theme script above.
    html.classList.remove('sidebar-visible');
}
</script>
<nav id="mdbook-sidebar" class="sidebar" aria-label="Table of contents">
<!-- populated by js -->
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
<noscript>
<iframe class="sidebar-iframe-outer" src="toc.html"></iframe>
</noscript>
<div id="mdbook-sidebar-resize-handle" class="sidebar-resize-handle">
<div class="sidebar-resize-indicator"></div>
</div>
</nav>
<div id="mdbook-page-wrapper" class="page-wrapper">
<div class="page">
<div id="mdbook-menu-bar-hover-placeholder"></div>
<div id="mdbook-menu-bar" class="menu-bar sticky">
<div class="left-buttons">
<label id="mdbook-sidebar-toggle" class="icon-button" for="mdbook-sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="mdbook-sidebar">
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M0 96C0 78.3 14.3 64 32 64H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32C14.3 128 0 113.7 0 96zM0 256c0-17.7 14.3-32 32-32H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32c-17.7 0-32-14.3-32-32zM448 416c0 17.7-14.3 32-32 32H32c-17.7 0-32-14.3-32-32s14.3-32 32-32H416c17.7 0 32 14.3 32 32z"/></svg></span>
</label>
<button id="mdbook-theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="mdbook-theme-list">
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M371.3 367.1c27.3-3.9 51.9-19.4 67.2-42.9L600.2 74.1c12.6-19.5 9.4-45.3-7.6-61.2S549.7-4.4 531.1 9.6L294.4 187.2c-24 18-38.2 46.1-38.4 76.1L371.3 367.1zm-19.6 25.4l-116-104.4C175.9 290.3 128 339.6 128 400c0 3.9 .2 7.8 .6 11.6c1.8 17.5-10.2 36.4-27.8 36.4H96c-17.7 0-32 14.3-32 32s14.3 32 32 32H240c61.9 0 112-50.1 112-112c0-2.5-.1-5-.2-7.5z"/></svg></span>
</button>
<ul id="mdbook-theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-default_theme">Auto</button></li>
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-light">Light</button></li>
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-ayu">Ayu</button></li>
</ul>
<button id="mdbook-search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="mdbook-searchbar">
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M416 208c0 45.9-14.9 88.3-40 122.7L502.6 457.4c12.5 12.5 12.5 32.8 0 45.3s-32.8 12.5-45.3 0L330.7 376c-34.4 25.2-76.8 40-122.7 40C93.1 416 0 322.9 0 208S93.1 0 208 0S416 93.1 416 208zM208 352c79.5 0 144-64.5 144-144s-64.5-144-144-144S64 128.5 64 208s64.5 144 144 144z"/></svg></span>
</button>
</div>
<h1 class="menu-title">quicproquo</h1>
<div class="right-buttons">
<a href="print.html" title="Print this book" aria-label="Print this book">
<span class=fa-svg id="print-button"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M128 0C92.7 0 64 28.7 64 64v96h64V64H354.7L384 93.3V160h64V93.3c0-17-6.7-33.3-18.7-45.3L400 18.7C388 6.7 371.7 0 354.7 0H128zM384 352v32 64H128V384 368 352H384zm64 32h32c17.7 0 32-14.3 32-32V256c0-35.3-28.7-64-64-64H64c-35.3 0-64 28.7-64 64v96c0 17.7 14.3 32 32 32H64v64c0 35.3 28.7 64 64 64H384c35.3 0 64-28.7 64-64V384zm-16-88c-13.3 0-24-10.7-24-24s10.7-24 24-24s24 10.7 24 24s-10.7 24-24 24z"/></svg></span>
</a>
</div>
</div>
<div id="mdbook-search-wrapper" class="hidden">
<form id="mdbook-searchbar-outer" class="searchbar-outer">
<div class="search-wrapper">
<input type="search" id="mdbook-searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="mdbook-searchresults-outer" aria-describedby="searchresults-header">
<div class="spinner-wrapper">
<span class=fa-svg id="fa-spin"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M304 48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zm0 416c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM48 304c26.5 0 48-21.5 48-48s-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48zm464-48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM142.9 437c18.7-18.7 18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zm0-294.2c18.7-18.7 18.7-49.1 0-67.9S93.7 56.2 75 75s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zM369.1 437c18.7 18.7 49.1 18.7 67.9 0s18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9z"/></svg></span>
</div>
</div>
</form>
<div id="mdbook-searchresults-outer" class="searchresults-outer hidden">
<div id="mdbook-searchresults-header" class="searchresults-header"></div>
<ul id="mdbook-searchresults">
</ul>
</div>
</div>
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script>
// Mirror the sidebar state (computed by the earlier script into the global
// `sidebar`) onto ARIA attributes and keyboard focusability.
const sidebarShown = sidebar === 'visible';
document.getElementById('mdbook-sidebar-toggle').setAttribute('aria-expanded', sidebarShown);
document.getElementById('mdbook-sidebar').setAttribute('aria-hidden', !sidebarShown);
for (const link of document.querySelectorAll('#mdbook-sidebar a')) {
    // Hidden sidebar links are removed from the tab order.
    link.setAttribute('tabIndex', sidebarShown ? 0 : -1);
}
</script>
<div id="mdbook-content" class="content">
<main>
<h1 id="roadmap--quicproquo"><a class="header" href="#roadmap--quicproquo">Roadmap — quicproquo</a></h1>
<blockquote>
<p>From proof-of-concept to production-grade E2E encrypted messaging.</p>
<p>Each phase is designed to be tackled sequentially. Items within a phase
can be parallelised. Check the box when done.</p>
</blockquote>
<hr>
<h2 id="phase-1--production-hardening-critical"><a class="header" href="#phase-1--production-hardening-critical">Phase 1 — Production Hardening (Critical)</a></h2>
<p>Eliminate all crash paths, enforce secure defaults, fix deployment blockers.</p>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>1.1 Remove <code>.unwrap()</code> / <code>.expect()</code> from production paths</strong></p>
<ul>
<li>Replace <code>AUTH_CONTEXT.read().expect()</code> in client RPC with proper <code>Result</code></li>
<li>Replace <code>"0.0.0.0:0".parse().unwrap()</code> in client with fallible parse</li>
<li>Replace <code>Mutex::lock().unwrap()</code> in server storage with <code>.map_err()</code></li>
<li>Audit: <code>grep -rn 'unwrap()\|expect(' crates/</code> outside <code>#[cfg(test)]</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>1.2 Enforce secure defaults in production mode</strong></p>
<ul>
<li>Reject startup if <code>QPQ_PRODUCTION=true</code> and <code>auth_token</code> is empty or <code>"devtoken"</code></li>
<li>Require non-empty <code>db_key</code> when using SQL backend in production</li>
<li>Refuse to auto-generate TLS certs in production mode (require existing cert+key)</li>
<li>Already partially implemented — verify and harden the validation in <code>config.rs</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>1.3 Fix <code>.gitignore</code></strong></p>
<ul>
<li>Add <code>data/</code>, <code>*.der</code>, <code>*.pem</code>, <code>*.db</code>, <code>*.bin</code> (state files), <code>*.ks</code> (keystores)</li>
<li>Verify no secrets are already tracked: <code>git ls-files data/ *.der *.db</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>1.4 Fix Dockerfile</strong></p>
<ul>
<li>Sync workspace members (handle excluded <code>p2p</code> crate)</li>
<li>Create dedicated user/group instead of <code>nobody</code></li>
<li>Set writable <code>QPQ_DATA_DIR</code> with correct permissions</li>
<li>Test: <code>docker build . &amp;&amp; docker run --rm -it qpq-server --help</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>1.5 TLS certificate lifecycle</strong></p>
<ul>
<li>Document CA-signed cert setup (Let's Encrypt / custom CA)</li>
<li>Add <code>--tls-required</code> flag that refuses to start without valid cert</li>
<li>Log clear warning when using self-signed certs</li>
<li>Document certificate rotation procedure</li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-2--test--ci-maturity"><a class="header" href="#phase-2--test--ci-maturity">Phase 2 — Test &amp; CI Maturity</a></h2>
<p>Build confidence before adding features.</p>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>2.1 Expand E2E test coverage</strong></p>
<ul>
<li>Auth failure scenarios (wrong password, expired token, invalid token)</li>
<li>Message ordering verification (send N messages, verify seq numbers)</li>
<li>Concurrent clients (3+ members in group, simultaneous send/recv)</li>
<li>OPAQUE registration + login full flow</li>
<li>Queue full behavior (&gt;1000 messages)</li>
<li>Rate limiting behavior (&gt;100 enqueues/minute)</li>
<li>Reconnection after server restart</li>
<li>KeyPackage exhaustion (fetch when none available)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>2.2 Add unit tests for untested paths</strong></p>
<ul>
<li>Client retry logic (exponential backoff, jitter, retriable classification)</li>
<li>REPL input parsing edge cases (empty input, special characters, <code>/</code> commands)</li>
<li>State file encryption/decryption round-trip with bad password</li>
<li>Token cache expiry</li>
<li>Conversation store migrations</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>2.3 CI hardening</strong></p>
<ul>
<li>Add <code>.github/CODEOWNERS</code> (crypto, auth, wire-format require 2 reviewers)</li>
<li>Ensure <code>cargo deny check</code> runs on every PR (already in CI — verify)</li>
<li>Add <code>cargo audit</code> as blocking check (already in CI — verify)</li>
<li>Add coverage reporting (tarpaulin or llvm-cov)</li>
<li>Add CI job for Docker build validation</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>2.4 Clean up build warnings</strong></p>
<ul>
<li>Fix Cap'n Proto generated <code>unused_parens</code> warnings</li>
<li>Remove dead code / unused imports</li>
<li>Address <code>openmls</code> future-incompat warnings</li>
<li>Target: <code>cargo clippy --workspace -- -D warnings</code> passes clean</li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-3--client-sdks-native-quic--capn-proto-everywhere"><a class="header" href="#phase-3--client-sdks-native-quic--capn-proto-everywhere">Phase 3 — Client SDKs: Native QUIC + Cap'n Proto Everywhere</a></h2>
<p><strong>No REST gateway. No protocol dilution.</strong> The <code>.capnp</code> schemas are the
interface definition. Every SDK speaks native QUIC + Cap'n Proto. The
project name stays honest.</p>
<h3 id="why-this-matters"><a class="header" href="#why-this-matters">Why this matters</a></h3>
<p>The name is <strong>quic</strong>n<strong>proto</strong>chat — the protocol IS the product. Instead
of adding an HTTP translation layer that loses zero-copy performance and
adds base64 overhead, we invest in making the native protocol accessible
from every language that has QUIC + Cap'n Proto support, and provide
WASM/FFI for the crypto layer.</p>
<h3 id="architecture"><a class="header" href="#architecture">Architecture</a></h3>
<pre><code> Server: QUIC + Cap'n Proto (single protocol, no gateway)
Client SDKs:
┌─── Rust quinn + capnp-rpc (existing, reference impl)
├─── Go quic-go + go-capnp (native, high confidence)
├─── Python aioquic + pycapnp (native QUIC, manual framing)
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
└─── Browser WebTransport + capnp (WASM) (QUIC transport, no HTTP needed)
Crypto layer (client-side MLS, shared across all SDKs):
┌─── Rust crate (native, existing)
├─── WASM module (browsers, Node.js, Deno)
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
</code></pre>
<h3 id="language-support-reality-check"><a class="header" href="#language-support-reality-check">Language support reality check</a></h3>
<div class="table-wrapper">
<table>
<thead>
<tr><th>Language</th><th>QUIC</th><th>Cap'n Proto</th><th>RPC</th><th>Confidence</th></tr>
</thead>
<tbody>
<tr><td><strong>Rust</strong></td><td>quinn ✅</td><td>capnp-rpc ✅</td><td>Full ✅</td><td>Existing</td></tr>
<tr><td><strong>Go</strong></td><td>quic-go ✅</td><td>go-capnp ✅</td><td>Level 1 ✅</td><td>High</td></tr>
<tr><td><strong>Python</strong></td><td>aioquic ✅</td><td>pycapnp ⚠️</td><td>Manual framing</td><td>Medium</td></tr>
<tr><td><strong>C/C++</strong></td><td>msquic/ngtcp2 ✅</td><td>capnproto ✅</td><td>Full ✅</td><td>High</td></tr>
<tr><td><strong>Browser</strong></td><td>WebTransport ✅</td><td>WASM ✅</td><td>Via WASM bridge</td><td>Medium</td></tr>
</tbody>
</table>
</div>
<h3 id="implementation"><a class="header" href="#implementation">Implementation</a></h3>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>3.1 Go SDK (<code>quicproquo-go</code>)</strong></p>
<ul>
<li>Generated Go types from <code>node.capnp</code> (6487-line codegen, all 24 RPC methods)</li>
<li>QUIC transport via <code>quic-go</code> with TLS 1.3 + ALPN <code>"capnp"</code></li>
<li>High-level <code>qpq</code> package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth</li>
<li>Example CLI in <code>sdks/go/cmd/example/</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>3.2 Python SDK (<code>quicproquo-py</code>)</strong></p>
<ul>
<li>QUIC transport: <code>aioquic</code> with custom Capn Proto stream handler</li>
<li>Capn Proto serialization: <code>pycapnp</code> for message types</li>
<li>Manual RPC framing: length-prefixed request/response over QUIC stream</li>
<li>Async/await API matching the Rust client patterns</li>
<li>Crypto: PyO3 bindings to <code>quicproquo-core</code> for MLS operations</li>
<li>Publish: PyPI <code>quicproquo</code></li>
<li>Example: async bot client</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>3.3 C FFI layer (<code>quicproquo-ffi</code>)</strong></p>
<ul>
<li><code>crates/quicproquo-ffi</code> with 7 extern “C” functions: connect, login, send, receive, disconnect, last_error, free_string</li>
<li>Builds as <code>libquicproquo_ffi.so</code> / <code>.dylib</code> / <code>.dll</code></li>
<li>Python ctypes wrapper in <code>examples/python/qpq_client.py</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>3.4 WASM compilation of <code>quicproquo-core</code></strong></p>
<ul>
<li><code>wasm-pack build</code> target producing 175 KB WASM bundle (LTO + opt-level=s)</li>
<li>13 <code>wasm_bindgen</code> functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding</li>
<li>Browser-ready with <code>crypto.getRandomValues()</code> RNG</li>
<li>Published as <code>sdks/typescript/wasm-crypto/</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>3.5 WebTransport server endpoint</strong></p>
<ul>
<li>Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)</li>
<li>Cap'n Proto RPC framed over WebTransport bidirectional streams</li>
<li>Same auth, same storage, same RPC handlers — just a different stream source</li>
<li>Browsers connect via <code>new WebTransport("https://server:7443")</code></li>
<li>ALPN negotiation: <code>"h3"</code> for WebTransport, <code>"capnp"</code> for native QUIC</li>
<li>Configurable port: <code>--webtransport-listen 0.0.0.0:7443</code></li>
<li>Feature-flagged: <code>--features webtransport</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>3.6 TypeScript/JavaScript SDK (<code>@quicproquo/client</code>)</strong></p>
<ul>
<li><code>QpqClient</code> class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount</li>
<li>WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad</li>
<li>WebSocket transport with request/response correlation and reconnection</li>
<li>Browser demo: interactive crypto playground + chat UI (<code>sdks/typescript/demo/index.html</code>)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>3.7 SDK documentation and schema publishing</strong></p>
<ul>
<li>Publish <code>.capnp</code> schemas as the canonical API contract</li>
<li>Document the QUIC + Cap'n Proto connection pattern for each language</li>
<li>Provide a “build your own SDK” guide (QUIC stream → Capn Proto RPC bootstrap)</li>
<li>Reference implementation checklist: connect, auth, upload key, enqueue, fetch</li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-4--trust--security-infrastructure"><a class="header" href="#phase-4--trust--security-infrastructure">Phase 4 — Trust &amp; Security Infrastructure</a></h2>
<p>Address the security gaps required for real-world deployment.</p>
<ul>
<li>
<p><input disabled="" type="checkbox"> <strong>4.1 Third-party cryptographic audit</strong></p>
<ul>
<li>Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization</li>
<li>Firms: NCC Group, Trail of Bits, Cure53</li>
<li>Budget and timeline: typically 4–6 weeks, $50K–$150K</li>
<li>Publish report publicly (builds trust)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>4.2 Key Transparency / revocation</strong></p>
<ul>
<li>Replace <code>BasicCredential</code> with X.509-based MLS credentials</li>
<li>Or: verifiable key directory (Merkle tree, auditable log)</li>
<li>Users can verify peer keys haven't been substituted (MITM detection)</li>
<li>Revocation mechanism for compromised keys</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>4.3 Client authentication on Delivery Service</strong></p>
<ul>
<li>DS sender identity binding with explicit audit logging</li>
<li><code>sender_prefix</code> tracking in enqueue/batch_enqueue RPCs</li>
<li>Sender identity derived from authenticated session</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>4.4 M7 — Post-quantum MLS integration</strong></p>
<ul>
<li>Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider</li>
<li>Group key material gets post-quantum confidentiality</li>
<li>Full test suite with PQ ciphersuite</li>
<li>Ref: existing <code>hybrid_kem.rs</code> and <code>hybrid_crypto.rs</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>4.5 Username enumeration mitigation</strong></p>
<ul>
<li>5 ms timing floor on <code>resolveUser</code> responses</li>
<li>Rate limiting to prevent bulk enumeration attacks</li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-5--features--ux"><a class="header" href="#phase-5--features--ux">Phase 5 — Features &amp; UX</a></h2>
<p>Make it a product people want to use.</p>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>5.1 Multi-device support</strong></p>
<ul>
<li>Account → multiple devices, each with own Ed25519 key + MLS KeyPackages</li>
<li>Device graph management (add device, remove device, list devices)</li>
<li>Messages delivered to all devices of a user</li>
<li><code>device_id</code> field already in Auth struct — wire it through</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>5.2 Account recovery</strong></p>
<ul>
<li>Recovery codes or backup key (encrypted, stored by user)</li>
<li>Option: server-assisted recovery with security questions (lower security)</li>
<li>MLS state re-establishment after device loss</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>5.3 Full MLS lifecycle</strong></p>
<ul>
<li>Member removal (Remove proposal → Commit → fan-out)</li>
<li>Credential update (Update proposal for key rotation)</li>
<li>Explicit proposal handling (queue proposals, batch commit)</li>
<li>Group metadata (name, description, avatar hash)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>5.4 Message editing and deletion</strong></p>
<ul>
<li><code>Edit</code> (0x06) and <code>Delete</code> (0x07) message types in <code>AppMessage</code></li>
<li><code>/edit &lt;index&gt; &lt;text&gt;</code> and <code>/delete &lt;index&gt;</code> REPL commands (own messages only)</li>
<li>Database update/removal on incoming edit/delete</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>5.5 File and media transfer</strong></p>
<ul>
<li><code>uploadBlob</code> / <code>downloadBlob</code> RPCs with 256 KB chunked streaming</li>
<li>SHA-256 content-addressable storage with hash verification</li>
<li><code>FileRef</code> (0x08) message type with blob_id, filename, file_size, mime_type</li>
<li><code>/send-file &lt;path&gt;</code> and <code>/download &lt;index&gt;</code> REPL commands with progress bars</li>
<li>50 MB max file size, automatic MIME detection via <code>mime_guess</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>5.6 Abuse prevention and moderation</strong></p>
<ul>
<li>Block user (client-side, suppress display)</li>
<li>Report message (encrypted report to admin key)</li>
<li>Admin tools: ban user, delete account, audit log</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>5.7 Offline message queue (client-side)</strong></p>
<ul>
<li>Queue messages when disconnected, send on reconnect</li>
<li>Idempotent message IDs to prevent duplicates</li>
<li>Gap detection: compare local seq with server seq</li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-6--scale--operations"><a class="header" href="#phase-6--scale--operations">Phase 6 — Scale &amp; Operations</a></h2>
<p>Prepare for real traffic.</p>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>6.1 Distributed rate limiting</strong></p>
<ul>
<li>Current: in-memory per-process, lost on restart</li>
<li>Move to Redis or shared state for multi-node deployments</li>
<li>Sliding window with configurable thresholds</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>6.2 Multi-node / horizontal scaling</strong></p>
<ul>
<li>Stateless server design (already mostly there — state is in storage backend)</li>
<li>Shared PostgreSQL or CockroachDB backend (replace SQLite)</li>
<li>Message queue fan-out (Redis pub/sub or NATS for cross-node notification)</li>
<li>Load balancer health check via QUIC RPC <code>health()</code> or Prometheus <code>/metrics</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>6.3 Operational runbook</strong></p>
<ul>
<li>Backup / restore procedures (SQLCipher, file backend)</li>
<li>Key rotation (auth token, TLS cert, DB encryption key)</li>
<li>Incident response playbook</li>
<li>Scaling guide (when to add nodes, resource sizing)</li>
<li>Monitoring dashboard templates (Grafana + Prometheus)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>6.4 Connection draining and graceful shutdown</strong></p>
<ul>
<li>Stop accepting new connections on SIGTERM</li>
<li>Wait for in-flight RPCs (configurable timeout, default 30s)</li>
<li>Drain WebTransport sessions with close frame</li>
<li>Document expected behavior for load balancers (health → unhealthy first)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>6.5 Request-level timeouts</strong></p>
<ul>
<li>Per-RPC timeout (prevent slow clients from holding resources)</li>
<li>Database query timeout</li>
<li>Overall request deadline propagation</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>6.6 Observability enhancements</strong></p>
<ul>
<li>Request correlation IDs (trace across RPC → storage)</li>
<li>Storage operation latency metrics</li>
<li>Per-endpoint latency histograms</li>
<li>Structured audit log to persistent storage (not just stdout)</li>
<li>OpenTelemetry integration</li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-7--platform-expansion--research"><a class="header" href="#phase-7--platform-expansion--research">Phase 7 — Platform Expansion &amp; Research</a></h2>
<p>Long-term vision for wide adoption.</p>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>7.1 Mobile clients (iOS + Android)</strong></p>
<ul>
<li>Use C FFI (Phase 3.3) for crypto + transport (single library)</li>
<li>Push notifications via APNs / FCM (server sends notification on enqueue)</li>
<li>Background QUIC connection for message polling</li>
<li>Biometric auth for local key storage (Keychain / Android Keystore)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>7.2 Web client (browser)</strong></p>
<ul>
<li>Use WASM (Phase 3.4) for crypto</li>
<li>Use WebTransport (Phase 3.5) for native QUIC transport</li>
<li>Cap'n Proto via WASM bridge (Phase 3.6)</li>
<li>IndexedDB for local state persistence</li>
<li>Service Worker for background notifications</li>
<li>Progressive Web App (PWA) support</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>7.3 Federation</strong></p>
<ul>
<li>Server-to-server protocol via Capn Proto RPC over QUIC (see <code>federation.capnp</code>)</li>
<li><code>relayEnqueue</code>, <code>proxyFetchKeyPackage</code>, <code>federationHealth</code> methods</li>
<li>Identity resolution across federated servers</li>
<li>MLS group spanning multiple servers</li>
<li>Trust model for federated deployments</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>7.4 Sealed Sender</strong></p>
<ul>
<li>Sender identity inside MLS ciphertext only (server can't see who sent)</li>
<li><code>sealed_sender</code> module in quicproquo-core with seal/unseal API</li>
<li>WASM-accessible via <code>wasm_bindgen</code> for browser use</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>7.5 Additional language SDKs</strong></p>
<ul>
<li>Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)</li>
<li>Swift: Swift wrapper over C FFI + Network.framework QUIC</li>
<li>Ruby: FFI bindings via <code>quicproquo-ffi</code></li>
<li>Evaluate demand-driven — only build SDKs people request</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>7.6 P2P / NAT traversal</strong></p>
<ul>
<li>Direct peer-to-peer via iroh (foundation exists in <code>quicproquo-p2p</code>)</li>
<li>Server as fallback relay only</li>
<li>Reduces latency and single-point-of-failure</li>
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 6.1</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>7.7 Traffic analysis resistance</strong></p>
<ul>
<li>Padding messages to uniform size</li>
<li>Decoy traffic to mask timing patterns</li>
<li>Optional Tor/I2P routing for IP privacy</li>
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 5.4, 6.3</code></li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-8--freifunk--community-mesh-networking"><a class="header" href="#phase-8--freifunk--community-mesh-networking">Phase 8 — Freifunk / Community Mesh Networking</a></h2>
<p>Make qpq a first-class citizen on decentralised, community-operated wireless
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpq nodes form
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
functions without any central infrastructure or internet uplink.</p>
<h3 id="architecture-1"><a class="header" href="#architecture-1">Architecture</a></h3>
<pre><code> Client A ─── mDNS discovery ──► nearby qpq node (LAN / mesh)
Cap'n Proto federation
remote qpq node (across mesh)
</code></pre>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F0 — Re-include <code>quicproquo-p2p</code> in workspace; fix ALPN strings</strong></p>
<ul>
<li>Moved <code>crates/quicproquo-p2p</code> from <code>exclude</code> back into <code>[workspace] members</code></li>
<li>Fixed ALPN <code>b"quicnprotochat/p2p/1"</code> → <code>b"quicproquo/p2p/1"</code> (breaking wire change)</li>
<li>Fixed federation ALPN <code>b"qnpc-fed"</code> → <code>b"quicproquo/federation/1"</code></li>
<li>Feature-gated behind <code>--features mesh</code> on client (keeps iroh out of default builds)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F1 — Federation routing in message delivery</strong></p>
<ul>
<li><code>handle_enqueue</code> and <code>handle_batch_enqueue</code> call <code>federation::routing::resolve_destination()</code></li>
<li>Recipients with a remote home server are relayed via <code>FederationClient::relay_enqueue()</code></li>
<li>mTLS mutual authentication between nodes (both present client certs, validated against shared CA)</li>
<li>Config: <code>QPQ_FEDERATION_LISTEN</code>, <code>QPQ_LOCAL_DOMAIN</code>, <code>QPQ_FEDERATION_CERT/KEY/CA</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F2 — mDNS local peer discovery</strong></p>
<ul>
<li>Server announces <code>_quicproquo._udp.local.</code> on startup via <code>mdns-sd</code></li>
<li>Client: <code>MeshDiscovery::start()</code> browses for nearby nodes (feature-gated)</li>
<li>REPL commands: <code>/mesh peers</code> (scan + list), <code>/mesh server &lt;host:port&gt;</code> (note address)</li>
<li>Nodes announce: <code>ver=1</code>, <code>server=&lt;host:port&gt;</code>, <code>domain=&lt;local_domain&gt;</code> TXT records</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F3 — Self-sovereign mesh identity</strong></p>
<ul>
<li>Ed25519 keypair-based identity independent of AS registration</li>
<li>JSON-persisted seed + known peers directory</li>
<li>Sign/verify operations for mesh authenticity (<code>crates/quicproquo-p2p/src/identity.rs</code>)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F4 — Store-and-forward with TTL</strong></p>
<ul>
<li><code>MeshEnvelope</code> with TTL-based expiry, hop_count tracking, max_hops routing limit</li>
<li>SHA-256 deduplication ID prevents relay loops</li>
<li>Ed25519 signature verification on envelopes</li>
<li><code>MeshStore</code> in-memory queue with per-recipient capacity limits and TTL-based GC</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F5 — Lightweight broadcast channels</strong></p>
<ul>
<li>Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)</li>
<li>Topic-based pub/sub via <code>BroadcastChannel</code> and <code>BroadcastManager</code></li>
<li>Subscribe/unsubscribe, create, publish API on <code>P2pNode</code></li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F6 — Extended <code>/mesh</code> REPL commands</strong></p>
<ul>
<li><code>/mesh send &lt;peer_id&gt; &lt;msg&gt;</code> — direct P2P message via iroh</li>
<li><code>/mesh broadcast &lt;topic&gt; &lt;msg&gt;</code> — publish to broadcast channel</li>
<li><code>/mesh subscribe &lt;topic&gt;</code> — join broadcast channel</li>
<li><code>/mesh route</code> — show routing table</li>
<li><code>/mesh identity</code> — show mesh identity info</li>
<li><code>/mesh store</code> — show store-and-forward statistics</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F7 — OpenWrt cross-compilation guide</strong></p>
<ul>
<li>Musl static builds: <code>x86_64-unknown-linux-musl</code>, <code>armv7-unknown-linux-musleabihf</code>, <code>mips-unknown-linux-musl</code></li>
<li>Strip binary: <code>--release</code> + <code>strip</code> → target size &lt; 5 MB for flash storage</li>
<li><code>opkg</code> package manifest for OpenWrt feed</li>
<li><code>procd</code> init script + <code>uci</code> config file for OpenWrt integration</li>
<li>CI job: cross-compile and size-check on every release tag</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>F8 — Traffic analysis resistance for mesh</strong></p>
<ul>
<li>Uniform message padding to nearest 256-byte boundary (hides message size)</li>
<li>Configurable decoy traffic rate (fake messages to mask send timing)</li>
<li>Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)</li>
<li>Ref: Phase 7.7 for server-side traffic analysis resistance</li>
</ul>
</li>
</ul>
<hr>
<h2 id="phase-9--developer-experience--community-growth"><a class="header" href="#phase-9--developer-experience--community-growth">Phase 9 — Developer Experience &amp; Community Growth</a></h2>
<p>Features designed to attract contributors, create demo/showcase potential,
and lower the barrier to entry for non-crypto developers.</p>
<ul>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.1 Criterion Benchmark Suite (<code>qpq-bench</code>)</strong></p>
<ul>
<li>Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake</li>
<li>CI publishes HTML benchmark reports as GitHub Actions artifacts</li>
<li>Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.2 Safety Numbers (key verification)</strong></p>
<ul>
<li>60-digit numeric code derived from two identity keys (Signal-style)</li>
<li><code>/verify &lt;username&gt;</code> REPL command for out-of-band verification</li>
<li>Available in WASM via <code>compute_safety_number</code> binding</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.3 Full-Screen TUI (Ratatui + Crossterm)</strong></p>
<ul>
<li><code>qpq tui</code> launches a full-screen terminal UI: message pane, input bar,
channel sidebar with unread counts, MLS epoch indicator</li>
<li>Feature-gated <code>--features tui</code> to keep ratatui/crossterm out of default builds</li>
<li>Existing REPL and CLI subcommands are unaffected</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.4 Delivery Proof Canary Tokens</strong></p>
<ul>
<li>Server signs <code>Ed25519(SHA-256(message_id || recipient || timestamp))</code> on enqueue</li>
<li>Sender stores proof locally — cryptographic evidence the server queued the message</li>
<li>Cap'n Proto schema gains optional <code>deliveryProof: Data</code> on enqueue response</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.5 Verifiable Transcript Archive</strong></p>
<ul>
<li><code>GroupMember::export_transcript(path, password)</code> writes encrypted, tamper-evident
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
<li><code>qpq export verify</code> CLI command independently verifies chain integrity</li>
<li>Useful for legal discovery, audit, or personal backup</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.6 Key Transparency (Merkle-Log Identity Binding)</strong></p>
<ul>
<li>Append-only Merkle log of (username, identity_key) bindings in the AS</li>
<li>Clients receive inclusion proofs alongside key fetches</li>
<li>Any client can independently audit the full identity history</li>
<li>Lightweight subset of RFC 9162 adapted for identity keys</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.7 Dynamic Server Plugin System</strong></p>
<ul>
<li>Server loads <code>.so</code>/<code>.dylib</code> plugins at runtime via <code>--plugin-dir</code></li>
<li>C-compatible <code>HookVTable</code> via <code>extern "C"</code> — plugins in any language</li>
<li>6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered</li>
<li>Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)</li>
</ul>
</li>
<li>
<p><input disabled="" type="checkbox" checked=""> <strong>9.8 PQ Noise Transport Layer</strong></p>
<ul>
<li>Hybrid <code>Noise_XX + ML-KEM-768</code> handshake for post-quantum transport security</li>
<li>Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)</li>
<li>Feature-gated <code>--features pq-noise</code>; classical Noise_XX default preserved</li>
<li>May require extending or forking <code>snow</code> crate's <code>CryptoResolver</code></li>
</ul>
</li>
</ul>
<hr>
<h2 id="summary-timeline"><a class="header" href="#summary-timeline">Summary Timeline</a></h2>
<div class="table-wrapper">
<table>
<thead>
<tr><th>Phase</th><th>Focus</th><th>Estimated Effort</th></tr>
</thead>
<tbody>
<tr><td><strong>1</strong></td><td>Production Hardening</td><td>1–2 days</td></tr>
<tr><td><strong>2</strong></td><td>Test &amp; CI Maturity</td><td>2–3 days</td></tr>
<tr><td><strong>3</strong></td><td>Client SDKs (Go, Python, WASM, FFI, WebTransport)</td><td>5–8 days</td></tr>
<tr><td><strong>4</strong></td><td>Trust &amp; Security Infrastructure</td><td>2–4 days (excl. audit)</td></tr>
<tr><td><strong>5</strong></td><td>Features &amp; UX</td><td>5–7 days</td></tr>
<tr><td><strong>6</strong></td><td>Scale &amp; Operations</td><td>3–5 days</td></tr>
<tr><td><strong>7</strong></td><td>Platform Expansion &amp; Research</td><td>ongoing</td></tr>
<tr><td><strong>8</strong></td><td>Freifunk / Community Mesh</td><td>ongoing</td></tr>
<tr><td><strong>9</strong></td><td>Developer Experience &amp; Community Growth</td><td>3–5 days</td></tr>
</tbody>
</table>
</div>
<hr>
<h2 id="related-documents"><a class="header" href="#related-documents">Related Documents</a></h2>
<ul>
<li><a href="docs/FUTURE-IMPROVEMENTS.html">Future Improvements</a> — consolidated improvement list</li>
<li><a href="docs/PRODUCTION-READINESS-AUDIT.html">Production Readiness Audit</a> — specific blockers</li>
<li><a href="docs/SECURITY-AUDIT.html">Security Audit</a> — findings and recommendations</li>
<li><a href="docs/src/roadmap/milestones.html">Milestone Tracker</a> — M1M7 status</li>
<li><a href="docs/src/roadmap/authz-plan.html">Auth, Devices, and Tokens</a> — authorization design</li>
<li><a href="docs/src/roadmap/dm-channels.html">DM Channel Design</a> — 1:1 channel spec</li>
</ul>
</main>
<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="roadmap/future-research.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
</a>
<a rel="next prefetch" href="operations/monitoring.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
</a>
<div style="clear: both"></div>
</nav>
</div>
</div>
<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="roadmap/future-research.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
</a>
<a rel="next prefetch" href="operations/monitoring.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
</a>
</nav>
</div>
<template id=fa-eye><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M288 32c-80.8 0-145.5 36.8-192.6 80.6C48.6 156 17.3 208 2.5 243.7c-3.3 7.9-3.3 16.7 0 24.6C17.3 304 48.6 356 95.4 399.4C142.5 443.2 207.2 480 288 480s145.5-36.8 192.6-80.6c46.8-43.5 78.1-95.4 93-131.1c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C433.5 68.8 368.8 32 288 32zM432 256c0 79.5-64.5 144-144 144s-144-64.5-144-144s64.5-144 144-144s144 64.5 144 144zM288 192c0 35.3-28.7 64-64 64c-11.5 0-22.3-3-31.6-8.4c-.2 2.8-.4 5.5-.4 8.4c0 53 43 96 96 96s96-43 96-96s-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6z"/></svg></span></template>
<template id=fa-eye-slash><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M38.8 5.1C28.4-3.1 13.3-1.2 5.1 9.2S-1.2 34.7 9.2 42.9l592 464c10.4 8.2 25.5 6.3 33.7-4.1s6.3-25.5-4.1-33.7L525.6 386.7c39.6-40.6 66.4-86.1 79.9-118.4c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C465.5 68.8 400.8 32 320 32c-68.2 0-125 26.3-169.3 60.8L38.8 5.1zM223.1 149.5C248.6 126.2 282.7 112 320 112c79.5 0 144 64.5 144 144c0 24.9-6.3 48.3-17.4 68.7L408 294.5c5.2-11.8 8-24.8 8-38.5c0-53-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6c0 10.2-2.4 19.8-6.6 28.3l-90.3-70.8zm223.1 298L373 389.9c-16.4 6.5-34.3 10.1-53 10.1c-79.5 0-144-64.5-144-144c0-6.9 .5-13.6 1.4-20.2L83.1 161.5C60.3 191.2 44 220.8 34.5 243.7c-3.3 7.9-3.3 16.7 0 24.6c14.9 35.7 46.2 87.7 93 131.1C174.5 443.2 239.2 480 320 480c47.8 0 89.9-12.9 126.2-32.5z"/></svg></span></template>
<template id=fa-copy><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M502.6 70.63l-61.25-61.25C435.4 3.371 427.2 0 418.7 0H255.1c-35.35 0-64 28.66-64 64l.0195 256C192 355.4 220.7 384 256 384h192c35.2 0 64-28.8 64-64V93.25C512 84.77 508.6 76.63 502.6 70.63zM464 320c0 8.836-7.164 16-16 16H255.1c-8.838 0-16-7.164-16-16L239.1 64.13c0-8.836 7.164-16 16-16h128L384 96c0 17.67 14.33 32 32 32h47.1V320zM272 448c0 8.836-7.164 16-16 16H63.1c-8.838 0-16-7.164-16-16L47.98 192.1c0-8.836 7.164-16 16-16H160V128H63.99c-35.35 0-64 28.65-64 64l.0098 256C.002 483.3 28.66 512 64 512h192c35.2 0 64-28.8 64-64v-32h-47.1L272 448z"/></svg></span></template>
<template id=fa-play><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M73 39c-14.8-9.1-33.4-9.4-48.5-.9S0 62.6 0 80V432c0 17.4 9.4 33.4 24.5 41.9s33.7 8.1 48.5-.9L361 297c14.3-8.7 23-24.2 23-41s-8.7-32.2-23-41L73 39z"/></svg></span></template>
<template id=fa-clock-rotate-left><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M75 75L41 41C25.9 25.9 0 36.6 0 57.9V168c0 13.3 10.7 24 24 24H134.1c21.4 0 32.1-25.9 17-41l-30.8-30.8C155 85.5 203 64 256 64c106 0 192 86 192 192s-86 192-192 192c-40.8 0-78.6-12.7-109.7-34.4c-14.5-10.1-34.4-6.6-44.6 7.9s-6.6 34.4 7.9 44.6C151.2 495 201.7 512 256 512c141.4 0 256-114.6 256-256S397.4 0 256 0C185.3 0 121.3 28.7 75 75zm181 53c-13.3 0-24 10.7-24 24V256c0 6.4 2.5 12.5 7 17l72 72c9.4 9.4 24.6 9.4 33.9 0s9.4-24.6 0-33.9l-65-65V152c0-13.3-10.7-24-24-24z"/></svg></span></template>
<script>
window.playground_copyable = true;
</script>
<script src="elasticlunr-ef4e11c1.min.js"></script>
<script src="mark-09e88c2c.min.js"></script>
<script src="searcher-c2a407aa.js"></script>
<script src="clipboard-1626706a.min.js"></script>
<script src="highlight-abc7f01d.js"></script>
<script src="book-a0b12cfe.js"></script>
<!-- Custom JS scripts -->
</div>
</body>
</html>

View File

@@ -1,493 +0,0 @@
# Roadmap — quicprochat
> From proof-of-concept to production-grade E2E encrypted messaging.
>
> Each phase is designed to be tackled sequentially. Items within a phase
> can be parallelised. Check the box when done.
---
## Phase 1 — Production Hardening (Critical)
Eliminate all crash paths, enforce secure defaults, fix deployment blockers.
- [x] **1.1 Remove `.unwrap()` / `.expect()` from production paths**
- Replace `AUTH_CONTEXT.read().expect()` in client RPC with proper `Result`
- Replace `"0.0.0.0:0".parse().unwrap()` in client with fallible parse
- Replace `Mutex::lock().unwrap()` in server storage with `.map_err()`
- Audit: `grep -rn 'unwrap()\|expect(' crates/` outside `#[cfg(test)]`
- [x] **1.2 Enforce secure defaults in production mode**
- Reject startup if `QPC_PRODUCTION=true` and `auth_token` is empty or `"devtoken"`
- Require non-empty `db_key` when using SQL backend in production
- Refuse to auto-generate TLS certs in production mode (require existing cert+key)
- Already partially implemented — verify and harden the validation in `config.rs`
- [x] **1.3 Fix `.gitignore`**
- Add `data/`, `*.der`, `*.pem`, `*.db`, `*.bin` (state files), `*.ks` (keystores)
- Verify no secrets are already tracked: `git ls-files data/ *.der *.db`
- [x] **1.4 Fix Dockerfile**
- Sync workspace members (handle excluded `p2p` crate)
- Create dedicated user/group instead of `nobody`
- Set writable `QPC_DATA_DIR` with correct permissions
- Test: `docker build . && docker run --rm -it qpc-server --help`
- [x] **1.5 TLS certificate lifecycle**
- Document CA-signed cert setup (Let's Encrypt / custom CA)
- Add `--tls-required` flag that refuses to start without valid cert
- Log clear warning when using self-signed certs
- Document certificate rotation procedure
---
## Phase 2 — Test & CI Maturity
Build confidence before adding features.
- [x] **2.1 Expand E2E test coverage**
- Auth failure scenarios (wrong password, expired token, invalid token)
- Message ordering verification (send N messages, verify seq numbers)
- Concurrent clients (3+ members in group, simultaneous send/recv)
- OPAQUE registration + login full flow
- Queue full behavior (>1000 messages)
- Rate limiting behavior (>100 enqueues/minute)
- Reconnection after server restart
- KeyPackage exhaustion (fetch when none available)
- [x] **2.2 Add unit tests for untested paths**
- Client retry logic (exponential backoff, jitter, retriable classification)
- REPL input parsing edge cases (empty input, special characters, `/` commands)
- State file encryption/decryption round-trip with bad password
- Token cache expiry
- Conversation store migrations
- [x] **2.3 CI hardening**
- Add `.github/CODEOWNERS` (crypto, auth, wire-format require 2 reviewers)
- Ensure `cargo deny check` runs on every PR (already in CI — verify)
- Add `cargo audit` as blocking check (already in CI — verify)
- Add coverage reporting (tarpaulin or llvm-cov)
- Add CI job for Docker build validation
- [x] **2.4 Clean up build warnings**
- Fix Cap'n Proto generated `unused_parens` warnings
- Remove dead code / unused imports
- Address `openmls` future-incompat warnings
- Target: `cargo clippy --workspace -- -D warnings` passes clean
---
## Phase 3 — Client SDKs: Native QUIC + Cap'n Proto Everywhere
**No REST gateway. No protocol dilution.** The `.capnp` schemas are the
interface definition. Every SDK speaks native QUIC + Cap'n Proto. The
project name stays honest.
### Why this matters
The name is **quic**n**proto**chat — the protocol IS the product. Instead
of adding an HTTP translation layer that loses zero-copy performance and
adds base64 overhead, we invest in making the native protocol accessible
from every language that has QUIC + Cap'n Proto support, and provide
WASM/FFI for the crypto layer.
### Architecture
```
Server: QUIC + Cap'n Proto (single protocol, no gateway)
Client SDKs:
┌─── Rust quinn + capnp-rpc (existing, reference impl)
├─── Go quic-go + go-capnp (native, high confidence)
├─── Python aioquic + pycapnp (native QUIC, manual framing)
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
└─── Browser WebTransport + capnp (WASM) (QUIC transport, no HTTP needed)
Crypto layer (client-side MLS, shared across all SDKs):
┌─── Rust crate (native, existing)
├─── WASM module (browsers, Node.js, Deno)
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
```
### Language support reality check
| Language | QUIC | Cap'n Proto | RPC | Confidence |
|----------|------|-------------|-----|------------|
| **Rust** | quinn ✅ | capnp-rpc ✅ | Full ✅ | Existing |
| **Go** | quic-go ✅ | go-capnp ✅ | Level 1 ✅ | High |
| **Python** | aioquic ✅ | pycapnp ⚠️ | Manual framing | Medium |
| **C/C++** | msquic/ngtcp2 ✅ | capnproto ✅ | Full ✅ | High |
| **Browser** | WebTransport ✅ | WASM ✅ | Via WASM bridge | Medium |
### Implementation
- [x] **3.1 Go SDK (`quicprochat-go`)**
- Generated Go types from `node.capnp` (6487-line codegen, all 24 RPC methods)
- QUIC transport via `quic-go` with TLS 1.3 + ALPN `"capnp"`
- High-level `qpc` package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth
- Example CLI in `sdks/go/cmd/example/`
- [x] **3.2 Python SDK (`quicprochat-py`)**
- QUIC transport: `aioquic` with custom Cap'n Proto stream handler
- Cap'n Proto serialization: `pycapnp` for message types
- Manual RPC framing: length-prefixed request/response over QUIC stream
- Async/await API matching the Rust client patterns
- Crypto: PyO3 bindings to `quicprochat-core` for MLS operations
- Publish: PyPI `quicprochat`
- Example: async bot client
- [x] **3.3 C FFI layer (`quicprochat-ffi`)**
- `crates/quicprochat-ffi` with 7 extern "C" functions: connect, login, send, receive, disconnect, last_error, free_string
- Builds as `libquicprochat_ffi.so` / `.dylib` / `.dll`
- Python ctypes wrapper in `examples/python/qpc_client.py`
- [x] **3.4 WASM compilation of `quicprochat-core`**
- `wasm-pack build` target producing 175 KB WASM bundle (LTO + opt-level=s)
- 13 `wasm_bindgen` functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding
- Browser-ready with `crypto.getRandomValues()` RNG
- Published as `sdks/typescript/wasm-crypto/`
- [x] **3.5 WebTransport server endpoint**
- Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)
- Cap'n Proto RPC framed over WebTransport bidirectional streams
- Same auth, same storage, same RPC handlers — just a different stream source
- Browsers connect via `new WebTransport("https://server:7443")`
- ALPN negotiation: `"h3"` for WebTransport, `"capnp"` for native QUIC
- Configurable port: `--webtransport-listen 0.0.0.0:7443`
- Feature-flagged: `--features webtransport`
- [x] **3.6 TypeScript/JavaScript SDK (`@quicprochat/client`)**
- `QpqClient` class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount
- WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad
- WebSocket transport with request/response correlation and reconnection
- Browser demo: interactive crypto playground + chat UI (`sdks/typescript/demo/index.html`)
- [x] **3.7 SDK documentation and schema publishing**
- Publish `.capnp` schemas as the canonical API contract
- Document the QUIC + Cap'n Proto connection pattern for each language
- Provide a "build your own SDK" guide (QUIC stream → Cap'n Proto RPC bootstrap)
- Reference implementation checklist: connect, auth, upload key, enqueue, fetch
---
## Phase 4 — Trust & Security Infrastructure
Address the security gaps required for real-world deployment.
- [ ] **4.1 Third-party cryptographic audit**
- Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization
- Firms: NCC Group, Trail of Bits, Cure53
- Budget and timeline: typically 4–6 weeks, $50K–$150K
- Publish report publicly (builds trust)
- [x] **4.2 Key Transparency / revocation**
- Replace `BasicCredential` with X.509-based MLS credentials
- Or: verifiable key directory (Merkle tree, auditable log)
- Users can verify peer keys haven't been substituted (MITM detection)
- Revocation mechanism for compromised keys
- [x] **4.3 Client authentication on Delivery Service**
- DS sender identity binding with explicit audit logging
- `sender_prefix` tracking in enqueue/batch_enqueue RPCs
- Sender identity derived from authenticated session
- [x] **4.4 M7 — Post-quantum MLS integration**
- Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider
- Group key material gets post-quantum confidentiality
- Full test suite with PQ ciphersuite
- Ref: existing `hybrid_kem.rs` and `hybrid_crypto.rs`
- [x] **4.5 Username enumeration mitigation**
- 5 ms timing floor on `resolveUser` responses
- Rate limiting to prevent bulk enumeration attacks
---
## Phase 5 — Features & UX
Make it a product people want to use.
- [x] **5.1 Multi-device support**
- Account → multiple devices, each with own Ed25519 key + MLS KeyPackages
- Device graph management (add device, remove device, list devices)
- Messages delivered to all devices of a user
- `device_id` field already in Auth struct — wire it through
- [x] **5.2 Account recovery**
- Recovery codes or backup key (encrypted, stored by user)
- Option: server-assisted recovery with security questions (lower security)
- MLS state re-establishment after device loss
- [x] **5.3 Full MLS lifecycle**
- Member removal (Remove proposal → Commit → fan-out)
- Credential update (Update proposal for key rotation)
- Explicit proposal handling (queue proposals, batch commit)
- Group metadata (name, description, avatar hash)
- [x] **5.4 Message editing and deletion**
- `Edit` (0x06) and `Delete` (0x07) message types in `AppMessage`
- `/edit <index> <text>` and `/delete <index>` REPL commands (own messages only)
- Database update/removal on incoming edit/delete
- [x] **5.5 File and media transfer**
- `uploadBlob` / `downloadBlob` RPCs with 256 KB chunked streaming
- SHA-256 content-addressable storage with hash verification
- `FileRef` (0x08) message type with blob_id, filename, file_size, mime_type
- `/send-file <path>` and `/download <index>` REPL commands with progress bars
- 50 MB max file size, automatic MIME detection via `mime_guess`
- [x] **5.6 Abuse prevention and moderation**
- Block user (client-side, suppress display)
- Report message (encrypted report to admin key)
- Admin tools: ban user, delete account, audit log
- [x] **5.7 Offline message queue (client-side)**
- Queue messages when disconnected, send on reconnect
- Idempotent message IDs to prevent duplicates
- Gap detection: compare local seq with server seq
---
## Phase 6 — Scale & Operations
Prepare for real traffic.
- [x] **6.1 Distributed rate limiting**
- Current: in-memory per-process, lost on restart
- Move to Redis or shared state for multi-node deployments
- Sliding window with configurable thresholds
- [x] **6.2 Multi-node / horizontal scaling**
- Stateless server design (already mostly there — state is in storage backend)
- Shared PostgreSQL or CockroachDB backend (replace SQLite)
- Message queue fan-out (Redis pub/sub or NATS for cross-node notification)
- Load balancer health check via QUIC RPC `health()` or Prometheus `/metrics`
- [x] **6.3 Operational runbook**
- Backup / restore procedures (SQLCipher, file backend)
- Key rotation (auth token, TLS cert, DB encryption key)
- Incident response playbook
- Scaling guide (when to add nodes, resource sizing)
- Monitoring dashboard templates (Grafana + Prometheus)
- [x] **6.4 Connection draining and graceful shutdown**
- Stop accepting new connections on SIGTERM
- Wait for in-flight RPCs (configurable timeout, default 30s)
- Drain WebTransport sessions with close frame
- Document expected behavior for load balancers (health → unhealthy first)
- [x] **6.5 Request-level timeouts**
- Per-RPC timeout (prevent slow clients from holding resources)
- Database query timeout
- Overall request deadline propagation
- [x] **6.6 Observability enhancements**
- Request correlation IDs (trace across RPC → storage)
- Storage operation latency metrics
- Per-endpoint latency histograms
- Structured audit log to persistent storage (not just stdout)
- OpenTelemetry integration
---
## Phase 7 — Platform Expansion & Research
Long-term vision for wide adoption.
- [x] **7.1 Mobile clients (iOS + Android)**
- Use C FFI (Phase 3.3) for crypto + transport (single library)
- Push notifications via APNs / FCM (server sends notification on enqueue)
- Background QUIC connection for message polling
- Biometric auth for local key storage (Keychain / Android Keystore)
- [x] **7.2 Web client (browser)**
- Use WASM (Phase 3.4) for crypto
- Use WebTransport (Phase 3.5) for native QUIC transport
- Cap'n Proto via WASM bridge (Phase 3.6)
- IndexedDB for local state persistence
- Service Worker for background notifications
- Progressive Web App (PWA) support
- [x] **7.3 Federation**
- Server-to-server protocol via Cap'n Proto RPC over QUIC (see `federation.capnp`)
- `relayEnqueue`, `proxyFetchKeyPackage`, `federationHealth` methods
- Identity resolution across federated servers
- MLS group spanning multiple servers
- Trust model for federated deployments
- [x] **7.4 Sealed Sender**
- Sender identity inside MLS ciphertext only (server can't see who sent)
- `sealed_sender` module in quicprochat-core with seal/unseal API
- WASM-accessible via `wasm_bindgen` for browser use
- [x] **7.5 Additional language SDKs**
- Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)
- Swift: Swift wrapper over C FFI + Network.framework QUIC
- Ruby: FFI bindings via `quicprochat-ffi`
- Evaluate demand-driven — only build SDKs people request
- [x] **7.6 P2P / NAT traversal**
- Direct peer-to-peer via iroh (foundation exists in `quicprochat-p2p`)
- Server as fallback relay only
- Reduces latency and single-point-of-failure
- Ref: `FUTURE-IMPROVEMENTS.md § 6.1`
- [x] **7.7 Traffic analysis resistance**
- Padding messages to uniform size
- Decoy traffic to mask timing patterns
- Optional Tor/I2P routing for IP privacy
- Ref: `FUTURE-IMPROVEMENTS.md § 5.4, 6.3`
---
## Phase 8 — Freifunk / Community Mesh Networking
Make qpc a first-class citizen on decentralised, community-operated wireless
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpc nodes form
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
functions without any central infrastructure or internet uplink.
### Architecture
```
Client A ─── mDNS discovery ──► nearby qpc node (LAN / mesh)
Cap'n Proto federation
remote qpc node (across mesh)
```
- [x] **F0 — Re-include `quicprochat-p2p` in workspace; fix ALPN strings**
- Moved `crates/quicprochat-p2p` from `exclude` back into `[workspace] members`
- Fixed ALPN `b"quicnprotochat/p2p/1"` → `b"quicprochat/p2p/1"` (breaking wire change)
- Fixed federation ALPN `b"qnpc-fed"` → `b"quicprochat/federation/1"`
- Feature-gated behind `--features mesh` on client (keeps iroh out of default builds)
- [x] **F1 — Federation routing in message delivery**
- `handle_enqueue` and `handle_batch_enqueue` call `federation::routing::resolve_destination()`
- Recipients with a remote home server are relayed via `FederationClient::relay_enqueue()`
- mTLS mutual authentication between nodes (both present client certs, validated against shared CA)
- Config: `QPC_FEDERATION_LISTEN`, `QPC_LOCAL_DOMAIN`, `QPC_FEDERATION_CERT/KEY/CA`
- [x] **F2 — mDNS local peer discovery**
- Server announces `_quicprochat._udp.local.` on startup via `mdns-sd`
- Client: `MeshDiscovery::start()` browses for nearby nodes (feature-gated)
- REPL commands: `/mesh peers` (scan + list), `/mesh server <host:port>` (note address)
- Nodes announce: `ver=1`, `server=<host:port>`, `domain=<local_domain>` TXT records
- [x] **F3 — Self-sovereign mesh identity**
- Ed25519 keypair-based identity independent of AS registration
- JSON-persisted seed + known peers directory
- Sign/verify operations for mesh authenticity (`crates/quicprochat-p2p/src/identity.rs`)
- [x] **F4 — Store-and-forward with TTL**
- `MeshEnvelope` with TTL-based expiry, hop_count tracking, max_hops routing limit
- SHA-256 deduplication ID prevents relay loops
- Ed25519 signature verification on envelopes
- `MeshStore` in-memory queue with per-recipient capacity limits and TTL-based GC
- [x] **F5 — Lightweight broadcast channels**
- Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)
- Topic-based pub/sub via `BroadcastChannel` and `BroadcastManager`
- Subscribe/unsubscribe, create, publish API on `P2pNode`
- [x] **F6 — Extended `/mesh` REPL commands**
- `/mesh send <peer_id> <msg>` — direct P2P message via iroh
- `/mesh broadcast <topic> <msg>` — publish to broadcast channel
- `/mesh subscribe <topic>` — join broadcast channel
- `/mesh route` — show routing table
- `/mesh identity` — show mesh identity info
- `/mesh store` — show store-and-forward statistics
- [x] **F7 — OpenWrt cross-compilation guide**
- Musl static builds: `x86_64-unknown-linux-musl`, `armv7-unknown-linux-musleabihf`, `mips-unknown-linux-musl`
- Strip binary: `--release` + `strip` → target size < 5 MB for flash storage
- `opkg` package manifest for OpenWrt feed
- `procd` init script + `uci` config file for OpenWrt integration
- CI job: cross-compile and size-check on every release tag
- [x] **F8 — Traffic analysis resistance for mesh**
- Uniform message padding to nearest 256-byte boundary (hides message size)
- Configurable decoy traffic rate (fake messages to mask send timing)
- Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)
- Ref: Phase 7.7 for server-side traffic analysis resistance
---
## Phase 9 — Developer Experience & Community Growth
Features designed to attract contributors, create demo/showcase potential,
and lower the barrier to entry for non-crypto developers.
- [x] **9.1 Criterion Benchmark Suite (`qpc-bench`)**
- Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake
- CI publishes HTML benchmark reports as GitHub Actions artifacts
- Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust
- [x] **9.2 Safety Numbers (key verification)**
- 60-digit numeric code derived from two identity keys (Signal-style)
- `/verify <username>` REPL command for out-of-band verification
- Available in WASM via `compute_safety_number` binding
- [x] **9.3 Full-Screen TUI (Ratatui + Crossterm)**
- `qpc tui` launches a full-screen terminal UI: message pane, input bar,
channel sidebar with unread counts, MLS epoch indicator
- Feature-gated `--features tui` to keep ratatui/crossterm out of default builds
- Existing REPL and CLI subcommands are unaffected
- [x] **9.4 Delivery Proof Canary Tokens**
- Server signs `Ed25519(SHA-256(message_id || recipient || timestamp))` on enqueue
- Sender stores proof locally — cryptographic evidence the server queued the message
- Cap'n Proto schema gains optional `deliveryProof: Data` on enqueue response
- [x] **9.5 Verifiable Transcript Archive**
- `GroupMember::export_transcript(path, password)` writes encrypted, tamper-evident
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)
- `qpc export verify` CLI command independently verifies chain integrity
- Useful for legal discovery, audit, or personal backup
- [x] **9.6 Key Transparency (Merkle-Log Identity Binding)**
- Append-only Merkle log of (username, identity_key) bindings in the AS
- Clients receive inclusion proofs alongside key fetches
- Any client can independently audit the full identity history
- Lightweight subset of RFC 9162 adapted for identity keys
- [x] **9.7 Dynamic Server Plugin System**
- Server loads `.so`/`.dylib` plugins at runtime via `--plugin-dir`
- C-compatible `HookVTable` via `extern "C"` — plugins in any language
- 6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered
- Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)
- [x] **9.8 PQ Noise Transport Layer**
- Hybrid `Noise_XX + ML-KEM-768` handshake for post-quantum transport security
- Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)
- Feature-gated `--features pq-noise`; classical Noise_XX default preserved
- May require extending or forking `snow` crate's `CryptoResolver`
---
## Summary Timeline
| Phase | Focus | Estimated Effort |
|-------|-------|-----------------|
| **1** | Production Hardening | 1–2 days |
| **2** | Test & CI Maturity | 2–3 days |
| **3** | Client SDKs (Go, Python, WASM, FFI, WebTransport) | 5–8 days |
| **4** | Trust & Security Infrastructure | 2–4 days (excl. audit) |
| **5** | Features & UX | 5–7 days |
| **6** | Scale & Operations | 3–5 days |
| **7** | Platform Expansion & Research | ongoing |
| **8** | Freifunk / Community Mesh | ongoing |
| **9** | Developer Experience & Community Growth | 3–5 days |
---
## Related Documents
- [Future Improvements](docs/FUTURE-IMPROVEMENTS.md) — consolidated improvement list
- [Production Readiness Audit](docs/PRODUCTION-READINESS-AUDIT.md) — specific blockers
- [Security Audit](docs/SECURITY-AUDIT.md) — findings and recommendations
- [Milestone Tracker](docs/src/roadmap/milestones.md) — M1–M7 status
- [Auth, Devices, and Tokens](docs/src/roadmap/authz-plan.md) — authorization design
- [DM Channel Design](docs/src/roadmap/dm-channels.md) — 1:1 channel spec

View File

@@ -1,29 +0,0 @@
# Security Policy
## Supported Versions
Only the current `main` branch is supported with security updates.
## Reporting a Vulnerability
**Do not use public GitHub issues to report security vulnerabilities.**
Instead, email **security@quicprochat.org** with:
- A description of the vulnerability
- Steps to reproduce or a proof of concept
- The affected component(s) and potential impact
We will acknowledge your report within **48 hours** and work with you on a fix under a **90-day coordinated disclosure** timeline.
## What Qualifies
- Cryptographic implementation bugs (MLS, Noise, hybrid KEM, key derivation)
- Authentication or authorization bypass
- Key material leakage (memory, logs, network)
- Protocol-level flaws (replay, downgrade, impersonation)
- Any issue that compromises message confidentiality or integrity
## Credit
Reporters are credited in published security advisories unless they prefer to remain anonymous. Let us know your preference when you report.

View File

@@ -1,229 +0,0 @@
# quicprochat — Sprint Plan
> 7 sprints synthesized from code audit, architecture analysis, and ecosystem research.
> Each sprint is ~1 week. Sprints are ordered by priority and dependency.
---
## Sprint 1 — Bug Fixes & Code Quality (Quick Wins)
Fix all known bugs, clippy warnings, and dead code before building on top.
- [x] **1.1 Fix boolean logic bug in TUI**
- `crates/quicprochat-client/src/client/v2_tui.rs:832` — remove `|| true`
- Cursor positioning always executes regardless of input state
- [x] **1.2 Fix unwrap violations in P2P router**
- `crates/quicprochat-p2p/src/routing.rs:416,419` — `.lock().unwrap()` on Mutex
- Replace with `.expect("lock poisoned")` or proper error handling
- [x] **1.3 Remove placeholder assertion in WebTransport**
- `crates/quicprochat-server/src/webtransport.rs:418` — `assert!(true);`
- [x] **1.4 Wire up unused metrics**
- `record_storage_latency()` — instrument storage layer calls
- `record_uptime_seconds()` — add periodic heartbeat task in server main loop
- [x] **1.5 Wire up or remove unused config fields**
- `EffectiveConfig::webtransport_listen` — connect to WebTransport listener
- `EffectiveConfig::rpc_timeout_secs` — apply as per-RPC deadline
- `EffectiveConfig::storage_timeout_secs` — apply as DB query timeout
- [x] **1.6 Fix remaining clippy warnings**
- Reduce function arity (2 functions with 8-9 args → use config/param structs)
- Remove useless `format!()` call
- Collapse nested conditionals
- Rename `from_str` method to avoid `FromStr` trait confusion
---
## Sprint 2 — OpenMLS 0.5 → 0.8 Migration
**CRITICAL**: OpenMLS 0.7.2 includes security patches. Staying on 0.5 is a risk.
- [x] **2.1 Migrate StorageProvider trait**
- Old `OpenMlsKeyStore` → new `StorageProvider` (most invasive change)
- Rework `DiskKeyStore` integration (must keep bincode serialization)
- Update all `group.rs` calls that interact with the key store
- [x] **2.2 Update MLS API calls**
- `self_update()` / `propose_self_update()` — add `LeafNodeParameters` arg
- `join_by_external_commit()` — add optional LeafNode params
- `Sender::NewMember` → split into `NewMemberProposal` / `NewMemberCommit`
- [x] **2.3 Handle GREASE support**
- New variants in `ProposalType`, `ExtensionType`, `CredentialType`
- Update match arms to handle unknown/GREASE values
- [x] **2.4 Update AAD handling**
- AAD no longer persisted — set before every API call generating `MlsMessageOut`
- [x] **2.5 Verify FIPS 203 alignment**
- Confirm ML-KEM-768 parameters match final FIPS 203 (not draft)
- Review hybrid KEM against RFC 9794 combination methods
- [x] **2.6 Full test suite pass**
- All 301 tests must pass with OpenMLS 0.8
- Run crypto benchmarks to check for performance regressions
---
## Sprint 3 — Client Resilience
Currently, network glitches cause the client to hang. This blocks v2 launch.
- [x] **3.1 Auto-reconnect with backoff**
- Integrate existing `retry.rs` into `RpcClient::call()` path
- Exponential backoff with jitter (already implemented, not wired)
- Configurable max retries and backoff ceiling
- [x] **3.2 Push subscription recovery**
- Detect broken push stream and re-subscribe automatically
- Buffer missed events during reconnection window
- [x] **3.3 Heartbeat / keepalive**
- Periodic QUIC ping in TUI and REPL modes
- Detect dead connections before user notices
- [x] **3.4 SDK disconnect lifecycle**
- Add `QpcClient::disconnect()` for clean shutdown
- Proper state machine: Connected → Reconnecting → Disconnected
- [x] **3.5 Connection status UI**
- TUI: show connection state in status bar (Connected / Reconnecting / Offline)
- REPL: print status change notifications
---
## Sprint 4 — Server Hardening
Fix graceful shutdown and wire up timeouts for production readiness.
- [x] **4.1 In-flight RPC tracking**
- Replace fixed 30s shutdown delay with actual in-flight RPC counter
- Drain when counter reaches zero (with configurable max wait)
- [x] **4.2 Apply request-level timeouts**
- Wire `rpc_timeout_secs` config into per-RPC deadline enforcement
- Wire `storage_timeout_secs` into DB query timeouts
- Cancel long-running operations cleanly
- [x] **4.3 Plugin shutdown hooks**
- Add `on_shutdown` hook to `HookVTable`
- Call plugin shutdown before server exits
- [x] **4.4 Federation drain during shutdown**
- Stop accepting federation relay requests on SIGTERM
- Wait for in-flight federation RPCs before exit
- [x] **4.5 Connection draining improvements**
- Send QUIC CONNECTION_CLOSE with application reason
- WebTransport: send close frame before dropping sessions
---
## Sprint 5 — Test Coverage & CI Hardening
Address the major test coverage gaps identified in the audit.
- [x] **5.1 RPC framing unit tests**
- `crates/quicprochat-rpc/src/framing.rs` — encode/decode edge cases
- Malformed frames, truncated input, max-size payloads
- Fuzzing harness for frame parser
- [x] **5.2 SDK state machine tests**
- `crates/quicprochat-sdk/src/conversation.rs` — conversation lifecycle
- `crates/quicprochat-sdk/src/groups.rs` — group join/leave/update
- `crates/quicprochat-sdk/src/messaging.rs` — send/receive/queue
- [x] **5.3 Server domain service tests**
- `crates/quicprochat-server/src/domain/` — all service modules
- Test business logic without DB (mock storage trait)
- [x] **5.4 Integration tests**
- Reconnection scenario (kill server, restart, verify client recovers)
- Graceful shutdown (send SIGTERM during active RPCs, verify drain)
- Multi-node federation relay (if federation wired in Sprint 6)
- [x] **5.5 CI hardening**
- Add MSRV check (Rust 1.75 or declared minimum)
- Add cross-platform CI (macOS, Windows — at least build check)
- Add cargo-fuzz for crypto and parsing code
- Add MIRI for unsafe code in plugin-api/FFI
---
## Sprint 6 — Federation & P2P Integration
Wire up the scaffolded federation and P2P code into working features.
- [x] **6.1 Federation message routing**
- Wire `federation::routing::resolve_destination()` into `handle_enqueue`
- Route messages to remote home servers via `FederationClient::relay_enqueue()`
- Resolve protocol mismatch (Cap'n Proto federation vs Protobuf main RPC)
- [x] **6.2 Federation identity resolution**
- Cross-server user lookup (`user@remote-server`)
- KeyPackage fetching across federated nodes
- [x] **6.3 P2P client integration**
- Wire iroh P2P into client as transport option
- Fallback logic: prefer P2P direct → fall back to server relay
- mDNS discovery in client (already scaffolded, needs activation)
- [x] **6.4 Multipath QUIC evaluation**
- Research draft-ietf-quic-multipath (likely RFC in 2026)
- Prototype: use multiple paths for mesh relay resilience
- Decision: adopt or defer based on quinn support
- [x] **6.5 Federation integration tests**
- Two-server test: register on A, send to user on B, verify delivery
- mTLS mutual auth verification
- Partition tolerance (one node goes down, messages queue)
---
## Sprint 7 — Documentation, Polish & Future Prep
Final polish and forward-looking improvements.
- [x] **7.1 Crate-level documentation**
- Add module-level docs to `quicprochat-plugin-api`, `quicprochat-rpc`, `quicprochat-sdk`
- Doc comments for all public APIs in domain services
- [x] **7.2 Refactor high-arity functions** (none found — already clean)
- Consolidate 8-9 parameter functions into config/param structs
- Improve builder patterns where appropriate
- [ ] **7.3 Review RFC 9750 (MLS Architecture)** (deferred — requires manual review)
- Verify quicprochat's AS/DS split aligns with RFC 9750 recommendations
- Document any deviations and rationale
- [ ] **7.4 Desktop client evaluation** (deferred — requires Tauri prototype)
- Prototype Tauri v2 desktop shell wrapping the TUI or a web UI
- Evaluate effort to ship cross-platform desktop client
- [x] **7.5 Security pre-audit prep**
- Document all crypto boundaries and trust assumptions
- Create threat model document
- Prepare scope document for external auditors (Roadmap item 4.1)
- Budget: NCC Group / Trail of Bits / Cure53 ($50K–$150K, 4–6 weeks)
- [ ] **7.6 Repository rename** (requires GitHub admin action)
- Rename GitHub repository from `quicproquo` → `quicprochat`
- Update all GitHub URLs, CI badge links, go.mod import paths
- Set up redirect from old repo name
---
## Sprint Summary
| Sprint | Focus | Risk | Key Deliverable |
|--------|-------|------|----------------|
| **1** | Bug fixes & code quality | Low | Zero clippy warnings, metrics wired |
| **2** | OpenMLS 0.5 → 0.8 | High | Security patches applied, FIPS 203 verified |
| **3** | Client resilience | Medium | Auto-reconnect, heartbeat, status UI |
| **4** | Server hardening | Medium | Real graceful shutdown, timeouts enforced |
| **5** | Test coverage & CI | Low | Unit tests for SDK/RPC/domain, fuzzing |
| **6** | Federation & P2P | High | Working cross-server messaging, P2P fallback |
| **7** | Docs, polish & audit prep | Low | Audit-ready, desktop prototype |

View File

@@ -1,26 +0,0 @@
registering 'alice'...
user 'alice' registered
logging in as 'alice'...
logged in, session cached
identity: c1e1f6df17eeb6..2816
KeyPackage uploaded
hybrid key uploaded
type /help for commands, Ctrl+D to exit
[no conversation] > /dm bob
resolving bob...
creating channel...
fetching peer's key package...
DM with @bob created. Start typing!
[@bob] > Hey Bob, testing our E2E encrypted channel!
[bob] Works great -- the server never sees plaintext?
[@bob] > Right. MLS forward secrecy + post-quantum KEM.
[bob] Impressive. How do I verify your identity?
[@bob] > Run /verify alice -- compare the safety number out-of-band.
[@bob] > /group-info
 Conversation: @bob
 Type: DM
 Members: 2
 alice (you), bob
 MLS epoch: 3
[@bob] >

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.2 MiB

View File

@@ -1,24 +0,0 @@
registering 'bob'...
user 'bob' registered
logging in as 'bob'...
logged in, session cached
identity: a8c2f19f1b0806..c73f
KeyPackage uploaded
hybrid key uploaded
type /help for commands, Ctrl+D to exit
[system] new conversation: @alice
[@alice] > [alice] Hey Bob, testing our E2E encrypted channel!
[@alice] > Works great -- the server never sees plaintext?
[alice] Right. MLS forward secrecy + post-quantum KEM.
[@alice] > Impressive. How do I verify your identity?
[alice] Run /verify alice -- compare the safety number out-of-band.
[@alice] > /verify alice
 Safety number for @alice:
 096482 731945 208376
 571039 284617 950283
[@alice] > /whoami
 identity: a8c2f19f1b0806..c73f
 hybrid key: yes
 conversations: 1
[@alice] >

Binary file not shown.

Before

Width:  |  Height:  |  Size: 67 KiB

View File

@@ -1,59 +0,0 @@
=== Alice (left) ===
./target/debug/qpq repl --username alice --password de
opass1 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXG
OrPY/server-cert.der --state /tmp/tmp.adbXGLOrPY/alice.b
n
registering 'alice'...
user 'alice' registered
logging in as 'alice'...
logged in, session cached
identity: c1e1f6df17eeb6f539d7fbea94129fa32fc02ca40e5c
7a7c95cfc94161d5f628
KeyPackage uploaded
hybrid key uploaded
type /help for commands, Ctrl+D to exit
[no conversation] > /dm bob
resolving bob...
creating channel...
fetching peer's key package...
DM with @bob created. Start typing!
[@bob] > ^LHey Bob, testing our E2E encrypted channel!
[@bob] > Right. MLS forward secrecy + post-quantum KEM.
[@bob] > /group-info
Conversation: @bob
Type: DM
Members: 2
alice (you), bob
MLS epoch: 1
[@bob] >
=== Bob (right) ===
./target/debug/qpq repl --username bob --password demop
ass2 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXGLOr
PY/server-cert.der --state /tmp/tmp.adbXGLOrPY/bob.bin
registering 'bob'...
user 'bob' registered
logging in as 'bob'...
logged in, session cached
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
af5ff9c89c69750c73f
KeyPackage uploaded
hybrid key uploaded
type /help for commands, Ctrl+D to exit
[no conversation] > /list
no conversations yet. Try /dm <username> or /create-gro
up <name>
[no conversation] > /switch @alice
error: conversation not found: @alice
[no conversation] > ^LWorks great -- the server never see
s plaintext?
error: no active conversation; use /dm or /create-group
first
[no conversation] > /whoami
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
af5ff9c89c69750c73f
hybrid key: yes
conversations: 0
[no conversation] >

View File

@@ -0,0 +1,60 @@
[package]
name = "quicnprotochat-client"
version = "0.1.0"
edition = "2021"
description = "CLI client for quicnprotochat."
license = "MIT"
[[bin]]
name = "quicnprotochat"
path = "src/main.rs"
[dependencies]
quicnprotochat-core = { path = "../quicnprotochat-core" }
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
openmls_rust_crypto = { workspace = true }
# Serialisation + RPC
capnp = { workspace = true }
capnp-rpc = { workspace = true }
# Async
tokio = { workspace = true }
tokio-util = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
bincode = { workspace = true }
# Crypto — OPAQUE PAKE
opaque-ke = { workspace = true }
rand = { workspace = true }
# Error handling
anyhow = { workspace = true }
thiserror = { workspace = true }
# Crypto — for fingerprint verification in fetch-key subcommand
sha2 = { workspace = true }
argon2 = { workspace = true }
chacha20poly1305 = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
rustls = { workspace = true }
# Logging
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
# CLI
clap = { workspace = true }
clap_complete = { workspace = true }
indicatif = { workspace = true }
[dev-dependencies]
dashmap = { workspace = true }
assert_cmd = "2"
tempfile = "3"
portpicker = "0.1"
rand = "0.8"
hex = "0.4"

View File

@@ -5,9 +5,9 @@ use opaque_ke::{
ClientLogin, ClientLoginFinishParameters, ClientRegistration,
ClientRegistrationFinishParameters, CredentialResponse, RegistrationResponse,
};
use quicprochat_core::{
use quicnprotochat_core::{
generate_key_package, hybrid_decrypt, hybrid_encrypt, opaque_auth::OpaqueSuite,
GroupMember, HybridKeypair, IdentityKeypair, ReceivedMessage,
HybridKeypair, IdentityKeypair,
};
use super::{
@@ -16,7 +16,10 @@ use super::{
connect_node, current_timestamp_ms, enqueue, fetch_all, fetch_hybrid_key,
fetch_key_package, fetch_wait, try_hybrid_decrypt, upload_hybrid_key, upload_key_package,
},
state::{decode_identity_key, load_existing_state, load_or_init_state, save_state, sha256},
state::{
decode_identity_key, load_existing_state, load_or_init_state, save_state, sha256,
MemberBackend,
},
};
/// Print local identity information from the state file (no server connection).
@@ -27,8 +30,8 @@ pub fn cmd_whoami(state_path: &Path, password: Option<&str>) -> anyhow::Result<(
let pk_bytes = identity.public_key_bytes();
let fingerprint = sha256(&pk_bytes);
println!("identity_key : {}", hex::encode(pk_bytes));
println!("fingerprint : {}", hex::encode(fingerprint));
println!("identity_key : {}", hex::encode(&pk_bytes));
println!("fingerprint : {}", hex::encode(&fingerprint));
println!(
"hybrid_key : {}",
if state.hybrid_key.is_some() {
@@ -45,6 +48,14 @@ pub fn cmd_whoami(state_path: &Path, password: Option<&str>) -> anyhow::Result<(
"none"
}
);
println!(
"pq_backend : {}",
if state.use_pq_backend {
"yes (MLS HPKE: X25519 + ML-KEM-768)"
} else {
"no (classical)"
}
);
println!("state_file : {}", state_path.display());
Ok(())
@@ -203,7 +214,6 @@ pub async fn cmd_register_user(
}
/// Log in via the OPAQUE protocol and receive a session token.
#[allow(clippy::too_many_arguments)]
pub async fn cmd_login(
server: &str,
ca_cert: &Path,
@@ -311,129 +321,6 @@ fn derive_identity_for_login(
))
}
// ── OPAQUE helpers (used by both one-shot commands and REPL bootstrap) ───────
/// Perform OPAQUE registration. Returns Ok(()) on success.
/// The error message contains "E018" if the user already exists.
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
pub(crate) async fn opaque_register(
client: &quicprochat_proto::node_capnp::node_service::Client,
username: &str,
password: &str,
identity_key: Option<&[u8]>,
) -> anyhow::Result<()> {
let mut rng = rand::rngs::OsRng;
let reg_start = ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
.map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
let mut req = client.opaque_register_start_request();
{
let mut p = req.get();
p.set_username(username);
p.set_request(&reg_start.message.serialize());
}
let resp = req.send().promise.await.context("opaque_register_start RPC failed")?;
let response_bytes = resp
.get()
.context("register_start: bad response")?
.get_response()
.context("register_start: missing response")?
.to_vec();
let reg_response = RegistrationResponse::<OpaqueSuite>::deserialize(&response_bytes)
.map_err(|e| anyhow::anyhow!("invalid registration response: {e}"))?;
let reg_finish = reg_start
.state
.finish(
&mut rng,
password.as_bytes(),
reg_response,
ClientRegistrationFinishParameters::<OpaqueSuite>::default(),
)
.map_err(|e| anyhow::anyhow!("OPAQUE register finish: {e}"))?;
let mut req = client.opaque_register_finish_request();
{
let mut p = req.get();
p.set_username(username);
p.set_upload(&reg_finish.message.serialize());
if let Some(ik) = identity_key {
p.set_identity_key(ik);
} else {
p.set_identity_key(&[]);
}
}
let resp = req.send().promise.await.context("opaque_register_finish RPC failed")?;
let success = resp
.get()
.context("register_finish: bad response")?
.get_success();
anyhow::ensure!(success, "server rejected registration");
Ok(())
}
/// Perform OPAQUE login and return the raw session token bytes.
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
pub async fn opaque_login(
client: &quicprochat_proto::node_capnp::node_service::Client,
username: &str,
password: &str,
identity_key: &[u8],
) -> anyhow::Result<Vec<u8>> {
let mut rng = rand::rngs::OsRng;
let login_start = ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
.map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
let mut req = client.opaque_login_start_request();
{
let mut p = req.get();
p.set_username(username);
p.set_request(&login_start.message.serialize());
}
let resp = req.send().promise.await.context("opaque_login_start RPC failed")?;
let response_bytes = resp
.get()
.context("login_start: bad response")?
.get_response()
.context("login_start: missing response")?
.to_vec();
let credential_response = CredentialResponse::<OpaqueSuite>::deserialize(&response_bytes)
.map_err(|e| anyhow::anyhow!("invalid credential response: {e}"))?;
let login_finish = login_start
.state
.finish(
&mut rng,
password.as_bytes(),
credential_response,
ClientLoginFinishParameters::<OpaqueSuite>::default(),
)
.map_err(|e| anyhow::anyhow!("OPAQUE login finish (bad password?): {e}"))?;
let mut req = client.opaque_login_finish_request();
{
let mut p = req.get();
p.set_username(username);
p.set_finalization(&login_finish.message.serialize());
p.set_identity_key(identity_key);
}
let resp = req.send().promise.await.context("opaque_login_finish RPC failed")?;
let session_token = resp
.get()
.context("login_finish: bad response")?
.get_session_token()
.context("login_finish: missing session_token")?
.to_vec();
anyhow::ensure!(!session_token.is_empty(), "server returned empty session token");
Ok(session_token)
}
/// Generate a KeyPackage for a fresh identity and upload it to the AS.
pub async fn cmd_register(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
let identity = IdentityKeypair::generate();
@@ -489,7 +376,7 @@ async fn do_upload_keypackage(
ca_cert: &Path,
server_name: &str,
password: Option<&str>,
member: &mut GroupMember,
member: &mut MemberBackend,
hybrid_kp: Option<&HybridKeypair>,
) -> anyhow::Result<()> {
let tls_bytes = member
@@ -523,7 +410,7 @@ async fn do_upload_keypackage(
anyhow::ensure!(server_fp == fingerprint, "fingerprint mismatch");
if let Some(hkp) = &hybrid_kp {
if let Some(ref hkp) = hybrid_kp {
upload_hybrid_key(
&node_client,
&member.identity().public_key_bytes(),
@@ -552,8 +439,9 @@ pub async fn cmd_register_state(
ca_cert: &Path,
server_name: &str,
password: Option<&str>,
use_pq_backend: bool,
) -> anyhow::Result<()> {
let state = load_or_init_state(state_path, password)?;
let state = load_or_init_state(state_path, password, use_pq_backend)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
do_upload_keypackage(
state_path,
@@ -646,15 +534,37 @@ pub async fn cmd_fetch_key(
}
/// Run a two-party MLS demo against the unified server.
pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
let creator_state_path = PathBuf::from("qpc-demo-creator.bin");
let joiner_state_path = PathBuf::from("qpc-demo-joiner.bin");
pub async fn cmd_demo_group(
server: &str,
ca_cert: &Path,
server_name: &str,
use_pq_backend: bool,
) -> anyhow::Result<()> {
use indicatif::{ProgressBar, ProgressStyle};
let creator_state_path = PathBuf::from("quicnprotochat-demo-creator.bin");
let joiner_state_path = PathBuf::from("quicnprotochat-demo-joiner.bin");
let pb = ProgressBar::new(5);
pb.set_style(
ProgressStyle::with_template("{spinner:.green} [{bar:40.cyan/blue}] {pos}/{len} {msg}")
.expect("demo progress template is valid")
.tick_chars("\u{2801}\u{2802}\u{2804}\u{2840}\u{2820}\u{2810}\u{2808} ")
.progress_chars("=>-"),
);
pb.enable_steady_tick(std::time::Duration::from_millis(80));
pb.set_message("Generating Alice keys\u{2026}");
let (mut creator, creator_hybrid_opt) =
load_or_init_state(&creator_state_path, None)?.into_parts(&creator_state_path)?;
let (mut joiner, joiner_hybrid_opt) =
load_or_init_state(&joiner_state_path, None)?.into_parts(&joiner_state_path)?;
load_or_init_state(&creator_state_path, None, use_pq_backend)?.into_parts(&creator_state_path)?;
pb.inc(1);
pb.set_message("Generating Bob keys\u{2026}");
let (mut joiner, joiner_hybrid_opt) =
load_or_init_state(&joiner_state_path, None, use_pq_backend)?.into_parts(&joiner_state_path)?;
pb.inc(1);
pb.set_message("Creating group\u{2026}");
let creator_hybrid = creator_hybrid_opt.unwrap_or_else(HybridKeypair::generate);
let joiner_hybrid = joiner_hybrid_opt.unwrap_or_else(HybridKeypair::generate);
@@ -676,8 +586,6 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
upload_hybrid_key(&creator_node, &creator_identity, &creator_hybrid.public_key()).await?;
upload_hybrid_key(&joiner_node, &joiner_identity, &joiner_hybrid.public_key()).await?;
println!("hybrid public keys uploaded for creator and joiner");
let fetched_joiner_kp = fetch_key_package(&creator_node, &joiner_identity).await?;
anyhow::ensure!(
!fetched_joiner_kp.is_empty(),
@@ -690,7 +598,9 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
let (_commit, welcome) = creator
.add_member(&fetched_joiner_kp)
.context("add_member failed")?;
pb.inc(1);
pb.set_message("Encrypting\u{2026}");
let creator_ds = creator_node.clone();
let joiner_ds = joiner_node.clone();
@@ -698,9 +608,11 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.await?
.context("joiner hybrid key not found")?;
let wrapped_welcome =
hybrid_encrypt(&joiner_hybrid_pk, &welcome, b"", b"").context("hybrid encrypt welcome")?;
hybrid_encrypt(&joiner_hybrid_pk, &welcome).context("hybrid encrypt welcome")?;
enqueue(&creator_ds, &joiner_identity, &wrapped_welcome).await?;
pb.inc(1);
pb.set_message("Delivering\u{2026}");
let welcome_payloads = fetch_all(&joiner_ds, &joiner_identity).await?;
let raw_welcome = welcome_payloads
.first()
@@ -708,7 +620,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.context("Welcome was not delivered to joiner via DS")?;
let welcome_bytes =
hybrid_decrypt(&joiner_hybrid, &raw_welcome, b"", b"").context("hybrid decrypt welcome failed")?;
hybrid_decrypt(&joiner_hybrid, &raw_welcome).context("hybrid decrypt welcome failed")?;
joiner
.join_group(&welcome_bytes)
.context("join_group failed")?;
@@ -717,7 +629,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.send_message(b"hello")
.context("send_message failed")?;
let wrapped_creator_joiner =
hybrid_encrypt(&joiner_hybrid_pk, &ct_creator_to_joiner, b"", b"").context("hybrid encrypt failed")?;
hybrid_encrypt(&joiner_hybrid_pk, &ct_creator_to_joiner).context("hybrid encrypt failed")?;
enqueue(&creator_ds, &joiner_identity, &wrapped_creator_joiner).await?;
let joiner_msgs = fetch_all(&joiner_ds, &joiner_identity).await?;
@@ -725,15 +637,10 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.first()
.context("joiner: missing ciphertext from DS")?;
let inner_creator_joiner =
hybrid_decrypt(&joiner_hybrid, raw_creator_joiner, b"", b"").context("hybrid decrypt failed")?;
let plaintext_creator_joiner = match joiner.receive_message(&inner_creator_joiner)? {
ReceivedMessage::Application(pt) => pt,
other => anyhow::bail!("expected application message, got {other:?}"),
};
println!(
"creator -> joiner plaintext: {}",
String::from_utf8_lossy(&plaintext_creator_joiner)
);
hybrid_decrypt(&joiner_hybrid, raw_creator_joiner).context("hybrid decrypt failed")?;
let plaintext_creator_joiner = joiner
.receive_message(&inner_creator_joiner)?
.context("expected application message")?;
let creator_hybrid_pk = fetch_hybrid_key(&joiner_node, &creator_identity)
.await?
@@ -742,7 +649,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.send_message(b"hello back")
.context("send_message failed")?;
let wrapped_joiner_creator =
hybrid_encrypt(&creator_hybrid_pk, &ct_joiner_to_creator, b"", b"").context("hybrid encrypt failed")?;
hybrid_encrypt(&creator_hybrid_pk, &ct_joiner_to_creator).context("hybrid encrypt failed")?;
enqueue(&joiner_ds, &creator_identity, &wrapped_joiner_creator).await?;
let creator_msgs = fetch_all(&creator_ds, &creator_identity).await?;
@@ -750,16 +657,21 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.first()
.context("creator: missing ciphertext from DS")?;
let inner_joiner_creator =
hybrid_decrypt(&creator_hybrid, raw_joiner_creator, b"", b"").context("hybrid decrypt failed")?;
let plaintext_joiner_creator = match creator.receive_message(&inner_joiner_creator)? {
ReceivedMessage::Application(pt) => pt,
other => anyhow::bail!("expected application message, got {other:?}"),
};
hybrid_decrypt(&creator_hybrid, raw_joiner_creator).context("hybrid decrypt failed")?;
let plaintext_joiner_creator = creator
.receive_message(&inner_joiner_creator)?
.context("expected application message")?;
pb.inc(1);
pb.finish_and_clear();
println!(
"joiner -> creator plaintext: {}",
"creator -> joiner: {}",
String::from_utf8_lossy(&plaintext_creator_joiner)
);
println!(
"joiner -> creator: {}",
String::from_utf8_lossy(&plaintext_joiner_creator)
);
println!("demo-group complete (hybrid PQ envelope active)");
Ok(())
@@ -771,8 +683,9 @@ pub async fn cmd_create_group(
_server: &str,
group_id: &str,
password: Option<&str>,
use_pq_backend: bool,
) -> anyhow::Result<()> {
let state = load_or_init_state(state_path, password)?;
let state = load_or_init_state(state_path, password, use_pq_backend)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
anyhow::ensure!(
@@ -827,7 +740,7 @@ pub async fn cmd_invite(
}
let peer_hpk = fetch_hybrid_key(&node_client, mk).await?;
let commit_payload = if let Some(ref pk) = peer_hpk {
hybrid_encrypt(pk, &commit, b"", b"").context("hybrid encrypt commit")?
hybrid_encrypt(pk, &commit).context("hybrid encrypt commit")?
} else {
commit.clone()
};
@@ -836,7 +749,7 @@ pub async fn cmd_invite(
let peer_hybrid_pk = fetch_hybrid_key(&node_client, &peer_key).await?;
let payload = if let Some(ref pk) = peer_hybrid_pk {
hybrid_encrypt(pk, &welcome, b"", b"").context("hybrid encrypt welcome failed")?
hybrid_encrypt(pk, &welcome).context("hybrid encrypt welcome failed")?
} else {
welcome
};
@@ -900,22 +813,12 @@ pub async fn cmd_join(
let _ = member.receive_message(&mls_payload);
}
// Auto-replenish KeyPackage after join consumed the original one.
let tls_bytes = member
.generate_key_package()
.context("KeyPackage replenishment failed")?;
upload_key_package(&node_client, &member.identity().public_key_bytes(), &tls_bytes)
.await
.context("KeyPackage replenishment upload failed")?;
println!("KeyPackage auto-replenished after join");
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
println!("joined group successfully");
Ok(())
}
/// Send an application message via DS (single recipient or broadcast to all other members).
#[allow(clippy::too_many_arguments)]
pub async fn cmd_send(
state_path: &Path,
server: &str,
@@ -956,7 +859,7 @@ pub async fn cmd_send(
for recipient in &recipients {
let peer_hybrid_pk = fetch_hybrid_key(&node_client, recipient).await?;
let payload = if let Some(ref pk) = peer_hybrid_pk {
hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt failed")?
hybrid_encrypt(pk, &ct).context("hybrid encrypt failed")?
} else {
ct.clone()
};
@@ -986,11 +889,29 @@ pub async fn cmd_recv(
stream: bool,
password: Option<&str>,
) -> anyhow::Result<()> {
use indicatif::{ProgressBar, ProgressStyle};
let state = load_existing_state(state_path, password)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
let client = connect_node(server, ca_cert, server_name).await?;
let stream_pb: Option<ProgressBar> = if stream {
let pb = ProgressBar::new_spinner();
pb.set_style(
ProgressStyle::with_template("{spinner:.green} {msg}")
.expect("recv progress template is valid")
.tick_chars("\u{2801}\u{2802}\u{2804}\u{2840}\u{2820}\u{2810}\u{2808} "),
);
pb.set_message("Listening for messages (0 received)\u{2026}");
pb.enable_steady_tick(std::time::Duration::from_millis(100));
Some(pb)
} else {
None
};
let mut total_received: usize = 0;
loop {
let mut payloads =
fetch_wait(&client, &member.identity().public_key_bytes(), wait_ms).await?;
@@ -1007,47 +928,68 @@ pub async fn cmd_recv(
// application messages that depend on the resulting epoch.
payloads.sort_by_key(|(seq, _)| *seq);
let mut pending: Vec<(usize, Vec<u8>)> = Vec::new();
let mut retry_mls: Vec<Vec<u8>> = Vec::new();
for (idx, (_, payload)) in payloads.iter().enumerate() {
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
Ok(b) => b,
Err(e) => {
println!("[{idx}] decrypt error: {e}");
match &stream_pb {
Some(pb) => pb.println(format!("[{idx}] decrypt error: {e}")),
None => println!("[{idx}] decrypt error: {e}"),
}
continue;
}
};
match member.receive_message(&mls_payload) {
Ok(ReceivedMessage::Application(pt)) => println!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt)),
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => println!("[{idx}] commit applied"),
Err(_) => pending.push((idx, mls_payload)),
}
}
// Retry until no more progress (handles multi-epoch batches).
loop {
let before = pending.len();
pending.retain(|(idx, mls_payload)| {
match member.receive_message(mls_payload) {
Ok(ReceivedMessage::Application(pt)) => {
println!("[{idx}/retry] plaintext: {}", String::from_utf8_lossy(&pt));
false
Ok(Some(pt)) => {
total_received += 1;
let line = format!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt));
match &stream_pb {
Some(pb) => pb.println(line),
None => println!("{line}"),
}
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
println!("[{idx}/retry] commit applied");
false
}
Err(_) => true,
}
});
if pending.len() == before {
break; // No progress — remaining messages are unprocessable
Ok(None) => {
let line = format!("[{idx}] commit applied");
match &stream_pb {
Some(pb) => pb.println(line),
None => println!("{line}"),
}
}
Err(_) => retry_mls.push(mls_payload),
}
}
for (idx, _) in &pending {
println!("[{idx}] error: unprocessable after all retries");
// Retry messages that failed on the first pass (e.g. app messages whose
// epoch was not yet advanced until a commit earlier in the batch was applied).
for mls_payload in &retry_mls {
match member.receive_message(mls_payload) {
Ok(Some(pt)) => {
total_received += 1;
let line = format!("[retry] plaintext: {}", String::from_utf8_lossy(&pt));
match &stream_pb {
Some(pb) => pb.println(line),
None => println!("{line}"),
}
}
Ok(None) => {}
Err(e) => {
let line = format!("[retry] error: {e}");
match &stream_pb {
Some(pb) => pb.println(line),
None => println!("{line}"),
}
}
}
}
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
if let Some(ref pb) = stream_pb {
pb.set_message(format!(
"Listening for messages ({total_received} received)\u{2026}"
));
}
if !stream {
return Ok(());
}
@@ -1056,8 +998,8 @@ pub async fn cmd_recv(
/// Fetch pending payloads, process in order (merge commits, collect plaintexts), save state.
/// Returns only application-message plaintexts. Used by E2E tests and callers that need returned messages.
/// Retries in a loop until no more progress, handling multi-epoch batches where commits must be
/// applied before later application messages can be decrypted.
/// Uses two passes so that if the server delivers an application message before a Commit, the second pass
/// processes it after commits are merged.
pub async fn receive_pending_plaintexts(
state_path: &Path,
server: &str,
@@ -1075,33 +1017,21 @@ pub async fn receive_pending_plaintexts(
payloads.sort_by_key(|(seq, _)| *seq);
let mut plaintexts = Vec::new();
let mut pending: Vec<Vec<u8>> = Vec::new();
let mut retry_mls: Vec<Vec<u8>> = Vec::new();
for (_, payload) in &payloads {
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
Ok(b) => b,
Err(_) => continue,
};
match member.receive_message(&mls_payload) {
Ok(ReceivedMessage::Application(pt)) => plaintexts.push(pt),
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {}
Err(_) => pending.push(mls_payload),
Ok(Some(pt)) => plaintexts.push(pt),
Ok(None) => {}
Err(_) => retry_mls.push(mls_payload),
}
}
// Retry until no more progress (handles multi-epoch batches).
loop {
let before = pending.len();
pending.retain(|mls_payload| {
match member.receive_message(mls_payload) {
Ok(ReceivedMessage::Application(pt)) => {
plaintexts.push(pt);
false
}
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => false,
Err(_) => true,
}
});
if pending.len() == before {
break;
for mls_payload in &retry_mls {
if let Ok(Some(pt)) = member.receive_message(mls_payload) {
plaintexts.push(pt);
}
}
@@ -1117,8 +1047,8 @@ pub fn whoami_json(state_path: &Path, password: Option<&str>) -> anyhow::Result<
let fingerprint = sha256(&pk_bytes);
Ok(format!(
r#"{{"identity_key":"{}", "fingerprint":"{}", "hybrid_key":{}, "group":{}}}"#,
hex::encode(pk_bytes),
hex::encode(fingerprint),
hex::encode(&pk_bytes),
hex::encode(&fingerprint),
state.hybrid_key.is_some(),
state.group.is_some(),
))
@@ -1231,7 +1161,7 @@ pub async fn cmd_chat(
.context("send_message failed")?;
let peer_hybrid_pk = fetch_hybrid_key(&client, &peer_key).await?;
let payload = if let Some(ref pk) = peer_hybrid_pk {
hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt failed")?
hybrid_encrypt(pk, &ct).context("hybrid encrypt failed")?
} else {
ct
};
@@ -1247,39 +1177,21 @@ pub async fn cmd_chat(
_ = poll.tick() => {
let mut payloads = fetch_wait(&client, &identity_bytes, 0).await?;
payloads.sort_by_key(|(seq, _)| *seq);
let mut retry_payloads: Vec<Vec<u8>> = Vec::new();
for (_, payload) in &payloads {
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
Ok(b) => b,
Err(_) => continue,
};
match member.receive_message(&mls_payload) {
Ok(ReceivedMessage::Application(pt)) => {
Ok(Some(pt)) => {
let s = String::from_utf8_lossy(&pt);
println!("\r\n[peer] {s}\n> ");
std::io::stdout().flush().context("flush stdout")?;
}
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {}
Err(_) => retry_payloads.push(mls_payload),
Ok(None) => {}
Err(_) => {}
}
}
// Retry failed messages (epoch may have advanced from commits in this batch)
loop {
let before = retry_payloads.len();
retry_payloads.retain(|mls_payload| {
match member.receive_message(mls_payload) {
Ok(ReceivedMessage::Application(pt)) => {
let s = String::from_utf8_lossy(&pt);
println!("\r\n[peer] {s}\n> ");
let _ = std::io::stdout().flush();
false
}
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => false,
Err(_) => true,
}
});
if retry_payloads.len() == before { break; }
}
if !payloads.is_empty() {
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
}
@@ -1290,111 +1202,3 @@ pub async fn cmd_chat(
println!();
Ok(())
}
// ── Transcript export ─────────────────────────────────────────────────────────
/// Export the message history for a conversation to an encrypted, tamper-evident
/// transcript file.
///
/// `conv_db` is the path to the conversation SQLite database (`.convdb` file).
/// `conv_id_hex` is the 32-hex-character conversation ID to export.
/// `output` is the path for the `.qpct` transcript file to write.
/// `transcript_password` is used to derive the encryption key (Argon2id).
/// `db_password` is the optional SQLCipher password for the conversation database.
pub fn cmd_export(
conv_db: &Path,
conv_id_hex: &str,
output: &Path,
transcript_password: &str,
db_password: Option<&str>,
) -> anyhow::Result<()> {
use quicprochat_core::{TranscriptRecord, TranscriptWriter};
use super::conversation::{ConversationId, ConversationStore};
// Decode conversation ID from hex.
let id_bytes = hex::decode(conv_id_hex)
.map_err(|e| anyhow::anyhow!("conv-id must be 32 hex characters (16 bytes): {e}"))?;
let conv_id = ConversationId::from_slice(&id_bytes)
.ok_or_else(|| anyhow::anyhow!("conv-id must be exactly 16 bytes (32 hex chars), got {} bytes", id_bytes.len()))?;
// Open conversation database.
let store = ConversationStore::open(conv_db, db_password)
.context("open conversation database")?;
// Load conversation metadata (to display name in output).
let conv = store
.load_conversation(&conv_id)?
.with_context(|| format!("conversation '{conv_id_hex}' not found in database"))?;
// Load all messages (oldest first).
let messages = store.load_all_messages(&conv_id)?;
if messages.is_empty() {
println!("No messages in conversation '{}'.", conv.display_name);
return Ok(());
}
// Create output file.
if let Some(parent) = output.parent() {
std::fs::create_dir_all(parent).ok();
}
let mut file = std::fs::File::create(output)
.with_context(|| format!("create transcript file '{}'", output.display()))?;
// Write transcript header + records.
let mut writer = TranscriptWriter::new(transcript_password, &mut file)
.context("initialise transcript writer")?;
let mut written = 0u64;
for (seq, msg) in messages.iter().enumerate() {
writer
.write_record(
&TranscriptRecord {
seq: seq as u64,
sender_identity: &msg.sender_key,
timestamp_ms: msg.timestamp_ms,
plaintext: &msg.body,
},
&mut file,
)
.context("write transcript record")?;
written += 1;
}
println!(
"Exported {} message(s) from '{}' to '{}'.",
written,
conv.display_name,
output.display()
);
println!("Decrypt with: qpc export verify --input <file> --password <password>");
Ok(())
}
/// Verify the hash-chain integrity of a transcript file without decrypting content.
///
/// Prints a summary. Does not require the encryption password (structural check only).
pub fn cmd_export_verify(input: &Path) -> anyhow::Result<()> {
use quicprochat_core::{validate_transcript_structure, ChainVerdict};
let data = std::fs::read(input)
.with_context(|| format!("read transcript file '{}'", input.display()))?;
match validate_transcript_structure(&data)? {
ChainVerdict::Ok { records } => {
println!(
"OK: transcript '{}' is structurally valid. {} record(s) found, hash chain intact.",
input.display(),
records
);
}
ChainVerdict::Broken => {
anyhow::bail!(
"FAIL: hash chain is broken in '{}' — file may have been tampered with.",
input.display()
);
}
}
Ok(())
}

View File

@@ -0,0 +1,13 @@
/// Encode a byte slice as a lower-case hex string.
///
/// Preallocates the output (two chars per byte) and pushes nibbles directly,
/// instead of allocating a temporary `String` per byte via `format!`.
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
    const HEX_DIGITS: &[u8; 16] = b"0123456789abcdef";
    let bytes = bytes.as_ref();
    let mut out = String::with_capacity(bytes.len() * 2);
    for &b in bytes {
        // High nibble then low nibble; table entries are ASCII, so the
        // `as char` casts are lossless.
        out.push(HEX_DIGITS[(b >> 4) as usize] as char);
        out.push(HEX_DIGITS[(b & 0x0f) as usize] as char);
    }
    out
}
/// Decode a hex string (upper- or lower-case digits) into bytes.
///
/// Returns an error — and never panics — on odd-length input or any non-hex
/// character. Works on the raw bytes of `s`: the previous implementation
/// sliced `&s[i..i + 2]`, which panics on multi-byte UTF-8 input when the
/// slice boundary falls inside a character.
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
    // Map one ASCII hex digit to its 4-bit value.
    fn nibble(b: u8) -> Result<u8, &'static str> {
        match b {
            b'0'..=b'9' => Ok(b - b'0'),
            b'a'..=b'f' => Ok(b - b'a' + 10),
            b'A'..=b'F' => Ok(b - b'A' + 10),
            _ => Err("invalid hex character"),
        }
    }
    let bytes = s.as_bytes();
    if bytes.len() % 2 != 0 {
        return Err("odd-length hex string");
    }
    bytes
        .chunks_exact(2)
        .map(|pair| Ok((nibble(pair[0])? << 4) | nibble(pair[1])?))
        .collect()
}

View File

@@ -1,23 +1,8 @@
pub mod command_engine;
pub mod commands;
pub mod conversation;
pub mod display;
pub mod hex;
pub mod mesh_discovery;
#[cfg(feature = "playbook")]
pub mod playbook;
pub mod repl;
pub mod retry;
pub mod rpc;
pub mod session;
pub mod state;
pub mod token_cache;
#[cfg(feature = "tui")]
pub mod tui;
#[cfg(feature = "v2")]
pub mod v2_repl;
#[cfg(all(feature = "v2", feature = "tui"))]
pub mod v2_tui;
pub use commands::*;
pub use rpc::{connect_node, enqueue, fetch_all, fetch_hybrid_key, fetch_key_package, fetch_wait, upload_hybrid_key, upload_key_package};

View File

@@ -0,0 +1,102 @@
//! Retry with exponential backoff for transient RPC failures.
//!
//! Used for `enqueue`, `fetch_all`, and `fetch_wait`. Auth and invalid-param
//! errors are not retried. Configure via `QUICNPROTOCHAT_MAX_RETRIES` and
//! `QUICNPROTOCHAT_BASE_DELAY_MS` (optional).
use std::future::Future;
use std::time::Duration;
use rand::Rng;
use tracing::warn;
/// Default maximum number of retry attempts (including the first try).
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// Default base delay in milliseconds for exponential backoff.
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;

/// Parse an environment variable, falling back to `default` when the variable
/// is unset, not valid UTF-8, or fails to parse.
fn env_or<T: std::str::FromStr>(name: &str, default: T) -> T {
    match std::env::var(name) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}

/// Read max retries from env or use default.
pub fn max_retries_from_env() -> u32 {
    env_or("QUICNPROTOCHAT_MAX_RETRIES", DEFAULT_MAX_RETRIES)
}

/// Read base delay (ms) from env or use default.
pub fn base_delay_ms_from_env() -> u64 {
    env_or("QUICNPROTOCHAT_BASE_DELAY_MS", DEFAULT_BASE_DELAY_MS)
}
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
///
/// On `Err(e)`: if `is_retriable(&e)` and more attempts remain, sleeps with
/// exponential backoff (plus jitter) then retries; otherwise returns the last
/// error. `max_retries` counts total attempts including the first; a value of
/// 0 is clamped to 1 so the operation always runs at least once (the previous
/// implementation panicked on `max_retries == 0`, reachable via
/// `QUICNPROTOCHAT_MAX_RETRIES=0`).
pub async fn retry_async<F, Fut, T, E, P>(
    op: F,
    max_retries: u32,
    base_delay_ms: u64,
    is_retriable: P,
) -> Result<T, E>
where
    F: Fn() -> Fut,
    Fut: Future<Output = Result<T, E>>,
    P: Fn(&E) -> bool,
{
    // Clamp so the loop body — and therefore `op` — always executes at least
    // once; this removes the unreachable-`None` path the old code `expect`ed on.
    let max_attempts = max_retries.max(1);
    let mut attempt = 0u32;
    loop {
        match op().await {
            Ok(t) => return Ok(t),
            Err(e) => {
                attempt += 1;
                // Stop on non-retriable errors or once attempts are exhausted.
                if !is_retriable(&e) || attempt >= max_attempts {
                    return Err(e);
                }
                // Exponential backoff: base * 2^(attempt-1), with saturating
                // arithmetic so absurd configs can't overflow, plus up to 50%
                // random jitter to de-synchronize concurrent retries.
                let delay_ms = base_delay_ms.saturating_mul(2u64.saturating_pow(attempt - 1));
                let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
                let total_ms = delay_ms + jitter_ms;
                warn!(
                    attempt,
                    max_retries,
                    delay_ms = total_ms,
                    "RPC failed, retrying after backoff"
                );
                tokio::time::sleep(Duration::from_millis(total_ms)).await;
            }
        }
    }
}
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
/// When in doubt, returns `true` (retry).
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
    // Substrings that mark a permanent failure: retrying cannot help when the
    // request is unauthorized or malformed.
    const NON_RETRIABLE: &[&str] = &[
        // auth / permission
        "unauthorized",
        "auth failed",
        "access denied",
        "401",
        "forbidden",
        "403",
        "token",
        // bad request / invalid params
        "bad request",
        "400",
        "invalid param",
        "fingerprint mismatch",
    ];
    // `{:#}` renders the full context chain so matches work at any level.
    let rendered = format!("{:#}", err).to_lowercase();
    // Everything else — network, timeout, connection, server error — retries.
    !NON_RETRIABLE.iter().any(|needle| rendered.contains(needle))
}

View File

@@ -0,0 +1,369 @@
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
use anyhow::Context;
use quinn::{ClientConfig, Endpoint};
use quinn_proto::crypto::rustls::QuicClientConfig;
use rustls::pki_types::CertificateDer;
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use quicnprotochat_core::HybridPublicKey;
use quicnprotochat_proto::node_capnp::{auth, node_service};
use crate::AUTH_CONTEXT;
use super::retry::{
anyhow_is_retriable, base_delay_ms_from_env, max_retries_from_env, retry_async,
};
/// Establish a QUIC/TLS connection and return a `NodeService` client.
///
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
///
/// # Errors
/// Fails when `server` is not `host:port`, the CA certificate cannot be read
/// or added to the root store, or any stage of the QUIC handshake / stream
/// setup fails.
pub async fn connect_node(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
) -> anyhow::Result<node_service::Client> {
    let addr: SocketAddr = server
        .parse()
        .with_context(|| format!("server must be host:port, got {server}"))?;
    // Trust exactly one root: the server's (self-signed by default) cert.
    let cert_bytes = std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
    let mut roots = RootCertStore::empty();
    roots
        .add(CertificateDer::from(cert_bytes))
        .context("add root cert")?;
    let mut tls = RustlsClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    // ALPN must match what the server advertises, or the handshake fails.
    tls.alpn_protocols = vec![b"capnp".to_vec()];
    let crypto = QuicClientConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;
    // Bind an ephemeral local port for the client endpoint.
    let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
    let mut endpoint = Endpoint::client(bind_addr)?;
    endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));
    let connection = endpoint
        .connect(addr, server_name)
        .context("quic connect init")?
        .await
        .context("quic connect failed")?;
    // A single bidirectional stream carries the entire capnp RPC session.
    let (send, recv) = connection.open_bi().await.context("open bi stream")?;
    let network = twoparty::VatNetwork::new(
        recv.compat(),
        send.compat_write(),
        Side::Client,
        Default::default(),
    );
    let mut rpc_system = RpcSystem::new(Box::new(network), None);
    let client: node_service::Client = rpc_system.bootstrap(Side::Server);
    // Drive the RPC event loop on the current LocalSet; the task finishes
    // when the connection closes.
    tokio::task::spawn_local(rpc_system);
    Ok(client)
}
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
let ctx = AUTH_CONTEXT.get().ok_or_else(|| {
anyhow::anyhow!("init_auth must be called with a non-empty token before RPCs")
})?;
auth.set_version(ctx.version);
auth.set_access_token(&ctx.access_token);
auth.set_device_id(&ctx.device_id);
Ok(())
}
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
///
/// The server responds with a fingerprint of the stored package; comparing it
/// against a locally computed SHA-256 of the same bytes confirms the upload
/// was not corrupted or substituted in transit.
///
/// # Errors
/// Fails on RPC transport errors, a malformed response, or a fingerprint
/// mismatch.
pub async fn upload_key_package(
    client: &node_service::Client,
    identity_key: &[u8],
    package: &[u8],
) -> anyhow::Result<()> {
    let mut req = client.upload_key_package_request();
    {
        // Scope the params borrow so it ends before `send()` consumes `req`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        p.set_package(package);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }
    let resp = req
        .send()
        .promise
        .await
        .context("upload_key_package RPC failed")?;
    let server_fp = resp
        .get()
        .context("upload_key_package: bad response")?
        .get_fingerprint()
        .context("upload_key_package: missing fingerprint")?
        .to_vec();
    // Recompute locally and require an exact match with the server's echo.
    let local_fp = super::state::sha256(package);
    anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
    Ok(())
}
/// Fetch a KeyPackage for `identity_key` from the AS.
///
/// Returns the raw package bytes as stored by the server.
///
/// # Errors
/// Fails on RPC transport errors or when the response is missing the
/// `package` field.
pub async fn fetch_key_package(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Vec<u8>> {
    let mut req = client.fetch_key_package_request();
    {
        // Scope the params borrow so it ends before `send()` consumes `req`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }
    let resp = req
        .send()
        .promise
        .await
        .context("fetch_key_package RPC failed")?;
    // Copy the package out of the capnp response arena into an owned Vec.
    let pkg = resp
        .get()
        .context("fetch_key_package: bad response")?
        .get_package()
        .context("fetch_key_package: missing package field")?
        .to_vec();
    Ok(pkg)
}
/// Enqueue an opaque payload to the DS for `recipient_key`.
/// Returns the per-inbox sequence number assigned by the server.
/// Retries on transient failures with exponential backoff.
///
/// NOTE(review): a retry after a lost response may enqueue the same payload
/// twice — confirm the server deduplicates or that receivers tolerate
/// duplicate envelopes.
pub async fn enqueue(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<u64> {
    // Clone into owned values so the retry closure can be invoked repeatedly.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                {
                    // Scope the params borrow so it ends before `send()`.
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        max_retries_from_env(),
        base_delay_ms_from_env(),
        anyhow_is_retriable,
    )
    .await
}
/// Fetch and drain all payloads for `recipient_key`.
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_all(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Clone into owned values so the retry closure can be invoked repeatedly.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_request();
                {
                    // Scope the params borrow so it ends before `send()`.
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("fetch RPC failed")?;
                let list = resp
                    .get()
                    .context("fetch: bad response")?
                    .get_payloads()
                    .context("fetch: missing payloads")?;
                // Copy each envelope out of the capnp arena into owned Vecs
                // so the response buffer can be dropped.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }
                Ok(payloads)
            }
        },
        max_retries_from_env(),
        base_delay_ms_from_env(),
        anyhow_is_retriable,
    )
    .await
}
/// Long-poll for payloads with optional timeout (ms).
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_wait(
    client: &node_service::Client,
    recipient_key: &[u8],
    timeout_ms: u64,
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Clone into owned values so the retry closure can be invoked repeatedly.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            // u64 is Copy; this rebind lets the `async move` below capture a
            // per-invocation copy without moving the outer binding.
            let timeout_ms = timeout_ms;
            async move {
                let mut req = client.fetch_wait_request();
                {
                    // Scope the params borrow so it ends before `send()`.
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_timeout_ms(timeout_ms);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("fetch_wait RPC failed")?;
                let list = resp
                    .get()
                    .context("fetch_wait: bad response")?
                    .get_payloads()
                    .context("fetch_wait: missing payloads")?;
                // Copy each envelope out of the capnp arena into owned Vecs.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch_wait: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }
                Ok(payloads)
            }
        },
        max_retries_from_env(),
        base_delay_ms_from_env(),
        anyhow_is_retriable,
    )
    .await
}
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
///
/// The key is serialized via `HybridPublicKey::to_bytes` and stored server-side
/// keyed by `identity_key`.
///
/// # Errors
/// Fails on RPC transport errors or when the auth context is missing.
pub async fn upload_hybrid_key(
    client: &node_service::Client,
    identity_key: &[u8],
    hybrid_pk: &HybridPublicKey,
) -> anyhow::Result<()> {
    let mut req = client.upload_hybrid_key_request();
    {
        // Scope the params borrow so it ends before `send()` consumes `req`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        p.set_hybrid_public_key(&hybrid_pk.to_bytes());
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }
    // Response body is not inspected; success of the RPC is the signal.
    req.send()
        .promise
        .await
        .context("upload_hybrid_key RPC failed")?;
    Ok(())
}
/// Fetch a peer's hybrid public key from the server.
///
/// Returns `None` if the peer has not uploaded a hybrid key (the server
/// signals this with an empty byte field rather than an error).
///
/// # Errors
/// Fails on RPC transport errors, a malformed response, or when the returned
/// bytes do not parse as a `HybridPublicKey`.
pub async fn fetch_hybrid_key(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Option<HybridPublicKey>> {
    let mut req = client.fetch_hybrid_key_request();
    {
        // Scope the params borrow so it ends before `send()` consumes `req`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }
    let resp = req
        .send()
        .promise
        .await
        .context("fetch_hybrid_key RPC failed")?;
    let pk_bytes = resp
        .get()
        .context("fetch_hybrid_key: bad response")?
        .get_hybrid_public_key()
        .context("fetch_hybrid_key: missing field")?
        .to_vec();
    // Empty bytes == "no key registered", not an error.
    if pk_bytes.is_empty() {
        return Ok(None);
    }
    let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
    Ok(Some(pk))
}
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
///
/// # Errors
/// Fails when no hybrid keypair is available or when decryption itself fails.
pub fn try_hybrid_decrypt(
    hybrid_kp: Option<&quicnprotochat_core::HybridKeypair>,
    payload: &[u8],
) -> anyhow::Result<Vec<u8>> {
    match hybrid_kp {
        Some(kp) => {
            quicnprotochat_core::hybrid_decrypt(kp, payload).map_err(|e| anyhow::anyhow!("{e}"))
        }
        None => Err(anyhow::anyhow!("hybrid key required for decryption")),
    }
}
/// Return the current Unix timestamp in milliseconds.
///
/// If the system clock is set before the Unix epoch, returns 0 instead of
/// panicking.
pub fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}

View File

@@ -0,0 +1,382 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use anyhow::Context;
use argon2::Argon2;
use chacha20poly1305::{
aead::{Aead, KeyInit},
ChaCha20Poly1305, Key, Nonce,
};
use rand::RngCore;
use serde::{Deserialize, Serialize};
use quicnprotochat_core::{
CoreError, DiskKeyStore, GroupMember, HybridCryptoProvider, HybridKeypair, HybridKeypairBytes,
IdentityKeypair, MlsGroup, StoreCrypto,
};
/// Magic bytes for encrypted client state files.
const STATE_MAGIC: &[u8; 4] = b"QPCE";
/// Length (bytes) of the Argon2 salt stored in the file header.
const STATE_SALT_LEN: usize = 16;
/// Length (bytes) of the ChaCha20-Poly1305 nonce stored in the file header.
const STATE_NONCE_LEN: usize = 12;
/// Persisted client state (identity, MLS group, optional PQ key).
///
/// **Production note:** When loading state, use the same `use_pq_backend` value that was used when
/// the state was created. Loading PQ state with classical backend (or vice versa) will fail or
/// produce incorrect behavior.
#[derive(Serialize, Deserialize)]
pub struct StoredState {
    // 32-byte seed the identity keypair is rebuilt from (IdentityKeypair::from_seed).
    pub identity_seed: [u8; 32],
    // Bincode-serialized MLS group state; None before a group is created/joined.
    pub group: Option<Vec<u8>>,
    /// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added.
    #[serde(default)]
    pub hybrid_key: Option<HybridKeypairBytes>,
    /// Cached member public keys for group participants.
    #[serde(default)]
    pub member_keys: Vec<Vec<u8>>,
    /// If true, MLS uses post-quantum hybrid KEM (HybridCryptoProvider) for HPKE. M7.
    #[serde(default)]
    pub use_pq_backend: bool,
}
/// MLS member backend: classical (StoreCrypto) or post-quantum hybrid (HybridCryptoProvider).
pub enum MemberBackend {
    Classical(GroupMember<StoreCrypto>),
    Hybrid(GroupMember<HybridCryptoProvider>),
}
// Pure delegation: every method below forwards to whichever backend variant
// is active, keeping callers independent of the crypto provider choice.
impl MemberBackend {
    // Produce a fresh serialized KeyPackage for upload to the AS.
    pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.generate_key_package(),
            MemberBackend::Hybrid(m) => m.generate_key_package(),
        }
    }
    // Create a new MLS group with the given id, with self as first member.
    pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
        match self {
            MemberBackend::Classical(m) => m.create_group(group_id),
            MemberBackend::Hybrid(m) => m.create_group(group_id),
        }
    }
    // Add a member from their KeyPackage. Returns two byte blobs —
    // presumably (commit, welcome); confirm against GroupMember's docs.
    pub fn add_member(&mut self, key_package_bytes: &[u8]) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
        match self {
            MemberBackend::Classical(m) => m.add_member(key_package_bytes),
            MemberBackend::Hybrid(m) => m.add_member(key_package_bytes),
        }
    }
    // Join an existing group from a Welcome message.
    pub fn join_group(&mut self, welcome: &[u8]) -> Result<(), CoreError> {
        match self {
            MemberBackend::Classical(m) => m.join_group(welcome),
            MemberBackend::Hybrid(m) => m.join_group(welcome),
        }
    }
    // Encrypt an application message for the group.
    pub fn send_message(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.send_message(plaintext),
            MemberBackend::Hybrid(m) => m.send_message(plaintext),
        }
    }
    // Process an incoming MLS message; Some(plaintext) for application data.
    pub fn receive_message(&mut self, bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.receive_message(bytes),
            MemberBackend::Hybrid(m) => m.receive_message(bytes),
        }
    }
    // Like receive_message, but also returns the sender's identity bytes.
    pub fn receive_message_with_sender(
        &mut self,
        bytes: &[u8],
    ) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
        match self {
            MemberBackend::Classical(m) => m.receive_message_with_sender(bytes),
            MemberBackend::Hybrid(m) => m.receive_message_with_sender(bytes),
        }
    }
    // Current group id, or None when not in a group.
    pub fn group_id(&self) -> Option<Vec<u8>> {
        match self {
            MemberBackend::Classical(m) => m.group_id(),
            MemberBackend::Hybrid(m) => m.group_id(),
        }
    }
    pub fn identity(&self) -> &IdentityKeypair {
        match self {
            MemberBackend::Classical(m) => m.identity(),
            MemberBackend::Hybrid(m) => m.identity(),
        }
    }
    // Seed from which the identity can be re-derived (persisted in StoredState).
    pub fn identity_seed(&self) -> [u8; 32] {
        match self {
            MemberBackend::Classical(m) => m.identity_seed(),
            MemberBackend::Hybrid(m) => m.identity_seed(),
        }
    }
    pub fn group_ref(&self) -> Option<&MlsGroup> {
        match self {
            MemberBackend::Classical(m) => m.group_ref(),
            MemberBackend::Hybrid(m) => m.group_ref(),
        }
    }
    // Identity public keys of all current group members.
    pub fn member_identities(&self) -> Vec<Vec<u8>> {
        match self {
            MemberBackend::Classical(m) => m.member_identities(),
            MemberBackend::Hybrid(m) => m.member_identities(),
        }
    }
    // True when this member uses the post-quantum hybrid provider.
    pub fn is_pq(&self) -> bool {
        matches!(self, MemberBackend::Hybrid(_))
    }
}
impl StoredState {
    /// Rebuild member and hybrid key from stored state. Uses PQ backend if `use_pq_backend` is true.
    ///
    /// `state_path` is only used to derive the sibling on-disk keystore path
    /// (see `keystore_path`).
    pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(MemberBackend, Option<HybridKeypair>)> {
        let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
        // Group state is stored as opaque bincode bytes; decode if present.
        let group = self
            .group
            .map(|bytes| bincode::deserialize(&bytes).context("decode group"))
            .transpose()?;
        let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
        // Backend choice must match how the state was created (see the type's
        // production note).
        let member = if self.use_pq_backend {
            MemberBackend::Hybrid(GroupMember::<HybridCryptoProvider>::new_with_state_hybrid(
                identity, key_store, group,
            ))
        } else {
            MemberBackend::Classical(GroupMember::new_with_state(identity, key_store, group))
        };
        let hybrid_kp = self
            .hybrid_key
            .map(|bytes| HybridKeypair::from_bytes(&bytes).context("decode hybrid key"))
            .transpose()?;
        Ok((member, hybrid_kp))
    }
    /// Build state from a classical GroupMember (backward compat / tests). Prefer [`from_member_backend`](Self::from_member_backend) in production.
    ///
    /// NOTE(review): `member_keys` is reset to empty here — confirm callers
    /// repopulate the cache before relying on it.
    pub fn from_parts(
        member: &GroupMember<StoreCrypto>,
        hybrid_kp: Option<&HybridKeypair>,
    ) -> anyhow::Result<Self> {
        let group = member
            .group_ref()
            .map(|g| bincode::serialize(g).context("serialize group"))
            .transpose()?;
        Ok(Self {
            identity_seed: member.identity_seed(),
            group,
            hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
            member_keys: Vec::new(),
            use_pq_backend: false,
        })
    }
    /// Build state from MemberBackend (classical or PQ).
    ///
    /// NOTE(review): `member_keys` is reset to empty here as well.
    pub fn from_member_backend(
        member: &MemberBackend,
        hybrid_kp: Option<&HybridKeypair>,
    ) -> anyhow::Result<Self> {
        let group = member
            .group_ref()
            .map(|g| bincode::serialize(g).context("serialize group"))
            .transpose()?;
        Ok(Self {
            identity_seed: member.identity_seed(),
            group,
            hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
            member_keys: Vec::new(),
            // Record which MLS backend produced this state so loading can match it.
            use_pq_backend: member.is_pq(),
        })
    }
}
/// Derive a 32-byte key from a password and salt using Argon2id.
///
/// Uses `Argon2::default()` parameters; the salt comes from the QPCE file
/// header, so the same password + file always re-derives the same key.
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
    let mut key = [0u8; 32];
    Argon2::default()
        .hash_password_into(password.as_bytes(), salt, &mut key)
        .map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
    Ok(key)
}
/// Encrypt `plaintext` with the QPCE format: magic(4) | salt(16) | nonce(12) | ciphertext.
///
/// A fresh random salt and nonce are drawn from the OS RNG on every call, so
/// encrypting the same plaintext twice yields different files.
pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
    let mut salt = [0u8; STATE_SALT_LEN];
    rand::rngs::OsRng.fill_bytes(&mut salt);
    let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
    rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
    // Argon2id stretches the password with the fresh salt.
    let key = derive_state_key(password, &salt)?;
    let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
    let nonce = Nonce::from_slice(&nonce_bytes);
    let ciphertext = cipher
        .encrypt(nonce, plaintext)
        .map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;
    // Assemble header + ciphertext in the exact order decrypt_state expects.
    let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
    out.extend_from_slice(STATE_MAGIC);
    out.extend_from_slice(&salt);
    out.extend_from_slice(&nonce_bytes);
    out.extend_from_slice(&ciphertext);
    Ok(out)
}
/// Decrypt a QPCE-formatted state file.
///
/// Layout mirrors `encrypt_state`: magic(4) | salt(16) | nonce(12) | ciphertext.
///
/// # Errors
/// Fails when the file is too short, key derivation fails, or AEAD
/// verification fails (wrong password or tampered data).
pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
    let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
    anyhow::ensure!(
        data.len() > header_len,
        "encrypted state file too short ({} bytes)",
        data.len()
    );
    // Fixed offsets: [0..4) magic, [4..20) salt, [20..32) nonce, rest ciphertext.
    let salt = &data[4..4 + STATE_SALT_LEN];
    let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
    let ciphertext = &data[header_len..];
    let key = derive_state_key(password, salt)?;
    let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
    let nonce = Nonce::from_slice(nonce_bytes);
    // AEAD failure is deliberately collapsed into one message to avoid
    // leaking whether the password or the data was wrong.
    let plaintext = cipher
        .decrypt(nonce, ciphertext)
        .map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
    Ok(plaintext)
}
/// Returns true if raw bytes begin with the QPCE magic header.
pub fn is_encrypted_state(bytes: &[u8]) -> bool {
    // starts_with handles the length check implicitly.
    bytes.starts_with(STATE_MAGIC)
}
/// Create new state with optional post-quantum MLS backend (M7). When `use_pq_backend` is true,
/// new state uses `HybridCryptoProvider` for MLS HPKE (X25519 + ML-KEM-768).
///
/// If `path` exists, the existing state is loaded (and, when it predates the
/// hybrid key, upgraded in place by generating one and re-writing the file);
/// `use_pq_backend` is then ignored for the existing state. Otherwise a fresh
/// identity + hybrid keypair are generated and persisted.
pub fn load_or_init_state(
    path: &Path,
    password: Option<&str>,
    use_pq_backend: bool,
) -> anyhow::Result<StoredState> {
    if path.exists() {
        let mut state = load_existing_state(path, password)?;
        // Generate hybrid keypair if missing (upgrade from older state).
        if state.hybrid_key.is_none() {
            // Spinner: ML-KEM keygen can be slow enough to warrant feedback.
            let pb = indicatif::ProgressBar::new_spinner();
            pb.set_message("Generating post-quantum keypair\u{2026}");
            pb.enable_steady_tick(std::time::Duration::from_millis(80));
            state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
            pb.finish_and_clear();
            // Persist the upgrade immediately so it happens at most once.
            write_state(path, &state, password)?;
        }
        return Ok(state);
    }
    // Fresh state: generate identity + hybrid keypair under a spinner.
    let pb = indicatif::ProgressBar::new_spinner();
    pb.set_message("Generating post-quantum keypair\u{2026}");
    pb.enable_steady_tick(std::time::Duration::from_millis(80));
    let identity = IdentityKeypair::generate();
    let hybrid_kp = HybridKeypair::generate();
    pb.finish_and_clear();
    let key_store = DiskKeyStore::persistent(keystore_path(path))?;
    let member = if use_pq_backend {
        MemberBackend::Hybrid(GroupMember::<HybridCryptoProvider>::new_with_state_hybrid(
            Arc::new(identity),
            key_store,
            None,
        ))
    } else {
        MemberBackend::Classical(GroupMember::new_with_state(
            Arc::new(identity),
            key_store,
            None,
        ))
    };
    let state = StoredState::from_member_backend(&member, Some(&hybrid_kp))?;
    write_state(path, &state, password)?;
    Ok(state)
}
/// Load a state file from disk, decrypting it when it carries the QPCE header.
///
/// # Errors
/// Fails when the file cannot be read, is encrypted but no password was
/// supplied, decryption fails, or the bincode payload does not decode.
pub fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
    let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
    // Plaintext files decode directly; early return keeps the encrypted path flat.
    if !is_encrypted_state(&bytes) {
        return bincode::deserialize(&bytes).context("decode state");
    }
    let pw = password
        .context("state file is encrypted (QPCE); a password is required to decrypt it")?;
    let plaintext = decrypt_state(pw, &bytes)?;
    bincode::deserialize(&plaintext).context("decode encrypted state")
}
/// Snapshot the current member (and optional hybrid keypair) and persist it
/// to `path`, encrypting when a password is provided.
pub fn save_state(
    path: &Path,
    member: &MemberBackend,
    hybrid_kp: Option<&HybridKeypair>,
    password: Option<&str>,
) -> anyhow::Result<()> {
    let snapshot = StoredState::from_member_backend(member, hybrid_kp)?;
    write_state(path, &snapshot, password)
}
/// Serialize `state` and write it to `path`, creating parent directories as
/// needed. With a password, the bytes are wrapped in the QPCE envelope.
pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
    }
    let plaintext = bincode::serialize(state).context("encode state")?;
    let bytes = match password {
        Some(pw) => encrypt_state(pw, &plaintext)?,
        None => plaintext,
    };
    std::fs::write(path, bytes).with_context(|| format!("write state {path:?}"))
}
pub fn decode_identity_key(hex_str: &str) -> anyhow::Result<Vec<u8>> {
let bytes = super::hex::decode(hex_str)
.map_err(|e| anyhow::anyhow!(e))
.context("identity key must be hex")?;
anyhow::ensure!(bytes.len() == 32, "identity key must be 32 bytes");
Ok(bytes)
}
/// Derive the on-disk keystore path from a state path by swapping the
/// extension for `.ks` (e.g. `state.bin` -> `state.ks`).
pub fn keystore_path(state_path: &Path) -> PathBuf {
    state_path.with_extension("ks")
}
/// Compute the SHA-256 digest of `bytes` as an owned vector.
pub fn sha256(bytes: &[u8]) -> Vec<u8> {
    use sha2::{Digest, Sha256};
    let mut hasher = Sha256::new();
    hasher.update(bytes);
    hasher.finalize().to_vec()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trip: encrypt then decrypt with the same password recovers the
    // plaintext, and the output carries the QPCE magic header.
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let plaintext = b"test state data";
        let password = "test-password";
        let encrypted = encrypt_state(password, plaintext).unwrap();
        assert!(is_encrypted_state(&encrypted));
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        assert_eq!(decrypted, plaintext);
    }
    // A wrong password must fail AEAD verification, not return garbage.
    #[test]
    fn wrong_password_fails() {
        let plaintext = b"test state data";
        let encrypted = encrypt_state("correct", plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }
}

View File

@@ -0,0 +1,57 @@
//! quicnprotochat CLI client library.
//!
//! # KeyPackage expiry and refresh
//!
//! KeyPackages are single-use (consumed when someone fetches them for an invite) and the server
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `quicnprotochat refresh-keypackage`
//! periodically (e.g. before the server TTL) or after your KeyPackage was consumed:
//!
//! ```bash
//! quicnprotochat refresh-keypackage --state quicnprotochat-state.bin --server 127.0.0.1:7000
//! ```
//!
//! Use the same `--access-token` (or `QUICNPROTOCHAT_ACCESS_TOKEN`) as for other authenticated
//! commands. See the [running-the-client](https://docs.quicnprotochat.dev/getting-started/running-the-client)
//! docs for details.
use std::sync::OnceLock;
pub mod client;
pub use client::commands::{
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
cmd_health_json, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register,
cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami,
receive_pending_plaintexts, whoami_json,
};
pub use client::rpc::{connect_node, enqueue, fetch_wait};
pub use client::state::{load_existing_state, StoredState};
// Global auth context initialized once per process.
pub(crate) static AUTH_CONTEXT: OnceLock<ClientAuth> = OnceLock::new();
#[derive(Clone, Debug)]
pub struct ClientAuth {
    pub(crate) version: u16,
    pub(crate) access_token: Vec<u8>,
    pub(crate) device_id: Vec<u8>,
}

impl ClientAuth {
    /// Build a client auth context from optional token and device id.
    pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
        Self {
            // Auth struct version sent with every RPC; currently always 1.
            version: 1,
            access_token: access_token.into_bytes(),
            // A missing device id becomes the empty byte string.
            device_id: device_id.map(String::into_bytes).unwrap_or_default(),
        }
    }
}
/// Initialize the global auth context; subsequent calls are ignored.
pub fn init_auth(ctx: ClientAuth) {
    // OnceLock::set errs when a context is already installed; the first
    // initialization wins and later calls are intentionally dropped.
    if AUTH_CONTEXT.set(ctx).is_err() {
        // Already initialized — keep the original context.
    }
}

View File

@@ -0,0 +1,541 @@
//! quicnprotochat CLI client.
use std::path::PathBuf;
use clap::{Parser, Subcommand};
use quicnprotochat_client::{
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register, cmd_register_state,
cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami, init_auth, ClientAuth,
};
// ── CLI ───────────────────────────────────────────────────────────────────────
// Global CLI options shared by every subcommand. The `///` doc comments below
// double as clap's --help text, so they are user-visible runtime strings.
#[derive(Debug, Parser)]
#[command(name = "quicnprotochat", about = "quicnprotochat CLI client", version)]
struct Args {
    /// Path to the server's TLS certificate (self-signed by default).
    #[arg(
        long,
        global = true,
        default_value = "data/server-cert.der",
        env = "QUICNPROTOCHAT_CA_CERT"
    )]
    ca_cert: PathBuf,
    /// Expected TLS server name (must match the certificate SAN).
    #[arg(
        long,
        global = true,
        default_value = "localhost",
        env = "QUICNPROTOCHAT_SERVER_NAME"
    )]
    server_name: String,
    /// Bearer token or OPAQUE session token for authenticated requests.
    /// Not required for register-user and login commands.
    #[arg(
        long,
        global = true,
        env = "QUICNPROTOCHAT_ACCESS_TOKEN",
        default_value = ""
    )]
    access_token: String,
    /// Optional device identifier (UUID bytes encoded as hex or raw string).
    #[arg(long, global = true, env = "QUICNPROTOCHAT_DEVICE_ID")]
    device_id: Option<String>,
    /// Password to encrypt/decrypt client state files (QPCE format).
    /// If set, state files are encrypted at rest with Argon2id + ChaCha20Poly1305.
    #[arg(long, global = true, env = "QUICNPROTOCHAT_STATE_PASSWORD")]
    state_password: Option<String>,
    /// Use post-quantum MLS backend (X25519 + ML-KEM-768) for new state. M7.
    #[arg(long, global = true, env = "QUICNPROTOCHAT_PQ")]
    pq: bool,
    #[command(subcommand)]
    command: Command,
}
// Subcommands. As with Args, the `///` comments are clap help text.
// NOTE(review): several commands accept --password / --access-token on the
// command line, which can leak via shell history and process listings —
// consider prompting or env-only input for production use.
#[derive(Debug, Subcommand)]
enum Command {
    // ── Account / auth ────────────────────────────────────────────────────
    /// Register a new user via OPAQUE (password never leaves the client).
    RegisterUser {
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Username for the new account.
        #[arg(long)]
        username: String,
        /// Password (will be used in OPAQUE PAKE; server never sees it).
        #[arg(long)]
        password: String,
    },
    /// Log in via OPAQUE and receive a session token.
    Login {
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        #[arg(long)]
        username: String,
        #[arg(long)]
        password: String,
        /// Hex-encoded Ed25519 identity key (64 hex chars). Optional if --state is provided.
        #[arg(long)]
        identity_key: Option<String>,
        /// State file to derive the identity key (requires same password if encrypted).
        #[arg(long)]
        state: Option<PathBuf>,
        /// Password for the encrypted state file (if any).
        #[arg(long)]
        state_password: Option<String>,
    },
    /// Show local identity key, fingerprint, group status, and hybrid key status.
    Whoami {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
    },
    // ── Server diagnostics ────────────────────────────────────────────────
    /// Check server connectivity and print status.
    Health {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },
    /// Check if a peer has registered a hybrid key (non-consuming lookup).
    CheckKey {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Peer's Ed25519 identity public key (64 hex chars = 32 bytes).
        identity_key: String,
    },
    /// Send a Ping to the server and print the round-trip time.
    Ping {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },
    // ── KeyPackage management ─────────────────────────────────────────────
    /// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
    Register {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },
    /// Fetch a peer's KeyPackage from the Authentication Service.
    FetchKey {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Target peer's Ed25519 identity public key (64 hex chars = 32 bytes).
        identity_key: String,
    },
    /// Run a two-party MLS demo (creator + joiner) against live AS and DS.
    DemoGroup {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },
    /// Upload the persistent identity's KeyPackage to the AS (uses state file).
    RegisterState {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Authentication Service address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },
    /// Refresh the KeyPackage on the server (existing state only).
    /// Run periodically (e.g. before server TTL ~24h) or after your KeyPackage was consumed so others can invite you.
    RefreshKeypackage {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },
    // ── Group lifecycle ───────────────────────────────────────────────────
    /// Create a persistent group and save state to disk.
    CreateGroup {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Group identifier (arbitrary bytes, typically a human-readable name).
        #[arg(long)]
        group_id: String,
    },
    /// Invite a peer into the group and deliver a Welcome via DS.
    Invite {
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Peer identity public key (64 hex chars = 32 bytes).
        #[arg(long)]
        peer_key: String,
    },
    /// Join a group by fetching the Welcome from the DS.
    Join {
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },
    // ── Messaging ─────────────────────────────────────────────────────────
    /// Send an application message via the DS.
    Send {
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Recipient identity key (hex, 32 bytes -> 64 chars). Omit when using --all.
        #[arg(long)]
        peer_key: Option<String>,
        /// Send to all other group members (N-way groups).
        #[arg(long)]
        all: bool,
        /// Plaintext message to send.
        #[arg(long)]
        msg: String,
    },
    /// Receive and decrypt all pending messages from the DS.
    Recv {
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Wait for up to this many milliseconds if no messages are queued.
        #[arg(long, default_value_t = 0)]
        wait_ms: u64,
        /// Continuously long-poll for messages.
        #[arg(long)]
        stream: bool,
    },
    /// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
    /// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
    Chat {
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Peer identity key (hex, 64 chars). Omit in a two-person group to use the only other member.
        #[arg(long)]
        peer_key: Option<String>,
        /// How often to poll for incoming messages (milliseconds).
        #[arg(long, default_value_t = 500)]
        poll_interval_ms: u64,
    },
    /// Generate shell completions for the given shell and print to stdout.
    #[command(hide = true)]
    Completions {
        shell: clap_complete::Shell,
    },
}
// ── Entry point ───────────────────────────────────────────────────────────────
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt()
.with_env_filter(
tracing_subscriber::EnvFilter::try_from_default_env()
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
)
.init();
let args = Args::parse();
// Initialize auth context once for all RPCs (empty token OK for register-user/login).
let auth_ctx = ClientAuth::from_parts(args.access_token.clone(), args.device_id.clone());
init_auth(auth_ctx);
let state_pw = args.state_password.as_deref();
match args.command {
Command::RegisterUser {
server,
username,
password,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_register_user(
&server,
&args.ca_cert,
&args.server_name,
&username,
&password,
None,
))
.await
}
Command::Login {
server,
username,
password,
identity_key,
state,
state_password,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_login(
&server,
&args.ca_cert,
&args.server_name,
&username,
&password,
identity_key.as_deref(),
state.as_deref(),
state_password.as_deref(),
))
.await
}
Command::Whoami { state } => cmd_whoami(&state, state_pw),
Command::Health { server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_health(&server, &args.ca_cert, &args.server_name))
.await
}
Command::CheckKey {
server,
identity_key,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_check_key(
&server,
&args.ca_cert,
&args.server_name,
&identity_key,
))
.await
}
Command::Ping { server } => cmd_ping(&server, &args.ca_cert, &args.server_name).await,
Command::Register { server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_register(&server, &args.ca_cert, &args.server_name))
.await
}
Command::FetchKey {
server,
identity_key,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_fetch_key(
&server,
&args.ca_cert,
&args.server_name,
&identity_key,
))
.await
}
Command::DemoGroup { server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_demo_group(&server, &args.ca_cert, &args.server_name, args.pq))
.await
}
Command::RegisterState { state, server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_register_state(
&state,
&server,
&args.ca_cert,
&args.server_name,
state_pw,
args.pq,
))
.await
}
Command::RefreshKeypackage { state, server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_refresh_keypackage(
&state,
&server,
&args.ca_cert,
&args.server_name,
state_pw,
))
.await
}
Command::CreateGroup {
state,
server,
group_id,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_create_group(&state, &server, &group_id, state_pw, args.pq))
.await
}
Command::Invite {
state,
server,
peer_key,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_invite(
&state,
&server,
&args.ca_cert,
&args.server_name,
&peer_key,
state_pw,
))
.await
}
Command::Join { state, server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_join(
&state,
&server,
&args.ca_cert,
&args.server_name,
state_pw,
))
.await
}
Command::Send {
state,
server,
peer_key,
all,
msg,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_send(
&state,
&server,
&args.ca_cert,
&args.server_name,
peer_key.as_deref(),
all,
&msg,
state_pw,
))
.await
}
Command::Recv {
state,
server,
wait_ms,
stream,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_recv(
&state,
&server,
&args.ca_cert,
&args.server_name,
wait_ms,
stream,
state_pw,
))
.await
}
Command::Chat {
state,
server,
peer_key,
poll_interval_ms,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_chat(
&state,
&server,
&args.ca_cert,
&args.server_name,
peer_key.as_deref(),
state_pw,
poll_interval_ms,
))
.await
}
Command::Completions { shell } => {
use clap::CommandFactory;
clap_complete::generate(
shell,
&mut Args::command(),
"quicnprotochat",
&mut std::io::stdout(),
);
Ok(())
}
}
}

View File

@@ -0,0 +1,593 @@
// cargo_bin! only works for current package's binary; we spawn quicnprotochat-server from another package.
#![allow(deprecated)]
use std::{path::PathBuf, process::Command, time::Duration};
use assert_cmd::cargo::cargo_bin;
use portpicker::pick_unused_port;
use rand::RngCore;
use tempfile::TempDir;
use tokio::time::sleep;
use hex;
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
fn ensure_rustls_provider() {
    // A second install returns Err (a provider is already set) — safe to discard.
    drop(rustls::crypto::ring::default_provider().install_default());
}
use quicnprotochat_client::{
cmd_create_group, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_register_state,
cmd_register_user, cmd_send, connect_node, enqueue, fetch_wait, init_auth,
load_existing_state, receive_pending_plaintexts, ClientAuth,
};
use quicnprotochat_core::IdentityKeypair;
/// Render `bytes` as a lowercase hex string (two characters per byte).
fn hex_encode(bytes: &[u8]) -> String {
    use std::fmt::Write;
    let mut out = String::with_capacity(bytes.len() * 2);
    for byte in bytes {
        write!(out, "{byte:02x}").expect("writing to a String cannot fail");
    }
    out
}
/// Poll the server's ping endpoint until it answers, or give up after
/// 30 attempts spaced 200 ms apart (~6 s total).
async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) -> anyhow::Result<()> {
    let local = tokio::task::LocalSet::new();
    let mut attempts_left = 30;
    while attempts_left > 0 {
        let healthy = local
            .run_until(cmd_ping(server, ca_cert, server_name))
            .await
            .is_ok();
        if healthy {
            return Ok(());
        }
        attempts_left -= 1;
        sleep(Duration::from_millis(200)).await;
    }
    anyhow::bail!("server health never became ready")
}
/// Creator and joiner register; creator creates group and invites joiner; joiner joins;
/// creator sends a message; assert joiner's mailbox receives it.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
    ensure_rustls_provider();
    // Isolated workspace: certs, key, and server data live in a temp dir.
    let temp = TempDir::new()?;
    let base = temp.path();
    let port = pick_unused_port().expect("free port");
    let listen = format!("127.0.0.1:{port}");
    let server = listen.clone();
    let ca_cert = base.join("server-cert.der");
    let tls_key = base.join("server-key.der");
    let data_dir = base.join("data");
    let auth_token = "devtoken";
    // Spawn server binary.
    let server_bin = cargo_bin("quicnprotochat-server");
    let child = Command::new(server_bin)
        .arg("--listen")
        .arg(&listen)
        .arg("--data-dir")
        .arg(&data_dir)
        .arg("--tls-cert")
        .arg(&ca_cert)
        .arg("--tls-key")
        .arg(&tls_key)
        .arg("--auth-token")
        .arg(auth_token)
        .arg("--allow-insecure-auth")
        .spawn()
        .expect("spawn server");
    // Ensure we always terminate the child, even if an assertion fails.
    struct ChildGuard(std::process::Child);
    impl Drop for ChildGuard {
        fn drop(&mut self) {
            let _ = self.0.kill();
        }
    }
    // BUG FIX: the original `let child_guard = ChildGuard(child); let _ = child_guard;`
    // moved the guard into the `_` pattern, which does not bind and therefore drops
    // the guard immediately — killing the server before the test could run. Bind it
    // to a named, underscore-prefixed variable so it lives until end of scope.
    let _child_guard = ChildGuard(child);
    // Wait for server to be healthy and certs to be generated.
    wait_for_health(&server, &ca_cert, "localhost").await?;
    // Set client auth context.
    init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
    let local = tokio::task::LocalSet::new();
    let creator_state = base.join("creator.bin");
    let joiner_state = base.join("joiner.bin");
    // Both parties register fresh identities + KeyPackages.
    local
        .run_until(cmd_register_state(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;
    local
        .run_until(cmd_register_state(
            &joiner_state,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;
    local
        .run_until(cmd_create_group(&creator_state, &server, "test-group", None, false))
        .await?;
    // Derive the joiner's public key from its persisted identity seed.
    let joiner_state_loaded = load_existing_state(&joiner_state, None)?;
    let joiner_identity = IdentityKeypair::from_seed(joiner_state_loaded.identity_seed);
    let joiner_pk_hex = hex_encode(&joiner_identity.public_key_bytes());
    // Invite → join → send.
    local
        .run_until(cmd_invite(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            &joiner_pk_hex,
            None,
        ))
        .await?;
    local
        .run_until(cmd_join(&joiner_state, &server, &ca_cert, "localhost", None))
        .await?;
    local
        .run_until(cmd_send(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            Some(&joiner_pk_hex),
            false,
            "hello",
            None,
        ))
        .await?;
    // The joiner's mailbox on the DS must contain at least one payload.
    local
        .run_until(async {
            let client = connect_node(&server, &ca_cert, "localhost").await?;
            let payloads = fetch_wait(&client, &joiner_identity.public_key_bytes(), 1000).await?;
            anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to joiner");
            Ok::<(), anyhow::Error>(())
        })
        .await?;
    Ok(())
}
/// Three-party group: A creates group, invites B then C; B and C join; A sends, B and C receive;
/// B sends, A and C receive.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn e2e_three_party_group_invite_join_send_recv() -> anyhow::Result<()> {
    ensure_rustls_provider();
    // Isolated workspace: certs, key, and server data live in a temp dir.
    let temp = TempDir::new()?;
    let base = temp.path();
    let port = pick_unused_port().expect("free port");
    let listen = format!("127.0.0.1:{port}");
    let server = listen.clone();
    let ca_cert = base.join("server-cert.der");
    let tls_key = base.join("server-key.der");
    let data_dir = base.join("data");
    let auth_token = "devtoken";
    // Spawn the server binary against the temp dir.
    let server_bin = cargo_bin("quicnprotochat-server");
    let child = Command::new(server_bin)
        .arg("--listen")
        .arg(&listen)
        .arg("--data-dir")
        .arg(&data_dir)
        .arg("--tls-cert")
        .arg(&ca_cert)
        .arg("--tls-key")
        .arg(&tls_key)
        .arg("--auth-token")
        .arg(auth_token)
        .arg("--allow-insecure-auth")
        .spawn()
        .expect("spawn server");
    // Guard kills the child on any exit path, including assertion failures.
    struct ChildGuard(std::process::Child);
    impl Drop for ChildGuard {
        fn drop(&mut self) {
            let _ = self.0.kill();
        }
    }
    let _child_guard = ChildGuard(child);
    wait_for_health(&server, &ca_cert, "localhost").await?;
    init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
    let local = tokio::task::LocalSet::new();
    let creator_state = base.join("creator.bin");
    let b_state = base.join("b.bin");
    let c_state = base.join("c.bin");
    // All three parties register identities + KeyPackages.
    local
        .run_until(cmd_register_state(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;
    local
        .run_until(cmd_register_state(
            &b_state,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;
    local
        .run_until(cmd_register_state(
            &c_state,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;
    // Derive B's and C's public keys from their persisted identity seeds.
    let b_loaded = load_existing_state(&b_state, None)?;
    let b_pk_hex = hex_encode(&IdentityKeypair::from_seed(b_loaded.identity_seed).public_key_bytes());
    let c_loaded = load_existing_state(&c_state, None)?;
    let c_pk_hex = hex_encode(&IdentityKeypair::from_seed(c_loaded.identity_seed).public_key_bytes());
    // A creates the group and invites B, then C; each joins via its Welcome.
    local
        .run_until(cmd_create_group(&creator_state, &server, "test-group", None, false))
        .await?;
    local
        .run_until(cmd_invite(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            &b_pk_hex,
            None,
        ))
        .await?;
    local
        .run_until(cmd_invite(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            &c_pk_hex,
            None,
        ))
        .await?;
    local
        .run_until(cmd_join(&b_state, &server, &ca_cert, "localhost", None))
        .await?;
    local
        .run_until(cmd_join(&c_state, &server, &ca_cert, "localhost", None))
        .await?;
    // A broadcasts "hello" to all other members (--all path).
    local
        .run_until(cmd_send(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            None,
            true,
            "hello",
            None,
        ))
        .await?;
    // Brief pause to let the DS enqueue the fan-out before we poll.
    sleep(Duration::from_millis(150)).await;
    let b_plaintexts = local
        .run_until(receive_pending_plaintexts(
            &b_state,
            &server,
            &ca_cert,
            "localhost",
            1500,
            None,
        ))
        .await?;
    let c_plaintexts = local
        .run_until(receive_pending_plaintexts(
            &c_state,
            &server,
            &ca_cert,
            "localhost",
            1500,
            None,
        ))
        .await?;
    anyhow::ensure!(
        b_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
        "B did not receive 'hello', got {:?}",
        b_plaintexts
    );
    anyhow::ensure!(
        c_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
        "C did not receive 'hello', got {:?}",
        c_plaintexts
    );
    // B broadcasts "hi"; A and C must both receive it.
    local
        .run_until(cmd_send(
            &b_state,
            &server,
            &ca_cert,
            "localhost",
            None,
            true,
            "hi",
            None,
        ))
        .await?;
    sleep(Duration::from_millis(200)).await;
    let a_plaintexts = local
        .run_until(receive_pending_plaintexts(
            &creator_state,
            &server,
            &ca_cert,
            "localhost",
            1500,
            None,
        ))
        .await?;
    let c_plaintexts2 = local
        .run_until(receive_pending_plaintexts(
            &c_state,
            &server,
            &ca_cert,
            "localhost",
            1500,
            None,
        ))
        .await?;
    anyhow::ensure!(
        a_plaintexts.iter().any(|p| p.as_slice() == b"hi"),
        "A did not receive 'hi', got {:?}",
        a_plaintexts
    );
    anyhow::ensure!(
        c_plaintexts2.iter().any(|p| p.as_slice() == b"hi"),
        "C did not receive 'hi', got {:?}",
        c_plaintexts2
    );
    Ok(())
}
/// Login should refuse if the presented identity key does not match the registered key.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn e2e_login_rejects_mismatched_identity() -> anyhow::Result<()> {
    ensure_rustls_provider();
    // Isolated workspace: certs, key, and server data live in a temp dir.
    let temp = TempDir::new()?;
    let base = temp.path();
    let port = pick_unused_port().expect("free port");
    let listen = format!("127.0.0.1:{port}");
    let server = listen.clone();
    let ca_cert = base.join("server-cert.der");
    let tls_key = base.join("server-key.der");
    let data_dir = base.join("data");
    let auth_token = "devtoken";
    // Spawn server binary.
    let server_bin = cargo_bin("quicnprotochat-server");
    let child = Command::new(server_bin)
        .arg("--listen")
        .arg(&listen)
        .arg("--data-dir")
        .arg(&data_dir)
        .arg("--tls-cert")
        .arg(&ca_cert)
        .arg("--tls-key")
        .arg(&tls_key)
        .arg("--auth-token")
        .arg(auth_token)
        .arg("--allow-insecure-auth")
        .spawn()
        .expect("spawn server");
    struct ChildGuard(std::process::Child);
    impl Drop for ChildGuard {
        fn drop(&mut self) {
            let _ = self.0.kill();
        }
    }
    // BUG FIX: the original `let child_guard = ChildGuard(child); let _ = child_guard;`
    // moved the guard into the `_` pattern, which does not bind and therefore drops
    // the guard immediately — killing the server before the test could run. Bind it
    // to a named, underscore-prefixed variable so it lives until end of scope.
    let _child_guard = ChildGuard(child);
    wait_for_health(&server, &ca_cert, "localhost").await?;
    init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
    let local = tokio::task::LocalSet::new();
    let state_path = base.join("user.bin");
    // Register and persist state (includes identity key binding).
    local
        .run_until(cmd_register_state(
            &state_path,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;
    // Register the user with the bound identity so login can enforce mismatches.
    let stored_state = load_existing_state(&state_path, None)?;
    let identity_hex = hex::encode(
        IdentityKeypair::from_seed(stored_state.identity_seed).public_key_bytes(),
    );
    local
        .run_until(cmd_register_user(
            &server,
            &ca_cert,
            "localhost",
            "user1",
            "pass",
            Some(&identity_hex),
        ))
        .await?;
    // Craft an unrelated identity key and attempt login with it.
    let mut bogus_identity = [0u8; 32];
    rand::thread_rng().fill_bytes(&mut bogus_identity);
    let bogus_hex = hex::encode(bogus_identity);
    let result = local
        .run_until(cmd_login(
            &server,
            &ca_cert,
            "localhost",
            "user1",
            "pass",
            Some(&bogus_hex),
            None,
            None,
        ))
        .await;
    match result {
        Ok(_) => anyhow::bail!("login unexpectedly succeeded with mismatched identity"),
        Err(e) => {
            // Show the full error chain so we can match the server's E016 response.
            let msg = format!("{e:#}");
            anyhow::ensure!(
                msg.contains("identity") || msg.contains("E016"),
                "login failed but not for identity mismatch: {msg}"
            );
        }
    }
    Ok(())
}
/// Sealed Sender: enqueue with valid token (no identity binding) succeeds; recipient can fetch.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn e2e_sealed_sender_enqueue_then_fetch() -> anyhow::Result<()> {
    ensure_rustls_provider();
    // Isolated workspace: certs, key, and server data live in a temp dir.
    let temp = TempDir::new()?;
    let base = temp.path();
    let port = pick_unused_port().expect("free port");
    let listen = format!("127.0.0.1:{port}");
    let server = listen.clone();
    let ca_cert = base.join("server-cert.der");
    let tls_key = base.join("server-key.der");
    let data_dir = base.join("data");
    let auth_token = "devtoken";
    // Spawn the server with --sealed-sender so enqueue skips identity binding.
    let server_bin = cargo_bin("quicnprotochat-server");
    let child = Command::new(server_bin)
        .arg("--listen")
        .arg(&listen)
        .arg("--data-dir")
        .arg(&data_dir)
        .arg("--tls-cert")
        .arg(&ca_cert)
        .arg("--tls-key")
        .arg(&tls_key)
        .arg("--auth-token")
        .arg(auth_token)
        .arg("--allow-insecure-auth")
        .arg("--sealed-sender")
        .spawn()
        .expect("spawn server");
    // Guard kills the child on any exit path, including assertion failures.
    struct ChildGuard(std::process::Child);
    impl Drop for ChildGuard {
        fn drop(&mut self) {
            let _ = self.0.kill();
        }
    }
    let _child_guard = ChildGuard(child);
    wait_for_health(&server, &ca_cert, "localhost").await?;
    init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
    let local = tokio::task::LocalSet::new();
    let state_path = base.join("recipient.bin");
    // Register recipient state, then register + login the matching user account.
    local
        .run_until(cmd_register_state(
            &state_path,
            &server,
            &ca_cert,
            "localhost",
            None,
            false,
        ))
        .await?;
    let stored = load_existing_state(&state_path, None)?;
    let recipient_key = IdentityKeypair::from_seed(stored.identity_seed).public_key_bytes();
    let identity_hex = hex_encode(&recipient_key);
    local
        .run_until(cmd_register_user(
            &server,
            &ca_cert,
            "localhost",
            "recipient",
            "pass",
            Some(&identity_hex),
        ))
        .await?;
    local
        .run_until(cmd_login(
            &server,
            &ca_cert,
            "localhost",
            "recipient",
            "pass",
            Some(&identity_hex),
            None,
            None,
        ))
        .await?;
    // Enqueue a raw payload for the recipient, then fetch it back.
    let client = local.run_until(connect_node(&server, &ca_cert, "localhost")).await?;
    local
        .run_until(enqueue(&client, &recipient_key, b"sealed-payload"))
        .await?;
    let payloads = local
        .run_until(fetch_wait(&client, &recipient_key, 500))
        .await?;
    anyhow::ensure!(
        payloads.len() == 1 && payloads[0].1.as_slice() == b"sealed-payload",
        "expected one payload 'sealed-payload', got {:?}",
        payloads
    );
    Ok(())
}

View File

@@ -0,0 +1,45 @@
[package]
name = "quicnprotochat-core"
version = "0.1.0"
edition = "2021"
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicnprotochat."
license = "MIT"
[dependencies]
# Crypto — classical
x25519-dalek = { workspace = true }
ed25519-dalek = { workspace = true }
sha2 = { workspace = true }
hkdf = { workspace = true }
chacha20poly1305 = { workspace = true }
zeroize = { workspace = true }
rand = { workspace = true }
# Crypto — post-quantum hybrid KEM (M7)
ml-kem = { workspace = true }
# Crypto — OPAQUE password-authenticated key exchange
opaque-ke = { workspace = true }
argon2 = { workspace = true }
# Crypto — MLS (M2)
openmls = { workspace = true }
openmls_rust_crypto = { workspace = true }
openmls_traits = { workspace = true }
tls_codec = { workspace = true }
serde = { workspace = true }
bincode = { workspace = true }
serde_json = { workspace = true }
# Serialisation
capnp = { workspace = true }
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
# Async runtime
tokio = { workspace = true }
# Error handling
thiserror = { workspace = true }
[dev-dependencies]
tokio = { workspace = true }

View File

@@ -0,0 +1,256 @@
//! Rich application-layer message format for MLS application payloads.
//!
//! The server sees only opaque ciphertext; structure lives in this client-defined
//! plaintext schema. All messages use: version byte (1) + message_type byte + type-specific payload.
//!
//! # Message ID
//!
//! `message_id` is assigned by the sender (16 random bytes) and included in the
//! serialized payload for Chat (and implied for Reply/Reaction/ReadReceipt via ref_msg_id).
//! Recipients can store message_ids to reference them in replies or reactions.
use crate::error::CoreError;
use rand::RngCore;
/// Current schema version.
pub const VERSION: u8 = 1;
/// Message type discriminant (one byte).
///
/// The numeric values are the on-the-wire type bytes; see the layout comment
/// below (`[version: 1][type: 1][payload...]`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum MessageType {
    /// Plain chat message: 16-byte sender-chosen message id + body.
    Chat = 0x01,
    /// Reply referencing an earlier message by its 16-byte id.
    Reply = 0x02,
    /// Emoji reaction referencing an earlier message by its 16-byte id.
    Reaction = 0x03,
    /// Read receipt for an earlier message id.
    ReadReceipt = 0x04,
    /// Typing indicator (single active/stopped byte).
    Typing = 0x05,
}
impl MessageType {
fn from_byte(b: u8) -> Option<Self> {
match b {
0x01 => Some(MessageType::Chat),
0x02 => Some(MessageType::Reply),
0x03 => Some(MessageType::Reaction),
0x04 => Some(MessageType::ReadReceipt),
0x05 => Some(MessageType::Typing),
_ => None,
}
}
}
/// Parsed application message (one of the rich types).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AppMessage {
    /// Plain chat: body (UTF-8). message_id is included so recipients can store and reference it.
    Chat {
        message_id: [u8; 16],
        body: Vec<u8>,
    },
    /// Reply to the message identified by `ref_msg_id`; `body` is the reply text.
    Reply {
        ref_msg_id: [u8; 16],
        body: Vec<u8>,
    },
    /// Reaction to the message identified by `ref_msg_id`; `emoji` is at most
    /// 255 bytes (enforced by `serialize_reaction`).
    Reaction {
        ref_msg_id: [u8; 16],
        emoji: Vec<u8>,
    },
    /// Read receipt for the message identified by `msg_id`.
    ReadReceipt {
        msg_id: [u8; 16],
    },
    /// Typing indicator.
    Typing {
        /// 0 = stopped, 1 = typing
        active: u8,
    },
}
/// Generate a fresh 16-byte message ID from the OS RNG (e.g. for Chat/Reply so
/// recipients can reference the message later).
pub fn generate_message_id() -> [u8; 16] {
    let mut buf = [0u8; 16];
    let mut rng = rand::rngs::OsRng;
    rng.fill_bytes(&mut buf);
    buf
}
// ── Layout (minimal, no Cap'n Proto) ─────────────────────────────────────────
//
// All messages: [version: 1][type: 1][payload...]
//
// Chat: [msg_id: 16][body_len: 2 BE][body]
// Reply: [ref_msg_id: 16][body_len: 2 BE][body]
// Reaction: [ref_msg_id: 16][emoji_len: 1][emoji]
// ReadReceipt: [msg_id: 16]
// Typing: [active: 1] 0 = stopped, 1 = typing
/// Frame a rich message as `[VERSION][type byte][payload...]` — the
/// application payload format carried inside MLS application messages.
pub fn serialize(msg_type: MessageType, payload: &[u8]) -> Vec<u8> {
    let mut framed = vec![VERSION, msg_type as u8];
    framed.reserve(payload.len());
    framed.extend_from_slice(payload);
    framed
}
/// Serialize a Chat message (generates message_id internally; pass None to generate, or Some(id) when replying with a known id).
pub fn serialize_chat(body: &[u8], message_id: Option<[u8; 16]>) -> Vec<u8> {
let id = message_id.unwrap_or_else(generate_message_id);
let mut payload = Vec::with_capacity(16 + 2 + body.len());
payload.extend_from_slice(&id);
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
payload.extend_from_slice(body);
serialize(MessageType::Chat, &payload)
}
/// Serialize a Reply message referencing `ref_msg_id`.
///
/// NOTE(review): `body.len()` is cast to `u16`, so bodies longer than 65535
/// bytes silently truncate the length prefix — callers must keep bodies short.
pub fn serialize_reply(ref_msg_id: [u8; 16], body: &[u8]) -> Vec<u8> {
    let len_be = (body.len() as u16).to_be_bytes();
    let mut payload = Vec::with_capacity(16 + 2 + body.len());
    payload.extend_from_slice(&ref_msg_id);
    payload.extend_from_slice(&len_be);
    payload.extend_from_slice(body);
    serialize(MessageType::Reply, &payload)
}
/// Serialize a Reaction message.
///
/// # Errors
///
/// Fails if `emoji` exceeds 255 bytes (the wire format uses a one-byte length).
pub fn serialize_reaction(ref_msg_id: [u8; 16], emoji: &[u8]) -> Result<Vec<u8>, CoreError> {
    let emoji_len = u8::try_from(emoji.len())
        .map_err(|_| CoreError::AppMessage("emoji length > 255".into()))?;
    let mut payload = Vec::with_capacity(16 + 1 + emoji.len());
    payload.extend_from_slice(&ref_msg_id);
    payload.push(emoji_len);
    payload.extend_from_slice(emoji);
    Ok(serialize(MessageType::Reaction, &payload))
}
/// Serialize a ReadReceipt message; the payload is exactly the 16-byte id.
pub fn serialize_read_receipt(msg_id: [u8; 16]) -> Vec<u8> {
    serialize(MessageType::ReadReceipt, msg_id.as_slice())
}
/// Serialize a Typing message (`active`: 0 = stopped, 1 = typing).
pub fn serialize_typing(active: u8) -> Vec<u8> {
    serialize(MessageType::Typing, &[active])
}
/// Parse bytes into (MessageType, AppMessage). Fails if version/type unknown or payload too short.
pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
    // Need at least the two framing bytes: [version][type].
    if bytes.len() < 2 {
        return Err(CoreError::AppMessage("payload too short (need version + type)".into()));
    }
    let (header, payload) = bytes.split_at(2);
    if header[0] != VERSION {
        return Err(CoreError::AppMessage(
            format!("unsupported version {}", header[0]).into(),
        ));
    }
    let msg_type = MessageType::from_byte(header[1]).ok_or_else(|| {
        CoreError::AppMessage(format!("unknown message type {}", header[1]).into())
    })?;
    // Dispatch to the per-type payload parser.
    let parsed = match msg_type {
        MessageType::Chat => parse_chat(payload),
        MessageType::Reply => parse_reply(payload),
        MessageType::Reaction => parse_reaction(payload),
        MessageType::ReadReceipt => parse_read_receipt(payload),
        MessageType::Typing => parse_typing(payload),
    }?;
    Ok((msg_type, parsed))
}
/// Decode a Chat payload: `[msg_id: 16][body_len: 2 BE][body]`.
/// Trailing bytes beyond the declared body length are ignored.
fn parse_chat(payload: &[u8]) -> Result<AppMessage, CoreError> {
    if payload.len() < 18 {
        return Err(CoreError::AppMessage("Chat payload too short".into()));
    }
    let message_id: [u8; 16] = payload[..16].try_into().expect("slice is exactly 16 bytes");
    let body_len = usize::from(u16::from_be_bytes([payload[16], payload[17]]));
    let body = payload
        .get(18..18 + body_len)
        .ok_or_else(|| CoreError::AppMessage("Chat body length exceeds payload".into()))?
        .to_vec();
    Ok(AppMessage::Chat { message_id, body })
}
/// Decode a Reply payload: `[ref_msg_id: 16][body_len: 2 BE][body]`.
/// Trailing bytes beyond the declared body length are ignored.
fn parse_reply(payload: &[u8]) -> Result<AppMessage, CoreError> {
    if payload.len() < 18 {
        return Err(CoreError::AppMessage("Reply payload too short".into()));
    }
    let ref_msg_id: [u8; 16] = payload[..16].try_into().expect("slice is exactly 16 bytes");
    let body_len = usize::from(u16::from_be_bytes([payload[16], payload[17]]));
    let body = payload
        .get(18..18 + body_len)
        .ok_or_else(|| CoreError::AppMessage("Reply body length exceeds payload".into()))?
        .to_vec();
    Ok(AppMessage::Reply { ref_msg_id, body })
}
/// Decode a Reaction payload: `[ref_msg_id: 16][emoji_len: 1][emoji]`.
fn parse_reaction(payload: &[u8]) -> Result<AppMessage, CoreError> {
    if payload.len() < 17 {
        return Err(CoreError::AppMessage("Reaction payload too short".into()));
    }
    let ref_msg_id: [u8; 16] = payload[..16].try_into().expect("slice is exactly 16 bytes");
    let emoji_len = usize::from(payload[16]);
    let emoji = payload
        .get(17..17 + emoji_len)
        .ok_or_else(|| CoreError::AppMessage("Reaction emoji length exceeds payload".into()))?
        .to_vec();
    Ok(AppMessage::Reaction { ref_msg_id, emoji })
}
/// Decode a ReadReceipt payload: the first 16 bytes are the message id;
/// any trailing bytes are ignored.
fn parse_read_receipt(payload: &[u8]) -> Result<AppMessage, CoreError> {
    let id_bytes = payload
        .get(..16)
        .ok_or_else(|| CoreError::AppMessage("ReadReceipt payload too short".into()))?;
    let mut msg_id = [0u8; 16];
    msg_id.copy_from_slice(id_bytes);
    Ok(AppMessage::ReadReceipt { msg_id })
}
/// Decode a Typing payload: first byte is the active flag; trailing bytes ignored.
fn parse_typing(payload: &[u8]) -> Result<AppMessage, CoreError> {
    match payload.first() {
        Some(&active) => Ok(AppMessage::Typing { active }),
        None => Err(CoreError::AppMessage("Typing payload empty".into())),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trip: serialize then parse, and check the type tag and fields survive.
    #[test]
    fn roundtrip_chat() {
        let body = b"hello";
        let encoded = serialize_chat(body, None);
        let (t, msg) = parse(&encoded).expect("serialize_chat output is valid");
        assert_eq!(t, MessageType::Chat);
        assert!(matches!(&msg, AppMessage::Chat { .. }), "expected Chat, got {:?}", msg);
        // message_id is randomly generated, so only the body is asserted.
        if let AppMessage::Chat { body: b, .. } = &msg {
            assert_eq!(b.as_slice(), body);
        }
    }
    #[test]
    fn roundtrip_reply() {
        let ref_id = [1u8; 16];
        let body = b"reply text";
        let encoded = serialize_reply(ref_id, body);
        let (t, msg) = parse(&encoded).expect("serialize_reply output is valid");
        assert_eq!(t, MessageType::Reply);
        assert!(matches!(&msg, AppMessage::Reply { .. }), "expected Reply, got {:?}", msg);
        if let AppMessage::Reply { ref_msg_id, body: b } = &msg {
            assert_eq!(ref_msg_id, &ref_id);
            assert_eq!(b.as_slice(), body);
        }
    }
    #[test]
    fn roundtrip_typing() {
        let encoded = serialize_typing(1);
        let (t, msg) = parse(&encoded).expect("serialize_typing output is valid");
        assert_eq!(t, MessageType::Typing);
        assert!(matches!(&msg, AppMessage::Typing { .. }), "expected Typing, got {:?}", msg);
        if let AppMessage::Typing { active } = &msg {
            assert_eq!(*active, 1);
        }
    }
}

View File

@@ -1,4 +1,4 @@
//! Error types for `quicprochat-core`.
//! Error types for `quicnprotochat-core`.
use thiserror::Error;
@@ -6,24 +6,15 @@ use thiserror::Error;
#[derive(Debug, Error)]
pub enum CoreError {
/// Cap'n Proto serialisation or deserialisation failed.
#[cfg(feature = "native")]
#[error("Cap'n Proto error: {0}")]
Capnp(#[from] capnp::Error),
/// An MLS operation failed (string description).
/// An MLS operation failed.
///
/// Preserved for backward compatibility. Prefer [`CoreError::MlsError`]
/// for new code that wraps typed openmls errors.
/// The inner string is the debug representation of the openmls error.
#[error("MLS error: {0}")]
Mls(String),
/// An MLS operation failed (typed, boxed error).
///
/// Wraps the underlying openmls error so callers can downcast to specific
/// error types when needed.
#[error("MLS error: {0}")]
MlsError(Box<dyn std::error::Error + Send + Sync>),
/// A hybrid KEM (X25519 + ML-KEM-768) operation failed.
#[error("hybrid KEM error: {0}")]
HybridKem(#[from] crate::hybrid_kem::HybridKemError),

View File

@@ -0,0 +1,597 @@
//! MLS group state machine.
//!
//! # Design
//!
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus a per-client crypto
//! backend ([`StoreCrypto`] or [`HybridCryptoProvider`] for M7). The backend
//! is **persistent** — it holds the key store that maps init-key references
//! to HPKE private keys (classical or hybrid).
//! openmls's `new_from_welcome` reads those private keys from the key store to
//! decrypt the Welcome, so the same backend instance must be used from
//! `generate_key_package` through `join_group`.
//!
//! # Wire format
//!
//! All MLS messages are serialised/deserialised using TLS presentation language
//! encoding (`tls_codec`). The resulting byte vectors are what the transport
//! layer (and the Delivery Service) sees.
//!
//! # MLS ciphersuite
//!
//! `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` — same as M2.
//!
//! # Ratchet tree
//!
//! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
use std::{path::Path, sync::Arc};
use openmls::prelude::{
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
TlsSerializeTrait,
};
use openmls_traits::OpenMlsCryptoProvider;
use crate::{
error::CoreError,
hybrid_crypto::HybridCryptoProvider,
identity::IdentityKeypair,
keystore::{DiskKeyStore, StoreCrypto},
};
// ── Constants ─────────────────────────────────────────────────────────────────
/// The single MLS ciphersuite used throughout (see the module docs above):
/// X25519 HPKE KEM, AES-128-GCM AEAD, SHA-256, Ed25519 signatures.
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
// ── GroupMember ───────────────────────────────────────────────────────────────
/// Per-client MLS state: identity keypair, crypto backend, and optional group.
///
/// Generic over the crypto provider `P`: [`StoreCrypto`] (default, classical)
/// or [`HybridCryptoProvider`] (M7, post-quantum hybrid KEM).
///
/// # Lifecycle
///
/// ```text
/// GroupMember::new(identity)
///   ├─ generate_key_package()   → upload to AS
///   ├─ create_group(group_id)   → become sole member
///   │    └─ add_member(kp)      → invite a peer; returns (commit, welcome)
///   └─ join_group(welcome)      → join after receiving a Welcome
///        ├─ send_message(msg)   → encrypt application data
///        └─ receive_message(b)  → decrypt; returns Some(plaintext) or None
/// ```
pub struct GroupMember<P: OpenMlsCryptoProvider = StoreCrypto> {
    /// Crypto backend (classical or hybrid). Holds the key store with HPKE
    /// private keys created during `generate_key_package`.
    backend: P,
    /// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
    identity: Arc<IdentityKeypair>,
    /// Active MLS group, if any. `None` until `create_group` or `join_group`
    /// (see lifecycle above).
    group: Option<MlsGroup>,
    /// Shared group configuration (wire format, ratchet tree extension, etc.).
    config: MlsGroupConfig,
}
impl GroupMember<StoreCrypto> {
    /// Create a new `GroupMember` with a fresh, in-memory crypto backend
    /// (classical X25519).
    pub fn new(identity: Arc<IdentityKeypair>) -> Self {
        Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
    }
    /// Create a `GroupMember` whose keystore is persisted on disk at `path`.
    ///
    /// # Errors
    ///
    /// Returns [`CoreError::Io`] if the keystore cannot be opened/created.
    pub fn new_persistent(
        identity: Arc<IdentityKeypair>,
        path: impl AsRef<Path>,
    ) -> Result<Self, CoreError> {
        match DiskKeyStore::persistent(path) {
            Ok(key_store) => Ok(Self::new_with_state(identity, key_store, None)),
            Err(e) => Err(CoreError::Io(format!("keystore: {e}"))),
        }
    }
    /// Create a `GroupMember` from pre-existing state: identity, key store, and
    /// an optional already-loaded group.
    pub fn new_with_state(
        identity: Arc<IdentityKeypair>,
        key_store: DiskKeyStore,
        group: Option<MlsGroup>,
    ) -> Self {
        Self {
            backend: StoreCrypto::new(key_store),
            identity,
            group,
            // Embed the ratchet tree in Welcomes so joiners need no external tree.
            config: MlsGroupConfig::builder()
                .use_ratchet_tree_extension(true)
                .build(),
        }
    }
}
impl GroupMember<HybridCryptoProvider> {
    /// Create a `GroupMember` that uses post-quantum hybrid KEM (X25519 + ML-KEM-768) for HPKE.
    ///
    /// All members of a group must use the same provider type: if the creator uses
    /// `new_with_hybrid`, KeyPackages will have hybrid init keys and joiners must
    /// also use `new_with_hybrid` to decrypt the Welcome.
    pub fn new_with_hybrid(
        identity: Arc<IdentityKeypair>,
        key_store: DiskKeyStore,
    ) -> Self {
        Self::new_with_state_hybrid(identity, key_store, None)
    }
    /// Create a PQ `GroupMember` from persisted state (identity, key store, optional group).
    pub fn new_with_state_hybrid(
        identity: Arc<IdentityKeypair>,
        key_store: DiskKeyStore,
        group: Option<MlsGroup>,
    ) -> Self {
        let backend = HybridCryptoProvider::new(key_store);
        // Same configuration as the classical constructors: the ratchet tree
        // travels inside the Welcome's GroupInfo extension.
        let config = MlsGroupConfig::builder()
            .use_ratchet_tree_extension(true)
            .build();
        Self {
            backend,
            identity,
            group,
            config,
        }
    }
}
impl<P: OpenMlsCryptoProvider> GroupMember<P> {
// ── KeyPackage ────────────────────────────────────────────────────────────
/// Generate a fresh single-use MLS KeyPackage.
///
/// The HPKE init private key is stored in `self.backend`'s key store.
/// **The same `GroupMember` instance must later call `join_group`** so
/// that `new_from_welcome` can retrieve the private key.
///
/// # Returns
///
/// TLS-encoded KeyPackage bytes, ready for upload to the Authentication
/// Service.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage.
pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
let credential_with_key = self.make_credential_with_key()?;
let key_package = KeyPackage::builder()
.build(
CryptoConfig::with_default_version(CIPHERSUITE),
&self.backend,
self.identity.as_ref(),
credential_with_key,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
key_package
.tls_serialize_detached()
.map_err(|e| CoreError::Mls(format!("{e:?}")))
}
// ── Group creation ────────────────────────────────────────────────────────
/// Create a new MLS group with `group_id` as the group identifier.
///
/// The caller becomes the sole member (epoch 0). Use `add_member` to
/// invite additional members.
///
/// `group_id` can be any non-empty byte string; SHA-256 of a human-readable
/// name is a good choice.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the group already exists or openmls fails.
pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
let credential_with_key = self.make_credential_with_key()?;
let mls_id = GroupId::from_slice(group_id);
let group = MlsGroup::new_with_group_id(
&self.backend,
self.identity.as_ref(),
&self.config,
mls_id,
credential_with_key,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
self.group = Some(group);
Ok(())
}
// ── Membership ────────────────────────────────────────────────────────────
/// Add a new member by their TLS-encoded KeyPackage bytes.
///
/// Produces a Commit (to update existing members' state) and a Welcome
/// (to bootstrap the new member). The caller is responsible for
/// distributing these:
///
/// - Send `commit_bytes` to all **existing** group members via the DS.
/// (In the 2-party case where the creator is the only member, this can
/// be discarded — the creator applies it locally via this method.)
/// - Send `welcome_bytes` to the **new** member via the DS.
///
/// This method also merges the pending Commit into the local group state
/// (advancing the epoch), so the caller is immediately ready to encrypt.
///
/// # Returns
///
/// `(commit_bytes, welcome_bytes)` — both TLS-encoded MLS messages.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the KeyPackage is malformed, no active
/// group exists, or openmls fails.
pub fn add_member(
&mut self,
mut key_package_bytes: &[u8],
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
let group = self
.group
.as_mut()
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
// Deserialise and validate the peer's KeyPackage. KeyPackage only derives
// TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
// which verifies the signature and returns a trusted KeyPackage.
let key_package: KeyPackage =
KeyPackageIn::tls_deserialize(&mut key_package_bytes)
.map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
.validate(self.backend.crypto(), ProtocolVersion::Mls10)
.map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;
// Create the Commit + Welcome. The third return value (GroupInfo) is for
// external commits and is not needed here.
let (commit_out, welcome_out, _group_info) = group
.add_members(&self.backend, self.identity.as_ref(), &[key_package])
.map_err(|e| CoreError::Mls(format!("add_members: {e:?}")))?;
// Merge the pending Commit into our own state, advancing the epoch.
group
.merge_pending_commit(&self.backend)
.map_err(|e| CoreError::Mls(format!("merge_pending_commit: {e:?}")))?;
let commit_bytes = commit_out
.to_bytes()
.map_err(|e| CoreError::Mls(format!("commit serialise: {e:?}")))?;
let welcome_bytes = welcome_out
.to_bytes()
.map_err(|e| CoreError::Mls(format!("welcome serialise: {e:?}")))?;
Ok((commit_bytes, welcome_bytes))
}
/// Join an existing MLS group from a TLS-encoded Welcome message.
///
/// The caller must have previously called [`generate_key_package`] on
/// **this same instance** so that the HPKE init private key is in the
/// backend's key store.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the Welcome does not match any known
/// KeyPackage, or openmls validation fails.
///
/// [`generate_key_package`]: Self::generate_key_package
pub fn join_group(&mut self, mut welcome_bytes: &[u8]) -> Result<(), CoreError> {
// Deserialise MlsMessageIn, then extract the inner Welcome.
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
let welcome = match msg_in.extract() {
MlsMessageInBody::Welcome(w) => w,
_ => return Err(CoreError::Mls("expected a Welcome message".into())),
};
// ratchet_tree = None because use_ratchet_tree_extension = true embeds
// the tree inside the Welcome's GroupInfo extension.
let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
self.group = Some(group);
Ok(())
}
// ── Application messages ──────────────────────────────────────────────────
/// Encrypt `plaintext` as an MLS Application message.
///
/// # Returns
///
/// TLS-encoded `MlsMessageOut` bytes (PrivateMessage variant).
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if there is no active group or encryption fails.
pub fn send_message(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
let group = self
.group
.as_mut()
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
let mls_msg: MlsMessageOut = group
.create_message(&self.backend, self.identity.as_ref(), plaintext)
.map_err(|e| CoreError::Mls(format!("create_message: {e:?}")))?;
mls_msg
.to_bytes()
.map_err(|e| CoreError::Mls(format!("message serialise: {e:?}")))
}
/// Process an incoming TLS-encoded MLS message.
///
/// # Returns
///
/// - `Ok(Some(plaintext))` for Application messages.
/// - `Ok(None)` for Commit messages (group state is updated internally).
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the message is malformed, fails
/// authentication, or the group state is inconsistent.
pub fn receive_message(&mut self, mut bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
let group = self
.group
.as_mut()
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
// into_protocol_message() is feature-gated; extract() + manual construction is not.
let protocol_message = match msg_in.extract() {
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
_ => return Err(CoreError::Mls("not a protocol message".into())),
};
let processed = group
.process_message(&self.backend, protocol_message)
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
match processed.into_content() {
ProcessedMessageContent::ApplicationMessage(app) => Ok(Some(app.into_bytes())),
ProcessedMessageContent::StagedCommitMessage(staged) => {
// Merge the Commit into the local state (epoch advances).
group
.merge_staged_commit(&self.backend, *staged)
.map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
Ok(None)
}
// Proposals are stored for a later Commit; nothing to return yet.
ProcessedMessageContent::ProposalMessage(proposal) => {
group.store_pending_proposal(*proposal);
Ok(None)
}
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
group.store_pending_proposal(*proposal);
Ok(None)
}
}
}
/// Process an incoming TLS-encoded MLS message and return sender identity + plaintext for application messages.
///
/// Same as [`receive_message`], but for Application messages returns
/// `Some((sender_identity_bytes, plaintext))` so the client can display who sent the message.
/// `sender_identity_bytes` is the MLS credential identity (e.g. Ed25519 public key for Basic credential).
///
/// Returns `Ok(None)` for Commit and Proposal messages (group state is updated internally).
pub fn receive_message_with_sender(
&mut self,
mut bytes: &[u8],
) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
let group = self
.group
.as_mut()
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
let protocol_message = match msg_in.extract() {
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
_ => return Err(CoreError::Mls("not a protocol message".into())),
};
let processed = group
.process_message(&self.backend, protocol_message)
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
let sender_identity = processed.credential().identity().to_vec();
match processed.into_content() {
ProcessedMessageContent::ApplicationMessage(app) => {
Ok(Some((sender_identity, app.into_bytes())))
}
ProcessedMessageContent::StagedCommitMessage(staged) => {
group
.merge_staged_commit(&self.backend, *staged)
.map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
Ok(None)
}
ProcessedMessageContent::ProposalMessage(proposal) => {
group.store_pending_proposal(*proposal);
Ok(None)
}
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
group.store_pending_proposal(*proposal);
Ok(None)
}
}
}
// ── Accessors ─────────────────────────────────────────────────────────────
/// Return the MLS group ID bytes, or `None` if no group is active.
pub fn group_id(&self) -> Option<Vec<u8>> {
self.group
.as_ref()
.map(|g| g.group_id().as_slice().to_vec())
}
/// Return a reference to the identity keypair.
pub fn identity(&self) -> &IdentityKeypair {
&self.identity
}
/// Return the private seed of the identity (for persistence).
pub fn identity_seed(&self) -> [u8; 32] {
self.identity.seed_bytes()
}
/// Return a reference to the underlying crypto backend.
pub fn backend(&self) -> &P {
&self.backend
}
/// Return a reference to the MLS group, if active.
pub fn group_ref(&self) -> Option<&MlsGroup> {
self.group.as_ref()
}
/// Return the identity (credential) bytes of all current group members.
///
/// Each entry is the raw credential payload (Ed25519 public key bytes)
/// extracted from the member's MLS leaf node.
pub fn member_identities(&self) -> Vec<Vec<u8>> {
let group = match self.group.as_ref() {
Some(g) => g,
None => return Vec::new(),
};
group
.members()
.map(|m| m.credential.identity().to_vec())
.collect()
}
// ── Private helpers ───────────────────────────────────────────────────────
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
let credential = Credential::new(
self.identity.public_key_bytes().to_vec(),
CredentialType::Basic,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
Ok(CredentialWithKey {
credential,
signature_key: self.identity.public_key_bytes().to_vec().into(),
})
}
}
// ── Unit tests ────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    /// Full two-party MLS round-trip: creator creates group, adds joiner, then they exchange messages.
    #[test]
    fn two_party_mls_round_trip() {
        let alice_identity = Arc::new(IdentityKeypair::generate());
        let bob_identity = Arc::new(IdentityKeypair::generate());
        let mut alice = GroupMember::new(Arc::clone(&alice_identity));
        let mut bob = GroupMember::new(Arc::clone(&bob_identity));
        // Bob publishes a KeyPackage; Alice creates the group and invites him.
        let bob_kp = bob.generate_key_package().expect("joiner KeyPackage");
        alice
            .create_group(b"test-group-m3")
            .expect("creator create group");
        let (_commit, welcome) = alice.add_member(&bob_kp).expect("creator add joiner");
        bob.join_group(&welcome).expect("joiner join group");
        // Alice → Bob.
        let wire = alice.send_message(b"hello").expect("creator send");
        let plain = bob
            .receive_message(&wire)
            .expect("joiner recv")
            .expect("application message");
        assert_eq!(plain, b"hello");
        // Bob → Alice.
        let wire = bob.send_message(b"hello back").expect("joiner send");
        let plain = alice
            .receive_message(&wire)
            .expect("creator recv")
            .expect("application message");
        assert_eq!(plain, b"hello back");
    }
    /// M7: Full two-party MLS round-trip with post-quantum hybrid KEM (HybridCryptoProvider).
    #[test]
    fn two_party_mls_round_trip_hybrid() {
        let alice_identity = Arc::new(IdentityKeypair::generate());
        let bob_identity = Arc::new(IdentityKeypair::generate());
        let mut alice = GroupMember::<HybridCryptoProvider>::new_with_hybrid(
            Arc::clone(&alice_identity),
            DiskKeyStore::ephemeral(),
        );
        let mut bob = GroupMember::<HybridCryptoProvider>::new_with_hybrid(
            Arc::clone(&bob_identity),
            DiskKeyStore::ephemeral(),
        );
        // Same flow as the classical test, but over hybrid HPKE keys.
        let bob_kp = bob
            .generate_key_package()
            .expect("joiner KeyPackage (hybrid)");
        alice
            .create_group(b"test-group-m7-hybrid")
            .expect("creator create group");
        let (_commit, welcome) = alice.add_member(&bob_kp).expect("creator add joiner");
        bob.join_group(&welcome).expect("joiner join group");
        let wire = alice.send_message(b"hello pq").expect("creator send");
        let plain = bob
            .receive_message(&wire)
            .expect("joiner recv")
            .expect("application message");
        assert_eq!(plain, b"hello pq");
        let wire = bob.send_message(b"hello back pq").expect("joiner send");
        let plain = alice
            .receive_message(&wire)
            .expect("creator recv")
            .expect("application message");
        assert_eq!(plain, b"hello back pq");
    }
    /// `group_id()` returns None before create_group, Some afterwards.
    #[test]
    fn group_id_lifecycle() {
        let identity = Arc::new(IdentityKeypair::generate());
        let mut member = GroupMember::new(identity);
        assert!(member.group_id().is_none(), "no group before create");
        member.create_group(b"gid").unwrap();
        assert_eq!(
            member.group_id().unwrap(),
            b"gid".as_slice(),
            "group_id must match what was passed"
        );
    }
}

View File

@@ -27,9 +27,8 @@ use openmls_traits::{
crypto::OpenMlsCrypto,
types::{
CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType,
KemOutput,
},
OpenMlsProvider,
OpenMlsCryptoProvider,
};
use tls_codec::SecretVLBytes;
@@ -47,50 +46,18 @@ use openmls_traits::types::{
/// Crypto backend that uses hybrid KEM for HPKE when keys are in hybrid format,
/// and delegates everything else to RustCrypto.
///
/// When `hybrid_enabled` is `true`, `derive_hpke_keypair` produces hybrid keys
/// (1216-byte public, 2432-byte private). When `false`, it delegates to
/// RustCrypto and produces classical 32-byte X25519 keys.
///
/// The `hpke_seal` / `hpke_open` methods always detect the key format by length,
/// so they work correctly regardless of the flag — a hybrid-length key will use
/// hybrid KEM, a classical-length key will use RustCrypto.
#[derive(Debug)]
pub struct HybridCrypto {
rust_crypto: RustCrypto,
/// When true, `derive_hpke_keypair` produces hybrid (X25519 + ML-KEM-768)
/// keys. When false, it produces classical X25519 keys via RustCrypto.
hybrid_enabled: bool,
}
impl HybridCrypto {
/// Create a hybrid-enabled crypto backend (derive_hpke_keypair produces hybrid keys).
pub fn new() -> Self {
Self {
rust_crypto: RustCrypto::default(),
hybrid_enabled: true,
}
}
/// Alias for `new()` — hybrid mode enabled.
pub fn new_hybrid() -> Self {
Self::new()
}
/// Create a classical crypto backend (derive_hpke_keypair produces standard
/// X25519 keys, but seal/open still accept hybrid keys by length detection).
pub fn new_classical() -> Self {
Self {
rust_crypto: RustCrypto::default(),
hybrid_enabled: false,
}
}
/// Whether this backend produces hybrid keys from `derive_hpke_keypair`.
pub fn is_hybrid_enabled(&self) -> bool {
self.hybrid_enabled
}
/// Expose the underlying RustCrypto for rand() and delegation.
pub fn rust_crypto(&self) -> &RustCrypto {
&self.rust_crypto
@@ -129,15 +96,6 @@ impl OpenMlsCrypto for HybridCrypto {
self.rust_crypto.hkdf_extract(hash_type, salt, ikm)
}
fn hmac(
&self,
hash_type: HashType,
key: &[u8],
message: &[u8],
) -> Result<SecretVLBytes, CryptoError> {
self.rust_crypto.hmac(hash_type, key, message)
}
fn hkdf_expand(
&self,
hash_type: HashType,
@@ -199,18 +157,23 @@ impl OpenMlsCrypto for HybridCrypto {
info: &[u8],
aad: &[u8],
ptxt: &[u8],
) -> Result<HpkeCiphertext, CryptoError> {
) -> HpkeCiphertext {
if Self::is_hybrid_public_key(pk_r) {
let recipient_pk = HybridPublicKey::from_bytes(pk_r)
.map_err(|_| CryptoError::CryptoLibraryError)?;
let envelope = hybrid_encrypt(&recipient_pk, ptxt, info, aad)
.map_err(|_| CryptoError::CryptoLibraryError)?;
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
Ok(HpkeCiphertext {
kem_output: kem_output.into(),
ciphertext: ciphertext.into(),
})
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
Ok(pk) => pk,
Err(_) => return self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
};
match hybrid_encrypt(&recipient_pk, ptxt) {
Ok(envelope) => {
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
HpkeCiphertext {
kem_output: kem_output.into(),
ciphertext: ciphertext.into(),
}
}
Err(_) => self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
}
} else {
self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
}
@@ -225,17 +188,17 @@ impl OpenMlsCrypto for HybridCrypto {
aad: &[u8],
) -> Result<Vec<u8>, CryptoError> {
if Self::is_hybrid_private_key(sk_r) {
let keypair = HybridKeypair::from_private_bytes(sk_r)
.map_err(|_| CryptoError::HpkeDecryptionError)?;
let keypair = match HybridKeypair::from_private_bytes(sk_r) {
Ok(kp) => kp,
Err(_) => return self.rust_crypto.hpke_open(config, input, sk_r, info, aad),
};
let envelope: Vec<u8> = input
.kem_output.as_slice()
.iter()
.chain(input.ciphertext.as_slice())
.copied()
.collect();
// Pass HPKE info and aad through for proper context binding (RFC 9180).
hybrid_decrypt(&keypair, &envelope, info, aad)
.map_err(|_| CryptoError::HpkeDecryptionError)
hybrid_decrypt(&keypair, &envelope).map_err(|_| CryptoError::HpkeDecryptionError)
} else {
self.rust_crypto.hpke_open(config, input, sk_r, info, aad)
}
@@ -248,13 +211,16 @@ impl OpenMlsCrypto for HybridCrypto {
info: &[u8],
exporter_context: &[u8],
exporter_length: usize,
) -> Result<(KemOutput, ExporterSecret), CryptoError> {
) -> Result<(Vec<u8>, ExporterSecret), CryptoError> {
if Self::is_hybrid_public_key(pk_r) {
// A key that passes the hybrid length check but fails deserialization
// is corrupted — return an error instead of silently downgrading to
// classical crypto (which would defeat PQ protection).
let recipient_pk = HybridPublicKey::from_bytes(pk_r)
.map_err(|_| CryptoError::SenderSetupError)?;
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
Ok(pk) => pk,
Err(_) => {
return self.rust_crypto.hpke_setup_sender_and_export(
config, pk_r, info, exporter_context, exporter_length,
)
}
};
let (kem_output, shared_secret) =
hybrid_encapsulate_only(&recipient_pk).map_err(|_| CryptoError::SenderSetupError)?;
let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
@@ -289,14 +255,13 @@ impl OpenMlsCrypto for HybridCrypto {
}
}
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> Result<HpkeKeyPair, CryptoError> {
if self.hybrid_enabled && config.0 == HpkeKemType::DhKem25519 {
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> HpkeKeyPair {
if config.0 == HpkeKemType::DhKem25519 {
let kp = HybridKeypair::derive_from_ikm(ikm);
let private_bytes = kp.private_to_bytes();
Ok(HpkeKeyPair {
private: private_bytes.as_slice().into(),
HpkeKeyPair {
private: kp.private_to_bytes().into(),
public: kp.public_key().to_bytes(),
})
}
} else {
self.rust_crypto.derive_hpke_keypair(config, ikm)
}
@@ -312,32 +277,12 @@ pub struct HybridCryptoProvider {
}
impl HybridCryptoProvider {
/// Create a hybrid-enabled provider (KeyPackages will contain hybrid init keys).
pub fn new(key_store: DiskKeyStore) -> Self {
Self {
crypto: HybridCrypto::new_hybrid(),
crypto: HybridCrypto::new(),
key_store,
}
}
/// Alias for `new()` — hybrid mode enabled.
pub fn new_hybrid(key_store: DiskKeyStore) -> Self {
Self::new(key_store)
}
/// Create a classical-mode provider (KeyPackages use standard X25519 init keys,
/// but seal/open still accept hybrid keys by length detection).
pub fn new_classical(key_store: DiskKeyStore) -> Self {
Self {
crypto: HybridCrypto::new_classical(),
key_store,
}
}
/// Whether this provider produces hybrid keys from `derive_hpke_keypair`.
pub fn is_hybrid_enabled(&self) -> bool {
self.crypto.is_hybrid_enabled()
}
}
impl Default for HybridCryptoProvider {
@@ -346,10 +291,10 @@ impl Default for HybridCryptoProvider {
}
}
impl OpenMlsProvider for HybridCryptoProvider {
impl OpenMlsCryptoProvider for HybridCryptoProvider {
type CryptoProvider = HybridCrypto;
type RandProvider = RustCrypto;
type StorageProvider = DiskKeyStore;
type KeyStoreProvider = DiskKeyStore;
fn crypto(&self) -> &Self::CryptoProvider {
&self.crypto
@@ -359,7 +304,7 @@ impl OpenMlsProvider for HybridCryptoProvider {
self.crypto.rust_crypto()
}
fn storage(&self) -> &Self::StorageProvider {
fn key_store(&self) -> &Self::KeyStoreProvider {
&self.key_store
}
}
@@ -367,7 +312,6 @@ impl OpenMlsProvider for HybridCryptoProvider {
// ── Tests ───────────────────────────────────────────────────────────────────
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
use openmls_traits::types::HpkeKdfType;
@@ -386,7 +330,7 @@ mod tests {
let crypto = HybridCrypto::new();
let ikm = b"test-ikm-for-hybrid-hpke-keypair";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
assert_eq!(keypair.public.len(), HYBRID_PUBLIC_KEY_LEN);
assert_eq!(keypair.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN);
@@ -400,7 +344,7 @@ mod tests {
info,
aad,
plaintext,
).unwrap();
);
assert!(!ct.kem_output.as_slice().is_empty());
assert!(!ct.ciphertext.as_slice().is_empty());
@@ -422,7 +366,7 @@ mod tests {
let crypto = HybridCrypto::new();
let ikm = b"exporter-ikm";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
let info = b"";
let exporter_context = b"MLS 1.0 external init";
let exporter_length = 32;
@@ -454,57 +398,11 @@ mod tests {
assert_eq!(sender_exported.as_ref(), receiver_exported.as_ref());
}
/// Classical mode: derive_hpke_keypair produces standard 32-byte X25519 keys.
#[test]
fn classical_mode_produces_standard_keys() {
let crypto = HybridCrypto::new_classical();
let ikm = b"test-ikm-for-classical-hpke";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
// Classical X25519 keys are 32 bytes
assert_eq!(keypair.public.len(), 32);
assert_eq!(keypair.private.as_ref().len(), 32);
}
/// Classical mode round-trip: seal/open works with classical keys.
#[test]
fn classical_mode_seal_open_round_trip() {
let crypto = HybridCrypto::new_classical();
let ikm = b"test-ikm-for-classical-round-trip";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
assert_eq!(keypair.public.len(), 32); // classical key
let plaintext = b"hello classical MLS";
let info = b"mls 1.0 test";
let aad = b"additional data";
let ct = crypto.hpke_seal(
hpke_config_dhkem_x25519(),
&keypair.public,
info,
aad,
plaintext,
).unwrap();
assert!(!ct.kem_output.as_slice().is_empty());
let decrypted = crypto
.hpke_open(
hpke_config_dhkem_x25519(),
&ct,
keypair.private.as_ref(),
info,
aad,
)
.expect("hpke_open with classical keys");
assert_eq!(decrypted.as_slice(), plaintext);
}
/// KeyPackage generation with HybridCryptoProvider (validates full HPKE path in MLS).
#[test]
fn key_package_generation_with_hybrid_provider() {
use openmls::prelude::{
BasicCredential, CredentialWithKey, KeyPackage,
Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
};
use std::sync::Arc;
use tls_codec::Serialize;
@@ -517,24 +415,26 @@ mod tests {
let provider = HybridCryptoProvider::default();
let identity = Arc::new(IdentityKeypair::generate());
let credential: openmls::prelude::Credential =
BasicCredential::new(identity.public_key_bytes().to_vec()).into();
let credential = Credential::new(
identity.public_key_bytes().to_vec(),
CredentialType::Basic,
)
.unwrap();
let credential_with_key = CredentialWithKey {
credential,
signature_key: identity.public_key_bytes().to_vec().into(),
};
let key_package_bundle = KeyPackage::builder()
let key_package = KeyPackage::builder()
.build(
CIPHERSUITE,
CryptoConfig::with_default_version(CIPHERSUITE),
&provider,
identity.as_ref(),
credential_with_key,
)
.expect("KeyPackage with hybrid HPKE");
let bytes = key_package_bundle
.key_package()
let bytes = key_package
.tls_serialize_detached()
.expect("serialize KeyPackage");
assert!(!bytes.is_empty());

View File

@@ -41,14 +41,8 @@ use ml_kem::kem::{DecapsulationKey, EncapsulationKey};
const HYBRID_VERSION: u8 = 0x01;
/// HKDF info string for domain separation.
/// Frozen at the original project name for backward compatibility with existing
/// encrypted state files and messages. Do not change.
const HKDF_INFO: &[u8] = b"quicnprotochat-hybrid-v1";
/// HKDF salt for domain separation (defence-in-depth; IKM already has 64 bytes of entropy).
/// Frozen — see [`HKDF_INFO`].
const HKDF_SALT: &[u8] = b"quicnprotochat-hybrid-v1-salt";
/// ML-KEM-768 ciphertext size in bytes.
const MLKEM_CT_LEN: usize = 1088;
@@ -125,7 +119,6 @@ pub struct HybridPublicKey {
}
/// HKDF info for deriving HPKE keypair seed from IKM (MLS compatibility).
/// Frozen — see [`HKDF_INFO`].
const HKDF_INFO_HPKE_KEYPAIR: &[u8] = b"quicnprotochat-hybrid-hpke-keypair-v1";
impl HybridKeypair {
@@ -159,14 +152,11 @@ impl HybridKeypair {
}
/// Serialise private key for MLS key store: x25519_sk(32) || mlkem_dk(2400).
///
/// The returned value is wrapped in [`Zeroizing`] so secret key material
/// is securely erased when dropped.
pub fn private_to_bytes(&self) -> Zeroizing<Vec<u8>> {
pub fn private_to_bytes(&self) -> Vec<u8> {
let mut out = Vec::with_capacity(HYBRID_PRIVATE_KEY_LEN);
out.extend_from_slice(self.x25519_sk.as_bytes());
out.extend_from_slice(self.mlkem_dk.as_bytes().as_slice());
Zeroizing::new(out)
out
}
/// Reconstruct a hybrid keypair from private key bytes (from MLS key store).
@@ -174,8 +164,7 @@ impl HybridKeypair {
if bytes.len() != HYBRID_PRIVATE_KEY_LEN {
return Err(HybridKemError::TooShort(bytes.len()));
}
let x25519_sk = StaticSecret::from(<[u8; 32]>::try_from(&bytes[0..32])
.expect("slice is exactly 32 bytes (guaranteed by HYBRID_PRIVATE_KEY_LEN check)"));
let x25519_sk = StaticSecret::from(<[u8; 32]>::try_from(&bytes[0..32]).unwrap());
let x25519_pk = X25519Public::from(&x25519_sk);
let mlkem_dk_arr = Array::try_from(&bytes[32..32 + MLKEM_DK_LEN])
@@ -258,15 +247,10 @@ impl HybridPublicKey {
/// Encrypt `plaintext` to `recipient_pk` using X25519 + ML-KEM-768 hybrid KEM.
///
/// `info` is optional HPKE context info incorporated into key derivation.
/// `aad` is optional additional authenticated data bound to the AEAD ciphertext.
///
/// Returns the complete hybrid envelope as a byte vector.
pub fn hybrid_encrypt(
recipient_pk: &HybridPublicKey,
plaintext: &[u8],
info: &[u8],
aad: &[u8],
) -> Result<Vec<u8>, HybridKemError> {
// 1. Ephemeral X25519 DH
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
@@ -282,19 +266,18 @@ pub fn hybrid_encrypt(
.encapsulate(&mut OsRng)
.map_err(|_| HybridKemError::EncryptionFailed)?;
// 3. Derive AEAD key from combined shared secrets (with caller info for context binding)
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), info);
// 3. Derive AEAD key from combined shared secrets
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
// Generate a random 12-byte nonce (not derived from HKDF).
let mut nonce_bytes = [0u8; 12];
OsRng.fill_bytes(&mut nonce_bytes);
let aead_nonce = *Nonce::from_slice(&nonce_bytes);
// 4. AEAD encrypt with caller-supplied AAD
// 4. AEAD encrypt
let cipher = ChaCha20Poly1305::new(&aead_key);
let aead_payload = chacha20poly1305::aead::Payload { msg: plaintext, aad };
let ct = cipher
.encrypt(&aead_nonce, aead_payload)
.encrypt(&aead_nonce, plaintext)
.map_err(|_| HybridKemError::EncryptionFailed)?;
// 5. Assemble envelope: version || x25519_eph_pk || mlkem_ct || nonce || aead_ct
@@ -309,14 +292,7 @@ pub fn hybrid_encrypt(
}
/// Decrypt a hybrid envelope using the recipient's private key.
///
/// `info` and `aad` must match what was passed to `hybrid_encrypt`.
pub fn hybrid_decrypt(
keypair: &HybridKeypair,
envelope: &[u8],
info: &[u8],
aad: &[u8],
) -> Result<Vec<u8>, HybridKemError> {
pub fn hybrid_decrypt(keypair: &HybridKeypair, envelope: &[u8]) -> Result<Vec<u8>, HybridKemError> {
if envelope.len() < HEADER_LEN + 16 {
// 16 = minimum AEAD tag
return Err(HybridKemError::TooShort(envelope.len()));
@@ -358,14 +334,13 @@ pub fn hybrid_decrypt(
.decapsulate(&mlkem_ct_arr)
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
// 3. Derive AEAD key (with caller info for context binding)
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), info);
// 3. Derive AEAD key
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
// 4. Decrypt with caller-supplied AAD
// 4. Decrypt
let cipher = ChaCha20Poly1305::new(&aead_key);
let aead_payload = chacha20poly1305::aead::Payload { msg: aead_ct, aad };
let plaintext = cipher
.decrypt(nonce, aead_payload)
.decrypt(nonce, aead_ct)
.map_err(|_| HybridKemError::DecryptionFailed)?;
Ok(plaintext)
@@ -391,9 +366,8 @@ pub fn hybrid_encapsulate_only(
.encapsulate(&mut OsRng)
.map_err(|_| HybridKemError::EncryptionFailed)?;
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), b"");
let shared_secret: [u8; 32] = aead_key.as_slice().try_into()
.expect("AEAD key is always exactly 32 bytes");
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
let shared_secret = aead_key.as_slice().try_into().unwrap();
let mut kem_output = Vec::with_capacity(HYBRID_KEM_OUTPUT_LEN);
kem_output.push(HYBRID_VERSION);
@@ -416,8 +390,7 @@ pub fn hybrid_decapsulate_only(
return Err(HybridKemError::UnsupportedVersion(kem_output[0]));
}
let eph_pk_bytes: [u8; 32] = kem_output[1..33].try_into()
.expect("slice is exactly 32 bytes (guaranteed by HYBRID_KEM_OUTPUT_LEN check)");
let eph_pk_bytes: [u8; 32] = kem_output[1..33].try_into().unwrap();
let eph_pk = X25519Public::from(eph_pk_bytes);
let x25519_ss = keypair.x25519_sk.diffie_hellman(&eph_pk);
@@ -428,9 +401,8 @@ pub fn hybrid_decapsulate_only(
.decapsulate(&mlkem_ct_arr)
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), b"");
Ok(aead_key.as_slice().try_into()
.expect("AEAD key is always exactly 32 bytes"))
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
Ok(aead_key.as_slice().try_into().unwrap())
}
/// Export a secret from shared secret (MLS HPKE exporter compatibility).
@@ -440,7 +412,7 @@ pub fn hybrid_export(
exporter_context: &[u8],
length: usize,
) -> Vec<u8> {
let hk = Hkdf::<Sha256>::new(Some(HKDF_SALT), shared_secret);
let hk = Hkdf::<Sha256>::new(None, shared_secret);
let mut out = vec![0u8; length];
hk.expand(exporter_context, &mut out).expect("valid length");
out
@@ -448,26 +420,18 @@ pub fn hybrid_export(
/// Derive AEAD key from the combined X25519 + ML-KEM shared secrets.
///
/// `extra_info` is optional caller-supplied context (e.g. HPKE `info`) that is
/// appended to the domain-separation label for additional binding.
///
/// The nonce is generated randomly per-encryption rather than derived from
/// HKDF, preventing nonce reuse when the same shared secret is (accidentally)
/// used more than once.
fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8], extra_info: &[u8]) -> Key {
fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8]) -> Key {
let mut ikm = Zeroizing::new(vec![0u8; x25519_ss.len() + mlkem_ss.len()]);
ikm[..x25519_ss.len()].copy_from_slice(x25519_ss);
ikm[x25519_ss.len()..].copy_from_slice(mlkem_ss);
let hk = Hkdf::<Sha256>::new(Some(HKDF_SALT), &ikm);
// Combine domain-separation label with caller-supplied context.
let mut info = Vec::with_capacity(HKDF_INFO.len() + extra_info.len());
info.extend_from_slice(HKDF_INFO);
info.extend_from_slice(extra_info);
let hk = Hkdf::<Sha256>::new(None, &ikm);
let mut key_bytes = Zeroizing::new([0u8; 32]);
hk.expand(&info, &mut *key_bytes)
hk.expand(HKDF_INFO, &mut *key_bytes)
.expect("32 bytes is valid HKDF-SHA256 output length");
*Key::from_slice(&*key_bytes)
@@ -476,7 +440,6 @@ fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8], extra_info: &[u8]) -> Key
// ── Tests ───────────────────────────────────────────────────────────────────
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
@@ -494,39 +457,21 @@ mod tests {
let pk = kp.public_key();
let plaintext = b"hello post-quantum world!";
let envelope = hybrid_encrypt(&pk, plaintext, b"", b"").unwrap();
let recovered = hybrid_decrypt(&kp, &envelope, b"", b"").unwrap();
let envelope = hybrid_encrypt(&pk, plaintext).unwrap();
let recovered = hybrid_decrypt(&kp, &envelope).unwrap();
assert_eq!(recovered, plaintext);
}
#[test]
fn encrypt_decrypt_with_info_aad() {
let kp = HybridKeypair::generate();
let pk = kp.public_key();
let plaintext = b"context-bound payload";
let info = b"mls epoch 42";
let aad = b"group-id-abc";
let envelope = hybrid_encrypt(&pk, plaintext, info, aad).unwrap();
let recovered = hybrid_decrypt(&kp, &envelope, info, aad).unwrap();
assert_eq!(recovered, plaintext);
// Mismatched info must fail
assert!(hybrid_decrypt(&kp, &envelope, b"wrong info", aad).is_err());
// Mismatched aad must fail
assert!(hybrid_decrypt(&kp, &envelope, info, b"wrong aad").is_err());
}
#[test]
fn wrong_key_decryption_fails() {
let kp_sender_target = HybridKeypair::generate();
let kp_wrong = HybridKeypair::generate();
let pk = kp_sender_target.public_key();
let envelope = hybrid_encrypt(&pk, b"secret", b"", b"").unwrap();
let envelope = hybrid_encrypt(&pk, b"secret").unwrap();
let result = hybrid_decrypt(&kp_wrong, &envelope, b"", b"");
let result = hybrid_decrypt(&kp_wrong, &envelope);
assert!(result.is_err());
}
@@ -535,12 +480,12 @@ mod tests {
let kp = HybridKeypair::generate();
let pk = kp.public_key();
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
let last = envelope.len() - 1;
envelope[last] ^= 0x01;
assert!(matches!(
hybrid_decrypt(&kp, &envelope, b"", b""),
hybrid_decrypt(&kp, &envelope),
Err(HybridKemError::DecryptionFailed)
));
}
@@ -550,11 +495,11 @@ mod tests {
let kp = HybridKeypair::generate();
let pk = kp.public_key();
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
// Flip a byte in the ML-KEM ciphertext region (starts at offset 33)
envelope[40] ^= 0xFF;
assert!(hybrid_decrypt(&kp, &envelope, b"", b"").is_err());
assert!(hybrid_decrypt(&kp, &envelope).is_err());
}
#[test]
@@ -562,11 +507,11 @@ mod tests {
let kp = HybridKeypair::generate();
let pk = kp.public_key();
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
// Flip a byte in the X25519 ephemeral pk region (offset 1..33)
envelope[5] ^= 0xFF;
assert!(hybrid_decrypt(&kp, &envelope, b"", b"").is_err());
assert!(hybrid_decrypt(&kp, &envelope).is_err());
}
#[test]
@@ -574,11 +519,11 @@ mod tests {
let kp = HybridKeypair::generate();
let pk = kp.public_key();
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
envelope[0] = 0xFF;
assert!(matches!(
hybrid_decrypt(&kp, &envelope, b"", b""),
hybrid_decrypt(&kp, &envelope),
Err(HybridKemError::UnsupportedVersion(0xFF))
));
}
@@ -587,7 +532,7 @@ mod tests {
fn envelope_too_short_rejected() {
let kp = HybridKeypair::generate();
assert!(matches!(
hybrid_decrypt(&kp, &[0x01; 10], b"", b""),
hybrid_decrypt(&kp, &[0x01; 10]),
Err(HybridKemError::TooShort(10))
));
}
@@ -603,8 +548,8 @@ mod tests {
// Verify restored keypair can decrypt
let pk = kp.public_key();
let ct = hybrid_encrypt(&pk, b"test", b"", b"").unwrap();
let pt = hybrid_decrypt(&restored, &ct, b"", b"").unwrap();
let ct = hybrid_encrypt(&pk, b"test").unwrap();
let pt = hybrid_decrypt(&restored, &ct).unwrap();
assert_eq!(pt, b"test");
}
@@ -625,8 +570,8 @@ mod tests {
let pk = kp.public_key();
let plaintext = vec![0xAB; 50_000]; // 50 KB
let envelope = hybrid_encrypt(&pk, &plaintext, b"", b"").unwrap();
let recovered = hybrid_decrypt(&kp, &envelope, b"", b"").unwrap();
let envelope = hybrid_encrypt(&pk, &plaintext).unwrap();
let recovered = hybrid_decrypt(&kp, &envelope).unwrap();
assert_eq!(recovered, plaintext);
}

View File

@@ -0,0 +1,135 @@
//! Ed25519 identity keypair for MLS credentials and AS registration.
//!
//! The [`IdentityKeypair`] is the long-term identity key embedded in MLS
//! `BasicCredential`s. It is used for signing MLS messages and as the
//! indexing key for the Authentication Service.
//!
//! # Zeroize
//!
//! The 32-byte private seed is stored as `Zeroizing<[u8; 32]>`, which zeroes
//! the bytes on drop. `[u8; 32]` is `Copy + Default` and satisfies zeroize's
//! `DefaultIsZeroes` constraint, avoiding a conflict with ed25519-dalek's
//! `SigningKey` zeroize impl.
//!
//! # Fingerprint
//!
//! A 32-byte SHA-256 digest of the raw public key bytes is used as a compact,
//! collision-resistant identifier for logging.
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
use openmls_traits::signatures::Signer;
use openmls_traits::types::{Error as MlsError, SignatureScheme};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use zeroize::Zeroizing;
/// An Ed25519 identity keypair.
///
/// Created with [`IdentityKeypair::generate`]. The private signing key seed
/// is zeroed when this struct is dropped.
pub struct IdentityKeypair {
    /// Raw 32-byte private seed — zeroized on drop.
    ///
    /// Stored as bytes rather than `SigningKey` to satisfy zeroize's
    /// `DefaultIsZeroes` bound on `Zeroizing<T>`.
    seed: Zeroizing<[u8; 32]>,
    /// Corresponding 32-byte public verifying key.
    ///
    /// Cached at construction so reading the public half never requires
    /// rebuilding the `SigningKey` from the seed.
    verifying: VerifyingKey,
}
impl IdentityKeypair {
    /// Recreate an identity keypair from a 32-byte seed.
    ///
    /// The verifying key is derived from the seed and cached on the struct.
    pub fn from_seed(seed: [u8; 32]) -> Self {
        let verifying = SigningKey::from_bytes(&seed).verifying_key();
        Self {
            seed: Zeroizing::new(seed),
            verifying,
        }
    }

    /// Return the raw 32-byte private seed (for persistence).
    pub fn seed_bytes(&self) -> [u8; 32] {
        *self.seed
    }
}
impl IdentityKeypair {
    /// Generate a fresh random Ed25519 identity keypair.
    pub fn generate() -> Self {
        use rand::rngs::OsRng;
        let sk = SigningKey::generate(&mut OsRng);
        Self {
            verifying: sk.verifying_key(),
            seed: Zeroizing::new(sk.to_bytes()),
        }
    }

    /// Return the raw 32-byte Ed25519 public key.
    ///
    /// This is the byte array used as `identityKey` in `auth.capnp` calls.
    pub fn public_key_bytes(&self) -> [u8; 32] {
        self.verifying.to_bytes()
    }

    /// Return the SHA-256 fingerprint of the public key (32 bytes).
    pub fn fingerprint(&self) -> [u8; 32] {
        Sha256::digest(self.verifying.to_bytes()).into()
    }

    /// Reconstruct the `SigningKey` from the stored seed bytes.
    fn signing_key(&self) -> SigningKey {
        SigningKey::from_bytes(&self.seed)
    }
}
/// Implement the openmls `Signer` trait so `IdentityKeypair` can be passed
/// directly to `KeyPackage::builder().build(...)` without needing the external
/// `openmls_basic_credential` crate.
impl Signer for IdentityKeypair {
    /// Sign `payload` with the Ed25519 private key; returns the signature
    /// bytes as a `Vec`. The Ed25519 path here never hits the `Err` arm.
    fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, MlsError> {
        // The signing key is rebuilt from the stored seed on each call
        // (see `signing_key`), so no expanded key lives on this struct.
        let sk = self.signing_key();
        let sig: ed25519_dalek::Signature = sk.sign(payload);
        Ok(sig.to_bytes().to_vec())
    }

    /// Advertise Ed25519 so openmls matches this signer to the ciphersuite.
    fn signature_scheme(&self) -> SignatureScheme {
        SignatureScheme::ED25519
    }
}
/// Serialize as the raw 32-byte private seed.
impl Serialize for IdentityKeypair {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Only the seed is persisted; the verifying key is re-derived on
        // deserialization (see `from_seed`).
        serializer.serialize_bytes(&self.seed[..])
    }
}
/// Deserialize from raw seed bytes; any length other than 32 is rejected.
impl<'de> Deserialize<'de> for IdentityKeypair {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let raw: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
        let seed = <[u8; 32]>::try_from(raw.as_slice())
            .map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
        Ok(Self::from_seed(seed))
    }
}
/// Debug output shows only a 4-byte fingerprint prefix — never key material.
impl std::fmt::Debug for IdentityKeypair {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Hex-encode the first four fingerprint bytes, e.g. "0a1b2c3d".
        let prefix: String = self
            .fingerprint()
            .iter()
            .take(4)
            .map(|b| format!("{b:02x}"))
            .collect();
        f.debug_struct("IdentityKeypair")
            .field("fingerprint", &prefix)
            .finish_non_exhaustive()
    }
}

View File

@@ -14,18 +14,18 @@
//! # Wire format
//!
//! KeyPackages are TLS-encoded using `tls_codec` (same version as openmls).
//! The resulting bytes are opaque to the quicprochat transport layer.
//! The resulting bytes are opaque to the quicnprotochat transport layer.
use openmls::prelude::{
BasicCredential, Ciphersuite, CredentialWithKey, KeyPackage, KeyPackageIn,
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
KeyPackageIn, TlsDeserializeTrait, TlsSerializeTrait,
};
use openmls_rust_crypto::OpenMlsRustCrypto;
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
use sha2::{Digest, Sha256};
use crate::{error::CoreError, identity::IdentityKeypair};
/// The MLS ciphersuite used throughout quicprochat (RFC 9420 §17.1).
/// The MLS ciphersuite used throughout quicnprotochat (RFC 9420 §17.1).
pub const ALLOWED_CIPHERSUITE: Ciphersuite =
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
@@ -74,8 +74,8 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
// Build a BasicCredential using the raw Ed25519 public key bytes as the
// MLS identity. Per RFC 9420, any byte string may serve as the identity.
let credential: openmls::prelude::Credential =
BasicCredential::new(identity.public_key_bytes().to_vec()).into();
let credential = Credential::new(identity.public_key_bytes().to_vec(), CredentialType::Basic)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
// The `signature_key` in CredentialWithKey is the Ed25519 public key that
// will be used to verify the KeyPackage's leaf node signature.
@@ -87,13 +87,19 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
// `IdentityKeypair` implements `openmls_traits::signatures::Signer`
// so it can be passed directly to the builder.
let key_package_bundle = KeyPackage::builder()
.build(CIPHERSUITE, &backend, identity, credential_with_key)
let key_package = KeyPackage::builder()
.build(
CryptoConfig::with_default_version(CIPHERSUITE),
&backend,
identity,
credential_with_key,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
// TLS-encode the KeyPackage.
let tls_bytes = key_package_bundle
.key_package()
// TLS-encode the KeyPackage using the trait from the openmls prelude.
// This uses tls_codec 0.3 (the same version openmls uses internally),
// avoiding a duplicate-trait conflict with tls_codec 0.4.
let tls_bytes = key_package
.tls_serialize_detached()
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;

View File

@@ -0,0 +1,144 @@
use std::{
collections::HashMap,
fs,
path::{Path, PathBuf},
sync::RwLock,
};
use openmls_rust_crypto::RustCrypto;
use openmls_traits::{
key_store::{MlsEntity, OpenMlsKeyStore},
OpenMlsCryptoProvider,
};
/// A disk-backed key store implementing `OpenMlsKeyStore`.
///
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on
/// every store/delete so HPKE init keys survive process restarts.
#[derive(Debug)]
pub struct DiskKeyStore {
    /// `None` for ephemeral (in-memory) stores; `Some(path)` for persistent ones.
    path: Option<PathBuf>,
    /// Key → serialized-entity bytes; guarded for concurrent store/read/delete.
    values: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
}

/// Errors produced by [`DiskKeyStore`] operations.
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum DiskKeyStoreError {
    /// An entity or the on-disk map could not be (de)serialized.
    #[error("serialization error")]
    Serialization,
    /// Filesystem failure while reading or flushing the store.
    #[error("io error: {0}")]
    Io(String),
}
impl DiskKeyStore {
    /// In-memory keystore (no persistence).
    pub fn ephemeral() -> Self {
        Self {
            path: None,
            values: RwLock::new(HashMap::new()),
        }
    }

    /// Persistent keystore backed by `path`. Creates an empty store if missing.
    ///
    /// # Errors
    /// Returns `Io` if the file cannot be read, or `Serialization` if its
    /// contents are not a valid bincode-encoded map.
    pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
        let path = path.as_ref().to_path_buf();
        let values = if path.exists() {
            let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
            if bytes.is_empty() {
                // Treat an empty file (e.g. a previously created placeholder)
                // as an empty store rather than a decode error.
                HashMap::new()
            } else {
                bincode::deserialize(&bytes).map_err(|_| DiskKeyStoreError::Serialization)?
            }
        } else {
            HashMap::new()
        };
        Ok(Self {
            path: Some(path),
            values: RwLock::new(values),
        })
    }

    /// Write the whole map to disk. No-op for ephemeral stores.
    ///
    /// Writes to a sibling `.tmp` file and renames it into place so that a
    /// crash mid-write can never truncate or corrupt the existing store file
    /// (rename is atomic within one directory on POSIX filesystems). The
    /// previous implementation wrote the target file directly, which could
    /// destroy the HPKE init keys if the process died mid-flush.
    fn flush(&self) -> Result<(), DiskKeyStoreError> {
        let Some(path) = &self.path else {
            return Ok(());
        };
        // Serialize under the read lock, but perform file I/O only after the
        // lock is released so a slow disk doesn't block concurrent readers.
        let bytes = {
            let values = self.values.read().unwrap();
            bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?
        };
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
        }
        let tmp = path.with_extension("tmp");
        fs::write(&tmp, &bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
        fs::rename(&tmp, path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))
    }
}
impl Default for DiskKeyStore {
    /// Default is an ephemeral (non-persistent) store.
    fn default() -> Self {
        Self::ephemeral()
    }
}
impl OpenMlsKeyStore for DiskKeyStore {
    type Error = DiskKeyStoreError;

    /// JSON-encode `v`, stash it under `k`, then persist the whole map.
    fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> {
        let encoded = serde_json::to_vec(v).map_err(|_| DiskKeyStoreError::Serialization)?;
        {
            // Scope the write lock so it is released before flushing to disk.
            let mut map = self.values.write().unwrap();
            map.insert(k.to_vec(), encoded);
        }
        self.flush()
    }

    /// Look up `k` and JSON-decode it; `None` on a miss or decode failure.
    fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> {
        self.values
            .read()
            .unwrap()
            .get(k)
            .and_then(|raw| serde_json::from_slice(raw).ok())
    }

    /// Remove `k` (a no-op if absent) and persist the shrunken map.
    fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> {
        {
            let mut map = self.values.write().unwrap();
            map.remove(k);
        }
        self.flush()
    }
}
/// Crypto provider that couples RustCrypto with a disk-backed key store.
#[derive(Debug)]
pub struct StoreCrypto {
    /// Crypto + randomness primitives (RustCrypto serves both roles below).
    crypto: RustCrypto,
    /// Entity storage; may be persistent or ephemeral.
    key_store: DiskKeyStore,
}

impl StoreCrypto {
    /// Build a provider around an existing key store.
    pub fn new(key_store: DiskKeyStore) -> Self {
        Self {
            crypto: RustCrypto::default(),
            key_store,
        }
    }
}

impl Default for StoreCrypto {
    /// Default uses an ephemeral key store (nothing persisted to disk).
    fn default() -> Self {
        Self::new(DiskKeyStore::ephemeral())
    }
}

impl OpenMlsCryptoProvider for StoreCrypto {
    type CryptoProvider = RustCrypto;
    type RandProvider = RustCrypto;
    type KeyStoreProvider = DiskKeyStore;

    fn crypto(&self) -> &Self::CryptoProvider {
        &self.crypto
    }

    /// `RustCrypto` doubles as the randomness source.
    fn rand(&self) -> &Self::RandProvider {
        &self.crypto
    }

    fn key_store(&self) -> &Self::KeyStoreProvider {
        &self.key_store
    }
}

View File

@@ -0,0 +1,42 @@
//! Core cryptographic primitives, MLS group state machine, and hybrid
//! post-quantum KEM for quicnprotochat.
//!
//! # Module layout
//!
//! | Module          | Responsibility |
//! |-----------------|------------------------------------------------------------------|
//! | `app_message`   | Rich application payload (Chat, Reply, Reaction, ReadReceipt, Typing) |
//! | `error`         | [`CoreError`] type |
//! | `identity`      | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
//! | `keypackage`    | [`generate_key_package`] — standalone KeyPackage generation |
//! | `group`         | [`GroupMember`] — MLS group lifecycle (create/join/send/recv) |
//! | `hybrid_kem`    | Hybrid X25519 + ML-KEM-768 key encapsulation |
//! | `hybrid_crypto` | [`HybridCrypto`] / [`HybridCryptoProvider`] (re-exported below) |
//! | `keystore`      | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
//! | `opaque_auth`   | OPAQUE cipher suite configuration for password authentication |
mod app_message;
mod error;
mod group;
pub mod hybrid_crypto;
pub mod hybrid_kem;
mod identity;
mod keypackage;
mod keystore;
pub mod opaque_auth;

// ── Public API ────────────────────────────────────────────────────────────────
// Everything a downstream crate needs is re-exported here so callers never
// have to reach into the private modules.
pub use app_message::{
    serialize, serialize_chat, serialize_reaction, serialize_read_receipt, serialize_reply,
    serialize_typing, parse, generate_message_id, AppMessage, MessageType, VERSION as APP_MESSAGE_VERSION,
};
pub use error::CoreError;
pub use group::GroupMember;
pub use hybrid_kem::{
    hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
    HybridPublicKey,
};
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
pub use identity::IdentityKeypair;
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
pub use keystore::{DiskKeyStore, StoreCrypto};
// Re-exported so callers can name the MLS group type without depending on
// openmls directly.
pub use openmls::prelude::MlsGroup;

View File

@@ -5,7 +5,7 @@
use opaque_ke::CipherSuite;
/// OPAQUE cipher suite for quicprochat.
/// OPAQUE cipher suite for quicnprotochat.
///
/// - **OPRF**: Ristretto255 (curve25519-based, ~128-bit security)
/// - **Key exchange**: Triple-DH (3DH) over Ristretto255 with SHA-512

View File

@@ -0,0 +1,22 @@
[package]
name = "quicnprotochat-gui"
version = "0.1.0"
edition = "2021"
description = "Native GUI for quicnprotochat (Tauri 2)."
license = "MIT"

# Explicit [[bin]]: the binary keeps the crate name while the Tauri app body
# lives in the library (main.rs just calls quicnprotochat_gui::run()).
[[bin]]
name = "quicnprotochat-gui"
path = "src/main.rs"

[dependencies]
# Sibling workspace crates: core crypto, server-facing client, capnp schemas.
quicnprotochat-core = { path = "../quicnprotochat-core" }
quicnprotochat-client = { path = "../quicnprotochat-client" }
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
tauri = { version = "2", features = [] }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

[build-dependencies]
# Required by the build.rs hook (tauri_build::build).
tauri-build = "2"

View File

@@ -0,0 +1,32 @@
# quicnprotochat-gui
Native GUI for quicnprotochat using [Tauri 2](https://v2.tauri.app/). The UI runs in a webview; all server-facing work (capnp-rpc, `node_service::Client`) runs on a **dedicated backend thread** with a tokio `LocalSet`, since that code is `!Send`.
## Backend threading model
- A single **backend thread** runs a tokio `LocalSet` and a request-response loop.
- The UI thread sends commands over an `mpsc` channel: `Whoami { state_path, password }` or `Health { server, ca_cert, server_name }`.
- For each request, the backend runs sync code (whoami) or `LocalSet::run_until(async { ... })` (health). It then sends `Result<String, String>` back on the provided reply channel.
- Tauri commands (`whoami`, `health`) block on that reply so the frontend gets a simple async-style result.
## How to run
From the workspace root:
```bash
cargo run -p quicnprotochat-gui
```
**Linux:** Tauri uses GTK. Install development packages if the build fails, e.g.:
- Debian/Ubuntu: `sudo apt install libgtk-3-dev libwebkit2gtk-4.1-dev`
- Fedora: `sudo dnf install gtk3-devel webkit2gtk4.1-devel`
## Frontend
The frontend is static HTML in `ui/index.html` (no npm or build step). It provides:
- **Whoami** state path (and optional password); calls `whoami` and shows JSON (identity_key, fingerprint, etc.).
- **Health** server address; calls `health` and shows server status and RTT JSON.
Default CA cert and server name for health are the same as the CLI (`data/server-cert.der`, `localhost`) unless overridden via optional params.

View File

@@ -0,0 +1,3 @@
// Standard Tauri 2 build-script hook; required for `tauri::generate_context!`
// in the library crate to resolve the app configuration.
fn main() {
    tauri_build::build()
}

View File

@@ -0,0 +1,11 @@
{
"$schema": "https://schema.tauri.app/config/2/capability",
"identifier": "default",
"description": "Capability for the main window (custom commands whoami, health are allowed by default)",
"windows": ["main"],
"permissions": [
"core:default",
"core:window:allow-close",
"core:window:allow-set-title"
]
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
{"default":{"identifier":"default","description":"Capability for the main window (custom commands whoami, health are allowed by default)","local":true,"windows":["main"],"permissions":["core:default","core:window:allow-close","core:window:allow-set-title"]}}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.1 KiB

View File

@@ -0,0 +1,86 @@
//! Backend service running on a dedicated thread with a tokio LocalSet.
//!
//! All server-facing work (capnp-rpc, node_service::Client) is !Send and must run on this
//! single thread. The UI thread sends commands over a channel; this thread runs
//! `LocalSet::run_until` for each request and sends the result back.
use std::path::PathBuf;
use std::sync::mpsc;
use std::thread;
use tokio::runtime::Builder;
use tokio::task::LocalSet;
use quicnprotochat_client::{cmd_health_json, whoami_json};
/// Commands the UI can send to the backend thread.
pub enum BackendCommand {
    /// Run `whoami_json` against the local state file (synchronous, no network).
    Whoami {
        state_path: String,
        /// Optional password for the state file — passed through to
        /// `whoami_json`; exact semantics defined by that function.
        password: Option<String>,
    },
    /// Run `cmd_health_json` against the server (async, on the LocalSet).
    Health {
        server: String,
        ca_cert: PathBuf,
        server_name: String,
    },
}

/// Response sent back to the UI: JSON string on success, error text on failure.
pub type BackendResponse = Result<String, String>;
/// Spawn the backend thread and return a sender to post commands and a join handle.
/// The backend runs a tokio LocalSet and processes one command at a time:
/// for each received command it runs `LocalSet::run_until(future)` (for async commands)
/// or runs sync code (whoami), then sends the result on the provided reply channel.
pub fn spawn_backend() -> (mpsc::Sender<(BackendCommand, mpsc::Sender<BackendResponse>)>, thread::JoinHandle<()>) {
    let (tx, rx) = mpsc::channel::<(BackendCommand, mpsc::Sender<BackendResponse>)>();
    let handle = thread::spawn(move || {
        // Single-threaded runtime: capnp-rpc futures are !Send and must stay here.
        let rt = Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("backend tokio runtime");
        let local = LocalSet::new();
        // The receiver iterator ends — and the thread exits — once every
        // command sender has been dropped.
        for (cmd, reply_tx) in rx {
            // Ignore send failures: the requester may have stopped waiting.
            let _ = reply_tx.send(run_command(&local, &rt, cmd));
        }
    });
    (tx, handle)
}
/// Execute one backend command, blocking the backend thread until it finishes.
fn run_command(
    local: &LocalSet,
    rt: &tokio::runtime::Runtime,
    cmd: BackendCommand,
) -> BackendResponse {
    match cmd {
        BackendCommand::Whoami { state_path, password } => {
            // Purely local state inspection — no async machinery needed.
            whoami_json(&PathBuf::from(&state_path), password.as_deref())
                .map_err(|e| e.to_string())
        }
        BackendCommand::Health {
            server,
            ca_cert,
            server_name,
        } => {
            // Request-response: run the whole health future via
            // LocalSet::run_until so capnp-rpc and connect_node (!Send)
            // never leave this thread.
            rt.block_on(local.run_until(cmd_health_json(&server, &ca_cert, &server_name)))
                .map_err(|e| e.to_string())
        }
    }
}
/// Default CA cert path (relative to cwd or absolute); same default as CLI.
pub fn default_ca_cert() -> PathBuf {
    "data/server-cert.der".into()
}
/// Default TLS server name (matches the CLI default).
pub fn default_server_name() -> String {
    String::from("localhost")
}

View File

@@ -0,0 +1,76 @@
//! quicnprotochat native GUI (Tauri 2).
//!
//! The backend runs on a dedicated thread with a tokio LocalSet; all server-facing
//! work (capnp-rpc, node_service::Client) is dispatched there. Tauri commands
//! block on the request-response channel until the backend returns.
mod backend;
use std::path::PathBuf;
use std::sync::mpsc;
use backend::{spawn_backend, BackendCommand};
/// Shared state: sender to the backend thread.
struct BackendState {
    /// Each request is posted as `(command, reply sender)`; the backend sends
    /// exactly one `BackendResponse` on the reply channel per command.
    tx: mpsc::Sender<(BackendCommand, mpsc::Sender<backend::BackendResponse>)>,
}
/// Runs whoami on the backend thread and returns JSON string (identity_key, fingerprint, etc.).
#[tauri::command]
fn whoami(
    state: tauri::State<BackendState>,
    state_path: String,
    password: Option<String>,
) -> Result<String, String> {
    let (reply_tx, reply_rx) = mpsc::channel();
    state
        .tx
        .send((
            BackendCommand::Whoami {
                state_path,
                password,
            },
            reply_tx,
        ))
        .map_err(|e| e.to_string())?;
    // Block this command until the backend replies. The `?` forwards a
    // channel error; the inner Result is the backend's own outcome.
    reply_rx.recv().map_err(|e| e.to_string())?
}
/// Runs health check on the backend thread (LocalSet::run_until) and returns status JSON.
#[tauri::command]
fn health(
    state: tauri::State<BackendState>,
    server: String,
    ca_cert: Option<String>,
    server_name: Option<String>,
) -> Result<String, String> {
    // Fall back to the CLI defaults when the frontend omits the optionals.
    let cmd = BackendCommand::Health {
        server,
        ca_cert: ca_cert.map_or_else(backend::default_ca_cert, PathBuf::from),
        server_name: server_name.unwrap_or_else(backend::default_server_name),
    };
    let (reply_tx, reply_rx) = mpsc::channel();
    state.tx.send((cmd, reply_tx)).map_err(|e| e.to_string())?;
    // Block until the backend thread answers this request.
    reply_rx.recv().map_err(|e| e.to_string())?
}
/// Entry point: spawn the backend thread, register Tauri commands, run the app.
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
    // The join handle is dropped, detaching the backend thread for the
    // lifetime of the app; it exits once the command sender is dropped.
    let (backend_tx, _backend_handle) = spawn_backend();
    tauri::Builder::default()
        .manage(BackendState { tx: backend_tx })
        .invoke_handler(tauri::generate_handler![whoami, health])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}

View File

@@ -0,0 +1,5 @@
//! Desktop entry point for quicnprotochat-gui.
// Thin shim: all app logic lives in the library crate's `run()`.
fn main() {
    quicnprotochat_gui::run()
}

View File

@@ -0,0 +1,24 @@
{
"$schema": "https://schema.tauri.app/config/2",
"productName": "quicnprotochat-gui",
"identifier": "chat.quicnproto.gui",
"build": {
"frontendDist": "./ui",
"beforeBuildCommand": "",
"beforeDevCommand": ""
},
"app": {
"windows": [
{
"title": "quicnprotochat",
"width": 640,
"height": 480
}
],
"security": {
"csp": null
}
},
"bundle": {},
"plugins": {}
}

View File

@@ -0,0 +1,54 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>quicnprotochat</title>
  <style>
    body { font-family: system-ui, sans-serif; margin: 1rem; }
    button { margin: 0.25rem; padding: 0.5rem 1rem; cursor: pointer; }
    #output { white-space: pre-wrap; background: #f0f0f0; padding: 0.75rem; margin-top: 1rem; min-height: 4rem; border-radius: 4px; }
    .error { color: #c00; }
  </style>
</head>
<body>
  <h1>quicnprotochat</h1>
  <!-- Two actions: whoami reads local state, health pings the server. -->
  <p>
    <button id="whoami">Whoami</button>
    <button id="health">Health</button>
  </p>
  <label>State path: <input id="statePath" type="text" value="quicnprotochat-state.bin" size="32" /></label>
  <br />
  <label>Server: <input id="server" type="text" value="127.0.0.1:7000" size="24" /></label>
  <div id="output">Click Whoami or Health. Results appear here.</div>
  <script>
    const output = document.getElementById('output');
    const statePath = document.getElementById('statePath');
    const server = document.getElementById('server');
    // Render a result (or an error, styled red) into the output panel.
    function show(result, isError = false) {
      output.textContent = result;
      output.className = isError ? 'error' : '';
    }
    // Tauri injects window.__TAURI__ only when served inside the webview.
    const invoke = window.__TAURI__?.core?.invoke;
    if (!invoke) {
      show('Tauri API not available (not running inside Tauri?).', true);
    } else {
      document.getElementById('whoami').addEventListener('click', function () {
        show('Running whoami…');
        invoke('whoami', { statePath: statePath.value.trim(), password: null })
          .then(function (s) { show(s); })
          .catch(function (e) { show(String(e), true); });
      });
      document.getElementById('health').addEventListener('click', function () {
        show('Running health…');
        // ca_cert / server_name are omitted, so the backend uses its defaults.
        invoke('health', { server: server.value.trim() })
          .then(function (s) { show(s); })
          .catch(function (e) { show(String(e), true); });
      });
    }
  </script>
</body>
</html>

View File

@@ -0,0 +1,12 @@
[package]
name = "quicnprotochat-p2p"
version = "0.1.0"
edition = "2021"
description = "P2P transport layer for quicnprotochat using iroh."
license = "MIT"

[dependencies]
# iroh supplies QUIC endpoints with NAT traversal via relay servers.
iroh = "0.96"
tokio = { workspace = true }
tracing = { workspace = true }
anyhow = { workspace = true }

View File

@@ -0,0 +1,186 @@
//! P2P transport layer for quicnprotochat using iroh.
//!
//! Provides direct peer-to-peer QUIC connections with NAT traversal via iroh
//! relay servers. When both peers are online, messages bypass the central
//! server entirely.
//!
//! # Architecture
//!
//! ```text
//! Client A ── iroh direct (QUIC) ── Client B (preferred: low latency)
//! │ │
//! └── QUIC/TLS ── Server ── QUIC/TLS ┘ (fallback: store-and-forward)
//! ```
use iroh::{Endpoint, EndpointAddr, PublicKey, SecretKey};
/// ALPN protocol identifier for quicnprotochat P2P messaging.
const P2P_ALPN: &[u8] = b"quicnprotochat/p2p/1";
/// A P2P node backed by an iroh endpoint.
///
/// Manages direct QUIC connections to peers with automatic NAT traversal.
pub struct P2pNode {
    /// The iroh QUIC endpoint (identity, sockets, relay handling).
    endpoint: Endpoint,
}

/// Received P2P message with sender information.
pub struct P2pMessage {
    /// Public key (node ID) of the connecting peer, as reported by iroh.
    pub sender: PublicKey,
    /// Raw message bytes (the 4-byte length prefix is already stripped).
    pub payload: Vec<u8>,
}
impl P2pNode {
/// Start a new P2P node.
///
/// Generates a fresh identity or reuses a provided secret key.
pub async fn start(secret_key: Option<SecretKey>) -> anyhow::Result<Self> {
let mut builder = Endpoint::builder();
if let Some(sk) = secret_key {
builder = builder.secret_key(sk);
}
builder = builder.alpns(vec![P2P_ALPN.to_vec()]);
let endpoint = builder.bind().await?;
tracing::info!(
node_id = %endpoint.id().fmt_short(),
"P2P node started"
);
Ok(Self { endpoint })
}
/// This node's public key (used as node ID for peer discovery).
pub fn node_id(&self) -> PublicKey {
self.endpoint.id()
}
/// This node's secret key (for persistence across restarts).
pub fn secret_key(&self) -> SecretKey {
self.endpoint.secret_key().clone()
}
/// Get the node's network address information for publishing to discovery.
pub fn endpoint_addr(&self) -> EndpointAddr {
self.endpoint.addr()
}
/// Send a payload directly to a peer via P2P QUIC.
pub async fn send(&self, peer: impl Into<EndpointAddr>, payload: &[u8]) -> anyhow::Result<()> {
let peer = peer.into();
let conn = self.endpoint.connect(peer, P2P_ALPN).await?;
let mut send = conn.open_uni().await.map_err(|e| anyhow::anyhow!("{e}"))?;
// Simple framing: 4-byte length prefix + payload.
let len = (payload.len() as u32).to_be_bytes();
send.write_all(&len)
.await
.map_err(|e| anyhow::anyhow!("{e}"))?;
send.write_all(payload)
.await
.map_err(|e| anyhow::anyhow!("{e}"))?;
send.finish().map_err(|e| anyhow::anyhow!("{e}"))?;
// Wait until the peer has consumed the stream before dropping.
send.stopped().await.map_err(|e| anyhow::anyhow!("{e}"))?;
tracing::debug!(
peer = %conn.remote_id().fmt_short(),
bytes = payload.len(),
"P2P message sent"
);
Ok(())
}
/// Accept a single incoming P2P message.
///
/// Blocks until a peer connects and sends data on a unidirectional stream.
///
/// # Errors
///
/// Fails when the endpoint stops accepting connections, the handshake or
/// stream fails, the stream ends before the framed payload is complete, or
/// the advertised payload length exceeds the size cap.
pub async fn recv(&self) -> anyhow::Result<P2pMessage> {
    // Upper bound on a single framed payload. Guards against a malicious
    // peer advertising a huge length and exhausting memory before any
    // bytes are actually read.
    const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024;

    let incoming = self
        .endpoint
        .accept()
        .await
        .ok_or_else(|| anyhow::anyhow!("no more incoming connections"))?;
    let conn = incoming.await.map_err(|e| anyhow::anyhow!("{e}"))?;
    let sender = conn.remote_id();
    let mut recv = conn
        .accept_uni()
        .await
        .map_err(|e| anyhow::anyhow!("{e}"))?;
    // Framing matches `send`: 4-byte big-endian length prefix, then payload.
    let mut len_buf = [0u8; 4];
    recv.read_exact(&mut len_buf)
        .await
        .map_err(|e| anyhow::anyhow!("{e}"))?;
    let len = u32::from_be_bytes(len_buf) as usize;
    if len > MAX_PAYLOAD_BYTES {
        anyhow::bail!("P2P payload too large: {len} bytes");
    }
    let mut payload = vec![0u8; len];
    recv.read_exact(&mut payload)
        .await
        .map_err(|e| anyhow::anyhow!("{e}"))?;
    tracing::debug!(
        peer = %sender.fmt_short(),
        bytes = len,
        "P2P message received"
    );
    Ok(P2pMessage { sender, payload })
}
/// Gracefully shut down the P2P node.
///
/// Consumes `self` and awaits the underlying endpoint's close before
/// returning.
pub async fn close(self) {
    self.endpoint.close().await;
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use iroh::RelayMode;

    /// Create a local-only P2P node with relays disabled (for testing).
    async fn local_node() -> P2pNode {
        let endpoint = Endpoint::builder()
            .alpns(vec![P2P_ALPN.to_vec()])
            .relay_mode(RelayMode::Disabled)
            .bind()
            .await
            .unwrap();
        P2pNode { endpoint }
    }

    /// End-to-end check: a payload sent from one local node arrives at the
    /// other with the sender's node ID attached.
    #[tokio::test]
    async fn p2p_round_trip() {
        let sender = local_node().await;
        let receiver = local_node().await;
        let receiver_addr = receiver.endpoint_addr();
        let sender_id = sender.node_id();
        let payload = b"hello via P2P";
        let recv_handle = tokio::spawn(async move {
            let msg = receiver.recv().await.unwrap();
            assert_eq!(msg.payload, payload.to_vec());
            assert_eq!(msg.sender, sender_id);
        });
        // NOTE(review): sleep-based synchronization — presumably gives the
        // spawned receiver task time to reach `accept()` before we connect.
        // Race-prone under load; consider an explicit readiness signal.
        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
        sender.send(receiver_addr, payload).await.unwrap();
        recv_handle.await.unwrap();
        // NOTE(review): purpose of this pause is not evident from the code —
        // presumably lets in-flight QUIC teardown settle before close; confirm.
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        sender.close().await;
    }
}

View File

@@ -0,0 +1,15 @@
[package]
name = "quicnprotochat-proto"
version = "0.1.0"
edition = "2021"
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat. No crypto, no I/O."
license = "MIT"
# build.rs invokes capnpc to generate Rust source from .capnp schemas.
build = "build.rs"

[dependencies]
# Runtime support library for the generated Cap'n Proto code.
capnp = { workspace = true }

[build-dependencies]
# Schema compiler driver used by build.rs (needs the `capnp` CLI on PATH).
capnpc = { workspace = true }

View File

@@ -0,0 +1,54 @@
//! Build script for quicnprotochat-proto.
//!
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
//! located in the workspace-root `schemas/` directory.
//!
//! # Prerequisites
//!
//! The `capnp` CLI must be installed and on `PATH`.
//!
//! Debian/Ubuntu: apt-get install capnproto
//! macOS: brew install capnp
//! Docker: see docker/Dockerfile
use std::{env, path::PathBuf};
fn main() {
    // The schema file names are listed once so the rerun-if-changed
    // declarations and the compiler invocation below can never drift
    // out of sync (previously the list was duplicated in both places).
    const SCHEMAS: [&str; 3] = ["auth.capnp", "delivery.capnp", "node.capnp"];

    let manifest_dir =
        PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
    // Workspace root is two levels above this crate (quicnprotochat/crates/quicnprotochat-proto).
    let workspace_root = manifest_dir
        .join("../..")
        .canonicalize()
        .expect("could not canonicalize workspace root path");
    let schemas_dir = workspace_root.join("schemas");

    // Treat `schemas/` as the include root so that inter-schema imports
    // resolve correctly.
    let mut compiler = capnpc::CompilerCommand::new();
    compiler.src_prefix(&schemas_dir);
    for name in SCHEMAS {
        let schema = schemas_dir.join(name);
        // Re-run this build script whenever this schema file changes.
        println!("cargo:rerun-if-changed={}", schema.display());
        compiler.file(schema);
    }
    compiler.run().expect(
        "Cap'n Proto schema compilation failed. \
         Is `capnp` installed? (apt-get install capnproto / brew install capnp)",
    );
}

View File

@@ -0,0 +1,65 @@
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
//!
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
#![allow(unused_parens)]
//! # Design constraints
//!
//! This crate is intentionally restricted:
//! - **No crypto** — key material never enters this crate.
//! - **No I/O** — callers own transport; this crate only converts bytes ↔ types.
//! - **No async** — pure synchronous data-layer code.
//!
//! # Generated code
//!
//! `build.rs` invokes `capnpc` at compile time and writes generated Rust source
//! into `$OUT_DIR`. The `include!` macros below splice that code in as a module.
// ── Generated types ───────────────────────────────────────────────────────────
/// Cap'n Proto generated types for `schemas/auth.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated and
/// spliced in from `$OUT_DIR` at compile time (see `build.rs`).
pub mod auth_capnp {
    include!(concat!(env!("OUT_DIR"), "/auth_capnp.rs"));
}

/// Cap'n Proto generated types for `schemas/delivery.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated and
/// spliced in from `$OUT_DIR` at compile time (see `build.rs`).
pub mod delivery_capnp {
    include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
}

/// Cap'n Proto generated types for `schemas/node.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated and
/// spliced in from `$OUT_DIR` at compile time (see `build.rs`).
pub mod node_capnp {
    include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
}
// ── Low-level byte ↔ message conversions ──────────────────────────────────────
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
///
/// The output includes the segment table header. For transport, the
/// `quicnprotochat-core` frame codec prepends a 4-byte little-endian length field.
pub fn to_bytes<A: capnp::message::Allocator>(
    msg: &capnp::message::Builder<A>,
) -> Result<Vec<u8>, capnp::Error> {
    let mut out = Vec::new();
    capnp::serialize::write_message(&mut out, msg).map(|()| out)
}
/// Deserialise unpacked wire bytes into a message with owned segments.
///
/// Uses `ReaderOptions::new()` (default limits: 64 MiB, 512 nesting levels).
/// Callers that receive data from untrusted peers should consider tightening
/// the traversal limit via `ReaderOptions::traversal_limit_in_words`.
pub fn from_bytes(
    bytes: &[u8],
) -> Result<capnp::message::Reader<capnp::serialize::OwnedSegments>, capnp::Error> {
    capnp::serialize::read_message(
        &mut std::io::Cursor::new(bytes),
        capnp::message::ReaderOptions::new(),
    )
}

View File

@@ -0,0 +1,59 @@
[package]
name = "quicnprotochat-server"
version = "0.1.0"
edition = "2021"
description = "Delivery Service and Authentication Service for quicnprotochat."
license = "MIT"

# Single server binary hosting both services.
[[bin]]
name = "quicnprotochat-server"
path = "src/main.rs"

[dependencies]
# Sibling workspace crates (core logic + generated schema types).
quicnprotochat-core = { path = "../quicnprotochat-core" }
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
# Serialisation + RPC
capnp = { workspace = true }
capnp-rpc = { workspace = true }
# Async
tokio = { workspace = true }
tokio-util = { workspace = true }
futures = { workspace = true }
# Server utilities
dashmap = { workspace = true }
governor = { workspace = true }
sha2 = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
rustls = { workspace = true }
rcgen = { workspace = true }
# Crypto — OPAQUE PAKE
opaque-ke = { workspace = true }
rand = { workspace = true }
subtle = { workspace = true }
# Database
rusqlite = { workspace = true }
# Error handling
anyhow = { workspace = true }
thiserror = { workspace = true }
bincode = { workspace = true }
serde = { workspace = true }
# CLI
clap = { workspace = true }
toml = { version = "0.8" }
# Metrics (Prometheus)
metrics = "0.22"
metrics-exporter-prometheus = "0.15"

[dev-dependencies]
tempfile = "3"

View File

@@ -1,43 +1,34 @@
use std::net::IpAddr;
use std::sync::Arc;
use dashmap::DashMap;
use quicprochat_proto::node_capnp::auth;
use quicnprotochat_proto::node_capnp::auth;
use sha2::Digest;
use subtle::ConstantTimeEq;
use tokio::sync::Notify;
use zeroize::Zeroizing;
use crate::error_codes::*;
pub const SESSION_TTL_SECS: u64 = 24 * 60 * 60; // 24 hours
pub const PENDING_LOGIN_TTL_SECS: u64 = 300; // 5 minutes
pub const RATE_LIMIT_WINDOW_SECS: u64 = 60;
pub const RATE_LIMIT_MAX_ENQUEUES: u32 = 100;
/// Maximum enqueues per second per token before GCRA rate limiting kicks in.
pub const RATE_LIMIT_MAX_PER_SEC: std::num::NonZeroU32 =
std::num::NonZeroU32::new(100).expect("RATE_LIMIT_MAX_PER_SEC must be non-zero");
#[derive(Clone)]
/// Keyed GCRA rate limiter backed by DashMap (one bucket per session token).
pub type RateLimiter = governor::DefaultKeyedRateLimiter<Vec<u8>>;
#[derive(Clone, Debug)]
pub struct AuthConfig {
/// Server bearer token — zeroized on drop to prevent memory disclosure.
pub required_token: Option<Zeroizing<Vec<u8>>>,
pub required_token: Option<Vec<u8>>,
/// When true, a valid bearer token (no session) is accepted and the request's identity/key is used (dev/e2e only).
/// CLI flag: --allow-insecure-auth / QPQ_ALLOW_INSECURE_AUTH.
pub allow_insecure_identity_from_request: bool,
}
impl std::fmt::Debug for AuthConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AuthConfig")
.field("required_token", &self.required_token.as_ref().map(|_| "[REDACTED]"))
.field("allow_insecure_identity_from_request", &self.allow_insecure_identity_from_request)
.finish()
}
}
impl AuthConfig {
pub fn new(required_token: Option<String>, allow_insecure_identity_from_request: bool) -> Self {
let required_token = required_token
.filter(|s| !s.is_empty())
.map(|s| Zeroizing::new(s.into_bytes()));
.map(|s| s.into_bytes());
Self {
required_token,
allow_insecure_identity_from_request,
@@ -60,11 +51,6 @@ pub struct PendingLogin {
pub created_at: u64,
}
pub struct RateEntry {
pub count: u32,
pub window_start: u64,
}
#[derive(Clone)]
pub struct AuthContext {
pub token: Vec<u8>,
@@ -72,41 +58,20 @@ pub struct AuthContext {
}
pub fn current_timestamp() -> u64 {
match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) {
Ok(d) => d.as_secs(),
Err(_) => {
tracing::warn!("system time is before UNIX_EPOCH; using 0 for session/rate-limit timestamps");
0
}
}
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs()
}
pub fn check_rate_limit(
rate_limits: &DashMap<Vec<u8>, RateEntry>,
token: &[u8],
) -> Result<(), capnp::Error> {
let now = current_timestamp();
let mut entry = rate_limits.entry(token.to_vec()).or_insert(RateEntry {
count: 0,
window_start: now,
});
if now - entry.window_start >= RATE_LIMIT_WINDOW_SECS {
entry.count = 1;
entry.window_start = now;
} else {
entry.count += 1;
if entry.count > RATE_LIMIT_MAX_ENQUEUES {
return Err(crate::error_codes::coded_error(
E014_RATE_LIMITED,
format!(
"rate limit exceeded: {} enqueues in {}s window",
RATE_LIMIT_MAX_ENQUEUES, RATE_LIMIT_WINDOW_SECS
),
));
}
}
Ok(())
/// Check the GCRA rate limit for a token. Returns an error if the token has exceeded the quota.
pub fn check_rate_limit(limiter: &RateLimiter, token: &[u8]) -> Result<(), capnp::Error> {
limiter.check_key(&token.to_vec()).map_err(|_| {
crate::error_codes::coded_error(
E014_RATE_LIMITED,
format!("rate limit exceeded: max {} enqueues/s", RATE_LIMIT_MAX_PER_SEC),
)
})
}
pub fn validate_auth(
@@ -145,7 +110,7 @@ pub fn validate_auth_context(
}
if let Some(expected) = &cfg.required_token {
if expected.len() == token.len() && bool::from(expected.as_slice().ct_eq(&token)) {
if expected.len() == token.len() && bool::from(expected.ct_eq(&token)) {
return Ok(AuthContext {
token,
identity_key: None,
@@ -178,50 +143,7 @@ pub fn validate_auth_context(
Err(crate::error_codes::coded_error(E003_INVALID_TOKEN, "invalid accessToken"))
}
/// Validate a raw bearer token (no Cap'n Proto dependency).
/// Used by the WebSocket JSON-RPC bridge.
pub fn validate_token_raw(
cfg: &AuthConfig,
sessions: &DashMap<Vec<u8>, SessionInfo>,
token: &[u8],
) -> Result<AuthContext, String> {
if token.is_empty() {
return Err("empty access token".to_string());
}
// Check static bearer token.
if let Some(expected) = &cfg.required_token {
if expected.len() == token.len() && bool::from(expected.as_slice().ct_eq(token)) {
return Ok(AuthContext {
token: token.to_vec(),
identity_key: None,
});
}
}
// Check session tokens.
if let Some(session) = sessions.get(token) {
let now = current_timestamp();
if session.expires_at > now {
let identity = if session.identity_key.is_empty() {
None
} else {
Some(session.identity_key.clone())
};
return Ok(AuthContext {
token: token.to_vec(),
identity_key: identity,
});
}
drop(session);
sessions.remove(token);
return Err("session token has expired".to_string());
}
Err("invalid access token".to_string())
}
pub fn require_identity(auth_ctx: &AuthContext) -> Result<&[u8], capnp::Error> {
pub fn require_identity<'a>(auth_ctx: &'a AuthContext) -> Result<&'a [u8], capnp::Error> {
match auth_ctx.identity_key.as_deref() {
Some(ik) => Ok(ik),
None => Err(crate::error_codes::coded_error(
@@ -233,7 +155,7 @@ pub fn require_identity(auth_ctx: &AuthContext) -> Result<&[u8], capnp::Error> {
pub fn require_identity_match(auth_ctx: &AuthContext, expected: &[u8]) -> Result<(), capnp::Error> {
let ik = require_identity(auth_ctx)?;
if ik.len() != expected.len() || !bool::from(ik.ct_eq(expected)) {
if ik != expected {
return Err(crate::error_codes::coded_error(
E016_IDENTITY_MISMATCH,
"access token is bound to a different identity",
@@ -271,30 +193,6 @@ pub fn waiter(waiters: &DashMap<Vec<u8>, Arc<Notify>>, recipient_key: &[u8]) ->
.clone()
}
pub const CONN_RATE_LIMIT_WINDOW_SECS: u64 = 60;
pub const CONN_RATE_LIMIT_MAX: u32 = 50;
/// Per-IP connection rate limiter. Returns `true` if the connection is allowed.
pub fn check_conn_rate_limit(
conn_rate_limits: &DashMap<IpAddr, RateEntry>,
ip: IpAddr,
) -> bool {
let now = current_timestamp();
let mut entry = conn_rate_limits.entry(ip).or_insert(RateEntry {
count: 0,
window_start: now,
});
if now - entry.window_start >= CONN_RATE_LIMIT_WINDOW_SECS {
entry.count = 1;
entry.window_start = now;
true
} else {
entry.count += 1;
entry.count <= CONN_RATE_LIMIT_MAX
}
}
pub fn fingerprint(data: &[u8]) -> Vec<u8> {
sha2::Sha256::digest(data).to_vec()
}

View File

@@ -0,0 +1,187 @@
use std::path::{Path, PathBuf};
use anyhow::Context;
use serde::Deserialize;
/// Default QUIC listen address (all interfaces, port 7000).
pub const DEFAULT_LISTEN: &str = "0.0.0.0:7000";
/// Default directory for persisted server data.
pub const DEFAULT_DATA_DIR: &str = "data";
/// Default TLS certificate path (DER, per the file extension).
pub const DEFAULT_TLS_CERT: &str = "data/server-cert.der";
/// Default TLS private key path (DER, per the file extension).
pub const DEFAULT_TLS_KEY: &str = "data/server-key.der";
/// Default storage backend selector ("file"; the alternative is "sql").
pub const DEFAULT_STORE_BACKEND: &str = "file";
/// Default SQLCipher database path (used when the store backend is "sql").
pub const DEFAULT_DB_PATH: &str = "data/quicnprotochat.db";
/// Raw values parsed from the optional TOML config file.
///
/// Every field is optional; `merge_config` resolves precedence between CLI
/// arguments, these values, and the compiled-in defaults.
#[derive(Debug, Default, Deserialize)]
pub struct FileConfig {
    /// QUIC listen address (host:port).
    pub listen: Option<String>,
    /// Directory for persisted server data.
    pub data_dir: Option<String>,
    /// TLS certificate path.
    pub tls_cert: Option<PathBuf>,
    /// TLS private key path.
    pub tls_key: Option<PathBuf>,
    /// Required bearer token for authenticated requests.
    pub auth_token: Option<String>,
    /// Allow running without an auth token (development only).
    pub allow_insecure_auth: Option<bool>,
    /// When true, enqueue does not require an identity-bound session: only a valid token is required.
    /// The server does not associate the request with a specific sender (Sealed Sender).
    #[serde(default)]
    pub sealed_sender: Option<bool>,
    /// Storage backend selector ("file" or "sql").
    pub store_backend: Option<String>,
    /// Path to the SQLCipher database file.
    pub db_path: Option<PathBuf>,
    /// SQLCipher encryption key.
    pub db_key: Option<String>,
    /// Metrics HTTP listen address (e.g. "0.0.0.0:9090"). If set, /metrics is served there.
    pub metrics_listen: Option<String>,
    /// When true and metrics_listen is set, start the metrics server.
    #[serde(default)]
    pub metrics_enabled: Option<bool>,
}
/// Fully-resolved server configuration after merging CLI arguments, the
/// config file, and compiled-in defaults (see `merge_config`).
#[derive(Debug)]
pub struct EffectiveConfig {
    /// QUIC listen address (host:port).
    pub listen: String,
    /// Directory for persisted server data.
    pub data_dir: String,
    /// TLS certificate path.
    pub tls_cert: PathBuf,
    /// TLS private key path.
    pub tls_key: PathBuf,
    /// Required bearer token, if one is configured.
    pub auth_token: Option<String>,
    /// Allow running without an auth token (development only).
    pub allow_insecure_auth: bool,
    /// When true, enqueue does not require identity; valid token only (Sealed Sender).
    pub sealed_sender: bool,
    /// Storage backend selector ("file" or "sql").
    pub store_backend: String,
    /// Path to the SQLCipher database file.
    pub db_path: PathBuf,
    /// SQLCipher encryption key (empty string disables encryption).
    pub db_key: String,
    /// If Some(addr), metrics server listens here (e.g. "0.0.0.0:9090").
    pub metrics_listen: Option<String>,
    /// Start metrics server only when true and metrics_listen is set.
    pub metrics_enabled: bool,
}
/// Load the TOML config file.
///
/// When `path` is `None`, looks for `quicnprotochat-server.toml` in the
/// working directory and silently falls back to defaults if absent. When a
/// path IS explicitly supplied, a missing file is now an error — previously
/// it was silently ignored, so a typo in `--config`/`QUICNPROTOCHAT_CONFIG`
/// dropped the entire configuration without any diagnostic.
///
/// # Errors
///
/// Fails when an explicitly supplied path does not exist, or when the file
/// cannot be read or parsed as TOML.
pub fn load_config(path: Option<&Path>) -> anyhow::Result<FileConfig> {
    let (path, explicit) = match path {
        Some(p) => (p.to_path_buf(), true),
        None => (PathBuf::from("quicnprotochat-server.toml"), false),
    };
    if !path.exists() {
        if explicit {
            anyhow::bail!("config file {path:?} does not exist");
        }
        // Implicit default path: absence just means "use built-in defaults".
        return Ok(FileConfig::default());
    }
    let contents =
        std::fs::read_to_string(&path).with_context(|| format!("read config file {path:?}"))?;
    let cfg: FileConfig =
        toml::from_str(&contents).with_context(|| format!("parse config file {path:?}"))?;
    Ok(cfg)
}
/// Pick the CLI value when it was explicitly overridden (i.e. differs from
/// its compiled-in default), otherwise fall back to the config-file value,
/// and finally to the default itself.
///
/// This is the precedence rule every scalar option below follows; it was
/// previously hand-written seven times, which made the arms easy to get
/// subtly out of sync.
fn pick<T: Clone + PartialEq>(cli: &T, default: T, file: Option<T>) -> T {
    if *cli == default {
        file.unwrap_or(default)
    } else {
        cli.clone()
    }
}

/// Merge CLI arguments with config-file values into the final configuration.
///
/// Precedence: explicit CLI flag > config file > compiled-in default.
/// Booleans are OR-combined (a CLI `true` always wins); `Option` fields use
/// CLI-then-file fallback.
pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
    // Computed first because the metrics_enabled default depends on it.
    let metrics_listen = args
        .metrics_listen
        .clone()
        .or_else(|| file.metrics_listen.clone());
    EffectiveConfig {
        listen: pick(&args.listen, DEFAULT_LISTEN.to_string(), file.listen.clone()),
        data_dir: pick(
            &args.data_dir,
            DEFAULT_DATA_DIR.to_string(),
            file.data_dir.clone(),
        ),
        tls_cert: pick(
            &args.tls_cert,
            PathBuf::from(DEFAULT_TLS_CERT),
            file.tls_cert.clone(),
        ),
        tls_key: pick(
            &args.tls_key,
            PathBuf::from(DEFAULT_TLS_KEY),
            file.tls_key.clone(),
        ),
        auth_token: args.auth_token.clone().or_else(|| file.auth_token.clone()),
        allow_insecure_auth: args.allow_insecure_auth || file.allow_insecure_auth.unwrap_or(false),
        sealed_sender: args.sealed_sender || file.sealed_sender.unwrap_or(false),
        store_backend: pick(
            &args.store_backend,
            DEFAULT_STORE_BACKEND.to_string(),
            file.store_backend.clone(),
        ),
        db_path: pick(
            &args.db_path,
            PathBuf::from(DEFAULT_DB_PATH),
            file.db_path.clone(),
        ),
        // CLI default for db_key is the empty string, so "empty" means
        // "not overridden" and the file value (if any) wins.
        db_key: pick(&args.db_key, String::new(), file.db_key.clone()),
        // Metrics default to enabled exactly when a listen address is set.
        metrics_enabled: args
            .metrics_enabled
            .or(file.metrics_enabled)
            .unwrap_or(metrics_listen.is_some()),
        metrics_listen,
    }
}
/// Enforce the hard requirements for running in production mode.
///
/// # Errors
///
/// Fails when the auth token is missing/empty or is the well-known dev
/// value, when the SQL backend is selected without an encryption key, or
/// when the TLS material does not already exist on disk.
pub fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
    let token = match effective.auth_token.as_deref() {
        Some(t) if !t.is_empty() => t,
        _ => anyhow::bail!("production requires QUICNPROTOCHAT_AUTH_TOKEN (non-empty)"),
    };
    if token == "devtoken" {
        anyhow::bail!(
            "production forbids auth_token 'devtoken'; set a strong QUICNPROTOCHAT_AUTH_TOKEN"
        );
    }
    if effective.store_backend == "sql" && effective.db_key.is_empty() {
        anyhow::bail!("production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY");
    }
    // Production never auto-generates TLS material; both files must exist.
    let tls_present = effective.tls_cert.exists() && effective.tls_key.exists();
    if !tls_present {
        anyhow::bail!(
            "production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"
        );
    }
    Ok(())
}

View File

@@ -24,21 +24,6 @@ pub const E018_USER_EXISTS: &str = "E018";
pub const E019_NO_PENDING_LOGIN: &str = "E019";
pub const E020_BAD_PARAMS: &str = "E020";
pub const E021_CIPHERSUITE_NOT_ALLOWED: &str = "E021";
pub const E022_CHANNEL_ACCESS_DENIED: &str = "E022";
pub const E023_CHANNEL_NOT_FOUND: &str = "E023";
pub const E024_BLOB_TOO_LARGE: &str = "E024";
pub const E025_BLOB_HASH_LENGTH: &str = "E025";
pub const E026_BLOB_HASH_MISMATCH: &str = "E026";
pub const E027_BLOB_NOT_FOUND: &str = "E027";
pub const E028_ACCOUNT_DELETION_FAILED: &str = "E028";
pub const E029_DEVICE_LIMIT: &str = "E029";
pub const E030_DEVICE_NOT_FOUND: &str = "E030";
#[allow(dead_code)] // used by v2 RPC moderation handlers
pub const E031_USER_BANNED: &str = "E031";
#[allow(dead_code)] // used by v2 RPC moderation handlers
pub const E032_REPORT_EMPTY: &str = "E032";
#[allow(dead_code)] // used by v2 RPC moderation handlers
pub const E033_ADMIN_REQUIRED: &str = "E033";
/// Build a `capnp::Error::failed()` with the structured code prefix.
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {

View File

@@ -0,0 +1,300 @@
//! quicnprotochat-server — unified Authentication + Delivery service.
//!
//! The server hosts Authentication + Delivery services over QUIC + Cap'n Proto.
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
use anyhow::Context;
use clap::Parser;
use dashmap::DashMap;
use opaque_ke::ServerSetup;
use quicnprotochat_core::opaque_auth::OpaqueSuite;
use quinn::Endpoint;
use rand::rngs::OsRng;
use tokio::sync::Notify;
use tokio::task::LocalSet;
mod auth;
mod config;
mod error_codes;
mod metrics;
mod node_service;
mod sql_store;
mod tls;
mod storage;
use auth::{AuthConfig, PendingLogin, RateLimiter, SessionInfo, RATE_LIMIT_MAX_PER_SEC};
use config::{
load_config, merge_config, validate_production_config, DEFAULT_DATA_DIR, DEFAULT_DB_PATH,
DEFAULT_LISTEN, DEFAULT_STORE_BACKEND, DEFAULT_TLS_CERT, DEFAULT_TLS_KEY,
};
use node_service::{handle_node_connection, spawn_cleanup_task};
use sql_store::SqlStore;
use storage::{FileBackedStore, Store};
use tls::build_server_config;
// ── CLI ───────────────────────────────────────────────────────────────────────
/// Command-line arguments. Every flag is also settable via the environment
/// variable named in its `env = …` attribute; a TOML config file supplies
/// fallbacks (see `config::merge_config` for precedence).
#[derive(Debug, Parser)]
#[command(
    name = "quicnprotochat-server",
    about = "quicnprotochat Delivery Service + Authentication Service",
    version
)]
struct Args {
    /// Optional path to a TOML config file (fields map to CLI flags).
    #[arg(long, env = "QUICNPROTOCHAT_CONFIG")]
    config: Option<PathBuf>,
    /// QUIC listen address (host:port).
    #[arg(long, default_value = DEFAULT_LISTEN, env = "QUICNPROTOCHAT_LISTEN")]
    listen: String,
    /// Directory for persisted server data (KeyPackages + delivery queues).
    #[arg(long, default_value = DEFAULT_DATA_DIR, env = "QUICNPROTOCHAT_DATA_DIR")]
    data_dir: String,
    /// TLS certificate path (generated automatically if missing).
    #[arg(long, default_value = DEFAULT_TLS_CERT, env = "QUICNPROTOCHAT_TLS_CERT")]
    tls_cert: PathBuf,
    /// TLS private key path (generated automatically if missing).
    #[arg(long, default_value = DEFAULT_TLS_KEY, env = "QUICNPROTOCHAT_TLS_KEY")]
    tls_key: PathBuf,
    /// Required bearer token for auth.version=1 requests. Use --allow-insecure-auth to run without it (dev only).
    #[arg(long, env = "QUICNPROTOCHAT_AUTH_TOKEN")]
    auth_token: Option<String>,
    /// Allow running without QUICNPROTOCHAT_AUTH_TOKEN (development only).
    #[arg(long, env = "QUICNPROTOCHAT_ALLOW_INSECURE_AUTH", default_value_t = false)]
    allow_insecure_auth: bool,
    /// Enable Sealed Sender: enqueue does not require identity-bound session, only a valid token.
    #[arg(long, env = "QUICNPROTOCHAT_SEALED_SENDER", default_value_t = false)]
    sealed_sender: bool,
    /// Storage backend: "file" (bincode) or "sql" (SQLCipher-encrypted).
    #[arg(long, default_value = DEFAULT_STORE_BACKEND, env = "QUICNPROTOCHAT_STORE_BACKEND")]
    store_backend: String,
    /// Path to the SQLCipher database file (only used when --store-backend=sql).
    #[arg(long, default_value = DEFAULT_DB_PATH, env = "QUICNPROTOCHAT_DB_PATH")]
    db_path: PathBuf,
    /// SQLCipher encryption key. Empty string disables encryption.
    #[arg(long, default_value = "", env = "QUICNPROTOCHAT_DB_KEY")]
    db_key: String,
    /// Metrics HTTP listen address (e.g. 0.0.0.0:9090). If set and metrics enabled, /metrics is served.
    #[arg(long, env = "QUICNPROTOCHAT_METRICS_LISTEN")]
    metrics_listen: Option<String>,
    /// Enable metrics server when metrics_listen is set.
    #[arg(long, env = "QUICNPROTOCHAT_METRICS_ENABLED")]
    metrics_enabled: Option<bool>,
}
// ── Entry point ───────────────────────────────────────────────────────────────
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Install the ring crypto provider for rustls; result intentionally
    // ignored so a provider already installed elsewhere is not an error.
    let _ = rustls::crypto::ring::default_provider().install_default();
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();
    let args = Args::parse();
    let file_cfg = load_config(args.config.as_deref())?;
    // CLI flags take precedence over config-file values (see merge_config).
    let effective = merge_config(&args, &file_cfg);
    let production = std::env::var("QUICNPROTOCHAT_PRODUCTION")
        .map(|v| matches!(v.to_lowercase().as_str(), "1" | "true" | "yes"))
        .unwrap_or(false);
    if production {
        validate_production_config(&effective)?;
    }
    // Optional metrics server: only start when metrics_enabled and metrics_listen are set.
    if effective.metrics_enabled {
        if let Some(addr_str) = &effective.metrics_listen {
            let addr: std::net::SocketAddr = addr_str
                .parse()
                .context("metrics_listen must be host:port (e.g. 0.0.0.0:9090)")?;
            metrics_exporter_prometheus::PrometheusBuilder::new()
                .with_http_listener(addr)
                .install()
                .context("failed to install Prometheus metrics exporter")?;
            tracing::info!(addr = %addr_str, "metrics server listening on /metrics");
        }
    }
    // In non-production, require an explicit opt-out before running without a static token.
    if !production
        && effective
            .auth_token
            .as_deref()
            .map(|s| s.is_empty())
            .unwrap_or(true)
        && !effective.allow_insecure_auth
    {
        anyhow::bail!(
            "missing QUICNPROTOCHAT_AUTH_TOKEN; set one or pass --allow-insecure-auth for development"
        );
    }
    if effective.allow_insecure_auth
        && effective
            .auth_token
            .as_deref()
            .map(|s| s.is_empty())
            .unwrap_or(true)
    {
        tracing::warn!("running without QUICNPROTOCHAT_AUTH_TOKEN (allow-insecure-auth enabled); development only");
    }
    let listen: SocketAddr = effective
        .listen
        .parse()
        .context("--listen must be host:port")?;
    let server_config = build_server_config(&effective.tls_cert, &effective.tls_key, production)
        .context("failed to build TLS/QUIC server config")?;
    // Shared storage — persisted to disk for restart safety.
    let store: Arc<dyn Store> = match effective.store_backend.as_str() {
        "sql" => {
            if let Some(parent) = effective.db_path.parent() {
                std::fs::create_dir_all(parent).context("create db dir")?;
            }
            tracing::info!(
                path = %effective.db_path.display(),
                encrypted = !effective.db_key.is_empty(),
                "opening SQLCipher store"
            );
            if effective.db_key.is_empty() {
                tracing::warn!("db_key is empty; SQL store will be plaintext (development only)");
            }
            Arc::new(SqlStore::open(&effective.db_path, &effective.db_key)?)
        }
        // NOTE(review): any unrecognized backend string silently falls back
        // to "file" here; consider rejecting unknown values instead.
        "file" | _ => {
            tracing::info!(dir = %effective.data_dir, "opening file-backed store");
            Arc::new(FileBackedStore::open(&effective.data_dir)?)
        }
    };
    let auth_cfg = Arc::new(AuthConfig::new(
        effective.auth_token.clone(),
        effective.allow_insecure_auth,
    ));
    // Per-recipient Notify handles so deliveries can wake blocked receivers.
    let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = Arc::new(DashMap::new());
    // OPAQUE ServerSetup: load from storage or generate fresh.
    let opaque_setup: Arc<ServerSetup<OpaqueSuite>> = match store.get_server_setup() {
        Ok(Some(bytes)) => {
            let setup = ServerSetup::<OpaqueSuite>::deserialize(&bytes)
                .map_err(|e| anyhow::anyhow!("corrupt OPAQUE server setup: {e}"))?;
            tracing::info!("loaded persisted OPAQUE ServerSetup");
            Arc::new(setup)
        }
        Ok(None) => {
            // First run: generate once and persist so registrations survive restarts.
            let setup = ServerSetup::<OpaqueSuite>::new(&mut OsRng);
            let bytes = setup.serialize().to_vec();
            store
                .store_server_setup(bytes)
                .context("persist OPAQUE ServerSetup")?;
            tracing::info!("generated and persisted new OPAQUE ServerSetup");
            Arc::new(setup)
        }
        Err(e) => return Err(anyhow::anyhow!("load OPAQUE server setup: {e}")),
    };
    let pending_logins: Arc<DashMap<String, PendingLogin>> = Arc::new(DashMap::new());
    let sessions: Arc<DashMap<Vec<u8>, SessionInfo>> = Arc::new(DashMap::new());
    // Keyed GCRA rate limiter (one bucket per session token).
    let rate_limiter: Arc<RateLimiter> = Arc::new(governor::RateLimiter::keyed(
        governor::Quota::per_second(RATE_LIMIT_MAX_PER_SEC),
    ));
    // Background cleanup task (expire sessions, pending logins, and stale messages).
    // Governor's DashMapStateStore handles rate-limit cleanup automatically.
    spawn_cleanup_task(
        Arc::clone(&sessions),
        Arc::clone(&pending_logins),
        Arc::clone(&store),
    );
    let endpoint = Endpoint::server(server_config, listen)?;
    tracing::info!(
        addr = %effective.listen,
        "accepting QUIC connections"
    );
    // capnp-rpc is !Send (Rc internals), so all RPC tasks must stay on a LocalSet.
    let local = LocalSet::new();
    local
        .run_until(async move {
            loop {
                tokio::select! {
                    // `biased` polls arms in order, so incoming connections are
                    // drained ahead of the shutdown-signal check.
                    biased;
                    incoming = endpoint.accept() => {
                        let incoming = match incoming {
                            Some(i) => i,
                            // Endpoint closed: no more connections will arrive.
                            None => break,
                        };
                        let connecting = match incoming.accept() {
                            Ok(c) => c,
                            Err(e) => {
                                tracing::warn!(error = %e, "failed to accept incoming connection");
                                continue;
                            }
                        };
                        // Clone all shared state into the per-connection task.
                        let store = Arc::clone(&store);
                        let waiters = Arc::clone(&waiters);
                        let auth_cfg = Arc::clone(&auth_cfg);
                        let opaque_setup = Arc::clone(&opaque_setup);
                        let pending_logins = Arc::clone(&pending_logins);
                        let sessions = Arc::clone(&sessions);
                        let rate_limiter = Arc::clone(&rate_limiter);
                        let sealed_sender = effective.sealed_sender;
                        // spawn_local (not spawn): the handler holds !Send capnp-rpc state.
                        tokio::task::spawn_local(async move {
                            if let Err(e) = handle_node_connection(
                                connecting,
                                store,
                                waiters,
                                auth_cfg,
                                opaque_setup,
                                pending_logins,
                                sessions,
                                rate_limiter,
                                sealed_sender,
                            )
                            .await
                            {
                                tracing::warn!(error = %e, "connection error");
                            }
                        });
                    }
                    _ = tokio::signal::ctrl_c() => {
                        tracing::info!("shutdown signal received, draining QUIC connections");
                        endpoint.close(0u32.into(), b"server shutdown");
                        break;
                    }
                }
            }
            Ok::<(), anyhow::Error>(())
        })
        .await?;
    Ok(())
}

View File

@@ -47,25 +47,3 @@ pub fn record_auth_login_failure_total() {
pub fn record_rate_limit_hit_total() {
metrics::counter!("rate_limit_hit_total").increment(1);
}
// ── Storage operation latency ───────────────────────────────────────────────
/// Record storage operation latency. Called by instrumented Store wrappers.
pub fn record_storage_latency(operation: &'static str, duration: std::time::Duration) {
metrics::histogram!("storage_operation_duration_seconds", "op" => operation)
.record(duration.as_secs_f64());
}
// ── In-flight RPCs ────────────────────────────────────────────────────────
/// Record the current number of in-flight RPCs (connections being served).
pub fn record_in_flight_rpcs(count: usize) {
metrics::gauge!("server_in_flight_rpcs").set(count as f64);
}
// ── Server info ────────────────────────────────────────────────────────────
/// Record the server uptime in seconds (set periodically).
pub fn record_uptime_seconds(secs: f64) {
metrics::gauge!("server_uptime_seconds").set(secs);
}

View File

@@ -3,16 +3,14 @@ use opaque_ke::{
CredentialFinalization, CredentialRequest, RegistrationRequest, RegistrationUpload,
ServerLogin, ServerRegistration,
};
use quicprochat_core::opaque_auth::OpaqueSuite;
use quicprochat_proto::node_capnp::node_service;
use quicnprotochat_core::opaque_auth::OpaqueSuite;
use quicnprotochat_proto::node_capnp::node_service;
use crate::auth::{coded_error, current_timestamp, PendingLogin, SESSION_TTL_SECS};
use crate::error_codes::*;
use crate::metrics;
use crate::storage::StorageError;
use crate::hooks::AuthEvent;
use super::NodeServiceImpl;
// Audit events in this module must never include secrets (no session tokens, passwords, or raw keys).
@@ -21,16 +19,6 @@ fn storage_err(err: StorageError) -> capnp::Error {
coded_error(E009_STORAGE_ERROR, err)
}
/// Parse username from Cap'n Proto reader; requires valid UTF-8.
fn parse_username_param(
result: Result<capnp::text::Reader<'_>, capnp::Error>,
) -> Result<String, capnp::Error> {
let reader = result.map_err(|e| coded_error(E020_BAD_PARAMS, e))?;
reader
.to_string()
.map_err(|_| coded_error(E020_BAD_PARAMS, "username must be valid UTF-8"))
}
impl NodeServiceImpl {
pub fn handle_opaque_login_start(
&mut self,
@@ -41,9 +29,9 @@ impl NodeServiceImpl {
Ok(p) => p,
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let username = match parse_username_param(p.get_username()) {
Ok(s) => s,
Err(e) => return Promise::err(e),
let username = match p.get_username() {
Ok(v) => v.to_string().unwrap_or_default().to_string(),
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let request_bytes = match p.get_request() {
Ok(v) => v.to_vec(),
@@ -54,14 +42,6 @@ impl NodeServiceImpl {
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
}
// Check for existing recent pending login before expensive OPAQUE/storage work (DoS mitigation).
if let Some(existing) = self.pending_logins.get(&username) {
let age = current_timestamp().saturating_sub(existing.created_at);
if age < 60 {
return Promise::err(coded_error(E010_OPAQUE_ERROR, "login already in progress"));
}
}
let credential_request = match CredentialRequest::<OpaqueSuite>::deserialize(&request_bytes) {
Ok(r) => r,
Err(e) => {
@@ -82,7 +62,9 @@ impl NodeServiceImpl {
))
}
},
Ok(None) => None,
Ok(None) => {
return Promise::err(coded_error(E010_OPAQUE_ERROR, "user not registered"))
}
Err(e) => return Promise::err(storage_err(e)),
};
@@ -129,9 +111,9 @@ impl NodeServiceImpl {
Ok(p) => p,
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let username = match parse_username_param(p.get_username()) {
Ok(s) => s,
Err(e) => return Promise::err(e),
let username = match p.get_username() {
Ok(v) => v.to_string().unwrap_or_default().to_string(),
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let request_bytes = match p.get_request() {
Ok(v) => v.to_vec(),
@@ -189,9 +171,9 @@ impl NodeServiceImpl {
Ok(p) => p,
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let username = match parse_username_param(p.get_username()) {
Ok(s) => s,
Err(e) => return Promise::err(e),
let username = match p.get_username() {
Ok(v) => v.to_string().unwrap_or_default().to_string(),
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let finalization_bytes = match p.get_finalization() {
Ok(v) => v.to_vec(),
@@ -209,11 +191,6 @@ impl NodeServiceImpl {
// Audit: login failure — do not log secrets (no token, no password).
tracing::warn!(user = %username, "audit: auth login failure (no pending login)");
metrics::record_auth_login_failure_total();
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: false,
failure_reason: "no pending login".to_string(),
});
return Promise::err(coded_error(E019_NO_PENDING_LOGIN, "no pending login for this username"))
}
};
@@ -243,11 +220,6 @@ impl NodeServiceImpl {
Err(e) => {
tracing::warn!(user = %username, "audit: auth login failure (OPAQUE finish failed)");
metrics::record_auth_login_failure_total();
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: false,
failure_reason: format!("OPAQUE finish failed: {e}"),
});
return Promise::err(coded_error(
E010_OPAQUE_ERROR,
format!("OPAQUE login finish failed (bad password?): {e}"),
@@ -267,11 +239,6 @@ impl NodeServiceImpl {
if stored_ik != identity_key {
tracing::warn!(user = %username, "audit: auth login failure (identity mismatch)");
metrics::record_auth_login_failure_total();
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: false,
failure_reason: "identity key mismatch".to_string(),
});
return Promise::err(coded_error(
E016_IDENTITY_MISMATCH,
"identity key does not match registered key",
@@ -296,13 +263,6 @@ impl NodeServiceImpl {
results.get().set_session_token(&token_vec);
// Hook: on_auth — fires after successful login.
self.hooks.on_auth(&AuthEvent {
username: username.clone(),
success: true,
failure_reason: String::new(),
});
// Audit: login success — do not log session token or any secrets.
metrics::record_auth_login_success_total();
tracing::info!(user = %username, "audit: auth login success — session token issued");
@@ -318,9 +278,9 @@ impl NodeServiceImpl {
Ok(p) => p,
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let username = match parse_username_param(p.get_username()) {
Ok(s) => s,
Err(e) => return Promise::err(e),
let username = match p.get_username() {
Ok(v) => v.to_string().unwrap_or_default().to_string(),
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let upload_bytes = match p.get_upload() {
Ok(v) => v.to_vec(),
@@ -332,6 +292,16 @@ impl NodeServiceImpl {
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
}
let _request = match RegistrationRequest::<OpaqueSuite>::deserialize(&upload_bytes) {
Ok(r) => r,
Err(e) => {
return Promise::err(coded_error(
E010_OPAQUE_ERROR,
format!("invalid registration upload: {e}"),
))
}
};
match self.store.has_user_record(&username) {
Ok(true) => {
return Promise::err(coded_error(
@@ -356,53 +326,22 @@ impl NodeServiceImpl {
let password_file = ServerRegistration::<OpaqueSuite>::finish(upload);
let record_bytes = password_file.serialize().to_vec();
match self
if let Err(e) = self
.store
.store_user_record(&username, record_bytes)
.map_err(storage_err)
{
Ok(()) => {}
Err(crate::storage::StorageError::DuplicateUser(_)) => {
return Promise::err(coded_error(
E018_USER_EXISTS,
format!("user '{}' already registered", username),
))
}
Err(e) => return Promise::err(storage_err(e)),
return Promise::err(e);
}
// Hook: on_user_registered — fires after successful registration.
self.hooks.on_user_registered(&username, &identity_key);
if !identity_key.is_empty() {
if let Err(e) = self
.store
.store_user_identity_key(&username, identity_key.clone())
.store_user_identity_key(&username, identity_key)
.map_err(storage_err)
{
return Promise::err(e);
}
// Append (username, identity_key) to the Key Transparency Merkle log.
match self.kt_log.lock() {
Ok(mut log) => {
log.append(&username, &identity_key);
// Persist after each append (small extra cost, but ensures durability).
match log.to_bytes() {
Ok(bytes) => {
if let Err(e) = self.store.save_kt_log(bytes) {
tracing::warn!(user = %username, error = %e, "KT log persist failed");
}
}
Err(e) => {
tracing::warn!(user = %username, error = %e, "KT log serialise failed");
}
}
tracing::info!(user = %username, tree_size = log.len(), "KT: appended identity binding");
}
Err(e) => {
tracing::warn!(user = %username, error = %e, "KT log lock poisoned; skipping append");
}
}
}
results.get().set_success(true);

View File

@@ -0,0 +1,318 @@
use std::sync::Arc;
use std::time::Duration;
use capnp::capability::Promise;
use dashmap::DashMap;
use quicnprotochat_proto::node_capnp::node_service;
use tokio::sync::Notify;
use tokio::time::timeout;
use crate::auth::{
check_rate_limit, coded_error, fmt_hex, require_identity_or_request, validate_auth_context,
};
use crate::error_codes::*;
use crate::metrics;
use crate::storage::{StorageError, Store};
use super::{NodeServiceImpl, CURRENT_WIRE_VERSION};
// Audit events here must not include secrets: no payload content, no full recipient/token bytes (prefix only).
/// Hard cap on a single enqueued payload.
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
/// Hard cap on undelivered messages per (recipient, channel) inbox.
const MAX_QUEUE_DEPTH: usize = 1000;
/// Wrap a storage-layer failure in a coded Cap'n Proto error (E009).
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
/// Copy fetched `(seq, payload)` pairs into the fetchWait results payload list.
pub fn fill_payloads_wait(
    results: &mut node_service::FetchWaitResults,
    messages: Vec<(u64, Vec<u8>)>,
) {
    let count = messages.len() as u32;
    let mut out = results.get().init_payloads(count);
    for (idx, (seq, data)) in messages.iter().enumerate() {
        let mut slot = out.reborrow().get(idx as u32);
        slot.set_seq(*seq);
        slot.set_data(data);
    }
}
impl NodeServiceImpl {
/// Enqueue one encrypted payload into the (`recipient_key`, `channel_id`) inbox.
///
/// Validation order: params → auth token → recipient-key length → payload
/// presence/size → wire version → rate limit → (unless Sealed Sender)
/// identity binding → queue-depth cap. On success the assigned per-inbox
/// sequence number is returned and any `fetchWait` waiters are woken.
pub fn handle_enqueue(
    &mut self,
    params: node_service::EnqueueParams,
    mut results: node_service::EnqueueResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let recipient_key = match p.get_recipient_key() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let payload = match p.get_payload() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // A missing channelId falls back to the default (empty) channel.
    let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
    let version = p.get_version();
    let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
        Ok(ctx) => ctx,
        Err(e) => return Promise::err(e),
    };
    if recipient_key.len() != 32 {
        return Promise::err(coded_error(
            E004_IDENTITY_KEY_LENGTH,
            format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
        ));
    }
    if payload.is_empty() {
        return Promise::err(coded_error(E005_PAYLOAD_EMPTY, "payload must not be empty"));
    }
    if payload.len() > MAX_PAYLOAD_BYTES {
        return Promise::err(coded_error(
            E006_PAYLOAD_TOO_LARGE,
            format!("payload exceeds max size ({} bytes)", MAX_PAYLOAD_BYTES),
        ));
    }
    if version != CURRENT_WIRE_VERSION {
        return Promise::err(coded_error(
            E012_WIRE_VERSION,
            format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
        ));
    }
    if let Err(e) = check_rate_limit(&self.rate_limiter, &auth_ctx.token) {
        // Audit: rate limit hit — do not log token or identity.
        tracing::warn!("rate_limit_hit");
        metrics::record_rate_limit_hit_total();
        return Promise::err(e);
    }
    // When sealed_sender is true, enqueue does not require identity; valid token only.
    if !self.sealed_sender {
        if let Err(e) = require_identity_or_request(
            &auth_ctx,
            &recipient_key,
            self.auth_cfg.allow_insecure_identity_from_request,
        ) {
            return Promise::err(e);
        }
    }
    // Backpressure: reject before writing if the inbox is already at capacity.
    match self.store.queue_depth(&recipient_key, &channel_id) {
        Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
            return Promise::err(coded_error(
                E015_QUEUE_FULL,
                format!("queue depth {} exceeds limit {}", depth, MAX_QUEUE_DEPTH),
            ));
        }
        Err(e) => return Promise::err(storage_err(e)),
        _ => {}
    }
    // Length captured before `payload` is moved into the store.
    let payload_len = payload.len();
    let seq = match self
        .store
        .enqueue(&recipient_key, &channel_id, payload)
        .map_err(storage_err)
    {
        Ok(seq) => seq,
        Err(e) => return Promise::err(e),
    };
    results.get().set_seq(seq);
    // Metrics and audit. Audit events must not include secrets (no payload, no full keys).
    metrics::record_enqueue_total();
    metrics::record_enqueue_bytes(payload_len as u64);
    if let Ok(depth) = self.store.queue_depth(&recipient_key, &channel_id) {
        metrics::record_delivery_queue_depth(depth);
    }
    tracing::info!(
        recipient_prefix = %fmt_hex(&recipient_key[..4]),
        payload_len = payload_len,
        seq = seq,
        "audit: enqueue"
    );
    // Wake any long-poll `fetchWait` caller blocked on this recipient.
    crate::auth::waiter(&self.waiters, &recipient_key).notify_waiters();
    Promise::ok(())
}
/// Drain queued payloads for a recipient inbox (destructive read).
///
/// Requires a valid auth token whose session identity matches
/// `recipient_key` (or the insecure request-identity fallback when
/// explicitly enabled). `limit == 0` means "fetch everything".
///
/// Consistency fix: params are parsed once up front — like `handle_enqueue`
/// and `handle_fetch_wait` — instead of repeatedly calling `params.get()`
/// with `.ok()` and silently discarding parse errors. Error ordering is
/// unchanged: bad params → auth → key length → wire version → identity.
pub fn handle_fetch(
    &mut self,
    params: node_service::FetchParams,
    mut results: node_service::FetchResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let recipient_key = match p.get_recipient_key() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // A missing channelId falls back to the default (empty) channel.
    let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
    let version = p.get_version();
    let limit = p.get_limit();
    let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
        Ok(ctx) => ctx,
        Err(e) => return Promise::err(e),
    };
    if recipient_key.len() != 32 {
        return Promise::err(coded_error(
            E004_IDENTITY_KEY_LENGTH,
            format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
        ));
    }
    if version != CURRENT_WIRE_VERSION {
        return Promise::err(coded_error(
            E012_WIRE_VERSION,
            format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
        ));
    }
    // Fetch always requires an identity-bound session for the recipient.
    if let Err(e) = require_identity_or_request(
        &auth_ctx,
        &recipient_key,
        self.auth_cfg.allow_insecure_identity_from_request,
    ) {
        return Promise::err(e);
    }
    let fetched = if limit > 0 {
        self.store
            .fetch_limited(&recipient_key, &channel_id, limit as usize)
    } else {
        self.store.fetch(&recipient_key, &channel_id)
    };
    let messages = match fetched.map_err(storage_err) {
        Ok(m) => m,
        Err(e) => return Promise::err(e),
    };
    // Audit: fetch — do not log payload or full keys.
    metrics::record_fetch_total();
    tracing::info!(
        recipient_prefix = %fmt_hex(&recipient_key[..4]),
        count = messages.len(),
        "audit: fetch"
    );
    let mut list = results.get().init_payloads(messages.len() as u32);
    for (i, (seq, data)) in messages.iter().enumerate() {
        let mut entry = list.reborrow().get(i as u32);
        entry.set_seq(*seq);
        entry.set_data(data);
    }
    Promise::ok(())
}
/// Long-poll fetch: drain immediately if the inbox has messages; otherwise
/// wait up to `timeout_ms` for an enqueue notification, then drain once more.
///
/// NOTE(review): the drain is destructive at the store layer (rows deleted on
/// read) — confirm any stronger delivery guarantee is provided by callers.
pub fn handle_fetch_wait(
    &mut self,
    params: node_service::FetchWaitParams,
    mut results: node_service::FetchWaitResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let recipient_key = match p.get_recipient_key() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // A missing channelId falls back to the default (empty) channel.
    let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
    let version = p.get_version();
    let timeout_ms = p.get_timeout_ms();
    let limit = p.get_limit();
    let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
        Ok(ctx) => ctx,
        Err(e) => return Promise::err(e),
    };
    if recipient_key.len() != 32 {
        return Promise::err(coded_error(
            E004_IDENTITY_KEY_LENGTH,
            format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
        ));
    }
    if version != CURRENT_WIRE_VERSION {
        return Promise::err(coded_error(
            E012_WIRE_VERSION,
            format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
        ));
    }
    // Long-poll always requires an identity-bound session for the recipient.
    if let Err(e) = require_identity_or_request(
        &auth_ctx,
        &recipient_key,
        self.auth_cfg.allow_insecure_identity_from_request,
    ) {
        return Promise::err(e);
    }
    // Clone the shared handles so the async block can be moved off `self`.
    let store = Arc::clone(&self.store);
    let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = self.waiters.clone();
    Promise::from_future(async move {
        // limit == 0 means "no limit"; otherwise fetch at most `limit` messages.
        let fetch_fn = |s: &Arc<dyn Store>, rk: &[u8], ch: &[u8], lim: u32| -> Result<Vec<(u64, Vec<u8>)>, capnp::Error> {
            if lim > 0 {
                s.fetch_limited(rk, ch, lim as usize).map_err(storage_err)
            } else {
                s.fetch(rk, ch).map_err(storage_err)
            }
        };
        let messages = fetch_fn(&store, &recipient_key, &channel_id, limit)?;
        if messages.is_empty() && timeout_ms > 0 {
            // Park on this recipient's Notify; enqueue calls notify_waiters().
            let waiter = waiters
                .entry(recipient_key.clone())
                .or_insert_with(|| Arc::new(Notify::new()))
                .clone();
            // Timeout expiry is not an error — we simply re-check the queue.
            let _ = timeout(Duration::from_millis(timeout_ms), waiter.notified()).await;
            let msgs = fetch_fn(&store, &recipient_key, &channel_id, limit)?;
            fill_payloads_wait(&mut results, msgs);
            metrics::record_fetch_wait_total();
            return Ok(());
        }
        fill_payloads_wait(&mut results, messages);
        metrics::record_fetch_wait_total();
        Ok(())
    })
}
}

View File

@@ -1,5 +1,5 @@
use capnp::capability::Promise;
use quicprochat_proto::node_capnp::node_service;
use quicnprotochat_proto::node_capnp::node_service;
use crate::auth::{coded_error, fmt_hex, require_identity_or_request, validate_auth_context};
use crate::error_codes::*;
@@ -12,7 +12,7 @@ fn storage_err(err: StorageError) -> capnp::Error {
coded_error(E009_STORAGE_ERROR, err)
}
const MAX_KEYPACKAGE_BYTES: usize = 1024 * 1024; // 1 MB cap per KeyPackage
const MAX_KEYPACKAGE_BYTES: usize = 1 * 1024 * 1024; // 1 MB cap per KeyPackage
impl NodeServiceImpl {
pub fn handle_upload_key_package(
@@ -63,7 +63,7 @@ impl NodeServiceImpl {
return Promise::err(e);
}
if let Err(e) = quicprochat_core::validate_keypackage_ciphersuite(&package) {
if let Err(e) = quicnprotochat_core::validate_keypackage_ciphersuite(&package) {
return Promise::err(coded_error(
E021_CIPHERSUITE_NOT_ALLOWED,
format!("KeyPackage ciphersuite not allowed: {e}"),
@@ -214,10 +214,10 @@ impl NodeServiceImpl {
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
// Auth check only — any authenticated user can fetch any peer's hybrid public key.
if let Err(e) = validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
return Promise::err(e);
}
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
Ok(ctx) => ctx,
Err(e) => return Promise::err(e),
};
if identity_key.len() != 32 {
return Promise::err(coded_error(
@@ -226,6 +226,14 @@ impl NodeServiceImpl {
));
}
if let Err(e) = require_identity_or_request(
&auth_ctx,
&identity_key,
self.auth_cfg.allow_insecure_identity_from_request,
) {
return Promise::err(e);
}
let hybrid_pk = match self
.store
.fetch_hybrid_key(&identity_key)
@@ -248,47 +256,4 @@ impl NodeServiceImpl {
Promise::ok(())
}
/// Batch lookup of hybrid (post-quantum) public keys for a list of identities.
///
/// Any authenticated caller may query. Unknown identities yield an empty
/// entry, so result positions always line up with the request order.
pub fn handle_fetch_hybrid_keys(
    &mut self,
    params: node_service::FetchHybridKeysParams,
    mut results: node_service::FetchHybridKeysResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let identity_keys = match p.get_identity_keys() {
        Ok(v) => v,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    if let Err(e) = validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
        return Promise::err(e);
    }
    let total = identity_keys.len();
    let mut resolved: Vec<Vec<u8>> = Vec::with_capacity(total as usize);
    for idx in 0..total {
        let ik = match identity_keys.get(idx) {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        match self.store.fetch_hybrid_key(&ik).map_err(storage_err) {
            Ok(Some(pk)) => resolved.push(pk),
            Ok(None) => resolved.push(Vec::new()),
            Err(e) => return Promise::err(e),
        }
    }
    let mut list = results.get().init_keys(resolved.len() as u32);
    for (idx, pk) in resolved.iter().enumerate() {
        list.set(idx as u32, pk);
    }
    tracing::debug!(count = resolved.len(), "batch hybrid key fetch");
    Promise::ok(())
}
}

View File

@@ -0,0 +1,244 @@
use std::sync::Arc;
use std::time::Duration;
use capnp_rpc::RpcSystem;
use dashmap::DashMap;
use opaque_ke::ServerSetup;
use quicnprotochat_core::opaque_auth::OpaqueSuite;
use quicnprotochat_proto::node_capnp::node_service;
use tokio::sync::Notify;
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use crate::auth::{
current_timestamp, AuthConfig, PendingLogin, RateLimiter, SessionInfo, PENDING_LOGIN_TTL_SECS,
};
use crate::storage::Store;
mod auth_ops;
mod delivery;
mod key_ops;
mod p2p_ops;
// Cap'n Proto service glue: every RPC method delegates 1:1 to the matching
// `handle_*` method on `NodeServiceImpl`, defined in the sibling modules
// (auth_ops, delivery, key_ops, p2p_ops). No logic lives here.
impl node_service::Server for NodeServiceImpl {
    fn upload_key_package(
        &mut self,
        params: node_service::UploadKeyPackageParams,
        results: node_service::UploadKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_key_package(params, results)
    }
    fn fetch_key_package(
        &mut self,
        params: node_service::FetchKeyPackageParams,
        results: node_service::FetchKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_key_package(params, results)
    }
    fn enqueue(
        &mut self,
        params: node_service::EnqueueParams,
        results: node_service::EnqueueResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_enqueue(params, results)
    }
    fn fetch(
        &mut self,
        params: node_service::FetchParams,
        results: node_service::FetchResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch(params, results)
    }
    fn fetch_wait(
        &mut self,
        params: node_service::FetchWaitParams,
        results: node_service::FetchWaitResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_wait(params, results)
    }
    fn health(
        &mut self,
        params: node_service::HealthParams,
        results: node_service::HealthResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_health(params, results)
    }
    fn upload_hybrid_key(
        &mut self,
        params: node_service::UploadHybridKeyParams,
        results: node_service::UploadHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_hybrid_key(params, results)
    }
    fn fetch_hybrid_key(
        &mut self,
        params: node_service::FetchHybridKeyParams,
        results: node_service::FetchHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_hybrid_key(params, results)
    }
    fn opaque_login_start(
        &mut self,
        params: node_service::OpaqueLoginStartParams,
        results: node_service::OpaqueLoginStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_start(params, results)
    }
    fn opaque_register_start(
        &mut self,
        params: node_service::OpaqueRegisterStartParams,
        results: node_service::OpaqueRegisterStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_start(params, results)
    }
    fn opaque_login_finish(
        &mut self,
        params: node_service::OpaqueLoginFinishParams,
        results: node_service::OpaqueLoginFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_finish(params, results)
    }
    fn opaque_register_finish(
        &mut self,
        params: node_service::OpaqueRegisterFinishParams,
        results: node_service::OpaqueRegisterFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_finish(params, results)
    }
    fn publish_endpoint(
        &mut self,
        params: node_service::PublishEndpointParams,
        results: node_service::PublishEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_publish_endpoint(params, results)
    }
    fn resolve_endpoint(
        &mut self,
        params: node_service::ResolveEndpointParams,
        results: node_service::ResolveEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_resolve_endpoint(params, results)
    }
}
/// Expected wire protocol version; requests carrying any other version are
/// rejected with E012_WIRE_VERSION by the handlers.
pub const CURRENT_WIRE_VERSION: u16 = 1;

/// Cap'n Proto `NodeService` implementation. One instance is created per QUIC
/// connection, but every field is `Arc`-shared server-wide state.
pub struct NodeServiceImpl {
    /// Persistent storage (key packages, delivery queues, user records).
    pub store: Arc<dyn Store>,
    /// Per-recipient wakeup handles used by `fetchWait` long-polling.
    pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    /// Authentication/session policy configuration.
    pub auth_cfg: Arc<AuthConfig>,
    /// Server-side OPAQUE setup shared by all registrations and logins.
    pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    /// In-flight OPAQUE login states keyed by username.
    pub pending_logins: Arc<DashMap<String, PendingLogin>>,
    /// Active session tokens mapped to their session metadata.
    pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    /// Shared request rate limiter.
    pub rate_limiter: Arc<RateLimiter>,
    /// When true, enqueue does not require identity-bound session (Sealed Sender).
    pub sealed_sender: bool,
}
impl NodeServiceImpl {
    /// Bundle the shared server state into a service instance. All arguments
    /// are `Arc`-shared (or `Copy`), so constructing one per connection is cheap.
    pub fn new(
        store: Arc<dyn Store>,
        waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
        auth_cfg: Arc<AuthConfig>,
        opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
        pending_logins: Arc<DashMap<String, PendingLogin>>,
        sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
        rate_limiter: Arc<RateLimiter>,
        sealed_sender: bool,
    ) -> Self {
        Self {
            store,
            waiters,
            auth_cfg,
            opaque_setup,
            pending_logins,
            sessions,
            rate_limiter,
            sealed_sender,
        }
    }
}
/// Serve one QUIC connection: accept the first bidirectional stream and run a
/// Cap'n Proto RPC system over it, exposing a fresh `NodeServiceImpl` until
/// the peer disconnects or the RPC system errors.
///
/// NOTE(review): only the first bi-directional stream is serviced per
/// connection — confirm clients multiplex all RPC calls over that stream.
pub async fn handle_node_connection(
    connecting: quinn::Connecting,
    store: Arc<dyn Store>,
    waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    auth_cfg: Arc<AuthConfig>,
    opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    pending_logins: Arc<DashMap<String, PendingLogin>>,
    sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    rate_limiter: Arc<RateLimiter>,
    sealed_sender: bool,
) -> Result<(), anyhow::Error> {
    let connection = connecting.await?;
    tracing::info!(peer = %connection.remote_address(), "QUIC connected");
    let (send, recv) = connection
        .accept_bi()
        .await
        .map_err(|e| anyhow::anyhow!("failed to accept bi stream: {e}"))?;
    // Adapt tokio-style async I/O to the futures-io traits capnp-rpc expects.
    let (reader, writer) = (recv.compat(), send.compat_write());
    let network = capnp_rpc::twoparty::VatNetwork::new(
        reader,
        writer,
        capnp_rpc::rpc_twoparty_capnp::Side::Server,
        Default::default(),
    );
    let service: node_service::Client = capnp_rpc::new_client(NodeServiceImpl::new(
        store,
        waiters,
        auth_cfg,
        opaque_setup,
        pending_logins,
        sessions,
        rate_limiter,
        sealed_sender,
    ));
    // Drives the RPC event loop; resolves when the connection closes.
    RpcSystem::new(Box::new(network), Some(service.client))
        .await
        .map_err(|e| anyhow::anyhow!("NodeService RPC error: {e}"))
}
/// Message retention: queued payloads older than this are garbage collected.
const MESSAGE_TTL_SECS: u64 = 7 * 24 * 60 * 60; // 7 days

/// Spawn the periodic (60 s) maintenance task: expire sessions, drop stale
/// pending OPAQUE logins, and GC delivery-queue messages older than
/// `MESSAGE_TTL_SECS`. Runs until the runtime shuts down.
pub fn spawn_cleanup_task(
    sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    pending_logins: Arc<DashMap<String, PendingLogin>>,
    store: Arc<dyn Store>,
) {
    tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(60));
        loop {
            interval.tick().await;
            let now = current_timestamp();
            sessions.retain(|_, info| info.expires_at > now);
            // saturating_sub: a pending login with created_at in the future
            // (clock adjustment) must not underflow — `now - created_at` would
            // panic in debug builds and wrap in release. Matches the age
            // computation used by the OPAQUE login-start handler.
            pending_logins
                .retain(|_, pl| now.saturating_sub(pl.created_at) < PENDING_LOGIN_TTL_SECS);
            // Rate limit cleanup is handled automatically by governor's DashMapStateStore.
            match store.gc_expired_messages(MESSAGE_TTL_SECS) {
                Ok(n) if n > 0 => {
                    tracing::debug!(expired = n, "garbage collected expired messages")
                }
                Err(e) => tracing::warn!(error = %e, "message GC failed"),
                _ => {}
            }
        }
    });
}

View File

@@ -1,5 +1,5 @@
use capnp::capability::Promise;
use quicprochat_proto::node_capnp::node_service;
use quicnprotochat_proto::node_capnp::node_service;
use crate::auth::{
coded_error, fmt_hex, require_identity_or_request, validate_auth, validate_auth_context,
@@ -14,7 +14,6 @@ fn storage_err(err: StorageError) -> capnp::Error {
}
impl NodeServiceImpl {
/// Health check: unauthenticated by design for liveness probes and load balancers.
pub fn handle_health(
&mut self,
_params: node_service::HealthParams,

View File

@@ -0,0 +1,548 @@
//! SQLCipher-backed persistent storage.
use std::path::Path;
use std::sync::Mutex;

use rusqlite::{params, Connection, OptionalExtension};

use crate::storage::{StorageError, Store};
/// Schema version after introducing the migration runner (existing DBs had 1).
const SCHEMA_VERSION: i32 = 3;

/// Migrations: (migration_number, SQL). Files named NNN_name.sql, applied in order when N > user_version.
/// NOTE(review): migration number 2 is intentionally absent here — number 3
/// maps to `002_add_seq.sql`; confirm against deployment history before renumbering.
const MIGRATIONS: &[(i32, &str)] = &[
    (1, include_str!("../migrations/001_initial.sql")),
    (3, include_str!("../migrations/002_add_seq.sql")),
];
/// Apply every migration whose number exceeds the connection's current
/// `PRAGMA user_version`, recording progress as it goes.
///
/// Fix: `user_version` is bumped to the migration's own number immediately
/// after each migration applies, so a failure partway through leaves an
/// accurate version on disk instead of silently re-running already-applied
/// migrations on the next start. The version always ends at `SCHEMA_VERSION`.
fn run_migrations(conn: &Connection) -> Result<(), StorageError> {
    let current_version: i32 = conn
        .pragma_query_value(None, "user_version", |row| row.get(0))
        .map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
    for (migration_num, sql) in MIGRATIONS {
        if *migration_num > current_version {
            conn.execute_batch(sql)
                .map_err(|e| StorageError::Db(format!("migration {migration_num} failed: {e}")))?;
            // Record each applied migration immediately for crash-safety.
            conn.pragma_update(None, "user_version", *migration_num)
                .map_err(|e| StorageError::Db(format!("set user_version failed: {e}")))?;
        }
    }
    conn.pragma_update(None, "user_version", SCHEMA_VERSION)
        .map_err(|e| StorageError::Db(format!("set user_version failed: {e}")))?;
    Ok(())
}
/// SQLCipher-encrypted storage backend.
pub struct SqlStore {
    // Single shared connection; all access is serialized through this Mutex.
    conn: Mutex<Connection>,
}
impl SqlStore {
    /// Acquire the connection mutex, mapping a poisoned lock to `StorageError::Db`.
    fn lock_conn(&self) -> Result<std::sync::MutexGuard<'_, Connection>, StorageError> {
        self.conn
            .lock()
            .map_err(|e| StorageError::Db(format!("lock poisoned: {e}")))
    }
    /// Open (or create) the database at `path`, optionally keyed with the
    /// SQLCipher passphrase `key`, and bring the schema up to date.
    ///
    /// The key PRAGMA is issued before any other statement so subsequent
    /// statements can read the encrypted file; an empty `key` skips
    /// encryption entirely. Refuses to open a database whose recorded schema
    /// version is newer than this build supports.
    pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
        let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
        if !key.is_empty() {
            conn.pragma_update(None, "key", key)
                .map_err(|e| StorageError::Db(format!("PRAGMA key failed: {e}")))?;
        }
        // WAL + NORMAL sync: concurrent-reader-friendly, durable-enough writes.
        conn.execute_batch(
            "PRAGMA journal_mode = WAL;
             PRAGMA synchronous = NORMAL;
             PRAGMA foreign_keys = ON;",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
        // Forward-compatibility guard: never run migrations on a newer schema.
        let current_version: i32 = conn
            .pragma_query_value(None, "user_version", |row| row.get(0))
            .map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
        if current_version > SCHEMA_VERSION {
            return Err(StorageError::Db(format!(
                "database schema version {current_version} is newer than supported {SCHEMA_VERSION}"
            )));
        }
        run_migrations(&conn)?;
        Ok(Self {
            conn: Mutex::new(conn),
        })
    }
}
impl Store for SqlStore {
/// Append one KeyPackage for `identity_key`; multiple packages per identity
/// may coexist and are consumed oldest-first by `fetch_key_package`.
fn upload_key_package(
    &self,
    identity_key: &[u8],
    package: Vec<u8>,
) -> Result<(), StorageError> {
    let db = self.lock_conn()?;
    db.execute(
        "INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
        params![identity_key, package],
    )
    .map(|_| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Pop the oldest stored KeyPackage for `identity_key` (one-time use):
/// the row is deleted as soon as it is read. Returns `None` when the
/// identity has no packages left.
///
/// NOTE(review): SELECT and DELETE are two statements under the same
/// connection lock; a crash between them would hand out the package again.
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
    let conn = self.lock_conn()?;
    let mut stmt = conn
        .prepare(
            "SELECT id, package_data FROM key_packages
             WHERE identity_key = ?1
             ORDER BY id ASC
             LIMIT 1",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let row = stmt
        .query_row(params![identity_key], |row| {
            Ok((row.get::<_, i64>(0)?, row.get::<_, Vec<u8>>(1)?))
        })
        .optional()
        .map_err(|e| StorageError::Db(e.to_string()))?;
    match row {
        Some((id, package)) => {
            // Consume the package so it can never be served twice.
            conn.execute("DELETE FROM key_packages WHERE id = ?1", params![id])
                .map_err(|e| StorageError::Db(e.to_string()))?;
            Ok(Some(package))
        }
        None => Ok(None),
    }
}
/// Append a payload to the (`recipient_key`, `channel_id`) inbox and return
/// its per-inbox sequence number. Sequence numbers start at 0 and increase
/// monotonically per inbox via the `delivery_seq_counters` upsert.
///
/// NOTE(review): the counter upsert and the delivery INSERT are two
/// statements (serialized by the connection Mutex); a crash in between only
/// produces a gap in sequence numbers, not a duplicate. The RETURNING clause
/// requires SQLite 3.35+ — confirm the bundled SQLCipher version.
fn enqueue(
    &self,
    recipient_key: &[u8],
    channel_id: &[u8],
    payload: Vec<u8>,
) -> Result<u64, StorageError> {
    let conn = self.lock_conn()?;
    // Atomically get-and-increment the per-inbox sequence counter.
    // RETURNING gives us the post-update next_seq; the assigned seq is next_seq - 1.
    let seq: i64 = conn
        .query_row(
            "INSERT INTO delivery_seq_counters (recipient_key, channel_id, next_seq)
             VALUES (?1, ?2, 1)
             ON CONFLICT(recipient_key, channel_id) DO UPDATE SET next_seq = next_seq + 1
             RETURNING next_seq - 1",
            params![recipient_key, channel_id],
            |row| row.get(0),
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
    conn.execute(
        "INSERT INTO deliveries (recipient_key, channel_id, seq, payload) VALUES (?1, ?2, ?3, ?4)",
        params![recipient_key, channel_id, seq, payload],
    )
    .map_err(|e| StorageError::Db(e.to_string()))?;
    Ok(seq as u64)
}
/// Destructively drain every queued payload for the given inbox, returned in
/// ascending sequence order; drained rows are deleted before returning.
fn fetch(
    &self,
    recipient_key: &[u8],
    channel_id: &[u8],
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
    let db = self.lock_conn()?;
    let mut stmt = db
        .prepare(
            "SELECT id, seq, payload FROM deliveries
             WHERE recipient_key = ?1 AND channel_id = ?2
             ORDER BY seq ASC",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let mapped = stmt
        .query_map(params![recipient_key, channel_id], |row| {
            Ok((row.get(0)?, row.get(1)?, row.get(2)?))
        })
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let mut drained: Vec<(i64, i64, Vec<u8>)> = Vec::new();
    for row in mapped {
        drained.push(row.map_err(|e| StorageError::Db(e.to_string()))?);
    }
    if !drained.is_empty() {
        // Delete exactly the rows we read, by primary key.
        let ids: Vec<i64> = drained.iter().map(|(id, _, _)| *id).collect();
        let placeholders = vec!["?"; ids.len()].join(",");
        let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
        let args: Vec<&dyn rusqlite::types::ToSql> = ids
            .iter()
            .map(|id| id as &dyn rusqlite::types::ToSql)
            .collect();
        db.execute(&sql, args.as_slice())
            .map_err(|e| StorageError::Db(e.to_string()))?;
    }
    Ok(drained
        .into_iter()
        .map(|(_, seq, payload)| (seq as u64, payload))
        .collect())
}
/// Destructively drain up to `limit` queued payloads in ascending sequence
/// order; drained rows are deleted before returning.
fn fetch_limited(
    &self,
    recipient_key: &[u8],
    channel_id: &[u8],
    limit: usize,
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
    let db = self.lock_conn()?;
    let mut stmt = db
        .prepare(
            "SELECT id, seq, payload FROM deliveries
             WHERE recipient_key = ?1 AND channel_id = ?2
             ORDER BY seq ASC
             LIMIT ?3",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let mapped = stmt
        .query_map(params![recipient_key, channel_id, limit as i64], |row| {
            Ok((row.get(0)?, row.get(1)?, row.get(2)?))
        })
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let mut drained: Vec<(i64, i64, Vec<u8>)> = Vec::new();
    for row in mapped {
        drained.push(row.map_err(|e| StorageError::Db(e.to_string()))?);
    }
    if !drained.is_empty() {
        // Delete exactly the rows we read, by primary key.
        let ids: Vec<i64> = drained.iter().map(|(id, _, _)| *id).collect();
        let placeholders = vec!["?"; ids.len()].join(",");
        let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
        let args: Vec<&dyn rusqlite::types::ToSql> = ids
            .iter()
            .map(|id| id as &dyn rusqlite::types::ToSql)
            .collect();
        db.execute(&sql, args.as_slice())
            .map_err(|e| StorageError::Db(e.to_string()))?;
    }
    Ok(drained
        .into_iter()
        .map(|(_, seq, payload)| (seq as u64, payload))
        .collect())
}
/// Number of messages currently queued for (`recipient_key`, `channel_id`).
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
    let db = self.lock_conn()?;
    db.query_row(
        "SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
        params![recipient_key, channel_id],
        |row| row.get::<_, i64>(0),
    )
    .map(|n| n as usize)
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Delete deliveries older than `max_age_secs`; returns how many were removed.
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
    let db = self.lock_conn()?;
    let now_secs = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    // saturating_sub guards against a TTL larger than the epoch timestamp.
    let cutoff = now_secs.saturating_sub(max_age_secs);
    db.execute(
        "DELETE FROM deliveries WHERE created_at < ?1",
        params![cutoff as i64],
    )
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Store (or replace) the hybrid public key published for `identity_key`.
fn upload_hybrid_key(
    &self,
    identity_key: &[u8],
    hybrid_pk: Vec<u8>,
) -> Result<(), StorageError> {
    let db = self.lock_conn()?;
    db.execute(
        "INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
        params![identity_key, hybrid_pk],
    )
    .map(|_| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Look up the hybrid public key for `identity_key`; `None` if never uploaded.
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
    let db = self.lock_conn()?;
    db.query_row(
        "SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1",
        params![identity_key],
        |row| row.get(0),
    )
    .optional()
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Persist the serialized OPAQUE server setup (singleton row, id = 1).
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
    let db = self.lock_conn()?;
    db.execute(
        "INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
        params![setup],
    )
    .map(|_| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
    // Read the singleton row; `None` when no setup was ever stored.
    let db = self.lock_conn()?;
    db.query_row(
        "SELECT setup_data FROM server_setup WHERE id = 1",
        [],
        |row| row.get(0),
    )
    .optional()
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
    // Upsert: storing again for the same username replaces the old record.
    let db = self.lock_conn()?;
    db.execute(
        "INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
        params![username, record],
    )
    .map(|_rows| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
    // `optional()` turns the no-row case into `Ok(None)`.
    let db = self.lock_conn()?;
    db.query_row(
        "SELECT opaque_record FROM users WHERE username = ?1",
        params![username],
        |row| row.get(0),
    )
    .optional()
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
    // EXISTS(...) always yields exactly one boolean row, so no `optional()`.
    let db = self.lock_conn()?;
    db.query_row(
        "SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
        params![username],
        |row| row.get(0),
    )
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn store_user_identity_key(
    &self,
    username: &str,
    identity_key: Vec<u8>,
) -> Result<(), StorageError> {
    // One identity key per username; later uploads replace earlier ones.
    let db = self.lock_conn()?;
    db.execute(
        "INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
        params![username, identity_key],
    )
    .map(|_rows| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
    // Missing username maps to `Ok(None)` via `optional()`.
    let db = self.lock_conn()?;
    db.query_row(
        "SELECT identity_key FROM user_identity_keys WHERE username = ?1",
        params![username],
        |row| row.get(0),
    )
    .optional()
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn publish_endpoint(
    &self,
    identity_key: &[u8],
    node_addr: Vec<u8>,
) -> Result<(), StorageError> {
    // Upsert the identity's last announced P2P node address.
    let db = self.lock_conn()?;
    db.execute(
        "INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
        params![identity_key, node_addr],
    )
    .map(|_rows| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
    // Unknown identities resolve to `Ok(None)` via `optional()`.
    let db = self.lock_conn()?;
    db.query_row(
        "SELECT node_addr FROM endpoints WHERE identity_key = ?1",
        params![identity_key],
        |row| row.get(0),
    )
    .optional()
    .map_err(|e| StorageError::Db(e.to_string()))
}
}
/// Convenience extension for `rusqlite::OptionalExtension`.
///
/// Maps the `QueryReturnedNoRows` error into `Ok(None)` so "no row" is a
/// normal outcome rather than an error; every other case passes through.
trait OptionalExt<T> {
    fn optional(self) -> Result<Option<T>, rusqlite::Error>;
}

impl<T> OptionalExt<T> for Result<T, rusqlite::Error> {
    fn optional(self) -> Result<Option<T>, rusqlite::Error> {
        match self {
            // The only error that means "absent" rather than "failed".
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            // Success wraps in `Some`; any other error propagates untouched.
            other => other.map(Some),
        }
    }
}
#[cfg(test)]
mod tests {
    //! Behavioral tests for the SQL-backed store: schema versioning, FIFO
    //! key-package consumption, sequenced delivery queues, and simple
    //! record round-trips.
    use super::*;
    use std::path::PathBuf;

    // Fresh store on an in-memory SQLite database with an empty passphrase —
    // fast and fully isolated per test.
    fn open_in_memory() -> SqlStore {
        SqlStore::open(":memory:", "").unwrap()
    }

    // After opening an on-disk DB, PRAGMA user_version must equal
    // SCHEMA_VERSION (i.e. migrations ran and recorded their version).
    #[test]
    fn sets_user_version_after_migrate() {
        let dir = tempfile::tempdir().expect("tempdir");
        let db_path: PathBuf = dir.path().join("store.db");
        {
            // Open the store so migrations run; the scope ends (dropping the
            // store and its connection guard) before the raw reopen below.
            let store = SqlStore::open(&db_path, "").expect("open store");
            let _guard = store.lock_conn().unwrap();
        }
        // Reopen with plain rusqlite to read the pragma independently.
        let conn = rusqlite::Connection::open(&db_path).expect("reopen db");
        let version: i32 = conn
            .pragma_query_value(None, "user_version", |row| row.get(0))
            .expect("read user_version");
        assert_eq!(version, SCHEMA_VERSION);
    }

    // Key packages come back oldest-first and each is returned exactly once.
    #[test]
    fn key_package_fifo() {
        let store = open_in_memory();
        let identity = [1u8; 32];
        store
            .upload_key_package(&identity, b"kp1".to_vec())
            .unwrap();
        store
            .upload_key_package(&identity, b"kp2".to_vec())
            .unwrap();
        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp1".to_vec())
        );
        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp2".to_vec())
        );
        // Pool exhausted: third fetch yields None.
        assert_eq!(store.fetch_key_package(&identity).unwrap(), None);
    }

    // enqueue assigns 0-based sequence numbers; fetch drains in order and
    // leaves the queue empty.
    #[test]
    fn delivery_round_trip() {
        let store = open_in_memory();
        let rk = [1u8; 32];
        let ch = b"channel-1";
        let seq0 = store.enqueue(&rk, ch, b"msg1".to_vec()).unwrap();
        let seq1 = store.enqueue(&rk, ch, b"msg2".to_vec()).unwrap();
        assert_eq!(seq0, 0);
        assert_eq!(seq1, 1);
        let msgs = store.fetch(&rk, ch).unwrap();
        assert_eq!(msgs, vec![(0u64, b"msg1".to_vec()), (1u64, b"msg2".to_vec())]);
        // fetch drains: a second fetch returns nothing.
        assert!(store.fetch(&rk, ch).unwrap().is_empty());
    }

    // fetch_limited removes only the `limit` oldest messages; the remainder
    // stays queued with its original sequence numbers.
    #[test]
    fn fetch_limited_partial_drain() {
        let store = open_in_memory();
        let rk = [5u8; 32];
        let ch = b"ch";
        store.enqueue(&rk, ch, b"a".to_vec()).unwrap();
        store.enqueue(&rk, ch, b"b".to_vec()).unwrap();
        store.enqueue(&rk, ch, b"c".to_vec()).unwrap();
        let msgs = store.fetch_limited(&rk, ch, 2).unwrap();
        assert_eq!(msgs, vec![(0u64, b"a".to_vec()), (1u64, b"b".to_vec())]);
        let remaining = store.fetch(&rk, ch).unwrap();
        assert_eq!(remaining, vec![(2u64, b"c".to_vec())]);
    }

    // queue_depth counts without draining the queue.
    #[test]
    fn queue_depth_count() {
        let store = open_in_memory();
        let rk = [6u8; 32];
        let ch = b"ch";
        assert_eq!(store.queue_depth(&rk, ch).unwrap(), 0);
        store.enqueue(&rk, ch, b"x".to_vec()).unwrap();
        store.enqueue(&rk, ch, b"y".to_vec()).unwrap();
        assert_eq!(store.queue_depth(&rk, ch).unwrap(), 2);
    }

    // has_user_record is true only for usernames that were actually stored.
    #[test]
    fn has_user_record_check() {
        let store = open_in_memory();
        assert!(!store.has_user_record("user1").unwrap());
        store
            .store_user_record("user1", b"record".to_vec())
            .unwrap();
        assert!(store.has_user_record("user1").unwrap());
        assert!(!store.has_user_record("user2").unwrap());
    }

    // Identity keys round-trip per username; absent before storing.
    #[test]
    fn user_identity_key_round_trip() {
        let store = open_in_memory();
        assert!(store.get_user_identity_key("user1").unwrap().is_none());
        store
            .store_user_identity_key("user1", vec![1u8; 32])
            .unwrap();
        assert_eq!(
            store.get_user_identity_key("user1").unwrap(),
            Some(vec![1u8; 32])
        );
    }

    // Hybrid public keys round-trip keyed by identity key.
    #[test]
    fn hybrid_key_round_trip() {
        let store = open_in_memory();
        let ik = [2u8; 32];
        let pk = b"hybrid_public_key_data".to_vec();
        store.upload_hybrid_key(&ik, pk.clone()).unwrap();
        assert_eq!(store.fetch_hybrid_key(&ik).unwrap(), Some(pk));
    }

    // Queues are isolated per channel even for the same recipient key.
    #[test]
    fn separate_channels_isolated() {
        let store = open_in_memory();
        let rk = [4u8; 32];
        store.enqueue(&rk, b"ch-a", b"a1".to_vec()).unwrap();
        store.enqueue(&rk, b"ch-b", b"b1".to_vec()).unwrap();
        let a_msgs = store.fetch(&rk, b"ch-a").unwrap();
        assert_eq!(a_msgs, vec![(0u64, b"a1".to_vec())]);
        let b_msgs = store.fetch(&rk, b"ch-b").unwrap();
        assert_eq!(b_msgs, vec![(0u64, b"b1".to_vec())]);
    }
}

View File

@@ -0,0 +1,494 @@
use std::{
collections::{HashMap, VecDeque},
fs,
hash::Hash,
path::{Path, PathBuf},
sync::Mutex,
};
use serde::{Deserialize, Serialize};
/// Error type shared by every [`Store`] backend.
#[derive(thiserror::Error, Debug)]
pub enum StorageError {
    /// Filesystem or lock failure; carries the stringified OS/lock error.
    #[error("io error: {0}")]
    Io(String),
    /// (De)serialization failure; the underlying cause is intentionally
    /// dropped (bincode errors carry no useful stable detail here).
    #[error("serialization error")]
    Serde,
    /// Database-backend failure, stringified.
    #[error("database error: {0}")]
    Db(String),
}
/// Acquire a mutex, converting a poisoned lock into `StorageError::Io`
/// instead of panicking.
fn lock<T>(m: &Mutex<T>) -> Result<std::sync::MutexGuard<'_, T>, StorageError> {
    match m.lock() {
        Ok(guard) => Ok(guard),
        Err(e) => Err(StorageError::Io(format!("lock poisoned: {e}"))),
    }
}
// ── Store trait ──────────────────────────────────────────────────────────────
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
pub trait Store: Send + Sync {
    /// Queue a KeyPackage for `identity_key`; packages are consumed FIFO by
    /// `fetch_key_package`.
    fn upload_key_package(&self, identity_key: &[u8], package: Vec<u8>)
        -> Result<(), StorageError>;
    /// Pop and return the oldest queued KeyPackage for `identity_key`, or
    /// `None` when the pool is empty. Consuming: each package is returned once.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
    /// Enqueue a payload and return the monotonically increasing per-inbox sequence number
    /// assigned to this message. Clients sort by seq before MLS processing.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<u64, StorageError>;
    /// Fetch and drain all queued messages, returning `(seq, payload)` pairs ordered by seq.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
    /// Fetch up to `limit` messages without draining the entire queue (Fix 8).
    /// Returns `(seq, payload)` pairs ordered by seq.
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
    /// Return the number of queued messages for (recipient, channel) (Fix 7).
    fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;
    /// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
    fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
    /// Store the hybrid public key for `identity_key`, replacing any
    /// previously stored value.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError>;
    /// Look up the hybrid public key for `identity_key`, if one was uploaded.
    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
    /// Store the OPAQUE `ServerSetup` (generated once, loaded on restart).
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError>;
    /// Load the persisted `ServerSetup`, if any.
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError>;
    /// Store an OPAQUE user record (serialized `ServerRegistration`).
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError>;
    /// Retrieve an OPAQUE user record by username.
    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
    /// Check if a user record already exists (Fix 5).
    fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;
    /// Store identity key for a user (Fix 2).
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError>;
    /// Retrieve identity key for a user (Fix 2).
    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
    /// Publish a P2P endpoint address for an identity key.
    fn publish_endpoint(&self, identity_key: &[u8], node_addr: Vec<u8>)
        -> Result<(), StorageError>;
    /// Resolve a peer's P2P endpoint address.
    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
}
// ── ChannelKey ───────────────────────────────────────────────────────────────
/// Composite key identifying one delivery inbox: a (channel, recipient) pair.
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug)]
pub struct ChannelKey {
    // Opaque channel (conversation) identifier bytes.
    pub channel_id: Vec<u8>,
    // Recipient identity-key bytes.
    pub recipient_key: Vec<u8>,
}
impl Hash for ChannelKey {
    /// Hash both fields (the same fields the derived `PartialEq`/`Eq`
    /// compare), preserving the `Hash`/`Eq` consistency contract. Tuple
    /// hashing feeds each element to the hasher in order, identical to
    /// hashing the fields one after the other.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        (&self.channel_id, &self.recipient_key).hash(state);
    }
}
// ── FileBackedStore ──────────────────────────────────────────────────────────
/// V1 on-disk map: FIFO byte queues keyed by a raw key. Still the live wire
/// format for the key-package file (see `load_kp_map` / `flush_kp_map`).
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV1 {
    map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}
/// V2 delivery format: queues keyed by (channel, recipient) but without
/// sequence numbers. Read-only now — kept so `load_delivery_map_v3` can
/// migrate old files to V3.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV2 {
    map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
}
/// One queued delivery: the payload plus its per-inbox sequence number.
#[derive(Serialize, Deserialize, Default, Clone)]
struct SeqEntry {
    // Monotonic per-(channel, recipient) sequence number.
    seq: u64,
    // Opaque payload bytes.
    data: Vec<u8>,
}
/// V3 delivery store: each queue entry carries a monotonic per-inbox sequence number.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV3 {
    // (channel, recipient) → ordered queue of sequenced entries.
    map: HashMap<ChannelKey, VecDeque<SeqEntry>>,
    // (channel, recipient) → next sequence number to assign on enqueue.
    next_seq: HashMap<ChannelKey, u64>,
}
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
pub struct FileBackedStore {
    // On-disk location of each persisted map (one file per map).
    kp_path: PathBuf,
    ds_path: PathBuf,
    hk_path: PathBuf,
    setup_path: PathBuf,
    users_path: PathBuf,
    identity_keys_path: PathBuf,
    // In-memory state, rewritten to disk in full on every mutation.
    // identity key → FIFO queue of serialized KeyPackages.
    key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
    // Sequenced delivery queues (V3 format).
    deliveries: Mutex<QueueMapV3>,
    // identity key → hybrid public key.
    hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    // username → serialized OPAQUE record.
    users: Mutex<HashMap<String, Vec<u8>>>,
    // username → identity-key bytes.
    identity_keys: Mutex<HashMap<String, Vec<u8>>>,
    // identity key → P2P node address. In-memory only: initialized empty in
    // `open` and never flushed, so endpoints do not survive a restart.
    endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}
impl FileBackedStore {
    /// Open (or create) the storage directory and load every persisted map.
    ///
    /// Missing or empty files load as empty maps; the deliveries file is
    /// transparently migrated from the legacy V2 format. The `endpoints`
    /// map is deliberately not persisted and starts empty.
    pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
        let dir = dir.as_ref();
        if !dir.exists() {
            fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        let kp_path = dir.join("keypackages.bin");
        let ds_path = dir.join("deliveries.bin");
        let hk_path = dir.join("hybridkeys.bin");
        let setup_path = dir.join("server_setup.bin");
        let users_path = dir.join("users.bin");
        let identity_keys_path = dir.join("identity_keys.bin");
        let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
        let deliveries = Mutex::new(Self::load_delivery_map_v3(&ds_path)?);
        let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
        let users = Mutex::new(Self::load_users(&users_path)?);
        let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
        Ok(Self {
            kp_path,
            ds_path,
            hk_path,
            setup_path,
            users_path,
            identity_keys_path,
            key_packages,
            deliveries,
            hybrid_keys,
            users,
            identity_keys,
            endpoints: Mutex::new(HashMap::new()),
        })
    }

    /// Atomically replace `path` with `bytes`: write to a sibling
    /// `<path>.tmp`, then rename it over the target.
    ///
    /// Fix: the flush helpers previously called `fs::write` directly on the
    /// live state file, so a crash mid-write could leave a truncated file
    /// that the next `open` would fail to deserialize (losing all state).
    /// Rename within one directory is atomic on POSIX, so readers observe
    /// either the old or the new file, never a partial one.
    fn write_atomic(path: &Path, bytes: &[u8]) -> Result<(), StorageError> {
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        // Build "<path>.tmp" in the same directory so the rename cannot
        // cross a filesystem boundary.
        let mut tmp_name = path.as_os_str().to_owned();
        tmp_name.push(".tmp");
        let tmp = PathBuf::from(tmp_name);
        fs::write(&tmp, bytes).map_err(|e| StorageError::Io(e.to_string()))?;
        fs::rename(&tmp, path).map_err(|e| StorageError::Io(e.to_string()))
    }

    /// Load the key-package map (V1 wire format). Missing/empty file → empty map.
    fn load_kp_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        let map: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
        Ok(map.map)
    }

    /// Persist the full key-package map (atomic replace).
    fn flush_kp_map(
        &self,
        path: &Path,
        map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
    ) -> Result<(), StorageError> {
        let payload = QueueMapV1 { map: map.clone() };
        let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
        Self::write_atomic(path, &bytes)
    }

    /// Load deliveries as V3. Falls back to V2 format (assigns seqs starting at 0).
    fn load_delivery_map_v3(path: &Path) -> Result<QueueMapV3, StorageError> {
        if !path.exists() {
            return Ok(QueueMapV3::default());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(QueueMapV3::default());
        }
        // Try V3 first.
        if let Ok(v3) = bincode::deserialize::<QueueMapV3>(&bytes) {
            return Ok(v3);
        }
        // Fall back to V2: assign ascending seqs starting at 0 per channel.
        let v2 = bincode::deserialize::<QueueMapV2>(&bytes)
            .map_err(|_| StorageError::Io("deliveries file: unrecognised format".into()))?;
        let mut v3 = QueueMapV3::default();
        for (key, queue) in v2.map {
            let entries: VecDeque<SeqEntry> = queue
                .into_iter()
                .enumerate()
                .map(|(i, data)| SeqEntry { seq: i as u64, data })
                .collect();
            let next = entries.len() as u64;
            v3.next_seq.insert(key.clone(), next);
            v3.map.insert(key, entries);
        }
        Ok(v3)
    }

    /// Persist the full delivery map (atomic replace).
    fn flush_delivery_map(&self, path: &Path, map: &QueueMapV3) -> Result<(), StorageError> {
        let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
        Self::write_atomic(path, &bytes)
    }

    /// Load the identity-key → hybrid-public-key map. Missing/empty → empty.
    fn load_hybrid_keys(path: &Path) -> Result<HashMap<Vec<u8>, Vec<u8>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
    }

    /// Persist the full hybrid-key map (atomic replace).
    fn flush_hybrid_keys(
        &self,
        path: &Path,
        map: &HashMap<Vec<u8>, Vec<u8>>,
    ) -> Result<(), StorageError> {
        let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
        Self::write_atomic(path, &bytes)
    }

    /// Load the username → record map. Missing/empty → empty.
    fn load_users(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
    }

    /// Persist the full user map (atomic replace).
    fn flush_users(&self, path: &Path, map: &HashMap<String, Vec<u8>>) -> Result<(), StorageError> {
        let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
        Self::write_atomic(path, &bytes)
    }

    /// Generic string→bytes loader (same wire format as the users map).
    fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
        Self::load_users(path)
    }

    /// Generic string→bytes flusher (same wire format as the users map).
    fn flush_map_string_bytes(
        &self,
        path: &Path,
        map: &HashMap<String, Vec<u8>>,
    ) -> Result<(), StorageError> {
        self.flush_users(path, map)
    }
}
impl Store for FileBackedStore {
    // Append a KeyPackage to the identity's FIFO queue, then persist the map.
    fn upload_key_package(
        &self,
        identity_key: &[u8],
        package: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.key_packages)?;
        map.entry(identity_key.to_vec())
            .or_default()
            .push_back(package);
        self.flush_kp_map(&self.kp_path, &*map)
    }

    // Pop the oldest KeyPackage (FIFO). Note: flushes even when nothing was
    // popped — a harmless full rewrite of the unchanged map.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let mut map = lock(&self.key_packages)?;
        let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
        self.flush_kp_map(&self.kp_path, &*map)?;
        Ok(package)
    }

    // Assign the next per-inbox sequence number, append the entry, persist.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<u64, StorageError> {
        let mut inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        // Post-increment the per-inbox counter; sequence numbers start at 0.
        let seq = {
            let entry = inner.next_seq.entry(key.clone()).or_insert(0);
            let s = *entry;
            *entry = s + 1;
            s
        };
        inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
        self.flush_delivery_map(&self.ds_path, &*inner)?;
        Ok(seq)
    }

    // Drain the whole queue for (recipient, channel); entries are already in
    // seq order because enqueue appends with increasing seqs.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
        let mut inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        let messages: Vec<(u64, Vec<u8>)> = inner
            .map
            .get_mut(&key)
            .map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
            .unwrap_or_default();
        self.flush_delivery_map(&self.ds_path, &*inner)?;
        Ok(messages)
    }

    // Drain only the `limit` oldest entries; the rest stay queued.
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
        let mut inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        let messages: Vec<(u64, Vec<u8>)> = inner
            .map
            .get_mut(&key)
            .map(|q| {
                // Clamp so `drain` never panics on a short queue.
                let count = limit.min(q.len());
                q.drain(..count).map(|e| (e.seq, e.data)).collect()
            })
            .unwrap_or_default();
        self.flush_delivery_map(&self.ds_path, &*inner)?;
        Ok(messages)
    }

    // Non-destructive count of queued entries for one inbox.
    fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
        let inner = lock(&self.deliveries)?;
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        Ok(inner.map.get(&key).map(|q| q.len()).unwrap_or(0))
    }

    fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
        // FileBackedStore does not track timestamps per message — no-op.
        Ok(0)
    }

    // Replace (or insert) the hybrid public key for an identity, then persist.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.hybrid_keys)?;
        map.insert(identity_key.to_vec(), hybrid_pk);
        self.flush_hybrid_keys(&self.hk_path, &*map)
    }

    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.hybrid_keys)?;
        Ok(map.get(identity_key).cloned())
    }

    // The setup blob is written as a raw file (no bincode wrapper).
    // NOTE(review): this is a direct, non-atomic fs::write — a crash
    // mid-write can truncate the setup file.
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
        if let Some(parent) = self.setup_path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(&self.setup_path, setup).map_err(|e| StorageError::Io(e.to_string()))
    }

    // Missing or empty setup file both read back as `None`.
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
        if !self.setup_path.exists() {
            return Ok(None);
        }
        let bytes = fs::read(&self.setup_path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(None);
        }
        Ok(Some(bytes))
    }

    // Insert-or-replace the OPAQUE record for a username, then persist.
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
        let mut map = lock(&self.users)?;
        map.insert(username.to_string(), record);
        self.flush_users(&self.users_path, &*map)
    }

    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.users)?;
        Ok(map.get(username).cloned())
    }

    fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
        let map = lock(&self.users)?;
        Ok(map.contains_key(username))
    }

    // Insert-or-replace the identity key for a username, then persist.
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.identity_keys)?;
        map.insert(username.to_string(), identity_key);
        self.flush_map_string_bytes(&self.identity_keys_path, &*map)
    }

    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.identity_keys)?;
        Ok(map.get(username).cloned())
    }

    // Endpoints are held in memory only: no flush, so they do not survive a
    // process restart.
    fn publish_endpoint(
        &self,
        identity_key: &[u8],
        node_addr: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = lock(&self.endpoints)?;
        map.insert(identity_key.to_vec(), node_addr);
        Ok(())
    }

    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let map = lock(&self.endpoints)?;
        Ok(map.get(identity_key).cloned())
    }
}

View File

@@ -28,9 +28,6 @@ pub fn build_server_config(
let cert_bytes = std::fs::read(cert_path).context("read cert")?;
let key_bytes = std::fs::read(key_path).context("read key")?;
// Validate certificate expiry and warn about self-signed certs.
validate_certificate(&cert_bytes)?;
let cert_chain = vec![CertificateDer::from(cert_bytes)];
let key = PrivateKeyDer::try_from(key_bytes).map_err(|_| anyhow::anyhow!("invalid key"))?;
@@ -64,12 +61,6 @@ fn generate_self_signed_cert(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow:
std::fs::write(cert_path, issued.cert.der()).context("write cert")?;
std::fs::write(key_path, &key_der).context("write key")?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let perms = std::fs::Permissions::from_mode(0o600);
std::fs::set_permissions(key_path, perms).context("set key permissions")?;
}
tracing::info!(
cert = %cert_path.display(),
@@ -79,39 +70,3 @@ fn generate_self_signed_cert(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow:
Ok(())
}
/// Validate a DER-encoded X.509 certificate: bail if expired, warn if expiring
/// soon or self-signed.
fn validate_certificate(der_bytes: &[u8]) -> anyhow::Result<()> {
    use x509_parser::prelude::*;
    // `from_der` returns (remaining input, parsed certificate); the remainder
    // is ignored here.
    let (_, cert) = X509Certificate::from_der(der_bytes)
        .map_err(|e| anyhow::anyhow!("failed to parse X.509 certificate: {e}"))?;
    let validity = cert.validity();
    let now = ASN1Time::now();
    // Hard failure when `now` is outside the validity window.
    // NOTE(review): `is_valid_at` also fails for a not-yet-valid certificate,
    // but the error message only mentions expiry — confirm that is intended.
    if !validity.is_valid_at(now) {
        anyhow::bail!(
            "TLS certificate expired (not_after: {})",
            validity.not_after
        );
    }
    // Warn if expiring within 30 days.
    let thirty_days = std::time::Duration::from_secs(30 * 24 * 60 * 60);
    let cutoff = now.timestamp() + thirty_days.as_secs() as i64;
    if validity.not_after.timestamp() < cutoff {
        tracing::warn!(
            not_after = %validity.not_after,
            "TLS certificate expires within 30 days"
        );
    }
    // Warn if self-signed (issuer == subject).
    if cert.issuer() == cert.subject() {
        tracing::warn!("TLS certificate is self-signed (issuer == subject)");
    }
    Ok(())
}

View File

@@ -1,104 +0,0 @@
[package]
name = "quicprochat-client"
version = "0.1.0"
edition.workspace = true
description = "CLI client for quicprochat."
license = "Apache-2.0 OR MIT"
repository.workspace = true
[[bin]]
name = "qpc"
path = "src/main.rs"
[dependencies]
quicprochat-core = { path = "../quicprochat-core" }
quicprochat-proto = { path = "../quicprochat-proto" }
quicprochat-kt = { path = "../quicprochat-kt" }
openmls_rust_crypto = { workspace = true }
# Serialisation + RPC
capnp = { workspace = true }
capnp-rpc = { workspace = true }
# Async
tokio = { workspace = true }
tokio-util = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
bincode = { workspace = true }
# Crypto — OPAQUE PAKE
opaque-ke = { workspace = true }
rand = { workspace = true }
# Error handling
anyhow = { workspace = true }
thiserror = { workspace = true }
# Crypto — for fingerprint verification in fetch-key subcommand
sha2 = { workspace = true }
argon2 = { workspace = true }
chacha20poly1305 = { workspace = true }
ciborium = { workspace = true }
zeroize = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
rustls = { workspace = true }
# Logging
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
# CLI + config
clap = { workspace = true }
toml = { workspace = true }
# Local message/conversation storage
rusqlite = { workspace = true }
# Hex encoding/decoding
hex = { workspace = true }
# Secure password prompting (no echo)
rpassword = "5"
# mDNS discovery for mesh mode (Freifunk). Only compiled with --features mesh.
mdns-sd = { version = "0.12", optional = true }
# Optional P2P transport for direct node-to-node messaging.
quicprochat-p2p = { path = "../quicprochat-p2p", optional = true }
# Optional TUI dependencies (Ratatui full-screen interface).
ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] }
crossterm = { version = "0.28", optional = true }
# YAML playbook parsing (only compiled with --features playbook).
serde_yaml = { version = "0.9", optional = true }
# v2 SDK-based CLI (thin shell over quicprochat-sdk).
quicprochat-sdk = { path = "../quicprochat-sdk", optional = true }
quicprochat-rpc = { path = "../quicprochat-rpc", optional = true }
rustyline = { workspace = true, optional = true }
[lints]
workspace = true
[features]
# Enable mesh-mode features: mDNS local peer discovery + P2P transport.
# Build: cargo build -p quicprochat-client --features mesh
mesh = ["dep:mdns-sd", "dep:quicprochat-p2p"]
# Enable full-screen Ratatui TUI: cargo build -p quicprochat-client --features tui
tui = ["dep:ratatui", "dep:crossterm"]
# Enable playbook (scripted command execution): YAML parser + serde derives.
# Build: cargo build -p quicprochat-client --features playbook
playbook = ["dep:serde_yaml"]
# v2 CLI over SDK: cargo build -p quicprochat-client --features v2
v2 = ["dep:quicprochat-sdk", "dep:quicprochat-rpc", "dep:rustyline"]
[dev-dependencies]
dashmap = { workspace = true }
assert_cmd = "2"
tempfile = "3"
portpicker = "0.1"
rand = "0.8"

View File

@@ -1,516 +0,0 @@
//! Command engine: typed command enum, registry, and execution bridge.
//!
//! Maps every REPL slash command and lifecycle operation into a single `Command`
//! enum with typed parameters. `CommandRegistry` parses raw input and delegates
//! execution to the existing `cmd_*` handlers in `repl.rs`.
use std::collections::HashMap;
use quicprochat_proto::node_capnp::node_service;
use super::repl::{Input, SlashCommand, parse_input};
use super::session::SessionState;
// ── Comparison operator for assert conditions ────────────────────────────────
/// Comparison operator used in playbook assertions.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum CmpOp {
    Eq,
    Ne,
    Gt,
    Lt,
    Gte,
    Lte,
}

impl CmpOp {
    /// Evaluate this comparison: `lhs <op> rhs`.
    pub fn eval(&self, lhs: usize, rhs: usize) -> bool {
        use std::cmp::Ordering::{Equal, Greater, Less};
        // Compare once, then interpret the ordering per operator; each arm is
        // equivalent to the corresponding `==`/`!=`/`<`/`>` operator on usize.
        match (self, lhs.cmp(&rhs)) {
            (CmpOp::Eq, ord) => ord == Equal,
            (CmpOp::Ne, ord) => ord != Equal,
            (CmpOp::Gt, ord) => ord == Greater,
            (CmpOp::Lt, ord) => ord == Less,
            (CmpOp::Gte, ord) => ord != Less,
            (CmpOp::Lte, ord) => ord != Greater,
        }
    }
}
// ── Assert conditions for playbook testing ───────────────────────────────────
/// Conditions that can be asserted in a playbook step.
///
/// NOTE(review): variant semantics below are inferred from names — confirm
/// against the playbook executor.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum AssertCondition {
    // Presumably: a server connection is established.
    Connected,
    // Presumably: the session is authenticated.
    LoggedIn,
    // Presumably: the active conversation matches `name`.
    InConversation { name: String },
    // Message count compared against `count` via `op` (see `CmpOp::eval`).
    MessageCount { op: CmpOp, count: usize },
    // Member count compared against `count` via `op`.
    MemberCount { op: CmpOp, count: usize },
    // Free-form expression; evaluation rules live in the executor.
    Custom { expression: String },
}
// ── Command enum ─────────────────────────────────────────────────────────────
/// Every operation the client can perform, with typed parameters.
///
/// This is a superset of `SlashCommand` — it adds lifecycle operations
/// (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`, `Assert`, `SetVar`)
/// that are needed for non-interactive / playbook execution.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum Command {
    // ── Lifecycle (not in SlashCommand) ──────────────────────────────────
    Connect {
        server: String,
        // Optional CA certificate path/identifier for pinning.
        ca_cert: Option<String>,
        // Presumably disables certificate verification — confirm in handler.
        insecure: bool,
    },
    Login {
        username: String,
        password: String,
    },
    Register {
        username: String,
        password: String,
    },
    // Plain (non-slash) chat text, as produced by `CommandRegistry::parse`.
    SendMessage {
        text: String,
    },
    // Sleep for `duration_ms` milliseconds (playbook pacing; see `execute`).
    Wait {
        duration_ms: u64,
    },
    Assert {
        condition: AssertCondition,
    },
    // Capture a name/value pair into the result `data` map (see `execute`).
    SetVar {
        name: String,
        value: String,
    },
    // ── SlashCommand mirror ─────────────────────────────────────────────
    // Every variant below maps 1:1 onto `SlashCommand` via `to_slash`.
    Help,
    Quit,
    Whoami,
    List,
    Switch { target: String },
    Dm { username: String },
    CreateGroup { name: String },
    Invite { target: String },
    Remove { target: String },
    Leave,
    Join,
    Members,
    GroupInfo,
    Rename { name: String },
    History { count: usize },
    // Mesh
    MeshStart,
    MeshStop,
    MeshPeers,
    MeshServer { addr: String },
    MeshSend { peer_id: String, message: String },
    MeshBroadcast { topic: String, message: String },
    MeshSubscribe { topic: String },
    MeshRoute,
    MeshIdentity,
    MeshStore,
    // Security / crypto
    Verify { username: String },
    UpdateKey,
    Typing,
    TypingNotify { enabled: bool },
    // `index` selects a target message; None semantics live in the handler.
    React { emoji: String, index: Option<usize> },
    Edit { index: usize, new_text: String },
    Delete { index: usize },
    SendFile { path: String },
    Download { index: usize },
    DeleteAccount,
    Disappear { arg: Option<String> },
    Privacy { arg: Option<String> },
    VerifyFs,
    RotateAllKeys,
    Devices,
    RegisterDevice { name: String },
    RevokeDevice { id_prefix: String },
}
impl Command {
    /// Convert a `Command` to a `SlashCommand` when possible.
    ///
    /// Returns `None` for lifecycle commands that have no `SlashCommand`
    /// equivalent (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`,
    /// `Assert`, `SetVar`). All other variants map 1:1 by moving their
    /// fields across unchanged.
    pub(crate) fn to_slash(&self) -> Option<SlashCommand> {
        // Clone up front so every arm can move owned fields out of the copy
        // without borrowing from `self`.
        match self.clone() {
            // Lifecycle — no SlashCommand equivalent
            Command::Connect { .. }
            | Command::Login { .. }
            | Command::Register { .. }
            | Command::SendMessage { .. }
            | Command::Wait { .. }
            | Command::Assert { .. }
            | Command::SetVar { .. } => None,
            // 1:1 mirror
            Command::Help => Some(SlashCommand::Help),
            Command::Quit => Some(SlashCommand::Quit),
            Command::Whoami => Some(SlashCommand::Whoami),
            Command::List => Some(SlashCommand::List),
            Command::Switch { target } => Some(SlashCommand::Switch { target }),
            Command::Dm { username } => Some(SlashCommand::Dm { username }),
            Command::CreateGroup { name } => Some(SlashCommand::CreateGroup { name }),
            Command::Invite { target } => Some(SlashCommand::Invite { target }),
            Command::Remove { target } => Some(SlashCommand::Remove { target }),
            Command::Leave => Some(SlashCommand::Leave),
            Command::Join => Some(SlashCommand::Join),
            Command::Members => Some(SlashCommand::Members),
            Command::GroupInfo => Some(SlashCommand::GroupInfo),
            Command::Rename { name } => Some(SlashCommand::Rename { name }),
            Command::History { count } => Some(SlashCommand::History { count }),
            Command::MeshStart => Some(SlashCommand::MeshStart),
            Command::MeshStop => Some(SlashCommand::MeshStop),
            Command::MeshPeers => Some(SlashCommand::MeshPeers),
            Command::MeshServer { addr } => Some(SlashCommand::MeshServer { addr }),
            Command::MeshSend { peer_id, message } => {
                Some(SlashCommand::MeshSend { peer_id, message })
            }
            Command::MeshBroadcast { topic, message } => {
                Some(SlashCommand::MeshBroadcast { topic, message })
            }
            Command::MeshSubscribe { topic } => Some(SlashCommand::MeshSubscribe { topic }),
            Command::MeshRoute => Some(SlashCommand::MeshRoute),
            Command::MeshIdentity => Some(SlashCommand::MeshIdentity),
            Command::MeshStore => Some(SlashCommand::MeshStore),
            Command::Verify { username } => Some(SlashCommand::Verify { username }),
            Command::UpdateKey => Some(SlashCommand::UpdateKey),
            Command::Typing => Some(SlashCommand::Typing),
            Command::TypingNotify { enabled } => Some(SlashCommand::TypingNotify { enabled }),
            Command::React { emoji, index } => Some(SlashCommand::React { emoji, index }),
            Command::Edit { index, new_text } => Some(SlashCommand::Edit { index, new_text }),
            Command::Delete { index } => Some(SlashCommand::Delete { index }),
            Command::SendFile { path } => Some(SlashCommand::SendFile { path }),
            Command::Download { index } => Some(SlashCommand::Download { index }),
            Command::DeleteAccount => Some(SlashCommand::DeleteAccount),
            Command::Disappear { arg } => Some(SlashCommand::Disappear { arg }),
            Command::Privacy { arg } => Some(SlashCommand::Privacy { arg }),
            Command::VerifyFs => Some(SlashCommand::VerifyFs),
            Command::RotateAllKeys => Some(SlashCommand::RotateAllKeys),
            Command::Devices => Some(SlashCommand::Devices),
            Command::RegisterDevice { name } => Some(SlashCommand::RegisterDevice { name }),
            Command::RevokeDevice { id_prefix } => {
                Some(SlashCommand::RevokeDevice { id_prefix })
            }
        }
    }
}
// ── CommandResult ────────────────────────────────────────────────────────────
/// Outcome of executing a single `Command`.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub struct CommandResult {
    // True when the command completed without error.
    pub success: bool,
    // Captured textual output; currently always `None` in this module
    // (handlers print to stdout — see `CommandRegistry::execute`).
    pub output: Option<String>,
    // Error message when `success` is false.
    pub error: Option<String>,
    /// Structured key-value outputs for variable capture in playbooks.
    pub data: HashMap<String, String>,
}
impl CommandResult {
fn ok() -> Self {
Self {
success: true,
output: None,
error: None,
data: HashMap::new(),
}
}
fn err(msg: String) -> Self {
Self {
success: false,
output: None,
error: Some(msg),
data: HashMap::new(),
}
}
}
// ── CommandRegistry ──────────────────────────────────────────────────────────
/// Parses raw input into `Command` and delegates execution to the existing
/// REPL handlers.
pub struct CommandRegistry;
impl CommandRegistry {
    /// Parse a raw input line into a `Command`.
    ///
    /// Returns `None` for empty input, `Command::SendMessage` for plain chat
    /// text, and the mapped command for slash input (via `parse_input` and
    /// `slash_to_command`).
    pub fn parse(line: &str) -> Option<Command> {
        let cmd = match parse_input(line) {
            Input::Empty => return None,
            Input::ChatMessage(text) => Command::SendMessage { text },
            Input::Slash(sc) => slash_to_command(sc),
        };
        Some(cmd)
    }
    /// Execute a `Command`, delegating slash commands to the existing
    /// `handle_slash` dispatch and handling lifecycle commands directly.
    ///
    /// Output from `cmd_*` handlers still goes to stdout; `CommandResult`
    /// only captures success/failure status for now.
    pub async fn execute(
        cmd: &Command,
        session: &mut SessionState,
        client: &node_service::Client,
    ) -> CommandResult {
        match cmd {
            Command::Wait { duration_ms } => {
                let pause = std::time::Duration::from_millis(*duration_ms);
                tokio::time::sleep(pause).await;
                CommandResult::ok()
            }
            Command::SetVar { name, value } => {
                let mut out = CommandResult::ok();
                out.data.insert(name.clone(), value.clone());
                out
            }
            Command::Assert { condition } => execute_assert(condition, session),
            Command::Connect { .. } | Command::Login { .. } | Command::Register { .. } => {
                // Lifecycle commands need external context (endpoint, OPAQUE
                // state) that lives outside SessionState; the playbook
                // executor handles them itself, so reaching here is an error.
                CommandResult::err(
                    "lifecycle commands (connect/login/register) must be handled by the playbook executor".into(),
                )
            }
            Command::SendMessage { text } => {
                if let Err(e) = super::repl::do_send(session, client, text).await {
                    CommandResult::err(format!("{e:#}"))
                } else {
                    CommandResult::ok()
                }
            }
            Command::Quit => CommandResult::ok(),
            other => match other.to_slash() {
                // All remaining variants have a SlashCommand equivalent.
                Some(sc) => match execute_slash(session, client, sc).await {
                    Ok(()) => CommandResult::ok(),
                    Err(e) => CommandResult::err(format!("{e:#}")),
                },
                None => CommandResult::err("command has no slash equivalent".into()),
            },
        }
    }
}
// ── Conversion helpers ──────────────────────────────────────────────────────
/// Convert a `SlashCommand` into the corresponding `Command`.
///
/// Mechanical 1:1 mapping: each variant's fields are moved across unchanged.
fn slash_to_command(sc: SlashCommand) -> Command {
    match sc {
        SlashCommand::Help => Command::Help,
        SlashCommand::Quit => Command::Quit,
        SlashCommand::Whoami => Command::Whoami,
        SlashCommand::List => Command::List,
        SlashCommand::Switch { target } => Command::Switch { target },
        SlashCommand::Dm { username } => Command::Dm { username },
        SlashCommand::CreateGroup { name } => Command::CreateGroup { name },
        SlashCommand::Invite { target } => Command::Invite { target },
        SlashCommand::Remove { target } => Command::Remove { target },
        SlashCommand::Leave => Command::Leave,
        SlashCommand::Join => Command::Join,
        SlashCommand::Members => Command::Members,
        SlashCommand::GroupInfo => Command::GroupInfo,
        SlashCommand::Rename { name } => Command::Rename { name },
        SlashCommand::History { count } => Command::History { count },
        SlashCommand::MeshStart => Command::MeshStart,
        SlashCommand::MeshStop => Command::MeshStop,
        SlashCommand::MeshPeers => Command::MeshPeers,
        SlashCommand::MeshServer { addr } => Command::MeshServer { addr },
        SlashCommand::MeshSend { peer_id, message } => Command::MeshSend { peer_id, message },
        SlashCommand::MeshBroadcast { topic, message } => {
            Command::MeshBroadcast { topic, message }
        }
        SlashCommand::MeshSubscribe { topic } => Command::MeshSubscribe { topic },
        SlashCommand::MeshRoute => Command::MeshRoute,
        SlashCommand::MeshIdentity => Command::MeshIdentity,
        SlashCommand::MeshStore => Command::MeshStore,
        SlashCommand::Verify { username } => Command::Verify { username },
        SlashCommand::UpdateKey => Command::UpdateKey,
        SlashCommand::Typing => Command::Typing,
        SlashCommand::TypingNotify { enabled } => Command::TypingNotify { enabled },
        SlashCommand::React { emoji, index } => Command::React { emoji, index },
        SlashCommand::Edit { index, new_text } => Command::Edit { index, new_text },
        SlashCommand::Delete { index } => Command::Delete { index },
        SlashCommand::SendFile { path } => Command::SendFile { path },
        SlashCommand::Download { index } => Command::Download { index },
        SlashCommand::DeleteAccount => Command::DeleteAccount,
        SlashCommand::Disappear { arg } => Command::Disappear { arg },
        SlashCommand::Privacy { arg } => Command::Privacy { arg },
        SlashCommand::VerifyFs => Command::VerifyFs,
        SlashCommand::RotateAllKeys => Command::RotateAllKeys,
        SlashCommand::Devices => Command::Devices,
        SlashCommand::RegisterDevice { name } => Command::RegisterDevice { name },
        SlashCommand::RevokeDevice { id_prefix } => Command::RevokeDevice { id_prefix },
    }
}
// ── Execution helpers ───────────────────────────────────────────────────────
/// Execute a `SlashCommand` using the existing `cmd_*` handlers from `repl.rs`.
///
/// This duplicates the dispatch table from `handle_slash` but returns
/// `anyhow::Result<()>` instead of printing errors inline — the caller
/// decides how to surface errors.
async fn execute_slash(
    session: &mut SessionState,
    client: &node_service::Client,
    cmd: SlashCommand,
) -> anyhow::Result<()> {
    use super::repl::*;
    match cmd {
        SlashCommand::Help => {
            print_help();
            Ok(())
        }
        // Quit is a no-op here; the caller decides when to stop the loop.
        SlashCommand::Quit => Ok(()),
        SlashCommand::Whoami => cmd_whoami(session),
        SlashCommand::List => cmd_list(session),
        SlashCommand::Switch { target } => cmd_switch(session, &target),
        SlashCommand::Dm { username } => cmd_dm(session, client, &username).await,
        SlashCommand::CreateGroup { name } => cmd_create_group(session, &name),
        SlashCommand::Invite { target } => cmd_invite(session, client, &target).await,
        SlashCommand::Remove { target } => cmd_remove(session, client, &target).await,
        SlashCommand::Leave => cmd_leave(session, client).await,
        SlashCommand::Join => cmd_join(session, client).await,
        SlashCommand::Members => cmd_members(session, client).await,
        SlashCommand::GroupInfo => cmd_group_info(session, client).await,
        SlashCommand::Rename { name } => cmd_rename(session, &name),
        SlashCommand::History { count } => cmd_history(session, count),
        SlashCommand::MeshStart => cmd_mesh_start(session).await,
        SlashCommand::MeshStop => cmd_mesh_stop(session).await,
        SlashCommand::MeshPeers => cmd_mesh_peers(),
        // No handler for MeshServer: just print a hint to reconnect.
        SlashCommand::MeshServer { addr } => {
            super::display::print_status(&format!(
                "mesh server hint: reconnect with --server {addr} to use this node"
            ));
            Ok(())
        }
        SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(session, &peer_id, &message).await,
        SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(session, &topic, &message).await,
        SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(session, &topic),
        SlashCommand::MeshRoute => cmd_mesh_route(session),
        SlashCommand::MeshIdentity => cmd_mesh_identity(session),
        SlashCommand::MeshStore => cmd_mesh_store(session),
        SlashCommand::Verify { username } => cmd_verify(session, client, &username).await,
        SlashCommand::UpdateKey => cmd_update_key(session, client).await,
        SlashCommand::Typing => cmd_typing(session, client).await,
        // Toggle handled inline: flips the session flag and reports the state.
        SlashCommand::TypingNotify { enabled } => {
            session.typing_notify_enabled = enabled;
            super::display::print_status(&format!(
                "typing notifications {}",
                if enabled { "enabled" } else { "disabled" }
            ));
            Ok(())
        }
        SlashCommand::React { emoji, index } => cmd_react(session, client, &emoji, index).await,
        SlashCommand::Edit { index, new_text } => {
            cmd_edit(session, client, index, &new_text).await
        }
        SlashCommand::Delete { index } => cmd_delete(session, client, index).await,
        SlashCommand::SendFile { path } => cmd_send_file(session, client, &path).await,
        SlashCommand::Download { index } => cmd_download(session, client, index).await,
        SlashCommand::DeleteAccount => cmd_delete_account(session, client).await,
        SlashCommand::Disappear { arg } => cmd_disappear(session, arg.as_deref()),
        SlashCommand::Privacy { arg } => cmd_privacy(session, arg.as_deref()),
        SlashCommand::VerifyFs => cmd_verify_fs(session),
        SlashCommand::RotateAllKeys => cmd_rotate_all_keys(session, client).await,
        SlashCommand::Devices => cmd_devices(client).await,
        SlashCommand::RegisterDevice { name } => cmd_register_device(client, &name).await,
        SlashCommand::RevokeDevice { id_prefix } => cmd_revoke_device(client, &id_prefix).await,
    }
}
/// Assert a condition against the current session state.
///
/// Returns `CommandResult::ok()` when the condition holds, otherwise an
/// error result describing the mismatch.
fn execute_assert(condition: &AssertCondition, session: &SessionState) -> CommandResult {
    match condition {
        // Having a session at all means we got past connect, so this holds
        // whenever execute() runs with a valid client reference.
        AssertCondition::Connected => CommandResult::ok(),
        AssertCondition::LoggedIn => {
            let logged_in = crate::AUTH_CONTEXT
                .read()
                .expect("AUTH_CONTEXT poisoned")
                .is_some();
            if logged_in {
                CommandResult::ok()
            } else {
                CommandResult::err("not logged in".into())
            }
        }
        // Substring match against the active conversation's display name.
        AssertCondition::InConversation { name } => match session.active_display_name() {
            Some(display) if display.contains(name.as_str()) => CommandResult::ok(),
            Some(display) => CommandResult::err(format!(
                "active conversation is '{display}', expected '{name}'"
            )),
            None => CommandResult::err("no active conversation".into()),
        },
        AssertCondition::MessageCount { op, count } => {
            // Missing conversation or a load failure both count as 0 messages.
            let actual = match &session.active_conversation {
                Some(id) => session
                    .conv_store
                    .load_all_messages(id)
                    .map(|msgs| msgs.len())
                    .unwrap_or(0),
                None => 0,
            };
            if op.eval(actual, *count) {
                CommandResult::ok()
            } else {
                CommandResult::err(format!(
                    "message count assertion failed: {actual} {op:?} {count}"
                ))
            }
        }
        AssertCondition::MemberCount { op, count } => {
            // No active conversation or no member roster counts as 0 members.
            let actual = match &session.active_conversation {
                Some(id) => session
                    .members
                    .get(id)
                    .map(|m| m.member_identities().len())
                    .unwrap_or(0),
                None => 0,
            };
            if op.eval(actual, *count) {
                CommandResult::ok()
            } else {
                CommandResult::err(format!(
                    "member count assertion failed: {actual} {op:?} {count}"
                ))
            }
        }
        AssertCondition::Custom { expression } => {
            // Custom expressions are not evaluated yet; record and pass.
            let mut result = CommandResult::ok();
            result.data.insert("expression".into(), expression.clone());
            result
        }
    }
}

View File

@@ -1,798 +0,0 @@
//! Multi-conversation state backed by SQLite (SQLCipher-encrypted when a
//! password is provided).
//!
//! Each conversation (DM or group) has its own MLS group blob, keystore blob,
//! member list, and message history.
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use anyhow::Context;
use argon2::{Algorithm, Argon2, Params, Version};
use rand::RngCore;
use rusqlite::{params, Connection, OptionalExtension};
use zeroize::Zeroizing;
// ── Types ────────────────────────────────────────────────────────────────────
/// 16-byte conversation identifier.
/// - DMs: the channel_id returned by `createChannel` (server-assigned UUID).
/// - Groups: SHA-256(group_name)[..16].
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ConversationId(pub [u8; 16]);
impl ConversationId {
    /// Build an ID from a byte slice; `None` unless it is exactly 16 bytes.
    pub fn from_slice(s: &[u8]) -> Option<Self> {
        <[u8; 16]>::try_from(s).ok().map(Self)
    }
    /// Derive a conversation ID from a group name via SHA-256 truncation.
    pub fn from_group_name(name: &str) -> Self {
        use sha2::{Digest, Sha256};
        let digest = Sha256::digest(name.as_bytes());
        let mut id = [0u8; 16];
        id.copy_from_slice(&digest[..16]);
        Self(id)
    }
    /// Hex-encode the raw 16 bytes.
    pub fn hex(&self) -> String {
        hex::encode(self.0)
    }
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConversationKind {
    /// 1:1 DM channel with a specific peer.
    Dm {
        /// Peer's identity key bytes.
        peer_key: Vec<u8>,
        /// Peer's username, when known.
        peer_username: Option<String>,
    },
    /// Named group with N members.
    Group { name: String },
}
/// A single DM or group conversation row as persisted in the `conversations`
/// table of the conversation database.
#[derive(Clone, Debug)]
pub struct Conversation {
    // 16-byte conversation identifier (primary key).
    pub id: ConversationId,
    // DM vs. group, with the kind-specific fields.
    pub kind: ConversationKind,
    // Human-readable name shown in the UI.
    pub display_name: String,
    /// Serialized MLS group (bincode).
    pub mls_group_blob: Option<Vec<u8>>,
    /// Serialized keystore (bincode HashMap).
    pub keystore_blob: Option<Vec<u8>>,
    /// Member identity keys (bincode Vec<Vec<u8>>).
    pub member_keys: Vec<Vec<u8>>,
    // Number of messages received since the conversation was last viewed.
    pub unread_count: u32,
    // Epoch milliseconds of the most recent activity.
    pub last_activity_ms: u64,
    // Epoch milliseconds when the conversation was created.
    pub created_at_ms: u64,
    /// Whether this conversation uses hybrid (X25519 + ML-KEM-768) MLS keys.
    pub is_hybrid: bool,
    /// Highest server-side delivery sequence number seen.
    pub last_seen_seq: u64,
}
/// A single message row as persisted in the `messages` table.
#[derive(Clone, Debug)]
pub struct StoredMessage {
    // Conversation this message belongs to.
    pub conversation_id: ConversationId,
    // Optional 16-byte message identifier (used for dedup, edits, deletes).
    pub message_id: Option<[u8; 16]>,
    // Sender's identity key bytes.
    pub sender_key: Vec<u8>,
    // Sender's display name, when known.
    pub sender_name: Option<String>,
    // Message text (set to "[deleted]" by `delete_message`).
    pub body: String,
    // Message type tag, e.g. "deleted" after deletion.
    pub msg_type: String,
    // Identifier of a referenced message (e.g. for reactions/edits) — TODO
    // confirm exact semantics against the senders of this field.
    pub ref_msg_id: Option<[u8; 16]>,
    // Epoch milliseconds of the message.
    pub timestamp_ms: u64,
    // True when this client sent the message.
    pub is_outgoing: bool,
}
// ── Key derivation (Argon2id, matching state.rs parameters) ─────────────────
// Argon2 memory cost; the argon2 crate takes this in KiB, so 19*1024 ≈ 19 MiB.
const ARGON2_M_COST: u32 = 19 * 1024;
// Argon2 iteration count.
const ARGON2_T_COST: u32 = 2;
// Argon2 parallelism lanes.
const ARGON2_P_COST: u32 = 1;
// Length of the random salt file in bytes.
const SALT_LEN: usize = 16;
/// Derive a 32-byte SQLCipher key from the user password and a random salt.
///
/// The key is wrapped in `Zeroizing` so it is wiped from memory on drop.
fn derive_convdb_key(password: &str, salt: &[u8]) -> anyhow::Result<Zeroizing<[u8; 32]>> {
    let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(32))
        .map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
    let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
    let mut key = Zeroizing::new([0u8; 32]);
    argon2
        .hash_password_into(password.as_bytes(), salt, &mut *key)
        .map_err(|e| anyhow::anyhow!("convdb key derivation: {e}"))?;
    Ok(key)
}
/// Read or create a 16-byte random salt at `salt_path` (mode 0o600).
fn get_or_create_salt(salt_path: &Path) -> anyhow::Result<Vec<u8>> {
if salt_path.exists() {
let bytes = std::fs::read(salt_path).context("read convdb salt")?;
anyhow::ensure!(bytes.len() == SALT_LEN, "invalid convdb salt length");
return Ok(bytes);
}
let mut salt = vec![0u8; SALT_LEN];
rand::rngs::OsRng.fill_bytes(&mut salt);
std::fs::write(salt_path, &salt).context("write convdb salt")?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
std::fs::set_permissions(salt_path, std::fs::Permissions::from_mode(0o600)).ok();
}
Ok(salt)
}
// ── ConversationStore ────────────────────────────────────────────────────────
/// SQLite-backed store for conversations, messages, and the offline outbox.
pub struct ConversationStore {
    // Open database handle (SQLCipher-keyed when opened with a password).
    conn: Connection,
}
impl ConversationStore {
/// Open or create the conversation database at `db_path`.
/// If `password` is `Some`, the database is encrypted with SQLCipher using
/// an Argon2id-derived key. Existing unencrypted databases are migrated
/// transparently.
pub fn open(db_path: &Path, password: Option<&str>) -> anyhow::Result<Self> {
if let Some(parent) = db_path.parent() {
std::fs::create_dir_all(parent).ok();
}
match password {
Some(pw) => Self::open_encrypted(db_path, pw),
None => Self::open_plain(db_path),
}
}
fn open_plain(db_path: &Path) -> anyhow::Result<Self> {
let conn = Connection::open(db_path).context("open conversation db")?;
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
.context("set pragmas")?;
Self::migrate(&conn)?;
Ok(Self { conn })
}
fn open_encrypted(db_path: &Path, password: &str) -> anyhow::Result<Self> {
let salt_path = PathBuf::from(format!("{}-salt", db_path.display()));
let already_encrypted = salt_path.exists();
// Migrate an existing unencrypted database before opening with encryption.
if db_path.exists() && !already_encrypted {
Self::migrate_plain_to_encrypted(db_path, &salt_path, password)?;
// After migration, salt file exists and DB is encrypted — fall through.
}
let salt = get_or_create_salt(&salt_path)?;
let key = derive_convdb_key(password, &salt)?;
#[allow(clippy::needless_borrows_for_generic_args)]
let hex_key = Zeroizing::new(hex::encode(&*key));
let conn = Connection::open(db_path).context("open conversation db")?;
conn.pragma_update(None, "key", format!("x'{}'", &*hex_key))
.context("set SQLCipher key")?;
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
.context("set pragmas")?;
Self::migrate(&conn)?;
Ok(Self { conn })
}
/// Migrate an unencrypted `.convdb` to an encrypted one in-place.
fn migrate_plain_to_encrypted(
db_path: &Path,
salt_path: &Path,
password: &str,
) -> anyhow::Result<()> {
let salt = get_or_create_salt(salt_path)?;
let key = derive_convdb_key(password, &salt)?;
#[allow(clippy::needless_borrows_for_generic_args)]
let hex_key = Zeroizing::new(hex::encode(&*key));
let enc_path = db_path.with_extension("convdb-enc");
// Open the existing plaintext database.
let plain = Connection::open(db_path).context("open plain convdb for migration")?;
plain.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;").ok();
// Attach a new encrypted database and export into it.
// Sanitize the path to prevent SQL injection (ATTACH does not support parameterized paths).
let enc_path_str = enc_path.display().to_string();
anyhow::ensure!(
!enc_path_str.contains('\''),
"database path must not contain single quotes: {enc_path_str}"
);
plain
.execute_batch(&format!(
"ATTACH DATABASE '{enc_path_str}' AS encrypted KEY \"x'{}'\";",
&*hex_key
))
.context("attach encrypted db for migration")?;
plain
.execute_batch("SELECT sqlcipher_export('encrypted');")
.context("sqlcipher_export to encrypted db")?;
plain
.execute_batch("DETACH DATABASE encrypted;")
.context("detach encrypted db")?;
drop(plain);
// Swap files: encrypted → original.
std::fs::rename(&enc_path, db_path).context("replace convdb with encrypted version")?;
// Clean up WAL/SHM left from the plaintext open.
let wal = PathBuf::from(format!("{}-wal", db_path.display()));
let shm = PathBuf::from(format!("{}-shm", db_path.display()));
std::fs::remove_file(&wal).ok();
std::fs::remove_file(&shm).ok();
tracing::info!("migrated conversation database to encrypted storage");
Ok(())
}
fn migrate(conn: &Connection) -> anyhow::Result<()> {
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS conversations (
id BLOB PRIMARY KEY,
kind TEXT NOT NULL,
display_name TEXT NOT NULL,
peer_key BLOB,
peer_username TEXT,
group_name TEXT,
mls_group_blob BLOB,
keystore_blob BLOB,
member_keys BLOB,
unread_count INTEGER NOT NULL DEFAULT 0,
last_activity_ms INTEGER NOT NULL DEFAULT 0,
created_at_ms INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
conversation_id BLOB NOT NULL REFERENCES conversations(id),
message_id BLOB,
sender_key BLOB NOT NULL,
sender_name TEXT,
body TEXT NOT NULL,
msg_type TEXT NOT NULL,
ref_msg_id BLOB,
timestamp_ms INTEGER NOT NULL,
is_outgoing INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX IF NOT EXISTS idx_messages_conv
ON messages(conversation_id, timestamp_ms);
CREATE TABLE IF NOT EXISTS outbox (
id INTEGER PRIMARY KEY AUTOINCREMENT,
conversation_id BLOB NOT NULL,
recipient_key BLOB NOT NULL,
payload BLOB NOT NULL,
created_at_ms INTEGER NOT NULL,
retry_count INTEGER NOT NULL DEFAULT 0,
status TEXT NOT NULL DEFAULT 'pending'
);
CREATE INDEX IF NOT EXISTS idx_outbox_status
ON outbox(status, created_at_ms);",
)
.context("migrate conversation db")?;
// Additive migrations for new columns (safe to re-run; errors ignored if column already exists).
conn.execute_batch("ALTER TABLE conversations ADD COLUMN is_hybrid INTEGER NOT NULL DEFAULT 0;").ok();
conn.execute_batch("ALTER TABLE conversations ADD COLUMN last_seen_seq INTEGER NOT NULL DEFAULT 0;").ok();
Ok(())
}
// ── Conversation CRUD ────────────────────────────────────────────────
pub fn save_conversation(&self, conv: &Conversation) -> anyhow::Result<()> {
let (kind_str, peer_key, peer_username, group_name) = match &conv.kind {
ConversationKind::Dm {
peer_key,
peer_username,
} => ("dm", Some(peer_key.as_slice()), peer_username.as_deref(), None),
ConversationKind::Group { name } => ("group", None, None, Some(name.as_str())),
};
let member_keys_blob = bincode::serialize(&conv.member_keys)
.context("serialize member_keys")?;
self.conn.execute(
"INSERT INTO conversations
(id, kind, display_name, peer_key, peer_username, group_name,
mls_group_blob, keystore_blob, member_keys, unread_count,
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
ON CONFLICT(id) DO UPDATE SET
display_name = excluded.display_name,
mls_group_blob = excluded.mls_group_blob,
keystore_blob = excluded.keystore_blob,
member_keys = excluded.member_keys,
unread_count = excluded.unread_count,
last_activity_ms = excluded.last_activity_ms,
is_hybrid = excluded.is_hybrid,
last_seen_seq = excluded.last_seen_seq",
params![
conv.id.0.as_slice(),
kind_str,
conv.display_name,
peer_key,
peer_username,
group_name,
conv.mls_group_blob,
conv.keystore_blob,
member_keys_blob,
conv.unread_count,
conv.last_activity_ms,
conv.created_at_ms,
conv.is_hybrid as i32,
conv.last_seen_seq as i64,
],
)?;
Ok(())
}
pub fn load_conversation(&self, id: &ConversationId) -> anyhow::Result<Option<Conversation>> {
self.conn
.query_row(
"SELECT kind, display_name, peer_key, peer_username, group_name,
mls_group_blob, keystore_blob, member_keys, unread_count,
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
FROM conversations WHERE id = ?1",
params![id.0.as_slice()],
|row| {
let kind_str: String = row.get(0)?;
let display_name: String = row.get(1)?;
let peer_key: Option<Vec<u8>> = row.get(2)?;
let peer_username: Option<String> = row.get(3)?;
let group_name: Option<String> = row.get(4)?;
let mls_group_blob: Option<Vec<u8>> = row.get(5)?;
let keystore_blob: Option<Vec<u8>> = row.get(6)?;
let member_keys_blob: Option<Vec<u8>> = row.get(7)?;
let unread_count: u32 = row.get(8)?;
let last_activity_ms: u64 = row.get(9)?;
let created_at_ms: u64 = row.get(10)?;
let is_hybrid_int: i32 = row.get(11)?;
let last_seen_seq: i64 = row.get(12)?;
let kind = if kind_str == "dm" {
ConversationKind::Dm {
peer_key: peer_key.unwrap_or_default(),
peer_username,
}
} else {
ConversationKind::Group {
name: group_name.unwrap_or_default(),
}
};
let member_keys: Vec<Vec<u8>> = member_keys_blob
.and_then(|b| match bincode::deserialize(&b) {
Ok(v) => Some(v),
Err(e) => {
tracing::warn!(conv = %hex::encode(id.0), "bincode deserialize member_keys failed: {e}");
None
}
})
.unwrap_or_default();
Ok(Conversation {
id: id.clone(),
kind,
display_name,
mls_group_blob,
keystore_blob,
member_keys,
unread_count,
last_activity_ms,
created_at_ms,
is_hybrid: is_hybrid_int != 0,
last_seen_seq: last_seen_seq as u64,
})
},
)
.optional()
.context("load conversation")
}
pub fn list_conversations(&self) -> anyhow::Result<Vec<Conversation>> {
let mut stmt = self.conn.prepare(
"SELECT id, kind, display_name, peer_key, peer_username, group_name,
mls_group_blob, keystore_blob, member_keys, unread_count,
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
FROM conversations ORDER BY last_activity_ms DESC",
)?;
let rows = stmt.query_map([], |row| {
let id_blob: Vec<u8> = row.get(0)?;
let kind_str: String = row.get(1)?;
let display_name: String = row.get(2)?;
let peer_key: Option<Vec<u8>> = row.get(3)?;
let peer_username: Option<String> = row.get(4)?;
let group_name: Option<String> = row.get(5)?;
let mls_group_blob: Option<Vec<u8>> = row.get(6)?;
let keystore_blob: Option<Vec<u8>> = row.get(7)?;
let member_keys_blob: Option<Vec<u8>> = row.get(8)?;
let unread_count: u32 = row.get(9)?;
let last_activity_ms: u64 = row.get(10)?;
let created_at_ms: u64 = row.get(11)?;
let is_hybrid_int: i32 = row.get(12)?;
let last_seen_seq: i64 = row.get(13)?;
let id = ConversationId::from_slice(&id_blob).unwrap_or(ConversationId([0; 16]));
let kind = if kind_str == "dm" {
ConversationKind::Dm {
peer_key: peer_key.unwrap_or_default(),
peer_username,
}
} else {
ConversationKind::Group {
name: group_name.unwrap_or_default(),
}
};
let member_keys: Vec<Vec<u8>> = member_keys_blob
.and_then(|b| match bincode::deserialize(&b) {
Ok(v) => Some(v),
Err(e) => {
tracing::warn!(conv = %hex::encode(&id_blob), "bincode deserialize member_keys failed: {e}");
None
}
})
.unwrap_or_default();
Ok(Conversation {
id,
kind,
display_name,
mls_group_blob,
keystore_blob,
member_keys,
unread_count,
last_activity_ms,
created_at_ms,
is_hybrid: is_hybrid_int != 0,
last_seen_seq: last_seen_seq as u64,
})
})?;
let mut convs = Vec::new();
for row in rows {
convs.push(row?);
}
Ok(convs)
}
/// Find a DM conversation by the peer's identity key.
pub fn find_dm_by_peer(&self, peer_key: &[u8]) -> anyhow::Result<Option<Conversation>> {
let id_blob: Option<Vec<u8>> = self
.conn
.query_row(
"SELECT id FROM conversations WHERE kind = 'dm' AND peer_key = ?1",
params![peer_key],
|row| row.get(0),
)
.optional()?;
match id_blob {
Some(blob) => {
let id = ConversationId::from_slice(&blob)
.context("invalid conversation id in db")?;
self.load_conversation(&id)
}
None => Ok(None),
}
}
/// Find a group conversation by name.
pub fn find_group_by_name(&self, name: &str) -> anyhow::Result<Option<Conversation>> {
let id_blob: Option<Vec<u8>> = self
.conn
.query_row(
"SELECT id FROM conversations WHERE kind = 'group' AND group_name = ?1",
params![name],
|row| row.get(0),
)
.optional()?;
match id_blob {
Some(blob) => {
let id = ConversationId::from_slice(&blob)
.context("invalid conversation id in db")?;
self.load_conversation(&id)
}
None => Ok(None),
}
}
pub fn increment_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
self.conn.execute(
"UPDATE conversations SET unread_count = unread_count + 1 WHERE id = ?1",
params![id.0.as_slice()],
)?;
Ok(())
}
pub fn reset_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
self.conn.execute(
"UPDATE conversations SET unread_count = 0 WHERE id = ?1",
params![id.0.as_slice()],
)?;
Ok(())
}
pub fn update_activity(&self, id: &ConversationId, ts_ms: u64) -> anyhow::Result<()> {
self.conn.execute(
"UPDATE conversations SET last_activity_ms = ?2 WHERE id = ?1 AND last_activity_ms < ?2",
params![id.0.as_slice(), ts_ms],
)?;
Ok(())
}
// ── Message CRUD ─────────────────────────────────────────────────────
pub fn save_message(&self, msg: &StoredMessage) -> anyhow::Result<()> {
self.conn.execute(
"INSERT INTO messages
(conversation_id, message_id, sender_key, sender_name, body,
msg_type, ref_msg_id, timestamp_ms, is_outgoing)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
params![
msg.conversation_id.0.as_slice(),
msg.message_id.as_ref().map(|id| id.as_slice()),
msg.sender_key,
msg.sender_name,
msg.body,
msg.msg_type,
msg.ref_msg_id.as_ref().map(|id| id.as_slice()),
msg.timestamp_ms,
msg.is_outgoing as i32,
],
)?;
Ok(())
}
pub fn load_recent_messages(
&self,
conv_id: &ConversationId,
limit: usize,
) -> anyhow::Result<Vec<StoredMessage>> {
let mut stmt = self.conn.prepare(
"SELECT message_id, sender_key, sender_name, body, msg_type,
ref_msg_id, timestamp_ms, is_outgoing
FROM messages
WHERE conversation_id = ?1
ORDER BY timestamp_ms DESC
LIMIT ?2",
)?;
let rows = stmt.query_map(params![conv_id.0.as_slice(), limit.min(u32::MAX as usize) as u32], |row| {
let message_id: Option<Vec<u8>> = row.get(0)?;
let sender_key: Vec<u8> = row.get(1)?;
let sender_name: Option<String> = row.get(2)?;
let body: String = row.get(3)?;
let msg_type: String = row.get(4)?;
let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
let timestamp_ms: u64 = row.get(6)?;
let is_outgoing: i32 = row.get(7)?;
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
if v.len() == 16 {
let mut buf = [0u8; 16];
buf.copy_from_slice(v);
Some(buf)
} else {
None
}
}
Ok(StoredMessage {
conversation_id: conv_id.clone(),
message_id: message_id.as_deref().and_then(to_16),
sender_key,
sender_name,
body,
msg_type,
ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
timestamp_ms,
is_outgoing: is_outgoing != 0,
})
})?;
let mut msgs = Vec::new();
for row in rows {
msgs.push(row?);
}
// Reverse so oldest first
msgs.reverse();
Ok(msgs)
}
/// Load all messages for a conversation, oldest first (no limit).
pub fn load_all_messages(&self, conv_id: &ConversationId) -> anyhow::Result<Vec<StoredMessage>> {
let mut stmt = self.conn.prepare(
"SELECT message_id, sender_key, sender_name, body, msg_type,
ref_msg_id, timestamp_ms, is_outgoing
FROM messages
WHERE conversation_id = ?1
ORDER BY timestamp_ms ASC, id ASC",
)?;
let rows = stmt.query_map(params![conv_id.0.as_slice()], |row| {
let message_id: Option<Vec<u8>> = row.get(0)?;
let sender_key: Vec<u8> = row.get(1)?;
let sender_name: Option<String> = row.get(2)?;
let body: String = row.get(3)?;
let msg_type: String = row.get(4)?;
let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
let timestamp_ms: u64 = row.get(6)?;
let is_outgoing: i32 = row.get(7)?;
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
if v.len() == 16 {
let mut buf = [0u8; 16];
buf.copy_from_slice(v);
Some(buf)
} else {
None
}
}
Ok(StoredMessage {
conversation_id: conv_id.clone(),
message_id: message_id.as_deref().and_then(to_16),
sender_key,
sender_name,
body,
msg_type,
ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
timestamp_ms,
is_outgoing: is_outgoing != 0,
})
})?;
let mut msgs = Vec::new();
for row in rows {
msgs.push(row?);
}
Ok(msgs)
}
/// Update the body of an existing message (for edits).
pub fn update_message_body(
&self,
conv_id: &ConversationId,
message_id: &[u8; 16],
new_body: &str,
) -> anyhow::Result<bool> {
let rows = self.conn.execute(
"UPDATE messages SET body = ?3 WHERE conversation_id = ?1 AND message_id = ?2",
params![conv_id.0.as_slice(), message_id.as_slice(), new_body],
)?;
Ok(rows > 0)
}
/// Mark a message as deleted (sets body to "[deleted]" and msg_type to "deleted").
pub fn delete_message(
&self,
conv_id: &ConversationId,
message_id: &[u8; 16],
) -> anyhow::Result<bool> {
let rows = self.conn.execute(
"UPDATE messages SET body = '[deleted]', msg_type = 'deleted' WHERE conversation_id = ?1 AND message_id = ?2",
params![conv_id.0.as_slice(), message_id.as_slice()],
)?;
Ok(rows > 0)
}
/// Save a message, deduplicating by message_id within the same conversation.
/// Returns `true` if the message was saved (new), `false` if it was a duplicate.
pub fn save_message_dedup(&self, msg: &StoredMessage) -> anyhow::Result<bool> {
if let Some(ref mid) = msg.message_id {
let exists: bool = self.conn.query_row(
"SELECT EXISTS(SELECT 1 FROM messages WHERE message_id = ?1 AND conversation_id = ?2)",
params![mid.as_slice(), msg.conversation_id.0.as_slice()],
|row| row.get(0),
)?;
if exists {
return Ok(false);
}
}
self.save_message(msg)?;
Ok(true)
}
// ── Sequence tracking ──────────────────────────────────────────────
pub fn update_last_seen_seq(&self, id: &ConversationId, seq: u64) -> anyhow::Result<()> {
self.conn.execute(
"UPDATE conversations SET last_seen_seq = ?2 WHERE id = ?1 AND last_seen_seq < ?2",
params![id.0.as_slice(), seq as i64],
)?;
Ok(())
}
// ── Outbox (offline queue) ────────────────────────────────────────
pub fn enqueue_outbox(
&self,
conv_id: &ConversationId,
recipient_key: &[u8],
payload: &[u8],
) -> anyhow::Result<()> {
self.conn.execute(
"INSERT INTO outbox (conversation_id, recipient_key, payload, created_at_ms)
VALUES (?1, ?2, ?3, ?4)",
params![conv_id.0.as_slice(), recipient_key, payload, now_ms() as i64],
)?;
Ok(())
}
/// Load every outbox row still awaiting delivery, oldest first.
///
/// Rows with status `'pending'` are returned in `created_at_ms` order so
/// retries preserve the original send order.
///
/// # Errors
/// Fails if the statement cannot be prepared or any row fails to map; the
/// first row-level error aborts the whole load.
pub fn load_pending_outbox(&self) -> anyhow::Result<Vec<OutboxEntry>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, conversation_id, recipient_key, payload, retry_count
         FROM outbox WHERE status = 'pending' ORDER BY created_at_ms",
    )?;
    let rows = stmt.query_map([], |row| {
        let id: i64 = row.get(0)?;
        let conv_blob: Vec<u8> = row.get(1)?;
        let recipient_key: Vec<u8> = row.get(2)?;
        let payload: Vec<u8> = row.get(3)?;
        let retry_count: u32 = row.get(4)?;
        Ok(OutboxEntry {
            id,
            // NOTE(review): a corrupt/short conversation blob is silently
            // mapped to the all-zero id here; consider surfacing an error
            // instead — confirm callers tolerate the zero id first.
            conversation_id: ConversationId::from_slice(&conv_blob)
                .unwrap_or(ConversationId([0; 16])),
            recipient_key,
            payload,
            retry_count,
        })
    })?;
    // Collect fallibly: short-circuits on the first row error instead of
    // the manual push loop the original used.
    Ok(rows.collect::<Result<Vec<_>, _>>()?)
}
/// Mark an outbox row as successfully delivered.
pub fn mark_outbox_sent(&self, id: i64) -> anyhow::Result<()> {
    let sql = "UPDATE outbox SET status = 'sent' WHERE id = ?1";
    self.conn.execute(sql, params![id])?;
    Ok(())
}
/// Record a failed delivery attempt for an outbox row.
///
/// The row stays `'pending'` (eligible for retry) until the retry count
/// exceeds 5, after which it is parked as `'failed'`.
pub fn mark_outbox_failed(&self, id: i64, retry_count: u32) -> anyhow::Result<()> {
    // Give up after more than 5 attempts; otherwise keep the row retryable.
    let status = if retry_count > 5 { "failed" } else { "pending" };
    self.conn.execute(
        "UPDATE outbox SET retry_count = ?2, status = ?3 WHERE id = ?1",
        params![id, retry_count, status],
    )?;
    Ok(())
}
/// Delete messages older than `cutoff_ms` (epoch milliseconds) across all conversations.
///
/// Returns the number of rows removed.
pub fn delete_messages_before(&self, cutoff_ms: u64) -> anyhow::Result<usize> {
    let removed = self.conn.execute(
        "DELETE FROM messages WHERE timestamp_ms < ?1",
        params![cutoff_ms as i64],
    )?;
    Ok(removed)
}
}
/// An entry in the offline outbox queue.
#[derive(Clone, Debug)]
pub struct OutboxEntry {
    /// Database row id (`outbox.id`).
    pub id: i64,
    /// Conversation the payload belongs to.
    pub conversation_id: ConversationId,
    /// Key identifying the recipient (as stored in `outbox.recipient_key`).
    pub recipient_key: Vec<u8>,
    /// Opaque payload bytes to deliver.
    pub payload: Vec<u8>,
    /// Number of delivery attempts made so far.
    pub retry_count: u32,
}
/// Current wall-clock time as milliseconds since the Unix epoch.
///
/// Clamps to 0 if the system clock reports a time before the epoch.
pub fn now_ms() -> u64 {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    since_epoch.as_millis() as u64
}

View File

@@ -1,82 +0,0 @@
//! Terminal display helpers for the REPL.
use super::conversation::StoredMessage;
use super::session::SessionState;
// ANSI color codes used by all display helpers below.
const RESET: &str = "\x1b[0m"; // reset all attributes
const BOLD: &str = "\x1b[1m";
const DIM: &str = "\x1b[2m";
const GREEN: &str = "\x1b[32m"; // outgoing messages
const CYAN: &str = "\x1b[36m"; // incoming sender names
const YELLOW: &str = "\x1b[33m"; // unread badges and errors
/// Print the REPL prompt showing the active conversation and unread count.
///
/// With no active conversation the prompt reads `[no conversation]`; a
/// yellow `N unread` badge is added only when there are unread messages.
pub fn print_prompt(session: &SessionState) {
    use std::io::Write;
    let name = session
        .active_display_name()
        .unwrap_or_else(|| "no conversation".into());
    let unread = session.total_unread();
    let badge = if unread > 0 {
        format!(" {YELLOW}{unread} unread{RESET}")
    } else {
        String::new()
    };
    print!("{DIM}[{RESET}{BOLD}{name}{RESET}{badge}{DIM}]{RESET} > ");
    // Prompt has no trailing newline, so flush to make it visible now.
    let _ = std::io::stdout().flush();
}
/// Print an incoming or outgoing message.
///
/// Reactions are rendered as `reacted <body>`; other message types print
/// the raw body. Outgoing messages are green; incoming ones are prefixed
/// with the sender's name, or a short hex fingerprint of the sender key
/// when no name is known.
pub fn print_message(msg: &StoredMessage) {
    let body = if msg.msg_type == "reaction" {
        format!("reacted {}", msg.body)
    } else {
        msg.body.clone()
    };
    if msg.is_outgoing {
        println!("\r{GREEN}> {body}{RESET}");
    } else {
        // Use a checked slice for the 4-byte fingerprint so a malformed
        // (< 4 byte) sender key cannot panic — the original indexed
        // `[..4]` unconditionally.
        let key: &[u8] = &msg.sender_key;
        let fallback = hex::encode(key.get(..4).unwrap_or(key));
        let sender = msg.sender_name.as_deref().unwrap_or(&fallback);
        println!("\r{CYAN}{BOLD}[{sender}]{RESET} {body}");
    }
}
/// Print a message received in real-time (clears the current line first so
/// it does not interleave with a partially typed prompt).
pub fn print_incoming(sender: &str, body: &str) {
    use std::io::Write;
    // "\r" returns to column 0; "\x1b[2K" erases the whole line.
    print!("\r\x1b[2K{CYAN}{BOLD}[{sender}]{RESET} {body}\n");
    let _ = std::io::stdout().flush();
}
/// Print a system/status message in dim text.
pub fn print_status(msg: &str) {
    let line = format!("{DIM} {msg}{RESET}");
    println!("{line}");
}
/// Print a transient typing indicator (clears the current line first).
pub fn print_typing(sender: &str) {
    use std::io::Write;
    // Erase the in-progress line, then show the dim indicator.
    print!("\r\x1b[2K{DIM} {sender} is typing...{RESET}\n");
    let _ = std::io::stdout().flush();
}
/// Print an error message.
pub fn print_error(msg: &str) {
println!("{YELLOW} error: {msg}{RESET}");
}
/// Format a conversation list entry for `/list`.
///
/// Shows the bold display name, the kind and member count in dim brackets,
/// and — only when there are unread messages — a yellow `(N new)` badge.
pub fn format_conv_line(display_name: &str, kind: &str, unread: u32, members: usize) -> String {
    let badge = match unread {
        0 => String::new(),
        n => format!(" {YELLOW}({n} new){RESET}"),
    };
    format!(" {BOLD}{display_name}{RESET} {DIM}[{kind}, {members} members]{RESET}{badge}")
}

View File

@@ -1,7 +0,0 @@
/// Hex-encode arbitrary bytes into a lowercase hex string.
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
    hex::encode(bytes)
}
/// Decode a hex string into bytes, collapsing any parse failure into a
/// static `"invalid hex string"` error message.
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
    hex::decode(s).map_err(|_| "invalid hex string")
}

View File

@@ -1,148 +0,0 @@
//! mDNS-based peer discovery for Freifunk / community mesh deployments.
//!
//! Browse for `_quicprochat._udp.local.` services on the local network and
//! surface them as [`DiscoveredPeer`] structs. Servers announce themselves
//! automatically on startup; this module lets clients find them without manual
//! configuration.
//!
//! # Usage
//!
//! ```no_run
//! use quicprochat_client::client::mesh_discovery::MeshDiscovery;
//!
//! let disc = MeshDiscovery::start()?;
//! // Give mDNS time to collect announcements before reading.
//! std::thread::sleep(std::time::Duration::from_secs(2));
//! for peer in disc.peers() {
//! println!("found: {} at {}", peer.domain, peer.server_addr);
//! }
//! # Ok::<(), quicprochat_client::client::mesh_discovery::MeshDiscoveryError>(())
//! ```
#[cfg(feature = "mesh")]
use mdns_sd::{ServiceDaemon, ServiceEvent};
use std::net::SocketAddr;
#[cfg(feature = "mesh")]
use std::sync::{Arc, Mutex};
#[cfg(feature = "mesh")]
use std::collections::HashMap;
/// A qpc server discovered on the local network via mDNS.
///
/// Peers are keyed by `domain` in [`MeshDiscovery`]'s internal map, so a
/// re-announcement with the same domain replaces the previous entry.
#[derive(Debug, Clone)]
pub struct DiscoveredPeer {
    /// Federation domain of the remote server (e.g. `"node1.freifunk.net"`).
    pub domain: String,
    /// QUIC RPC address to connect to.
    pub server_addr: SocketAddr,
}
/// A running mDNS browse session.
///
/// Starts immediately on construction; drop to stop browsing.
pub struct MeshDiscovery {
    // Held only to keep the daemon alive for the lifetime of the session.
    #[cfg(feature = "mesh")]
    _daemon: ServiceDaemon,
    // Peer map shared with the background event thread, keyed by domain.
    #[cfg(feature = "mesh")]
    peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>>,
}
/// Errors produced when starting a [`MeshDiscovery`] session.
#[derive(thiserror::Error, Debug)]
pub enum MeshDiscoveryError {
    /// The mDNS daemon could not be created.
    #[error("mDNS daemon failed to start: {0}")]
    DaemonError(String),
    /// The browse request for the service type was rejected.
    #[error("mDNS browse failed: {0}")]
    BrowseError(String),
    /// Returned by [`MeshDiscovery::start`] when built without `--features mesh`.
    #[error("mesh feature not compiled (rebuild with --features mesh)")]
    FeatureDisabled,
}
impl MeshDiscovery {
    /// Start browsing for `_quicprochat._udp.local.` services.
    ///
    /// Returns immediately; peers are collected in the background.
    /// Returns [`MeshDiscoveryError::FeatureDisabled`] when built without the
    /// `mesh` feature.
    pub fn start() -> Result<Self, MeshDiscoveryError> {
        #[cfg(feature = "mesh")]
        {
            Self::start_inner()
        }
        #[cfg(not(feature = "mesh"))]
        {
            Err(MeshDiscoveryError::FeatureDisabled)
        }
    }
    /// Spin up the mDNS daemon, begin browsing, and spawn the background
    /// thread that folds service events into the shared peer map.
    #[cfg(feature = "mesh")]
    fn start_inner() -> Result<Self, MeshDiscoveryError> {
        let daemon = ServiceDaemon::new()
            .map_err(|e| MeshDiscoveryError::DaemonError(e.to_string()))?;
        let receiver = daemon
            .browse("_quicprochat._udp.local.")
            .map_err(|e| MeshDiscoveryError::BrowseError(e.to_string()))?;
        let peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>> =
            Arc::new(Mutex::new(HashMap::new()));
        let peers_bg = Arc::clone(&peers);
        // Process mDNS events in a background thread (ServiceDaemon is sync).
        std::thread::spawn(move || {
            for event in receiver {
                match event {
                    ServiceEvent::ServiceResolved(info) => {
                        // Extract the qpc server address from TXT records.
                        let server_addr_str = info
                            .get_property_val_str("server")
                            .map(|s| s.to_string());
                        // Prefer the announced federation domain; fall back
                        // to the mDNS fullname when the TXT record is absent.
                        let domain = info
                            .get_property_val_str("domain")
                            .map(|s| s.to_string())
                            .unwrap_or_else(|| info.get_fullname().to_string());
                        // Announcements without a parseable "server" address
                        // are silently ignored — they are unusable as peers.
                        if let Some(addr_str) = server_addr_str {
                            if let Ok(addr) = addr_str.parse::<SocketAddr>() {
                                let peer = DiscoveredPeer {
                                    domain: domain.clone(),
                                    server_addr: addr,
                                };
                                if let Ok(mut map) = peers_bg.lock() {
                                    map.insert(domain, peer);
                                }
                            }
                        }
                    }
                    ServiceEvent::ServiceRemoved(_ty, fullname) => {
                        // NOTE(review): substring matching on the fullname may
                        // remove more peers than intended when one domain is a
                        // substring of another — confirm against the mdns-sd
                        // fullname format.
                        if let Ok(mut map) = peers_bg.lock() {
                            map.retain(|_, p| {
                                !fullname.contains(&p.domain)
                            });
                        }
                    }
                    // Other events (SearchStarted, SearchStopped) are informational.
                    _ => {}
                }
            }
        });
        Ok(Self {
            _daemon: daemon,
            peers,
        })
    }
    /// Return a snapshot of all peers discovered so far.
    ///
    /// A poisoned lock yields an empty list rather than panicking; without
    /// the `mesh` feature this is always empty.
    pub fn peers(&self) -> Vec<DiscoveredPeer> {
        #[cfg(feature = "mesh")]
        {
            self.peers
                .lock()
                .map(|m| m.values().cloned().collect())
                .unwrap_or_default()
        }
        #[cfg(not(feature = "mesh"))]
        {
            vec![]
        }
    }
}

View File

@@ -1,868 +0,0 @@
//! YAML playbook parser and executor.
//!
//! Playbooks describe a sequence of client commands in YAML format.
//! They support variable substitution, assertions, loops, and per-step
//! error handling policies.
//!
//! ```yaml
//! name: "smoke test"
//! steps:
//! - command: dm
//! args: { username: "bob" }
//! - command: send
//! args: { text: "Hello from playbook" }
//! - command: assert
//! condition: message_count
//! op: gte
//! value: 1
//! ```
//!
//! Requires the `playbook` cargo feature.
use std::collections::HashMap;
use std::path::Path;
use std::time::{Duration, Instant};
use anyhow::{Context, bail};
use quicprochat_proto::node_capnp::node_service;
use serde::{Deserialize, Serialize};
use super::command_engine::{AssertCondition, CmpOp, Command, CommandRegistry};
use super::session::SessionState;
// ── Playbook structs ────────────────────────────────────────────────────────
/// A parsed YAML playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Playbook {
    /// Human-readable playbook name (echoed in the report).
    pub name: String,
    /// Optional free-form description; not used during execution.
    #[serde(default)]
    pub description: Option<String>,
    /// Initial variables available for `$var` substitution in step args.
    #[serde(default)]
    pub variables: HashMap<String, String>,
    /// Steps executed in declaration order.
    pub steps: Vec<PlaybookStep>,
}
/// A single step in a playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookStep {
    /// Command name or alias (see `PlaybookRunner::step_to_command` for the
    /// full list of accepted names).
    pub command: String,
    /// Named arguments; string values undergo `$var` substitution.
    #[serde(default)]
    pub args: HashMap<String, serde_yaml::Value>,
    /// For assert steps: the condition name.
    #[serde(default)]
    pub condition: Option<String>,
    /// For assert steps: comparison operator.
    #[serde(default)]
    pub op: Option<String>,
    /// For assert steps: expected value.
    #[serde(default)]
    pub value: Option<serde_yaml::Value>,
    /// Capture the command output into this variable name.
    #[serde(default)]
    pub capture: Option<String>,
    /// Error handling policy for this step.
    #[serde(default)]
    pub on_error: OnError,
    /// Optional loop specification.
    #[serde(rename = "loop", default)]
    pub loop_spec: Option<LoopSpec>,
}
/// What to do when a step fails.
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum OnError {
    /// Abort the playbook; remaining steps are reported as skipped.
    #[default]
    Fail,
    /// Count the failing step as skipped and carry on.
    Skip,
    /// Count the failing step as failed but keep executing.
    Continue,
}
/// Loop specification for repeating a step.
///
/// The range is inclusive on both ends (`from..=to`); the current iteration
/// number is bound to the variable named by `var`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoopSpec {
    /// Variable name that receives the iteration counter.
    pub var: String,
    /// First iteration value (inclusive).
    pub from: usize,
    /// Last iteration value (inclusive).
    pub to: usize,
}
// ── Report structs ──────────────────────────────────────────────────────────
/// Summary of a playbook execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookReport {
    /// Name copied from the executed playbook.
    pub name: String,
    /// Number of steps visited (including skipped ones).
    pub total_steps: usize,
    /// Steps that executed and succeeded.
    pub passed: usize,
    /// Steps that executed and failed.
    pub failed: usize,
    /// Steps skipped (policy or prior abort).
    pub skipped: usize,
    /// Wall-clock duration of the whole run.
    pub duration: Duration,
    /// Per-step outcomes, in execution order.
    pub step_results: Vec<StepResult>,
}
impl PlaybookReport {
    /// True if all steps passed (no failures).
    ///
    /// Skipped steps do not count as failures.
    pub fn all_passed(&self) -> bool {
        self.failed == 0
    }
}
impl std::fmt::Display for PlaybookReport {
    /// Render a human-readable multi-line summary: name, totals, duration,
    /// then one `[i/total] command ... OK|FAIL (ms)` line per step result.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "Playbook: {}", self.name)?;
        writeln!(
            f,
            "Result: {} passed, {} failed, {} skipped ({} total)",
            self.passed, self.failed, self.skipped, self.total_steps,
        )?;
        writeln!(f, "Duration: {:.2}s", self.duration.as_secs_f64())?;
        for sr in &self.step_results {
            let status = if sr.success { "OK" } else { "FAIL" };
            write!(
                f,
                " [{}/{}] {} ... {} ({:.1}ms)",
                sr.step_index + 1,
                self.total_steps,
                sr.command,
                status,
                sr.duration.as_secs_f64() * 1000.0,
            )?;
            // Append the failure reason inline when present.
            if let Some(ref e) = sr.error {
                write!(f, " — {e}")?;
            }
            writeln!(f)?;
        }
        Ok(())
    }
}
/// Result of a single step execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StepResult {
    /// Zero-based position in the expanded step sequence.
    pub step_index: usize,
    /// The step's command name as written in the playbook.
    pub command: String,
    /// Whether the step succeeded.
    pub success: bool,
    /// Time spent executing this step.
    pub duration: Duration,
    /// Command output, when the command produced any.
    pub output: Option<String>,
    /// Failure reason, when `success` is false.
    pub error: Option<String>,
}
// ── PlaybookRunner ──────────────────────────────────────────────────────────
/// Executes a parsed `Playbook` step-by-step.
pub struct PlaybookRunner {
    // The parsed playbook being executed.
    playbook: Playbook,
    // Live variable table: seeded from `playbook.variables`, then mutated by
    // loops, `set-var` steps, and `capture` directives.
    vars: HashMap<String, String>,
}
impl PlaybookRunner {
/// Load a playbook from a YAML file.
pub fn from_file(path: &Path) -> anyhow::Result<Self> {
let content =
std::fs::read_to_string(path).with_context(|| format!("read {}", path.display()))?;
Self::from_str(&content)
}
    /// Parse a playbook from a YAML string.
    ///
    /// The variable table is seeded from the playbook's `variables` map.
    ///
    /// NOTE(review): this inherent `from_str` shadows the `std::str::FromStr`
    /// naming convention; consider implementing the trait instead — confirm
    /// no callers rely on the inherent method first.
    pub fn from_str(yaml: &str) -> anyhow::Result<Self> {
        let playbook: Playbook =
            serde_yaml::from_str(yaml).context("parse playbook YAML")?;
        let vars = playbook.variables.clone();
        Ok(Self { playbook, vars })
    }
/// Override or add variables before execution.
pub fn set_var(&mut self, name: impl Into<String>, value: impl Into<String>) {
self.vars.insert(name.into(), value.into());
}
/// Execute all steps, returning a report.
pub async fn run(
&mut self,
session: &mut SessionState,
client: &node_service::Client,
) -> PlaybookReport {
let start = Instant::now();
let total = self.expanded_step_count();
let mut results = Vec::new();
let mut passed = 0usize;
let mut failed = 0usize;
let mut skipped = 0usize;
let mut step_idx = 0usize;
let mut abort = false;
for step in &self.playbook.steps.clone() {
if abort {
skipped += 1;
results.push(StepResult {
step_index: step_idx,
command: step.command.clone(),
success: false,
duration: Duration::ZERO,
output: None,
error: Some("skipped (prior failure)".into()),
});
step_idx += 1;
continue;
}
if let Some(ref ls) = step.loop_spec {
for i in ls.from..=ls.to {
self.vars.insert(ls.var.clone(), i.to_string());
let sr = self.execute_step(step, step_idx, total, session, client).await;
if sr.success {
passed += 1;
} else {
failed += 1;
if step.on_error == OnError::Fail {
abort = true;
}
}
results.push(sr);
step_idx += 1;
if abort {
break;
}
}
} else {
let sr = self.execute_step(step, step_idx, total, session, client).await;
if sr.success {
passed += 1;
} else {
match step.on_error {
OnError::Fail => {
failed += 1;
abort = true;
}
OnError::Skip => skipped += 1,
OnError::Continue => failed += 1,
}
}
results.push(sr);
step_idx += 1;
}
}
PlaybookReport {
name: self.playbook.name.clone(),
total_steps: step_idx,
passed,
failed,
skipped,
duration: start.elapsed(),
step_results: results,
}
}
    /// Execute a single step.
    ///
    /// Resolves the YAML step into a typed `Command`, runs it through the
    /// `CommandRegistry`, and optionally captures its output into playbook
    /// variables. Failures are reported in the returned `StepResult`.
    async fn execute_step(
        &mut self,
        step: &PlaybookStep,
        index: usize,
        total: usize,
        session: &mut SessionState,
        client: &node_service::Client,
    ) -> StepResult {
        let t = Instant::now();
        // Argument resolution errors (missing args, bad types, unknown
        // command) fail the step without executing anything.
        let cmd = match self.step_to_command(step) {
            Ok(c) => c,
            Err(e) => {
                return StepResult {
                    step_index: index,
                    command: step.command.clone(),
                    success: false,
                    duration: t.elapsed(),
                    output: None,
                    error: Some(format!("{e:#}")),
                };
            }
        };
        // Progress goes to stderr, keeping stdout clean for command output.
        eprintln!(
            "[{}/{}] {} ...",
            index + 1,
            total,
            step.command,
        );
        let cr = CommandRegistry::execute(&cmd, session, client).await;
        // Capture output into variable if requested: the plain output under
        // `$name`, and each structured data field under `$name.key`.
        if let Some(ref var_name) = step.capture {
            if let Some(ref out) = cr.output {
                self.vars.insert(var_name.clone(), out.clone());
            }
            for (k, v) in &cr.data {
                self.vars.insert(format!("{var_name}.{k}"), v.clone());
            }
        }
        StepResult {
            step_index: index,
            command: step.command.clone(),
            success: cr.success,
            duration: t.elapsed(),
            output: cr.output,
            error: cr.error,
        }
    }
    /// Convert a YAML step into a typed `Command`.
    ///
    /// Accepts primary command names and their REPL aliases (e.g.
    /// `send`/`send-message`, `switch`/`sw`). Required arguments are
    /// resolved with `$var` substitution; unknown command names return an
    /// error.
    fn step_to_command(&self, step: &PlaybookStep) -> anyhow::Result<Command> {
        let cmd_name = step.command.as_str();
        match cmd_name {
            // ── Lifecycle commands ────────────────────────────────────────
            "connect" => Ok(Command::Connect {
                server: self.resolve_str(&step.args, "server")?,
                ca_cert: self.opt_str(&step.args, "ca_cert"),
                insecure: self.opt_bool(&step.args, "insecure"),
            }),
            "login" => Ok(Command::Login {
                username: self.resolve_str(&step.args, "username")?,
                password: self.resolve_str(&step.args, "password")?,
            }),
            "register" => Ok(Command::Register {
                username: self.resolve_str(&step.args, "username")?,
                password: self.resolve_str(&step.args, "password")?,
            }),
            "send" | "send-message" => Ok(Command::SendMessage {
                text: self.resolve_str(&step.args, "text")?,
            }),
            "wait" => Ok(Command::Wait {
                duration_ms: self.resolve_u64(&step.args, "duration_ms")?,
            }),
            "set-var" | "setvar" => Ok(Command::SetVar {
                name: self.resolve_str(&step.args, "name")?,
                value: self.resolve_str(&step.args, "value")?,
            }),
            "assert" => {
                let condition = self.build_assert_condition(step)?;
                Ok(Command::Assert { condition })
            }
            // ── Session / identity ───────────────────────────────────────
            "help" => Ok(Command::Help),
            "quit" | "exit" => Ok(Command::Quit),
            "whoami" => Ok(Command::Whoami),
            "list" | "ls" => Ok(Command::List),
            "switch" | "sw" => Ok(Command::Switch {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "dm" => Ok(Command::Dm {
                username: self.resolve_str(&step.args, "username")?,
            }),
            "create-group" | "cg" => Ok(Command::CreateGroup {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "invite" => Ok(Command::Invite {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "remove" | "kick" => Ok(Command::Remove {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "leave" => Ok(Command::Leave),
            "join" => Ok(Command::Join),
            "members" => Ok(Command::Members),
            "group-info" | "gi" => Ok(Command::GroupInfo),
            "rename" => Ok(Command::Rename {
                name: self.resolve_str(&step.args, "name")?,
            }),
            // `count` defaults to the REPL's usual 20 when omitted.
            "history" | "hist" => Ok(Command::History {
                count: self.opt_usize(&step.args, "count").unwrap_or(20),
            }),
            // ── Security / crypto ────────────────────────────────────────
            "verify" => Ok(Command::Verify {
                username: self.resolve_str(&step.args, "username")?,
            }),
            "update-key" | "rotate-key" => Ok(Command::UpdateKey),
            "typing" => Ok(Command::Typing),
            "typing-notify" => Ok(Command::TypingNotify {
                enabled: self.opt_bool(&step.args, "enabled"),
            }),
            "react" => Ok(Command::React {
                emoji: self.resolve_str(&step.args, "emoji")?,
                index: self.opt_usize(&step.args, "index"),
            }),
            "edit" => Ok(Command::Edit {
                index: self.resolve_usize(&step.args, "index")?,
                new_text: self.resolve_str(&step.args, "new_text")?,
            }),
            "delete" | "del" => Ok(Command::Delete {
                index: self.resolve_usize(&step.args, "index")?,
            }),
            "send-file" | "sf" => Ok(Command::SendFile {
                path: self.resolve_str(&step.args, "path")?,
            }),
            "download" | "dl" => Ok(Command::Download {
                index: self.resolve_usize(&step.args, "index")?,
            }),
            "delete-account" => Ok(Command::DeleteAccount),
            "disappear" => Ok(Command::Disappear {
                arg: self.opt_str(&step.args, "duration"),
            }),
            "privacy" => Ok(Command::Privacy {
                arg: self.opt_str(&step.args, "setting"),
            }),
            "verify-fs" => Ok(Command::VerifyFs),
            "rotate-all-keys" => Ok(Command::RotateAllKeys),
            "devices" => Ok(Command::Devices),
            "register-device" => Ok(Command::RegisterDevice {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "revoke-device" => Ok(Command::RevokeDevice {
                id_prefix: self.resolve_str(&step.args, "id_prefix")?,
            }),
            // ── Mesh ─────────────────────────────────────────────────────
            "mesh-peers" => Ok(Command::MeshPeers),
            "mesh-server" => Ok(Command::MeshServer {
                addr: self.resolve_str(&step.args, "addr")?,
            }),
            "mesh-send" => Ok(Command::MeshSend {
                peer_id: self.resolve_str(&step.args, "peer_id")?,
                message: self.resolve_str(&step.args, "message")?,
            }),
            "mesh-broadcast" => Ok(Command::MeshBroadcast {
                topic: self.resolve_str(&step.args, "topic")?,
                message: self.resolve_str(&step.args, "message")?,
            }),
            "mesh-subscribe" => Ok(Command::MeshSubscribe {
                topic: self.resolve_str(&step.args, "topic")?,
            }),
            "mesh-route" => Ok(Command::MeshRoute),
            "mesh-identity" | "mesh-id" => Ok(Command::MeshIdentity),
            "mesh-store" => Ok(Command::MeshStore),
            other => bail!("unknown command: {other}"),
        }
    }
    /// Build an `AssertCondition` from a playbook step.
    ///
    /// Known conditions: `connected`, `logged_in`, `in_conversation`,
    /// `message_count`, `member_count`. Anything else becomes a
    /// `Custom { expression }` condition. Count comparisons default to
    /// `gte` when no `op` is given.
    fn build_assert_condition(&self, step: &PlaybookStep) -> anyhow::Result<AssertCondition> {
        let cond = step
            .condition
            .as_deref()
            .context("assert step requires 'condition' field")?;
        match cond {
            "connected" => Ok(AssertCondition::Connected),
            "logged_in" => Ok(AssertCondition::LoggedIn),
            "in_conversation" => {
                // The conversation name may come from the `name` arg or,
                // as a fallback, a string `value` (both substituted).
                let name = self.resolve_str(&step.args, "name")
                    .or_else(|_| step.value.as_ref()
                        .and_then(|v| v.as_str())
                        .map(|s| self.substitute(s))
                        .context("assert in_conversation requires 'name' arg or 'value'"))?;
                Ok(AssertCondition::InConversation { name })
            }
            "message_count" => {
                let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
                let count = step
                    .value
                    .as_ref()
                    .and_then(|v| v.as_u64())
                    .context("message_count assert requires numeric 'value'")?
                    as usize;
                Ok(AssertCondition::MessageCount { op, count })
            }
            "member_count" => {
                let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
                let count = step
                    .value
                    .as_ref()
                    .and_then(|v| v.as_u64())
                    .context("member_count assert requires numeric 'value'")?
                    as usize;
                Ok(AssertCondition::MemberCount { op, count })
            }
            other => Ok(AssertCondition::Custom {
                expression: other.to_string(),
            }),
        }
    }
fn parse_cmp_op(&self, s: &str) -> anyhow::Result<CmpOp> {
match s {
"eq" | "==" => Ok(CmpOp::Eq),
"ne" | "!=" => Ok(CmpOp::Ne),
"gt" | ">" => Ok(CmpOp::Gt),
"lt" | "<" => Ok(CmpOp::Lt),
"gte" | ">=" => Ok(CmpOp::Gte),
"lte" | "<=" => Ok(CmpOp::Lte),
other => bail!("unknown comparison operator: {other}"),
}
}
    // ── Variable substitution helpers ────────────────────────────────────
    /// Substitute `$varname` and `${VAR:-default}` in a string.
    ///
    /// Braced form `${KEY}` resolves from playbook vars first, then the
    /// process environment, then the `:-` default (if given), else expands
    /// to nothing. Bare form `$key` resolves from playbook vars only and is
    /// left verbatim (`$key`) when unknown.
    fn substitute(&self, s: &str) -> String {
        let mut result = String::with_capacity(s.len());
        let mut chars = s.chars().peekable();
        while let Some(c) = chars.next() {
            if c == '$' {
                if chars.peek() == Some(&'{') {
                    chars.next(); // consume '{'
                    let mut key = String::new();
                    let mut default = None;
                    while let Some(&ch) = chars.peek() {
                        if ch == '}' {
                            chars.next();
                            break;
                        }
                        // `:-` starts the default value; everything up to
                        // the closing '}' is taken literally.
                        if ch == ':' && chars.clone().nth(1) == Some('-') {
                            chars.next(); // consume ':'
                            chars.next(); // consume '-'
                            let mut def = String::new();
                            while let Some(&dch) = chars.peek() {
                                if dch == '}' {
                                    chars.next();
                                    break;
                                }
                                def.push(dch);
                                chars.next();
                            }
                            default = Some(def);
                            break;
                        }
                        key.push(ch);
                        chars.next();
                    }
                    // Lookup order: playbook vars, then env, then default.
                    if let Some(val) = self.vars.get(&key) {
                        result.push_str(val);
                    } else if let Ok(val) = std::env::var(&key) {
                        result.push_str(&val);
                    } else if let Some(def) = default {
                        result.push_str(&def);
                    }
                } else {
                    // Bare `$key`: the identifier is any run of alphanumeric
                    // characters or underscores.
                    let mut key = String::new();
                    while let Some(&ch) = chars.peek() {
                        if ch.is_alphanumeric() || ch == '_' {
                            key.push(ch);
                            chars.next();
                        } else {
                            break;
                        }
                    }
                    if let Some(val) = self.vars.get(&key) {
                        result.push_str(val);
                    } else {
                        // Unknown bare variables are preserved verbatim.
                        result.push('$');
                        result.push_str(&key);
                    }
                }
            } else {
                result.push(c);
            }
        }
        result
    }
    /// Resolve a required string argument with variable substitution.
    ///
    /// Scalars are stringified (numbers/bools via `to_string`); any other
    /// YAML value falls back to its `Debug` representation.
    fn resolve_str(
        &self,
        args: &HashMap<String, serde_yaml::Value>,
        key: &str,
    ) -> anyhow::Result<String> {
        let val = args
            .get(key)
            .with_context(|| format!("missing required argument: {key}"))?;
        match val {
            // Only string values get `$var` substitution.
            serde_yaml::Value::String(s) => Ok(self.substitute(s)),
            serde_yaml::Value::Number(n) => Ok(n.to_string()),
            serde_yaml::Value::Bool(b) => Ok(b.to_string()),
            other => Ok(format!("{other:?}")),
        }
    }
/// Resolve an optional string argument.
fn opt_str(
&self,
args: &HashMap<String, serde_yaml::Value>,
key: &str,
) -> Option<String> {
args.get(key).map(|v| match v {
serde_yaml::Value::String(s) => self.substitute(s),
serde_yaml::Value::Number(n) => n.to_string(),
serde_yaml::Value::Bool(b) => b.to_string(),
other => format!("{other:?}"),
})
}
/// Resolve an optional bool argument (defaults to false).
fn opt_bool(
&self,
args: &HashMap<String, serde_yaml::Value>,
key: &str,
) -> bool {
args.get(key)
.and_then(|v| v.as_bool())
.unwrap_or(false)
}
/// Resolve a required usize argument.
fn resolve_usize(
&self,
args: &HashMap<String, serde_yaml::Value>,
key: &str,
) -> anyhow::Result<usize> {
let val = args
.get(key)
.with_context(|| format!("missing required argument: {key}"))?;
val.as_u64()
.map(|n| n as usize)
.with_context(|| format!("argument '{key}' must be a positive integer"))
}
/// Resolve a required u64 argument.
fn resolve_u64(
&self,
args: &HashMap<String, serde_yaml::Value>,
key: &str,
) -> anyhow::Result<u64> {
let val = args
.get(key)
.with_context(|| format!("missing required argument: {key}"))?;
val.as_u64()
.with_context(|| format!("argument '{key}' must be a positive integer"))
}
/// Resolve an optional usize argument.
fn opt_usize(
&self,
args: &HashMap<String, serde_yaml::Value>,
key: &str,
) -> Option<usize> {
args.get(key).and_then(|v| v.as_u64()).map(|n| n as usize)
}
/// Count total expanded steps (including loop iterations).
fn expanded_step_count(&self) -> usize {
self.playbook
.steps
.iter()
.map(|s| {
if let Some(ref ls) = s.loop_spec {
if ls.to >= ls.from {
ls.to - ls.from + 1
} else {
0
}
} else {
1
}
})
.sum()
}
}
// ── Tests ───────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn parse_minimal_playbook() {
        let yaml = r#"
name: "test"
steps:
  - command: whoami
  - command: list
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.name, "test");
        assert_eq!(runner.playbook.steps.len(), 2);
        assert_eq!(runner.playbook.steps[0].command, "whoami");
    }
    #[test]
    fn parse_playbook_with_variables() {
        // `variables` seeds the runner's substitution table verbatim.
        let yaml = r#"
name: "var test"
variables:
  user: alice
  server: "127.0.0.1:5001"
steps:
  - command: dm
    args:
      username: "$user"
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.vars["user"], "alice");
        assert_eq!(runner.vars["server"], "127.0.0.1:5001");
    }
    #[test]
    fn variable_substitution() {
        let mut vars = HashMap::new();
        vars.insert("name".to_string(), "alice".to_string());
        vars.insert("port".to_string(), "5001".to_string());
        // Build a runner directly so substitution is tested in isolation.
        let runner = PlaybookRunner {
            playbook: Playbook {
                name: "test".into(),
                description: None,
                variables: HashMap::new(),
                steps: vec![],
            },
            vars,
        };
        assert_eq!(runner.substitute("hello $name"), "hello alice");
        assert_eq!(runner.substitute("port=$port!"), "port=5001!");
        assert_eq!(runner.substitute("${name}@server"), "alice@server");
        // Unknown braced variables fall back to the `:-` default.
        assert_eq!(
            runner.substitute("${missing:-default}"),
            "default"
        );
        assert_eq!(runner.substitute("no vars here"), "no vars here");
    }
    #[test]
    fn step_to_command_mapping() {
        let yaml = r#"
name: "mapping test"
variables:
  user: bob
steps:
  - command: dm
    args:
      username: "$user"
  - command: send
    args:
      text: "hello"
  - command: history
    args:
      count: 10
  - command: wait
    args:
      duration_ms: 500
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd0 = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        assert!(matches!(cmd0, Command::Dm { username } if username == "bob"));
        let cmd1 = runner.step_to_command(&runner.playbook.steps[1]).unwrap();
        assert!(matches!(cmd1, Command::SendMessage { text } if text == "hello"));
        let cmd2 = runner.step_to_command(&runner.playbook.steps[2]).unwrap();
        assert!(matches!(cmd2, Command::History { count: 10 }));
        let cmd3 = runner.step_to_command(&runner.playbook.steps[3]).unwrap();
        assert!(matches!(cmd3, Command::Wait { duration_ms: 500 }));
    }
    #[test]
    fn parse_assert_step() {
        let yaml = r#"
name: "assert test"
steps:
  - command: assert
    condition: message_count
    op: gte
    value: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        match cmd {
            Command::Assert {
                condition: AssertCondition::MessageCount { op, count },
            } => {
                assert_eq!(op, CmpOp::Gte);
                assert_eq!(count, 5);
            }
            other => panic!("expected Assert MessageCount, got {other:?}"),
        }
    }
    #[test]
    fn parse_loop_spec() {
        // `loop: from 1 to 5` expands to five steps (inclusive bounds).
        let yaml = r#"
name: "loop test"
steps:
  - command: send
    args:
      text: "msg $i"
    loop:
      var: i
      from: 1
      to: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.expanded_step_count(), 5);
        let ls = runner.playbook.steps[0].loop_spec.as_ref().unwrap();
        assert_eq!(ls.var, "i");
        assert_eq!(ls.from, 1);
        assert_eq!(ls.to, 5);
    }
    #[test]
    fn on_error_defaults_to_fail() {
        let yaml = r#"
name: "error test"
steps:
  - command: whoami
  - command: list
    on_error: continue
  - command: quit
    on_error: skip
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.steps[0].on_error, OnError::Fail);
        assert_eq!(runner.playbook.steps[1].on_error, OnError::Continue);
        assert_eq!(runner.playbook.steps[2].on_error, OnError::Skip);
    }
    #[test]
    fn cmp_op_parsing() {
        // Both word and symbol spellings are accepted.
        let runner = PlaybookRunner::from_str("name: t\nsteps: []").unwrap();
        assert!(matches!(runner.parse_cmp_op("eq"), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("=="), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("gte"), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op(">="), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op("<"), Ok(CmpOp::Lt)));
        assert!(runner.parse_cmp_op("invalid").is_err());
    }
    #[test]
    fn report_display() {
        let report = PlaybookReport {
            name: "test".into(),
            total_steps: 3,
            passed: 2,
            failed: 1,
            skipped: 0,
            duration: Duration::from_millis(150),
            step_results: vec![
                StepResult {
                    step_index: 0,
                    command: "whoami".into(),
                    success: true,
                    duration: Duration::from_millis(10),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 1,
                    command: "dm".into(),
                    success: true,
                    duration: Duration::from_millis(50),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 2,
                    command: "assert".into(),
                    success: false,
                    duration: Duration::from_millis(1),
                    output: None,
                    error: Some("message count 0 < 1".into()),
                },
            ],
        };
        let s = format!("{report}");
        assert!(s.contains("2 passed, 1 failed"));
        assert!(s.contains("[3/3] assert ... FAIL"));
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,207 +0,0 @@
//! Retry with exponential backoff for transient RPC failures.
use std::future::Future;
use std::time::Duration;
use rand::Rng;
use tracing::warn;
/// Default maximum number of retry attempts (including the first try).
/// See [`retry_async`]'s `max_retries` parameter.
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// Default base delay in milliseconds for exponential backoff.
/// See [`retry_async`]'s `base_delay_ms` parameter.
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
///
/// `max_retries` counts total attempts (including the first); a value of 0
/// is clamped to 1 so the operation always runs at least once.
pub async fn retry_async<F, Fut, T, E, P>(
    op: F,
    max_retries: u32,
    base_delay_ms: u64,
    is_retriable: P,
) -> Result<T, E>
where
    F: Fn() -> Fut,
    Fut: Future<Output = Result<T, E>>,
    P: Fn(&E) -> bool,
{
    // Clamp: with max_retries == 0 the original loop body never ran and the
    // function fell through to an unreachable!() panic.
    let attempts = max_retries.max(1);
    for attempt in 0..attempts {
        match op().await {
            Ok(t) => return Ok(t),
            Err(e) => {
                // Give up on non-retriable errors or on the final attempt.
                if !is_retriable(&e) || attempt + 1 >= attempts {
                    return Err(e);
                }
                // Exponential backoff with up to +50% random jitter to
                // de-synchronize concurrent retriers. Saturating arithmetic
                // avoids debug-build overflow panics for large attempt counts.
                let delay_ms = base_delay_ms.saturating_mul(2u64.saturating_pow(attempt));
                let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
                let total_ms = delay_ms.saturating_add(jitter_ms);
                warn!(
                    attempt = attempt + 1,
                    max_retries,
                    delay_ms = total_ms,
                    "RPC failed, retrying after backoff"
                );
                tokio::time::sleep(Duration::from_millis(total_ms)).await;
            }
        }
    }
    // Every loop path either returned Ok or returned the final Err above,
    // so the `last_err` bookkeeping the original carried was dead code.
    unreachable!("retry_async: loop always returns")
}
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
/// When in doubt, returns `true` (retry).
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
    // Substrings that mark an error as permanent — never worth retrying.
    const AUTH_MARKERS: [&str; 7] = [
        "unauthorized",
        "auth failed",
        "access denied",
        "401",
        "forbidden",
        "403",
        "token",
    ];
    const BAD_REQUEST_MARKERS: [&str; 4] = [
        "bad request",
        "400",
        "invalid param",
        "fingerprint mismatch",
    ];
    // Match case-insensitively against the full error chain.
    let text = format!("{:#}", err).to_lowercase();
    let permanent = AUTH_MARKERS
        .iter()
        .chain(BAD_REQUEST_MARKERS.iter())
        .any(|marker| text.contains(marker));
    // Everything else (network, timeout, 5xx, unknown) is worth retrying.
    !permanent
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    //! Unit tests for the `retry_async` helper and the `anyhow_is_retriable`
    //! error classifier.
    use super::*;
    /// An operation that succeeds on the first call returns immediately,
    /// with no retries or backoff sleeps.
    #[tokio::test]
    async fn retry_success_first_attempt() {
        let result = retry_async(|| async { Ok::<_, String>(42) }, 3, 10, |_| true).await;
        assert_eq!(result.unwrap(), 42);
    }
    /// One transient failure followed by success: exactly two attempts total.
    #[tokio::test]
    async fn retry_succeeds_after_one_failure() {
        // Shared attempt counter; the closure fails only on its first call.
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    let n = c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    if n == 0 {
                        Err("transient failure".to_string())
                    } else {
                        Ok(99)
                    }
                }
            },
            3,
            1, // minimal delay for test speed
            |_| true,
        )
        .await;
        assert_eq!(result.unwrap(), 99);
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 2);
    }
    /// A non-retriable error must surface after a single attempt — no backoff.
    #[tokio::test]
    async fn retry_non_retriable_fails_immediately() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("permanent error")
                }
            },
            5,
            1,
            |_: &&str| false, // nothing is retriable
        )
        .await;
        assert!(result.is_err());
        // Exactly one attempt despite max_retries = 5.
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 1);
    }
    /// When every attempt fails with a retriable error, all `max_retries`
    /// attempts are consumed before the final error is returned.
    #[tokio::test]
    async fn retry_exhausts_all_attempts() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("still failing")
                }
            },
            3,
            1,
            |_| true,
        )
        .await;
        assert!(result.is_err());
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 3);
    }
    /// Spot-checks the substring-based classifier across all three categories.
    #[test]
    fn anyhow_is_retriable_classifications() {
        // Auth errors should NOT be retriable
        let auth_errors = [
            "unauthorized access",
            "HTTP 401 Unauthorized",
            "forbidden resource",
            "HTTP 403 Forbidden",
            "auth failed for user",
            "access denied",
            "invalid token",
        ];
        for msg in &auth_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }
        // Bad-request errors should NOT be retriable
        let bad_req_errors = [
            "bad request: missing field",
            "HTTP 400 Bad Request",
            "invalid param: username",
            "fingerprint mismatch",
        ];
        for msg in &bad_req_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }
        // Transient errors SHOULD be retriable
        let transient_errors = [
            "connection refused",
            "network timeout",
            "server error 500",
            "stream reset",
            "something unknown happened",
        ];
        for msg in &transient_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(anyhow_is_retriable(&err), "expected retriable: {msg}");
        }
    }
}

View File

@@ -1,978 +0,0 @@
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
use anyhow::Context;
use quinn::{ClientConfig, Endpoint};
use quinn_proto::crypto::rustls::QuicClientConfig;
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use quicprochat_core::HybridPublicKey;
use quicprochat_proto::node_capnp::{auth, node_service};
use crate::{AUTH_CONTEXT, INSECURE_SKIP_VERIFY};
use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES};
/// Cap'n Proto traversal limit (words). 4 Mi words = 32 MiB; bounds DoS from deeply nested or large messages.
/// Applied to the `ReaderOptions` of every RPC connection built by [`connect_node_opt`].
const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024;
/// A [`rustls::client::danger::ServerCertVerifier`] that accepts any certificate.
///
/// **Development only.** Using this in production disables all TLS guarantees.
#[derive(Debug)]
struct InsecureServerCertVerifier;
impl rustls::client::danger::ServerCertVerifier for InsecureServerCertVerifier {
    /// Unconditionally reports the server certificate as verified.
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        _now: UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }
    /// Accepts any TLS 1.2 handshake signature without checking it.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }
    /// Accepts any TLS 1.3 handshake signature without checking it.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }
    /// Advertises the ring provider's full scheme set so negotiation never
    /// fails on an unsupported signature scheme.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}
/// Establish a QUIC/TLS connection and return a `NodeService` client.
///
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
///
/// Reads [`INSECURE_SKIP_VERIFY`] to decide whether to bypass certificate
/// verification (set once at startup via [`crate::set_insecure_skip_verify`]).
pub async fn connect_node(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
) -> anyhow::Result<node_service::Client> {
    let insecure = INSECURE_SKIP_VERIFY.load(std::sync::atomic::Ordering::Relaxed);
    connect_node_opt(server, ca_cert, server_name, insecure).await
}
/// Like [`connect_node`] but with an explicit `insecure_skip_verify` toggle.
///
/// When `insecure_skip_verify` is `true`, certificate verification is disabled entirely.
/// This is intended for development and testing only.
pub async fn connect_node_opt(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    insecure_skip_verify: bool,
) -> anyhow::Result<node_service::Client> {
    let addr: SocketAddr = server
        .parse()
        .with_context(|| format!("server must be host:port, got {server}"))?;
    // Build the rustls config: either trust-anything (dev) or pin the provided CA root.
    let mut tls = if insecure_skip_verify {
        RustlsClientConfig::builder()
            .dangerous()
            .with_custom_certificate_verifier(Arc::new(InsecureServerCertVerifier))
            .with_no_client_auth()
    } else {
        let cert_bytes =
            std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
        let mut roots = RootCertStore::empty();
        roots
            .add(CertificateDer::from(cert_bytes))
            .context("add root cert")?;
        RustlsClientConfig::builder()
            .with_root_certificates(roots)
            .with_no_client_auth()
    };
    // The server speaks Cap'n Proto over QUIC; advertise that via ALPN.
    tls.alpn_protocols = vec![b"capnp".to_vec()];
    let crypto = QuicClientConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;
    // Bind an ephemeral local port on all interfaces.
    let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
    let mut endpoint = Endpoint::client(bind_addr)?;
    endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));
    let connection = endpoint
        .connect(addr, server_name)
        .context("quic connect init")?
        .await
        .context("quic connect failed")?;
    // A single bidirectional stream carries the whole RPC session.
    let (send, recv) = connection.open_bi().await.context("open bi stream")?;
    // Bound message size/nesting to limit memory use on hostile input.
    let mut reader_opts = capnp::message::ReaderOptions::new();
    reader_opts.traversal_limit_in_words(Some(CAPNP_TRAVERSAL_LIMIT_WORDS));
    let network = twoparty::VatNetwork::new(
        recv.compat(),
        send.compat_write(),
        Side::Client,
        reader_opts,
    );
    let mut rpc_system = RpcSystem::new(Box::new(network), None);
    let client: node_service::Client = rpc_system.bootstrap(Side::Server);
    // capnp-rpc futures are !Send, so the RPC pump must run on the LocalSet.
    tokio::task::spawn_local(rpc_system);
    Ok(client)
}
/// Copy the process-wide auth context (version, access token, device id) into
/// a request's `auth` struct.
///
/// Fails if `init_auth` has not populated [`AUTH_CONTEXT`] yet, or if the
/// context lock is poisoned.
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
    let guard = AUTH_CONTEXT
        .read()
        .map_err(|e| anyhow::anyhow!("AUTH_CONTEXT lock poisoned: {e}"))?;
    let ctx = match guard.as_ref() {
        Some(ctx) => ctx,
        None => {
            return Err(anyhow::anyhow!(
                "init_auth must be called before RPCs (use a bearer or session token for authenticated commands)"
            ))
        }
    };
    auth.set_version(ctx.version);
    auth.set_access_token(&ctx.access_token);
    auth.set_device_id(&ctx.device_id);
    Ok(())
}
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
///
/// The server answers with a SHA-256 fingerprint of what it stored; this is
/// compared against a locally computed hash of `package` to detect tampering.
pub async fn upload_key_package(
    client: &node_service::Client,
    identity_key: &[u8],
    package: &[u8],
) -> anyhow::Result<()> {
    let mut request = client.upload_key_package_request();
    {
        let mut params = request.get();
        params.set_identity_key(identity_key);
        params.set_package(package);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("upload_key_package RPC failed")?;
    let server_fp = response
        .get()
        .context("upload_key_package: bad response")?
        .get_fingerprint()
        .context("upload_key_package: missing fingerprint")?
        .to_vec();
    // The fingerprint must match our own hash of the uploaded bytes.
    let local_fp = super::state::sha256(package);
    if server_fp != local_fp {
        anyhow::bail!("fingerprint mismatch");
    }
    Ok(())
}
/// Fetch a KeyPackage for `identity_key` from the AS.
pub async fn fetch_key_package(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Vec<u8>> {
    let mut request = client.fetch_key_package_request();
    {
        let mut params = request.get();
        params.set_identity_key(identity_key);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("fetch_key_package RPC failed")?;
    // Copy the package bytes out of the capnp arena before the response drops.
    Ok(response
        .get()
        .context("fetch_key_package: bad response")?
        .get_package()
        .context("fetch_key_package: missing package field")?
        .to_vec())
}
/// Enqueue an opaque payload to the DS for `recipient_key`.
/// Returns the per-inbox sequence number assigned by the server.
/// Retries on transient failures with exponential backoff.
///
/// Convenience wrapper over [`enqueue_with_ttl`] with no expiry (TTL `None`).
pub async fn enqueue(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<u64> {
    enqueue_with_ttl(client, recipient_key, payload, None).await
}
/// Enqueue with an optional TTL (seconds). 0 or None means no expiry.
///
/// Retries via [`retry_async`] with the module's default backoff settings;
/// only errors classified retriable by [`anyhow_is_retriable`] are retried.
pub async fn enqueue_with_ttl(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
    ttl_secs: Option<u32>,
) -> anyhow::Result<u64> {
    // Owned copies so the retry closure can be invoked more than once.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            // Fresh clones per attempt — the async block takes ownership.
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    if let Some(ttl) = ttl_secs {
                        p.set_ttl_secs(ttl);
                    }
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                // The server assigns a per-inbox sequence number.
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
/// Fetch and drain all payloads for `recipient_key`.
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_all(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Owned copies so the retry closure can be invoked more than once.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("fetch RPC failed")?;
                let list = resp
                    .get()
                    .context("fetch: bad response")?
                    .get_payloads()
                    .context("fetch: missing payloads")?;
                // Copy each envelope out of the capnp arena into owned Vecs.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }
                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
/// Long-poll for payloads with optional timeout (ms).
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_wait(
    client: &node_service::Client,
    recipient_key: &[u8],
    timeout_ms: u64,
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Owned copies so the retry closure can be invoked more than once.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_wait_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_timeout_ms(timeout_ms);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("fetch_wait RPC failed")?;
                let list = resp
                    .get()
                    .context("fetch_wait: bad response")?
                    .get_payloads()
                    .context("fetch_wait: missing payloads")?;
                // Copy each envelope out of the capnp arena into owned Vecs.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch_wait: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }
                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
pub async fn upload_hybrid_key(
    client: &node_service::Client,
    identity_key: &[u8],
    hybrid_pk: &HybridPublicKey,
) -> anyhow::Result<()> {
    let mut request = client.upload_hybrid_key_request();
    {
        let mut params = request.get();
        params.set_identity_key(identity_key);
        params.set_hybrid_public_key(&hybrid_pk.to_bytes());
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    // The response body carries no data we need; success is enough.
    request
        .send()
        .promise
        .await
        .context("upload_hybrid_key RPC failed")?;
    Ok(())
}
/// Fetch a peer's hybrid public key from the server.
///
/// Returns `None` if the peer has not uploaded a hybrid key.
pub async fn fetch_hybrid_key(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Option<HybridPublicKey>> {
    let mut request = client.fetch_hybrid_key_request();
    {
        let mut params = request.get();
        params.set_identity_key(identity_key);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("fetch_hybrid_key RPC failed")?;
    let raw = response
        .get()
        .context("fetch_hybrid_key: bad response")?
        .get_hybrid_public_key()
        .context("fetch_hybrid_key: missing field")?
        .to_vec();
    // An empty field means the peer never uploaded a hybrid key.
    if raw.is_empty() {
        return Ok(None);
    }
    HybridPublicKey::from_bytes(&raw)
        .context("invalid hybrid public key")
        .map(Some)
}
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
pub fn try_hybrid_decrypt(
    hybrid_kp: Option<&quicprochat_core::HybridKeypair>,
    payload: &[u8],
) -> anyhow::Result<Vec<u8>> {
    match hybrid_kp {
        // Empty info/AAD strings match the encryption side.
        Some(kp) => quicprochat_core::hybrid_decrypt(kp, payload, b"", b"")
            .map_err(|e| anyhow::anyhow!("{e}")),
        None => Err(anyhow::anyhow!("hybrid key required for decryption")),
    }
}
/// Peek at queued payloads without removing them.
/// Returns `(seq, payload)` pairs sorted by seq.
/// Retries on transient failures with exponential backoff.
pub async fn peek(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Owned copies so the retry closure can be invoked more than once.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.peek_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // peek all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("peek RPC failed")?;
                let list = resp
                    .get()
                    .context("peek: bad response")?
                    .get_payloads()
                    .context("peek: missing payloads")?;
                // Copy each envelope out of the capnp arena into owned Vecs.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("peek: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }
                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
/// Acknowledge all messages up to and including `seq_up_to`.
/// Retries on transient failures with exponential backoff.
pub async fn ack(
    client: &node_service::Client,
    recipient_key: &[u8],
    seq_up_to: u64,
) -> anyhow::Result<()> {
    // Owned copies so the retry closure can be invoked more than once.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.ack_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_seq_up_to(seq_up_to);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                // The ack response carries no data we need.
                req.send().promise.await.context("ack RPC failed")?;
                Ok(())
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
/// Fetch multiple peers' hybrid keys in a single round-trip.
/// Returns `None` for peers who have not uploaded a hybrid key.
/// Retries on transient failures with exponential backoff.
///
/// The result is positionally aligned with `identity_keys`.
pub async fn fetch_hybrid_keys(
    client: &node_service::Client,
    identity_keys: &[&[u8]],
) -> anyhow::Result<Vec<Option<HybridPublicKey>>> {
    // Owned copies so the retry closure can be invoked more than once.
    let client = client.clone();
    let identity_keys: Vec<Vec<u8>> = identity_keys.iter().map(|k| k.to_vec()).collect();
    retry_async(
        || {
            let client = client.clone();
            let identity_keys = identity_keys.clone();
            async move {
                let mut req = client.fetch_hybrid_keys_request();
                {
                    let mut p = req.get();
                    let mut list = p.reborrow().init_identity_keys(identity_keys.len() as u32);
                    for (i, ik) in identity_keys.iter().enumerate() {
                        list.set(i as u32, ik);
                    }
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req
                    .send()
                    .promise
                    .await
                    .context("fetch_hybrid_keys RPC failed")?;
                let keys = resp
                    .get()
                    .context("fetch_hybrid_keys: bad response")?
                    .get_keys()
                    .context("fetch_hybrid_keys: missing keys")?;
                let mut result = Vec::with_capacity(keys.len() as usize);
                for i in 0..keys.len() {
                    let pk_bytes = keys
                        .get(i)
                        .context("fetch_hybrid_keys: key read failed")?
                        .to_vec();
                    // Empty bytes = peer has no hybrid key registered.
                    if pk_bytes.is_empty() {
                        result.push(None);
                    } else {
                        let pk = HybridPublicKey::from_bytes(&pk_bytes)
                            .context("invalid hybrid public key")?;
                        result.push(Some(pk));
                    }
                }
                Ok(result)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
/// Enqueue the same payload to multiple recipients in a single round-trip.
/// Returns per-recipient sequence numbers.
/// Retries on transient failures with exponential backoff.
///
/// The returned sequence numbers are positionally aligned with `recipient_keys`.
pub async fn batch_enqueue(
    client: &node_service::Client,
    recipient_keys: &[&[u8]],
    payload: &[u8],
) -> anyhow::Result<Vec<u64>> {
    // Owned copies so the retry closure can be invoked more than once.
    let client = client.clone();
    let recipient_keys: Vec<Vec<u8>> = recipient_keys.iter().map(|k| k.to_vec()).collect();
    let payload = payload.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_keys = recipient_keys.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.batch_enqueue_request();
                {
                    let mut p = req.get();
                    let mut list = p.reborrow().init_recipient_keys(recipient_keys.len() as u32);
                    for (i, rk) in recipient_keys.iter().enumerate() {
                        list.set(i as u32, rk);
                    }
                    p.set_payload(&payload);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req
                    .send()
                    .promise
                    .await
                    .context("batch_enqueue RPC failed")?;
                let seqs = resp
                    .get()
                    .context("batch_enqueue: bad response")?
                    .get_seqs()
                    .context("batch_enqueue: missing seqs")?;
                let mut result = Vec::with_capacity(seqs.len() as usize);
                for i in 0..seqs.len() {
                    result.push(seqs.get(i));
                }
                Ok(result)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
/// Resolve a username to its Ed25519 identity key (32 bytes).
///
/// When the server returns a non-empty `inclusionProof`, the client verifies it
/// against the identity key using the Key Transparency Merkle proof. Proof
/// verification failure is treated as a hard error (the server is misbehaving).
/// If the server sends no proof (empty field), the key is returned as-is —
/// callers can decide whether to require proofs for security-critical flows.
///
/// Returns `None` if the username is not registered.
pub async fn resolve_user(
    client: &node_service::Client,
    username: &str,
) -> anyhow::Result<Option<Vec<u8>>> {
    let mut request = client.resolve_user_request();
    {
        let mut params = request.get();
        params.set_username(username);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("resolve_user RPC failed")?;
    let reader = response.get().context("resolve_user: bad response")?;
    let identity_key = reader
        .get_identity_key()
        .context("resolve_user: missing identity_key field")?
        .to_vec();
    // An empty key means the username is not registered.
    if identity_key.is_empty() {
        return Ok(None);
    }
    let proof_bytes = reader
        .get_inclusion_proof()
        .context("resolve_user: missing inclusion_proof field")?
        .to_vec();
    if proof_bytes.is_empty() {
        // No proof supplied — hand the key back and let the caller decide
        // whether to require one.
        return Ok(Some(identity_key));
    }
    // Verify the KT inclusion proof; a bad proof is a hard failure.
    let proof = quicprochat_kt::InclusionProof::from_bytes(&proof_bytes)
        .context("resolve_user: inclusion proof deserialise failed")?;
    quicprochat_kt::verify_inclusion(&proof, username, &identity_key)
        .context("resolve_user: KT inclusion proof verification FAILED — possible key mislabelling")?;
    Ok(Some(identity_key))
}
/// Reverse lookup: resolve an identity key to the registered username.
/// Returns `None` if no username is associated with the key.
pub async fn resolve_identity(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Option<String>> {
    let mut request = client.resolve_identity_request();
    {
        let mut params = request.get();
        params.set_identity_key(identity_key);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("resolve_identity RPC failed")?;
    // Invalid UTF-8 in the response is treated the same as "no username".
    let name = response
        .get()
        .context("resolve_identity: bad response")?
        .get_username()
        .context("resolve_identity: missing field")?
        .to_str()
        .unwrap_or("")
        .to_string();
    if name.is_empty() {
        return Ok(None);
    }
    Ok(Some(name))
}
/// Create a 1:1 DM channel with a peer.
///
/// Returns `(channel_id, was_new)` where `channel_id` is the stable 16-byte identifier and
/// `was_new` is `true` iff this call created the channel for the first time. When `was_new` is
/// `false`, the channel already existed (created by the peer), and the caller should wait for
/// the peer's MLS Welcome to arrive via the background poller rather than creating a new MLS group.
pub async fn create_channel(
    client: &node_service::Client,
    peer_key: &[u8],
) -> anyhow::Result<(Vec<u8>, bool)> {
    let mut request = client.create_channel_request();
    {
        let mut params = request.get();
        params.set_peer_key(peer_key);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("create_channel RPC failed")?;
    let reader = response.get().context("create_channel: bad response")?;
    let was_new = reader.get_was_new();
    let channel_id = reader
        .get_channel_id()
        .context("create_channel: missing channel_id")?
        .to_vec();
    Ok((channel_id, was_new))
}
/// Upload a single chunk of a blob to the server.
///
/// `blob_hash` is the expected SHA-256 hash (32 bytes) of the complete blob.
/// Returns the `blob_id` once the server has received and verified the final chunk.
pub async fn upload_blob_chunk(
    client: &node_service::Client,
    blob_hash: &[u8],
    chunk: &[u8],
    offset: u64,
    total_size: u64,
    mime_type: &str,
) -> anyhow::Result<Vec<u8>> {
    let mut request = client.upload_blob_request();
    {
        let mut params = request.get();
        params.set_blob_hash(blob_hash);
        params.set_chunk(chunk);
        params.set_offset(offset);
        params.set_total_size(total_size);
        params.set_mime_type(mime_type);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request.send().promise.await.context("upload_blob RPC failed")?;
    Ok(response
        .get()
        .context("upload_blob: bad response")?
        .get_blob_id()
        .context("upload_blob: missing blob_id")?
        .to_vec())
}
/// Download a single chunk of a blob from the server.
///
/// Returns `(chunk_bytes, total_size, mime_type)`.
pub async fn download_blob_chunk(
    client: &node_service::Client,
    blob_id: &[u8],
    offset: u64,
    length: u32,
) -> anyhow::Result<(Vec<u8>, u64, String)> {
    let mut request = client.download_blob_request();
    {
        let mut params = request.get();
        params.set_blob_id(blob_id);
        params.set_offset(offset);
        params.set_length(length);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request.send().promise.await.context("download_blob RPC failed")?;
    let reader = response.get().context("download_blob: bad response")?;
    let total_size = reader.get_total_size();
    let chunk = reader
        .get_chunk()
        .context("download_blob: missing chunk")?
        .to_vec();
    // A non-UTF-8 MIME string falls back to a generic binary type.
    let mime_type = reader
        .get_mime_type()
        .context("download_blob: missing mime_type")?
        .to_str()
        .unwrap_or("application/octet-stream")
        .to_string();
    Ok((chunk, total_size, mime_type))
}
/// Delete the authenticated user's account on the server.
/// Requires an identity-bound session (OPAQUE login).
pub async fn delete_account(
    client: &node_service::Client,
) -> anyhow::Result<bool> {
    let mut request = client.delete_account_request();
    {
        let mut params = request.get();
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("delete_account RPC failed")?;
    Ok(response
        .get()
        .context("delete_account: bad response")?
        .get_success())
}
/// Register a device for the authenticated identity.
pub async fn register_device(
    client: &node_service::Client,
    device_id: &[u8],
    device_name: &str,
) -> anyhow::Result<bool> {
    let mut request = client.register_device_request();
    {
        let mut params = request.get();
        params.set_device_id(device_id);
        params.set_device_name(device_name);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("register_device RPC failed")?;
    Ok(response
        .get()
        .context("register_device: bad response")?
        .get_success())
}
/// List all registered devices for the authenticated identity.
///
/// Each entry is `(device_id, device_name, registered_at)`.
pub async fn list_devices(
    client: &node_service::Client,
) -> anyhow::Result<Vec<(Vec<u8>, String, u64)>> {
    let mut request = client.list_devices_request();
    {
        let mut params = request.get();
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("list_devices RPC failed")?;
    let devices = response
        .get()
        .context("list_devices: bad response")?
        .get_devices()
        .context("list_devices: missing devices field")?;
    let mut out = Vec::with_capacity(devices.len() as usize);
    for idx in 0..devices.len() {
        let dev = devices.get(idx);
        let id = dev
            .get_device_id()
            .context("list_devices: missing device_id")?
            .to_vec();
        // A non-UTF-8 name degrades to an empty string.
        let name = dev
            .get_device_name()
            .context("list_devices: missing device_name")?
            .to_str()
            .unwrap_or("")
            .to_string();
        out.push((id, name, dev.get_registered_at()));
    }
    Ok(out)
}
/// Revoke (remove) a registered device.
pub async fn revoke_device(
    client: &node_service::Client,
    device_id: &[u8],
) -> anyhow::Result<bool> {
    let mut request = client.revoke_device_request();
    {
        let mut params = request.get();
        params.set_device_id(device_id);
        let mut auth_builder = params.reborrow().init_auth();
        set_auth(&mut auth_builder)?;
    }
    let response = request
        .send()
        .promise
        .await
        .context("revoke_device RPC failed")?;
    Ok(response
        .get()
        .context("revoke_device: bad response")?
        .get_success())
}
/// Return the current Unix timestamp in milliseconds.
///
/// Yields 0 if the system clock reads earlier than the Unix epoch.
pub fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_millis() as u64)
        .unwrap_or(0)
}

View File

@@ -1,305 +0,0 @@
//! Runtime session state for the interactive REPL.
//!
//! Wraps the legacy `StoredState` (identity + hybrid key) and adds
//! multi-conversation management via `ConversationStore`.
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
use anyhow::Context;
use zeroize::Zeroizing;
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, IdentityKeypair};
use super::conversation::{
now_ms, Conversation, ConversationId, ConversationKind, ConversationStore,
};
use super::state::load_or_init_state;
/// Runtime state for an interactive REPL session.
///
/// Owns the long-term identity, the SQLite-backed conversation store, and all
/// in-memory MLS group state. Constructed via [`SessionState::load`].
pub struct SessionState {
    /// Long-term identity keypair.
    pub identity: Arc<IdentityKeypair>,
    /// Post-quantum hybrid keypair.
    pub hybrid_kp: Option<HybridKeypair>,
    /// Path to the legacy state file (for backward compat with one-shot commands).
    pub state_path: PathBuf,
    /// Optional password for the legacy state file. Zeroized on drop. (M9)
    pub password: Option<Zeroizing<String>>,
    /// SQLite-backed conversation + message store.
    pub conv_store: ConversationStore,
    /// Currently active conversation.
    pub active_conversation: Option<ConversationId>,
    /// In-memory GroupMember instances keyed by conversation ID.
    pub members: HashMap<ConversationId, GroupMember>,
    /// Holds the GroupMember whose KeyPackage was uploaded to the server.
    /// Its keystore contains the HPKE init private key needed to decrypt
    /// incoming Welcome messages. Consumed on auto-join, then replenished.
    pub pending_member: Option<GroupMember>,
    /// Whether to display typing indicators from others (session preference).
    pub typing_notify_enabled: bool,
    /// Tracks who is currently typing and when the indicator was last received.
    /// Entries older than 10 seconds are considered expired.
    pub typing_indicators: HashMap<String, Instant>,
    /// Per-conversation disappearing message TTL in seconds. None = messages persist.
    pub disappear_ttl: HashMap<ConversationId, u32>,
    /// When true, /members and /group-info redact identity keys as `[redacted-XXXX]`.
    pub redact_keys: bool,
    /// When Some(secs), auto-clear local messages older than this duration.
    pub auto_clear_secs: Option<u32>,
    /// When true, send periodic dummy messages for traffic analysis resistance.
    pub padding_enabled: bool,
    /// Last epoch at which we sent a message (for /verify-fs).
    pub last_send_epoch: Option<u64>,
    /// P2P node for direct mesh messaging (requires `--features mesh`).
    #[cfg(feature = "mesh")]
    pub p2p_node: Option<Arc<quicprochat_p2p::P2pNode>>,
}
impl SessionState {
    /// Load identity from the legacy state file, open the conversation store,
    /// and migrate any existing single-group state into the conversation DB.
    ///
    /// `password`, when provided, unlocks the legacy state file and is also
    /// passed to the conversation store as its SQLCipher key.
    pub fn load(
        state_path: &Path,
        password: Option<&str>,
    ) -> anyhow::Result<Self> {
        let state = load_or_init_state(state_path, password)?;
        let identity = Arc::new(IdentityKeypair::from_seed(state.identity_seed));
        // Decode the optional hybrid key; a corrupt blob is a hard error.
        let hybrid_kp = state
            .hybrid_key
            .as_ref()
            .map(HybridKeypair::from_bytes)
            .transpose()
            .context("decode hybrid key")?;
        // Open the conversation DB next to the state file.
        // When a state password is provided, encrypt the DB with SQLCipher.
        let db_path = state_path.with_extension("convdb");
        let conv_store = ConversationStore::open(&db_path, password)?;
        let mut session = Self {
            identity,
            hybrid_kp,
            state_path: state_path.to_path_buf(),
            password: password.map(|p| Zeroizing::new(String::from(p))),
            conv_store,
            active_conversation: None,
            members: HashMap::new(),
            pending_member: None,
            typing_notify_enabled: true,
            typing_indicators: HashMap::new(),
            disappear_ttl: HashMap::new(),
            redact_keys: false,
            auto_clear_secs: None,
            padding_enabled: false,
            last_send_epoch: None,
            #[cfg(feature = "mesh")]
            p2p_node: None,
        };
        // Migrate legacy single-group into conversations if present and not yet migrated.
        if state.group.is_some() {
            session.migrate_legacy_group(state_path, &state.group)?;
        }
        // Load all existing conversations' GroupMembers into memory.
        session.load_all_members()?;
        Ok(session)
    }
    /// Migrate the legacy single-group from StoredState into the conversation DB.
    ///
    /// Best-effort: blobs written by an incompatible openmls version are
    /// skipped with a warning rather than failing session load. No-op when
    /// the blob is absent or the conversation was already migrated.
    fn migrate_legacy_group(
        &mut self,
        _state_path: &Path,
        group_blob: &Option<Vec<u8>>,
    ) -> anyhow::Result<()> {
        let blob = match group_blob {
            Some(b) => b,
            None => return Ok(()),
        };
        // Legacy group blobs used openmls 0.5 serde format. After the 0.8
        // upgrade the blob format changed to storage-provider state. Attempt
        // to load from the new format; if that fails, skip the legacy group.
        let group_id_guess = &blob[..blob.len().min(16)];
        let member = match GroupMember::new_from_storage_bytes(
            Arc::clone(&self.identity),
            blob,
            group_id_guess,
            false, // legacy groups are classical
        ) {
            Ok(m) => m,
            Err(e) => {
                tracing::warn!(error = %e, "skipping incompatible legacy group blob (openmls version mismatch)");
                return Ok(());
            }
        };
        let group_id_bytes = member.group_id().unwrap_or_default();
        // Use the first 16 bytes of the group_id as the ConversationId.
        let conv_id = if group_id_bytes.len() >= 16 {
            ConversationId::from_slice(&group_id_bytes[..16])
                .unwrap_or(ConversationId([0; 16]))
        } else {
            // Shorter ids are hashed from their hex form instead.
            ConversationId::from_group_name(&hex::encode(&group_id_bytes))
        };
        // Check if already migrated.
        if self.conv_store.load_conversation(&conv_id)?.is_some() {
            return Ok(());
        }
        let member_keys = member.member_identities();
        // Short hex prefix used to build a human-readable display name.
        let short_id = &hex::encode(&group_id_bytes)[..8.min(group_id_bytes.len() * 2)];
        let conv = Conversation {
            id: conv_id.clone(),
            kind: ConversationKind::Group {
                name: format!("legacy-{short_id}"),
            },
            display_name: format!("legacy-{short_id}"),
            mls_group_blob: Some(blob.clone()),
            keystore_blob: None,
            member_keys,
            unread_count: 0,
            last_activity_ms: now_ms(),
            created_at_ms: now_ms(),
            is_hybrid: false,
            last_seen_seq: 0,
        };
        self.conv_store.save_conversation(&conv)?;
        self.members.insert(conv_id, member);
        Ok(())
    }
/// Load every conversation from the DB and materialize an in-memory
/// `GroupMember` for each one that does not already have one.
fn load_all_members(&mut self) -> anyhow::Result<()> {
    for conv in self.conv_store.list_conversations()? {
        if !self.members.contains_key(&conv.id) {
            let member = self.create_member_from_conv(&conv)?;
            self.members.insert(conv.id.clone(), member);
        }
    }
    Ok(())
}
/// Build a `GroupMember` for a stored conversation.
///
/// When MLS state was persisted, restore from those bytes; otherwise create
/// an empty member backed by a per-conversation on-disk keystore, falling
/// back to an ephemeral keystore if the file cannot be opened.
fn create_member_from_conv(&self, conv: &Conversation) -> anyhow::Result<GroupMember> {
    match conv.mls_group_blob.as_deref() {
        Some(blob) => GroupMember::new_from_storage_bytes(
            Arc::clone(&self.identity),
            blob,
            conv.id.0.as_slice(),
            conv.is_hybrid,
        )
        .context("restore MLS state from conversation db"),
        None => {
            // No persisted MLS state — start from an empty member.
            let ks_path = self.keystore_path_for(&conv.id);
            let ks = match DiskKeyStore::persistent(&ks_path) {
                Ok(store) => store,
                Err(e) => {
                    tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral");
                    DiskKeyStore::ephemeral()
                }
            };
            Ok(GroupMember::new_with_state(
                Arc::clone(&self.identity),
                ks,
                None,
                conv.is_hybrid,
            ))
        }
    }
}
/// Path of the per-conversation keystore file:
/// `{state_path with .keystores extension}/{conv_id hex}.ks`.
fn keystore_path_for(&self, conv_id: &ConversationId) -> PathBuf {
    self.state_path
        .with_extension("keystores")
        .join(format!("{}.ks", conv_id.hex()))
}
/// Persist one conversation's MLS group state back to the DB.
///
/// Errors when the conversation has no in-memory member or serialization
/// fails; a conversation row missing from the DB is silently skipped.
pub fn save_member(&self, conv_id: &ConversationId) -> anyhow::Result<()> {
    let member = self.members.get(conv_id).context("no such conversation")?;
    let blob = member
        .serialize_mls_state()
        .context("serialize MLS state")?;
    let member_keys = member.member_identities();
    // Refresh both the group blob and the cached member key list in the DB.
    let Some(mut conv) = self.conv_store.load_conversation(conv_id)? else {
        return Ok(());
    };
    conv.mls_group_blob = blob;
    conv.member_keys = member_keys;
    self.conv_store.save_conversation(&conv)?;
    Ok(())
}
/// Persist every in-memory group state back to the DB.
/// Per-conversation failures are logged and never abort the sweep.
pub fn save_all(&self) -> anyhow::Result<()> {
    for (conv_id, _member) in self.members.iter() {
        if let Err(e) = self.save_member(conv_id) {
            tracing::warn!(conv = %conv_id.hex(), error = %e, "failed to save conversation");
        }
    }
    Ok(())
}
/// Register a new conversation and its `GroupMember` with the session.
///
/// Creates the keystore directory best-effort, persists the conversation
/// row, then installs the member in memory.
pub fn add_conversation(
    &mut self,
    conv: Conversation,
    member: GroupMember,
) -> anyhow::Result<()> {
    // Best-effort keystore directory creation (failure surfaces later when
    // the keystore file itself is opened).
    if let Some(parent) = self.keystore_path_for(&conv.id).parent() {
        let _ = std::fs::create_dir_all(parent);
    }
    let conv_id = conv.id.clone();
    self.conv_store.save_conversation(&conv)?;
    self.members.insert(conv_id, member);
    Ok(())
}
/// Get a mutable reference to a conversation's GroupMember.
///
/// Returns `None` when no in-memory member exists for `conv_id` (i.e. it was
/// never loaded via `load_all_members` / `add_conversation`).
pub fn get_member_mut(&mut self, conv_id: &ConversationId) -> Option<&mut GroupMember> {
    self.members.get_mut(conv_id)
}
/// Public key bytes for this identity, as an owned vector.
pub fn identity_bytes(&self) -> Vec<u8> {
    Vec::from(self.identity.public_key_bytes())
}
/// Short display form of the identity: hex of the first 4 public-key bytes.
pub fn identity_short(&self) -> String {
    let key = self.identity.public_key_bytes();
    hex::encode(&key[..4])
}
/// Display name of the active conversation, if one is selected and its row
/// still exists in the store (load errors yield `None`).
pub fn active_display_name(&self) -> Option<String> {
    let id = self.active_conversation.as_ref()?;
    match self.conv_store.load_conversation(id) {
        Ok(Some(conv)) => Some(conv.display_name),
        _ => None,
    }
}
/// Total unread count across all conversations (0 when listing fails).
pub fn total_unread(&self) -> u32 {
    let mut total: u32 = 0;
    for conv in self.conv_store.list_conversations().unwrap_or_default() {
        total += conv.unread_count;
    }
    total
}
}

View File

@@ -1,309 +0,0 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use anyhow::Context;
use argon2::{Algorithm, Argon2, Params, Version};
use chacha20poly1305::{
aead::{Aead, KeyInit},
ChaCha20Poly1305, Key, Nonce,
};
use rand::RngCore;
use serde::{Deserialize, Serialize};
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
/// Magic bytes for encrypted client state files.
/// Encrypted file layout: magic(4) | salt(16) | nonce(12) | ciphertext.
const STATE_MAGIC: &[u8; 4] = b"QPCE";
// Argon2id salt length (bytes) stored in the file header.
const STATE_SALT_LEN: usize = 16;
// ChaCha20-Poly1305 nonce length (bytes) stored in the file header.
const STATE_NONCE_LEN: usize = 12;
/// On-disk client state (bincode-encoded, optionally QPCE-encrypted).
///
/// Fields added after the initial release carry `#[serde(default)]` so older
/// state files still deserialize.
#[derive(Serialize, Deserialize)]
pub struct StoredState {
    /// 32-byte seed from which the identity keypair is deterministically
    /// rebuilt (see `IdentityKeypair::from_seed`).
    pub identity_seed: [u8; 32],
    /// Serialized MLS group state (storage-provider bytes), if a group exists.
    pub group: Option<Vec<u8>>,
    /// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added.
    #[serde(default)]
    pub hybrid_key: Option<HybridKeypairBytes>,
    /// Cached member public keys for group participants.
    #[serde(default)]
    pub member_keys: Vec<Vec<u8>>,
    /// MLS group ID bytes, needed to reload the group from StorageProvider state.
    #[serde(default)]
    pub group_id: Option<Vec<u8>>,
}
impl StoredState {
    /// Rehydrate runtime objects from stored state.
    ///
    /// Rebuilds the identity from its seed, then the MLS group: when both the
    /// serialized group bytes and the group id are present, the group is
    /// restored from storage-provider state; otherwise a fresh member is
    /// created on top of the on-disk keystore derived from `state_path`.
    /// Also decodes the hybrid keypair when one was stored.
    ///
    /// # Errors
    /// Fails when MLS restore, keystore open, or hybrid-key decode fails.
    pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> {
        let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
        // Presence of a stored hybrid key flags the member as hybrid-mode.
        let hybrid = self.hybrid_key.is_some();
        let member = match (self.group.as_ref(), self.group_id.as_ref()) {
            (Some(storage_bytes), Some(gid)) => {
                GroupMember::new_from_storage_bytes(
                    identity,
                    storage_bytes,
                    gid,
                    hybrid,
                )
                .context("restore MLS state from stored state")?
            }
            _ => {
                let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
                GroupMember::new_with_state(identity, key_store, None, hybrid)
            }
        };
        let hybrid_kp = self
            .hybrid_key
            .map(|bytes| HybridKeypair::from_bytes(&bytes).context("decode hybrid key"))
            .transpose()?;
        Ok((member, hybrid_kp))
    }
    /// Snapshot runtime objects into a serializable `StoredState`.
    ///
    /// NOTE(review): `member_keys` is written as empty here rather than taken
    /// from `member.member_identities()` — confirm callers populate it
    /// separately if anything relies on it after a reload.
    pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
        let group = member
            .serialize_mls_state()
            .context("serialize MLS state")?;
        Ok(Self {
            identity_seed: *member.identity_seed(),
            group,
            hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
            member_keys: Vec::new(),
            group_id: member.group_id(),
        })
    }
}
/// Argon2id parameters for client state key derivation (auditable; matches argon2 crate defaults).
/// - Memory: 19 MiB (m_cost = 19*1024 KiB)
/// - Time: 2 iterations
/// - Parallelism: 1 lane
///
/// NOTE: these parameters are NOT recorded in the QPCE file header (only the
/// salt and nonce are), so changing them makes previously written state files
/// undecryptable.
const ARGON2_STATE_M_COST: u32 = 19 * 1024;
const ARGON2_STATE_T_COST: u32 = 2;
const ARGON2_STATE_P_COST: u32 = 1;
/// Derive a fixed 32-byte encryption key from `password` and `salt` using
/// Argon2id with the explicit parameters declared above.
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
    let params = Params::new(ARGON2_STATE_M_COST, ARGON2_STATE_T_COST, ARGON2_STATE_P_COST, Some(32))
        .map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
    let mut key = [0u8; 32];
    Argon2::new(Algorithm::Argon2id, Version::default(), params)
        .hash_password_into(password.as_bytes(), salt, &mut key)
        .map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
    Ok(key)
}
/// Encrypt `plaintext` into the QPCE format:
/// magic(4) | salt(16) | nonce(12) | ciphertext.
///
/// A fresh random salt and nonce are drawn from the OS RNG for every call;
/// the derived key is zeroized on drop.
pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
    let mut salt = [0u8; STATE_SALT_LEN];
    let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
    rand::rngs::OsRng.fill_bytes(&mut salt);
    rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
    let key = zeroize::Zeroizing::new(derive_state_key(password, &salt)?);
    let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
    let ciphertext = cipher
        .encrypt(Nonce::from_slice(&nonce_bytes), plaintext)
        .map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;
    // Assemble header + ciphertext in one pre-sized buffer.
    let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
    for part in [STATE_MAGIC.as_slice(), salt.as_slice(), nonce_bytes.as_slice(), ciphertext.as_slice()] {
        out.extend_from_slice(part);
    }
    Ok(out)
}
/// Decrypt a QPCE-formatted state file (magic(4) | salt(16) | nonce(12) | ciphertext).
///
/// # Errors
/// Fails when the buffer is too short, does not carry the QPCE magic, key
/// derivation fails, or the AEAD tag does not verify (wrong password or
/// corrupted file).
pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
    let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
    anyhow::ensure!(
        data.len() > header_len,
        "encrypted state file too short ({} bytes)",
        data.len()
    );
    // Defense in depth: callers normally gate on `is_encrypted_state`, but a
    // standalone call on a non-QPCE buffer would otherwise misuse its first
    // bytes as salt/nonce and report a misleading "wrong password" error.
    anyhow::ensure!(
        &data[..4] == STATE_MAGIC,
        "not an encrypted state file (missing QPCE magic)"
    );
    let salt = &data[4..4 + STATE_SALT_LEN];
    let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
    let ciphertext = &data[header_len..];
    // Derived key material is zeroized on drop.
    let key = zeroize::Zeroizing::new(derive_state_key(password, salt)?);
    let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
    let nonce = Nonce::from_slice(nonce_bytes);
    let plaintext = cipher
        .decrypt(nonce, ciphertext)
        // Deliberately vague: AEAD failure cannot distinguish a wrong
        // password from a corrupted file.
        .map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
    Ok(plaintext)
}
/// Returns true if raw bytes begin with the QPCE magic header.
/// (`starts_with` is false for buffers shorter than the magic.)
pub fn is_encrypted_state(bytes: &[u8]) -> bool {
    bytes.starts_with(STATE_MAGIC.as_slice())
}
/// Load the state file at `path`, creating a brand-new identity (classical +
/// hybrid keys) when it does not exist. Older files missing the hybrid key
/// are upgraded in place and rewritten.
pub fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
    if !path.exists() {
        // First run: mint a fresh identity and hybrid keypair and persist them.
        let identity = IdentityKeypair::generate();
        let hybrid_kp = HybridKeypair::generate();
        let key_store = DiskKeyStore::persistent(keystore_path(path))?;
        let member = GroupMember::new_with_state(Arc::new(identity), key_store, None, false);
        let state = StoredState::from_parts(&member, Some(&hybrid_kp))?;
        write_state(path, &state, password)?;
        return Ok(state);
    }
    let mut state = load_existing_state(path, password)?;
    // Upgrade path: generate a hybrid keypair for pre-hybrid state files.
    if state.hybrid_key.is_none() {
        state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
        write_state(path, &state, password)?;
    }
    Ok(state)
}
/// Read and decode an existing state file, decrypting it first when the
/// QPCE magic is present (which then requires a password).
pub fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
    let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
    if !is_encrypted_state(&bytes) {
        return bincode::deserialize(&bytes).context("decode state");
    }
    let pw = password
        .context("state file is encrypted (QPCE); a password is required to decrypt it")?;
    let plaintext = decrypt_state(pw, &bytes)?;
    bincode::deserialize(&plaintext).context("decode encrypted state")
}
/// Snapshot `member` (and optional hybrid keypair) and write it to `path`,
/// encrypting when a password is supplied.
pub fn save_state(
    path: &Path,
    member: &GroupMember,
    hybrid_kp: Option<&HybridKeypair>,
    password: Option<&str>,
) -> anyhow::Result<()> {
    write_state(path, &StoredState::from_parts(member, hybrid_kp)?, password)
}
/// Atomically write `state` to `path` (bincode, optionally QPCE-encrypted).
///
/// Writes to `{path}.tmp` and renames over `path` so a crash never leaves a
/// truncated state file. On Unix the temp file is restricted to 0o600 before
/// the rename — the state holds the identity seed, which must not be
/// world-readable even when no password is set (matches the session-cache
/// permission policy).
pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
    }
    let plaintext = bincode::serialize(state).context("encode state")?;
    let bytes = if let Some(pw) = password {
        encrypt_state(pw, &plaintext)?
    } else {
        plaintext
    };
    let tmp = path.with_extension("tmp");
    std::fs::write(&tmp, bytes).with_context(|| format!("write state temp {tmp:?}"))?;
    // Tighten permissions before the file becomes visible under `path`.
    // Best-effort: a chmod failure must not lose the state write.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        std::fs::set_permissions(&tmp, std::fs::Permissions::from_mode(0o600)).ok();
    }
    std::fs::rename(&tmp, path).with_context(|| format!("rename state {tmp:?} -> {path:?}"))?;
    Ok(())
}
/// Decode a 32-byte identity public key from its hex representation.
pub fn decode_identity_key(hex_str: &str) -> anyhow::Result<Vec<u8>> {
    let bytes = super::hex::decode(hex_str)
        .map_err(|e| anyhow::anyhow!(e))
        .context("identity key must be hex")?;
    match bytes.len() {
        32 => Ok(bytes),
        _ => Err(anyhow::anyhow!("identity key must be 32 bytes")),
    }
}
/// Keystore path derived from the state path by swapping the extension to
/// `.ks` (e.g. `state.bin` -> `state.ks`).
pub fn keystore_path(state_path: &Path) -> PathBuf {
    state_path.with_extension("ks")
}
/// SHA-256 digest of `bytes`, as an owned vector.
pub fn sha256(bytes: &[u8]) -> Vec<u8> {
    use sha2::{Digest, Sha256};
    let mut hasher = Sha256::new();
    hasher.update(bytes);
    hasher.finalize().to_vec()
}
// Unit tests for the QPCE encrypt/decrypt round trip and StoredState codec.
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    // Raw bytes survive an encrypt/decrypt round trip and carry the magic.
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let plaintext = b"test state data";
        let password = "test-password";
        let encrypted = encrypt_state(password, plaintext).unwrap();
        assert!(is_encrypted_state(&encrypted));
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        assert_eq!(decrypted, plaintext);
    }
    // AEAD verification rejects a wrong password.
    #[test]
    fn wrong_password_fails() {
        let plaintext = b"test state data";
        let encrypted = encrypt_state("correct", plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }
    // A bincode-serialized StoredState round-trips through QPCE encryption.
    #[test]
    fn state_encrypt_decrypt_round_trip() {
        let state = StoredState {
            identity_seed: [42u8; 32],
            hybrid_key: None,
            group: None,
            member_keys: Vec::new(),
            group_id: None,
        };
        let password = "test-password";
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state(password, &plaintext).unwrap();
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
        assert_eq!(recovered.identity_seed, state.identity_seed);
        assert!(recovered.hybrid_key.is_none());
        assert!(recovered.group.is_none());
    }
    // Hybrid keypair bytes (dummy sizes) survive the state round trip.
    #[test]
    fn state_encrypt_decrypt_with_hybrid_key() {
        use zeroize::Zeroizing;
        let state = StoredState {
            identity_seed: [7u8; 32],
            hybrid_key: Some(HybridKeypairBytes {
                x25519_sk: Zeroizing::new([1u8; 32]),
                mlkem_dk: Zeroizing::new(vec![3u8; 2400]),
                mlkem_ek: vec![4u8; 1184],
            }),
            group: None,
            member_keys: Vec::new(),
            group_id: None,
        };
        let password = "another-password";
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state(password, &plaintext).unwrap();
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
        assert_eq!(recovered.identity_seed, state.identity_seed);
        assert!(recovered.hybrid_key.is_some());
    }
    // Same wrong-password check, driven through a serialized StoredState.
    #[test]
    fn state_wrong_password_fails() {
        let state = StoredState {
            identity_seed: [99u8; 32],
            hybrid_key: None,
            group: None,
            member_keys: Vec::new(),
            group_id: None,
        };
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state("correct", &plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }
}

View File

@@ -1,179 +0,0 @@
//! Cached session token stored next to the state file.
//!
//! File format (no password): two lines — username and hex-encoded session token.
//! File format (with password): QPCE-encrypted version of the above.
//! The token has a server-side 24h TTL; no client-side expiry tracking.
use std::path::{Path, PathBuf};
use anyhow::Context;
use super::state::{decrypt_state, encrypt_state, is_encrypted_state};
/// A username + session token pair restored from the on-disk cache.
pub struct CachedSession {
    /// Account name the token belongs to (first line of the cache file).
    pub username: String,
    /// Hex-encoded session token (second line; validated as hex on load).
    pub token_hex: String,
}
/// Derive the session cache path: `{state_path}.session`.
fn session_cache_path(state_path: &Path) -> PathBuf {
state_path.with_extension("session")
}
/// Parse the two-line cache format (username, then hex token) from text.
/// Returns `None` for missing/empty lines or a token that is not valid hex.
fn parse_session_lines(text: &str) -> Option<CachedSession> {
    let mut lines = text.lines();
    let username = lines.next()?.trim().to_string();
    let token_hex = lines.next()?.trim().to_string();
    let valid = !username.is_empty() && !token_hex.is_empty() && hex::decode(&token_hex).is_ok();
    valid.then_some(CachedSession { username, token_hex })
}
/// Load a cached session token. Returns `None` when the file is missing,
/// malformed, or QPCE-encrypted without a matching `password`.
pub fn load_cached_session(state_path: &Path, password: Option<&str>) -> Option<CachedSession> {
    let raw = std::fs::read(session_cache_path(state_path)).ok()?;
    let plaintext = if is_encrypted_state(&raw) {
        decrypt_state(password?, &raw).ok()?
    } else {
        raw
    };
    parse_session_lines(&String::from_utf8(plaintext).ok()?)
}
/// Save a session token to the cache file (mode 0o600 on Unix).
/// Encrypts with QPCE if `password` is provided.
pub fn save_cached_session(
state_path: &Path,
username: &str,
token_hex: &str,
password: Option<&str>,
) -> anyhow::Result<()> {
let path = session_cache_path(state_path);
let contents = format!("{username}\n{token_hex}\n");
let bytes = match password {
Some(pw) => encrypt_state(pw, contents.as_bytes())?,
None => {
#[cfg(not(unix))]
tracing::warn!(
"storing session token as plaintext (no password set); \
file permissions cannot be restricted on this platform"
);
contents.into_bytes()
}
};
std::fs::write(&path, bytes).with_context(|| format!("write session cache {path:?}"))?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let perms = std::fs::Permissions::from_mode(0o600);
std::fs::set_permissions(&path, perms).ok();
}
Ok(())
}
/// Remove the cached session file (best-effort; a missing file is fine).
pub fn clear_cached_session(state_path: &Path) {
    let _ = std::fs::remove_file(session_cache_path(state_path));
}
// Unit tests for the session token cache (plaintext + QPCE-encrypted forms).
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    // Save/load without a password keeps username and token intact.
    #[test]
    fn plaintext_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let token = hex::encode(b"session-token-bytes");
        save_cached_session(&state_path, "alice", &token, None).unwrap();
        let loaded = load_cached_session(&state_path, None).unwrap();
        assert_eq!(loaded.username, "alice");
        assert_eq!(loaded.token_hex, token);
    }
    // With a password the cache file is QPCE-encrypted on disk.
    #[test]
    fn encrypted_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let password = "strong-password";
        let token = hex::encode(b"encrypted-token");
        save_cached_session(&state_path, "bob", &token, Some(password)).unwrap();
        // Encrypted file should start with QPCE magic
        let raw = std::fs::read(session_cache_path(&state_path)).unwrap();
        assert_eq!(&raw[..4], b"QPCE");
        let loaded = load_cached_session(&state_path, Some(password)).unwrap();
        assert_eq!(loaded.username, "bob");
        assert_eq!(loaded.token_hex, token);
    }
    // Decryption failure surfaces as None, not an error.
    #[test]
    fn wrong_password_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let token = hex::encode(b"secret-token");
        save_cached_session(&state_path, "carol", &token, Some("correct")).unwrap();
        let result = load_cached_session(&state_path, Some("wrong"));
        assert!(result.is_none());
    }
    // Absent cache file is a normal "no session" case.
    #[test]
    fn missing_file_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("nonexistent.bin");
        assert!(load_cached_session(&state_path, None).is_none());
    }
    // clear_cached_session deletes the file it created.
    #[test]
    fn clear_removes_file() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let token = hex::encode(b"to-be-deleted");
        save_cached_session(&state_path, "dave", &token, None).unwrap();
        assert!(session_cache_path(&state_path).exists());
        clear_cached_session(&state_path);
        assert!(!session_cache_path(&state_path).exists());
    }
    // Each malformed variant (bad hex, one line, empty) loads as None.
    #[test]
    fn malformed_content_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let cache_path = session_cache_path(&state_path);
        // Not valid hex on second line
        std::fs::write(&cache_path, "alice\nnot-hex-data\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());
        // Only one line
        std::fs::write(&cache_path, "alice\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());
        // Empty file
        std::fs::write(&cache_path, "").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());
    }
}

View File

@@ -1,807 +0,0 @@
//! Full-screen Ratatui TUI for quicprochat.
//!
//! Layout:
//! ┌──────────────┬──────────────────────────────────────────┐
//! │ Channels │ Messages │
//! │ (20%) │ (80%) │
//! │ │ │
//! │ ├──────────────────────────────────────────┤
//! │ │ Input bar │
//! └──────────────┴──────────────────────────────────────────┘
//!
//! Keyboard:
//! Enter — send message
//! Up / Down — scroll message history
//! Tab — next channel
//! Shift+Tab — prev channel
//! Ctrl+C / q — quit
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use crossterm::{
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyModifiers},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{
backend::CrosstermBackend,
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
Frame, Terminal,
};
use tokio::sync::mpsc;
use tokio::time::interval;
use crate::{ClientAuth, init_auth};
use super::commands::{opaque_login, opaque_register};
use super::conversation::{now_ms, ConversationId, StoredMessage};
use super::rpc::{
connect_node, enqueue, fetch_hybrid_key, fetch_wait, try_hybrid_decrypt, upload_hybrid_key,
upload_key_package,
};
use super::session::SessionState;
use super::state::load_or_init_state;
use super::token_cache::{load_cached_session, save_cached_session};
use quicprochat_core::{
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
hybrid_encrypt, parse as parse_app_msg, serialize_chat,
};
use quicprochat_proto::node_capnp::node_service;
// ── App events ───────────────────────────────────────────────────────────────
/// Events sent from background tasks into the main TUI loop.
enum TuiEvent {
    /// A key event from the terminal.
    Key(event::KeyEvent),
    /// New messages received from the server, as
    /// (conversation id, short sender label, message body) tuples.
    NewMessages(Vec<(ConversationId, String, String)>),
    /// Tick — redraw periodically even if nothing happened.
    Tick,
}
// ── Display message ───────────────────────────────────────────────────────────
/// A single message as rendered in the message pane.
#[derive(Clone)]
struct DisplayMessage {
    /// Short human-readable sender label ("me(<id>)", a stored name, or a
    /// hex prefix of the sender key — see `App::reload_messages`).
    sender: String,
    /// Message body text.
    body: String,
    /// Epoch milliseconds; rendered as `[HH:MM]` by `format_timestamp`.
    timestamp_ms: u64,
    /// True for messages this client sent (styled differently).
    is_outgoing: bool,
}
// ── App state ─────────────────────────────────────────────────────────────────
/// All mutable UI state for the TUI event loop.
struct App {
    /// Channel (conversation) names shown in the sidebar.
    channel_names: Vec<String>,
    /// Conversation IDs, parallel to `channel_names`.
    channel_ids: Vec<ConversationId>,
    /// Index of the selected channel in the sidebar.
    selected_channel: usize,
    /// Messages for the currently active channel.
    messages: Vec<DisplayMessage>,
    /// Current input buffer.
    input: String,
    /// Scroll offset in lines from the bottom (0 = pinned to newest).
    scroll_offset: usize,
    /// Whether the user has requested quit.
    should_quit: bool,
    /// Short identity string for display.
    identity_short: String,
}
impl App {
    /// Build the initial UI state from the session's stored conversations;
    /// selects the first channel and starts with an empty message pane
    /// (messages are loaded on demand by `reload_messages`).
    fn new(session: &SessionState) -> anyhow::Result<Self> {
        let convs = session.conv_store.list_conversations()?;
        let channel_names: Vec<String> = convs.iter().map(|c| c.display_name.clone()).collect();
        let channel_ids: Vec<ConversationId> = convs.iter().map(|c| c.id.clone()).collect();
        Ok(Self {
            channel_names,
            channel_ids,
            selected_channel: 0,
            messages: Vec::new(),
            input: String::new(),
            scroll_offset: 0,
            should_quit: false,
            identity_short: session.identity_short(),
        })
    }
    /// Conversation id of the selected channel, if any channels exist.
    fn active_conv_id(&self) -> Option<&ConversationId> {
        self.channel_ids.get(self.selected_channel)
    }
    /// Reload messages for the currently selected channel from the session store.
    /// Loads up to the 200 most recent messages and resets scroll to bottom.
    fn reload_messages(&mut self, session: &SessionState) -> anyhow::Result<()> {
        let conv_id = match self.active_conv_id() {
            Some(id) => id.clone(),
            None => {
                self.messages.clear();
                return Ok(());
            }
        };
        let stored = session.conv_store.load_recent_messages(&conv_id, 200)?;
        self.messages = stored
            .into_iter()
            .map(|m| {
                // Sender label priority: self > stored name > key prefix.
                let sender = if m.is_outgoing {
                    format!("me({})", &self.identity_short)
                } else if let Some(name) = &m.sender_name {
                    name.clone()
                } else {
                    // Shorten sender key to 8 hex chars.
                    let hex_short = hex::encode(&m.sender_key[..m.sender_key.len().min(4)]);
                    format!("{hex_short}")
                };
                DisplayMessage {
                    sender,
                    body: m.body,
                    timestamp_ms: m.timestamp_ms,
                    is_outgoing: m.is_outgoing,
                }
            })
            .collect();
        // Reset scroll to bottom on channel switch.
        self.scroll_offset = 0;
        Ok(())
    }
    /// Select the next channel (wrapping) and reload its messages.
    /// A reload failure leaves the previous messages visible.
    fn select_next_channel(&mut self, session: &SessionState) {
        if self.channel_names.is_empty() {
            return;
        }
        self.selected_channel = (self.selected_channel + 1) % self.channel_names.len();
        let _ = self.reload_messages(session);
    }
    /// Select the previous channel (wrapping) and reload its messages.
    fn select_prev_channel(&mut self, session: &SessionState) {
        if self.channel_names.is_empty() {
            return;
        }
        if self.selected_channel == 0 {
            self.selected_channel = self.channel_names.len() - 1;
        } else {
            self.selected_channel -= 1;
        }
        let _ = self.reload_messages(session);
    }
    // Scrolling is unclamped here; `draw_messages` clamps the offset against
    // the message count when rendering.
    fn scroll_up(&mut self) {
        self.scroll_offset = self.scroll_offset.saturating_add(1);
    }
    fn scroll_down(&mut self) {
        self.scroll_offset = self.scroll_offset.saturating_sub(1);
    }
    /// Append newly received messages to the in-memory list (no DB reload needed
    /// since we already have them from the poll task, but we also save them via
    /// the session so they appear on reload).
    ///
    /// Messages for non-active conversations are not displayed here; they are
    /// already persisted by the poll task and appear on channel switch.
    fn append_messages(&mut self, msgs: Vec<(ConversationId, String, String)>) {
        let active = self.active_conv_id().cloned();
        for (conv_id, sender, body) in msgs {
            if active.as_ref() == Some(&conv_id) {
                self.messages.push(DisplayMessage {
                    sender,
                    body,
                    timestamp_ms: now_ms(),
                    is_outgoing: false,
                });
                // Snap to bottom if user wasn't scrolled.
                // NOTE(review): this branch is a no-op — offset 0 already
                // renders the newest line; a scrolled user keeps their place.
                if self.scroll_offset == 0 {
                    // Already at bottom — nothing to do.
                }
            }
        }
    }
}
// ── Drawing ───────────────────────────────────────────────────────────────────
/// Render one frame: a 20% sidebar on the left, and on the right the message
/// pane above a fixed 3-row input bar.
fn ui(frame: &mut Frame, app: &App) {
    let outer = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Percentage(20), Constraint::Percentage(80)])
        .split(frame.area());
    let right = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Min(3), Constraint::Length(3)])
        .split(outer[1]);
    draw_sidebar(frame, app, outer[0]);
    draw_messages(frame, app, right[0]);
    draw_input(frame, app, right[1]);
}
/// Render the channel list; the selected channel is bold + reversed.
fn draw_sidebar(frame: &mut Frame, app: &App, area: Rect) {
    let mut items: Vec<ListItem> = Vec::with_capacity(app.channel_names.len());
    for (i, name) in app.channel_names.iter().enumerate() {
        let mut style = Style::default().fg(Color::Cyan);
        if i == app.selected_channel {
            style = style.add_modifier(Modifier::BOLD | Modifier::REVERSED);
        }
        items.push(ListItem::new(Line::from(Span::styled(name.clone(), style))));
    }
    let block = Block::default()
        .title(" Channels ")
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::DarkGray));
    // Track the selection in a ListState so ratatui keeps it in view.
    let mut list_state = ListState::default();
    if !app.channel_names.is_empty() {
        list_state.select(Some(app.selected_channel));
    }
    frame.render_stateful_widget(List::new(items).block(block), area, &mut list_state);
}
/// Render the message pane for the active channel.
///
/// `scroll_offset == 0` pins the view to the newest message; larger offsets
/// move the window toward older messages (clamped to the history length).
fn draw_messages(frame: &mut Frame, app: &App, area: Rect) {
    let title = match app.channel_names.get(app.selected_channel) {
        Some(name) => format!(" {name} "),
        None => " Messages ".to_string(),
    };
    let block = Block::default()
        .title(title)
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::DarkGray));
    // Rows available inside the border.
    let inner_height = area.height.saturating_sub(2) as usize;
    // Pick the visible window first, then style only those messages.
    let total = app.messages.len();
    let (start, end) = if total > inner_height {
        let bottom = total - app.scroll_offset.min(total);
        (bottom.saturating_sub(inner_height), bottom)
    } else {
        // Everything fits: show all, ignoring the scroll offset.
        (0, total)
    };
    let visible_lines: Vec<Line> = app.messages[start..end]
        .iter()
        .map(|m| {
            let sender_color = if m.is_outgoing { Color::Yellow } else { Color::Green };
            Line::from(vec![
                Span::styled(
                    format_timestamp(m.timestamp_ms),
                    Style::default().fg(Color::DarkGray),
                ),
                Span::styled(
                    format!(" {} ", m.sender),
                    Style::default().fg(sender_color).add_modifier(Modifier::BOLD),
                ),
                Span::raw(m.body.clone()),
            ])
        })
        .collect();
    let paragraph = Paragraph::new(visible_lines)
        .block(block)
        .wrap(Wrap { trim: false });
    frame.render_widget(paragraph, area);
}
/// Render the input bar and place the terminal cursor after the typed text.
fn draw_input(frame: &mut Frame, app: &App, area: Rect) {
    let block = Block::default()
        .title(" Input (Enter=send, Tab=switch channel, q/Ctrl+C=quit) ")
        .borders(Borders::ALL)
        .style(Style::default().fg(Color::White));
    let input_text = Paragraph::new(app.input.as_str())
        .block(block)
        .style(Style::default().fg(Color::White));
    frame.render_widget(input_text, area);
    // Position cursor at end of input. Count chars, not bytes: `len()` would
    // overshoot for any multi-byte (non-ASCII) input.
    // NOTE(review): still approximate for wide/combining graphemes (no
    // unicode-width dependency) — confirm acceptable.
    let cursor_x = (area.x + 1)
        .saturating_add(u16::try_from(app.input.chars().count()).unwrap_or(u16::MAX));
    let cursor_y = area.y + 1;
    if cursor_x < area.x + area.width - 1 {
        frame.set_cursor_position((cursor_x, cursor_y));
    }
}
/// Format epoch milliseconds as `"[HH:MM] "` (trailing space included).
/// Pure day-modulo arithmetic on the raw epoch — effectively UTC, with no
/// timezone or date handling.
fn format_timestamp(ms: u64) -> String {
    let total_minutes = ms / 60_000;
    let hours = (total_minutes / 60) % 24;
    let minutes = total_minutes % 60;
    format!("[{hours:02}:{minutes:02}] ")
}
// ── Message polling task ──────────────────────────────────────────────────────
/// Background task that polls the server for new messages and sends them via `tx`.
///
/// Once per second: fetch queued payloads for our identity, peel the optional
/// hybrid (PQ) layer, then trial-decrypt each payload against every known
/// conversation's MLS state. Decrypted application messages are unpadded,
/// unsealed, parsed, persisted, and forwarded to the TUI loop. The task exits
/// when the receiving side of `tx` is dropped.
async fn poll_task(
    mut session: SessionState,
    client: node_service::Client,
    tx: mpsc::Sender<TuiEvent>,
) {
    let mut poll_interval = interval(Duration::from_millis(1000));
    // Don't burst-catch-up after a stall; just resume the cadence.
    poll_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
    loop {
        poll_interval.tick().await;
        let identity_bytes = session.identity_bytes();
        // Network errors are treated as "nothing new"; retry next tick.
        let payloads = match fetch_wait(&client, &identity_bytes, 0).await {
            Ok(p) => p,
            Err(_) => continue,
        };
        if payloads.is_empty() {
            continue;
        }
        let mut new_msgs: Vec<(ConversationId, String, String)> = Vec::new();
        let my_key = session.identity_bytes();
        // Process in sequence order so MLS state advances consistently.
        let mut sorted = payloads;
        sorted.sort_by_key(|(seq, _)| *seq);
        for (_seq, payload) in &sorted {
            // Peel the hybrid layer if present; on failure assume the payload
            // is already raw MLS ciphertext.
            let mls_payload = match try_hybrid_decrypt(session.hybrid_kp.as_ref(), payload) {
                Ok(b) => b,
                Err(_) => payload.clone(),
            };
            // Trial-decrypt against every conversation; the first member that
            // accepts the ciphertext handles it (`break` below).
            let conv_ids: Vec<ConversationId> = session.members.keys().cloned().collect();
            for conv_id in &conv_ids {
                let member = match session.members.get_mut(conv_id) {
                    Some(m) => m,
                    None => continue,
                };
                match member.receive_message(&mls_payload) {
                    Ok(ReceivedMessage::Application(plaintext)) => {
                        // Strip padding, then the sealed-sender wrapping, to
                        // recover (sender key, application bytes).
                        let (sender_key, app_bytes) = {
                            let after_unpad = quicprochat_core::padding::unpad(&plaintext)
                                .unwrap_or_else(|_| plaintext.clone());
                            if quicprochat_core::sealed_sender::is_sealed(&after_unpad) {
                                match quicprochat_core::sealed_sender::unseal(&after_unpad) {
                                    Ok((sk, inner)) => (sk.to_vec(), inner),
                                    // NOTE(review): on unseal failure the message
                                    // is attributed to OUR key — confirm this
                                    // fallback is intended vs. dropping it.
                                    Err(_) => (my_key.clone(), after_unpad),
                                }
                            } else {
                                (my_key.clone(), after_unpad)
                            }
                        };
                        // Decode the structured AppMessage; bytes that don't
                        // parse are shown verbatim as a chat message.
                        let (body, msg_id, msg_type, ref_msg_id) =
                            match parse_app_msg(&app_bytes) {
                                Ok((_, AppMessage::Chat { message_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    Some(message_id),
                                    "chat",
                                    None,
                                ),
                                Ok((_, AppMessage::Reply { ref_msg_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    None,
                                    "reply",
                                    Some(ref_msg_id),
                                ),
                                Ok((_, AppMessage::Reaction { ref_msg_id, emoji })) => (
                                    String::from_utf8_lossy(&emoji).to_string(),
                                    None,
                                    "reaction",
                                    Some(ref_msg_id),
                                ),
                                _ => (
                                    String::from_utf8_lossy(&app_bytes).to_string(),
                                    None,
                                    "chat",
                                    None,
                                ),
                            };
                        let stored = StoredMessage {
                            conversation_id: conv_id.clone(),
                            message_id: msg_id,
                            sender_key: sender_key.clone(),
                            sender_name: None,
                            body: body.clone(),
                            msg_type: msg_type.into(),
                            ref_msg_id,
                            timestamp_ms: now_ms(),
                            is_outgoing: false,
                        };
                        // Only messages that persisted are surfaced to the UI.
                        if session.conv_store.save_message(&stored).is_ok() {
                            let sender_short = hex::encode(&sender_key[..sender_key.len().min(4)]);
                            new_msgs.push((conv_id.clone(), sender_short, body));
                        }
                        let _ = session.conv_store.update_activity(conv_id, now_ms());
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    // Group membership / epoch changes: persist the new MLS
                    // state and stop trying other conversations.
                    Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    // Decrypt failure for this conversation — try the next one.
                    _ => {}
                }
            }
        }
        if !new_msgs.is_empty() {
            // Receiver dropped means the TUI is gone; stop polling.
            if tx.send(TuiEvent::NewMessages(new_msgs)).await.is_err() {
                break;
            }
        }
    }
}
// ── Send message ──────────────────────────────────────────────────────────────
/// Encrypt `text` for `conv_id` and enqueue it to every other group member.
///
/// Pipeline: AppMessage framing → sealed-sender wrap → padding → MLS encrypt,
/// then per-recipient an optional hybrid (PQ) layer when the recipient has an
/// uploaded hybrid public key. Finally persists the outgoing message and the
/// conversation's MLS state.
///
/// NOTE(review): a failure mid-loop (fetch_hybrid_key / enqueue) aborts the
/// remaining recipients after some may already have been enqueued — partial
/// delivery is neither rolled back nor retried. Confirm acceptable.
async fn send_message(
    session: &mut SessionState,
    client: &node_service::Client,
    conv_id: &ConversationId,
    text: &str,
) -> anyhow::Result<()> {
    let my_key = session.identity_bytes();
    let identity = Arc::clone(&session.identity);
    let member = session
        .members
        .get_mut(conv_id)
        .context("no GroupMember for this conversation")?;
    // Wrap in structured AppMessage format.
    let app_payload = serialize_chat(text.as_bytes(), None)
        .context("serialize app message")?;
    // Metadata protection: seal + pad.
    let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
    let padded = quicprochat_core::padding::pad(&sealed);
    let ct = member.send_message(&padded).context("MLS encrypt")?;
    // Everyone in the group except ourselves.
    let recipients: Vec<Vec<u8>> = member
        .member_identities()
        .into_iter()
        .filter(|id| id.as_slice() != my_key.as_slice())
        .collect();
    for recipient_key in &recipients {
        // Per-recipient: add the hybrid layer only when they published a key.
        let peer_hybrid_pk = fetch_hybrid_key(client, recipient_key).await?;
        let payload = if let Some(ref pk) = peer_hybrid_pk {
            hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt")?
        } else {
            ct.clone()
        };
        enqueue(client, recipient_key, &payload).await?;
    }
    // Extract message_id from what we just serialized.
    let msg_id = parse_app_msg(&app_payload)
        .ok()
        .and_then(|(_, m)| match m {
            AppMessage::Chat { message_id, .. } => Some(message_id),
            _ => None,
        });
    // Save outgoing message.
    let stored = StoredMessage {
        conversation_id: conv_id.clone(),
        message_id: msg_id,
        sender_key: my_key,
        sender_name: Some("you".into()),
        body: text.to_string(),
        msg_type: "chat".into(),
        ref_msg_id: None,
        timestamp_ms: now_ms(),
        is_outgoing: true,
    };
    session.conv_store.save_message(&stored)?;
    session.conv_store.update_activity(conv_id, now_ms())?;
    // Persist the advanced MLS ratchet state.
    session.save_member(conv_id)?;
    Ok(())
}
// ── TUI entry point ───────────────────────────────────────────────────────────
/// Entry point for `qpc tui`. Sets up the terminal, runs the event loop, and
/// restores the terminal on exit.
pub async fn run_tui(
state_path: &Path,
server: &str,
ca_cert: &Path,
server_name: &str,
password: Option<&str>,
username: Option<&str>,
opaque_password: Option<&str>,
access_token: &str,
device_id: Option<&str>,
) -> anyhow::Result<()> {
// ── Auth ──────────────────────────────────────────────────────────────────
let resolved_token = resolve_tui_access_token(
state_path,
server,
ca_cert,
server_name,
password,
username,
opaque_password,
access_token,
)
.await?;
let token_bytes = hex::decode(&resolved_token)
.unwrap_or_else(|_| resolved_token.into_bytes());
let auth_ctx = ClientAuth::from_raw(token_bytes, device_id.map(String::from));
init_auth(auth_ctx);
// ── Session + RPC ─────────────────────────────────────────────────────────
let mut session = SessionState::load(state_path, password)?;
let client = connect_node(server, ca_cert, server_name).await?;
// Auto-upload KeyPackage.
let _ = auto_upload_keys_tui(&session, &client).await;
// ── Terminal setup ────────────────────────────────────────────────────────
enable_raw_mode().context("enable raw mode")?;
let mut stdout = std::io::stdout();
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)
.context("enter alternate screen")?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend).context("create terminal")?;
let result = tui_loop(&mut terminal, &mut session, client).await;
// ── Terminal cleanup (always restore, even on error) ───────────────────
disable_raw_mode().ok();
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture
)
.ok();
terminal.show_cursor().ok();
session.save_all()?;
result
}
/// Core TUI event loop: spawns the keyboard-poll and message-poll tasks on
/// the current LocalSet, then draws and dispatches events until the user
/// quits or the event channel closes.
///
/// # Errors
/// Returns an error if the initial app/session setup, a draw call, or a
/// message reload fails.
async fn tui_loop(
    terminal: &mut Terminal<CrosstermBackend<std::io::Stdout>>,
    session: &mut SessionState,
    client: node_service::Client,
) -> anyhow::Result<()> {
    let mut app = App::new(session)?;
    app.reload_messages(session)?;
    let (event_tx, mut event_rx) = mpsc::channel::<TuiEvent>(256);
    // ── Keyboard event task ───────────────────────────────────────────────────
    let key_tx = event_tx.clone();
    tokio::task::spawn_local(async move {
        loop {
            // crossterm event polling — 50ms timeout so we can tick.
            // NOTE(review): event::poll blocks the thread for up to 50ms per
            // iteration; acceptable on a single-threaded LocalSet but worth
            // confirming it doesn't starve the poll task.
            match event::poll(Duration::from_millis(50)) {
                Ok(true) => {
                    if let Ok(Event::Key(key)) = event::read() {
                        if key_tx.send(TuiEvent::Key(key)).await.is_err() {
                            break;
                        }
                    }
                }
                Ok(false) => {
                    // No event — send a tick so the UI redraws.
                    if key_tx.send(TuiEvent::Tick).await.is_err() {
                        break;
                    }
                }
                Err(_) => break,
            }
        }
    });
    // ── Message poll task ─────────────────────────────────────────────────────
    // Clone session state for the poll task (it needs its own SessionState).
    let poll_session = SessionState::load(
        &session.state_path.clone(),
        session.password.as_ref().map(|p| p.as_str()),
    )?;
    let poll_tx = event_tx.clone();
    tokio::task::spawn_local(poll_task(poll_session, client.clone(), poll_tx));
    // ── Main loop ─────────────────────────────────────────────────────────────
    loop {
        terminal.draw(|f| ui(f, &app)).context("draw")?;
        match event_rx.recv().await {
            None => break,
            Some(TuiEvent::Tick) => {
                // Just redraw.
            }
            Some(TuiEvent::NewMessages(msgs)) => {
                app.append_messages(msgs);
            }
            Some(TuiEvent::Key(key)) => {
                match key.code {
                    KeyCode::Char('c') if key.modifiers.contains(KeyModifiers::CONTROL) => {
                        app.should_quit = true;
                    }
                    KeyCode::Char('q') if app.input.is_empty() => {
                        app.should_quit = true;
                    }
                    KeyCode::Enter => {
                        let text = app.input.trim().to_string();
                        if !text.is_empty() {
                            app.input.clear();
                            if let Some(conv_id) = app.active_conv_id().cloned() {
                                match send_message(session, &client, &conv_id, &text).await {
                                    Ok(()) => {
                                        // Add to in-memory list immediately.
                                        app.messages.push(DisplayMessage {
                                            sender: format!("me({})", app.identity_short),
                                            body: text,
                                            timestamp_ms: now_ms(),
                                            is_outgoing: true,
                                        });
                                    }
                                    Err(_e) => {
                                        // Silently drop — user will see nothing happened.
                                    }
                                }
                            }
                        }
                    }
                    KeyCode::Char(c) => {
                        app.input.push(c);
                    }
                    KeyCode::Backspace => {
                        app.input.pop();
                    }
                    KeyCode::Up => {
                        app.scroll_up();
                    }
                    KeyCode::Down => {
                        app.scroll_down();
                    }
                    // Bug fix: most terminals deliver Shift+Tab as BackTab, not
                    // Tab with the SHIFT modifier, so the old check never fired.
                    // Handle both forms.
                    KeyCode::BackTab => {
                        app.select_prev_channel(session);
                        app.reload_messages(session)?;
                    }
                    KeyCode::Tab => {
                        if key.modifiers.contains(KeyModifiers::SHIFT) {
                            app.select_prev_channel(session);
                        } else {
                            app.select_next_channel(session);
                        }
                        app.reload_messages(session)?;
                    }
                    _ => {}
                }
            }
        }
        if app.should_quit {
            break;
        }
    }
    Ok(())
}
// ── Startup helpers ───────────────────────────────────────────────────────────
/// Generate a fresh KeyPackage for this identity and upload it (plus the
/// hybrid public key, when one is configured) so peers can invite us.
async fn auto_upload_keys_tui(
    session: &SessionState,
    client: &node_service::Client,
) -> anyhow::Result<()> {
    // Prefer a persistent keystore next to the state file; fall back to an
    // in-memory one when it cannot be opened.
    let keystore_path = session.state_path.with_extension("pending.ks");
    let keystore = match DiskKeyStore::persistent(&keystore_path) {
        Ok(ks) => ks,
        Err(_) => DiskKeyStore::ephemeral(),
    };
    let mut member =
        GroupMember::new_with_state(Arc::clone(&session.identity), keystore, None, false);
    let key_package = member.generate_key_package().context("generate KeyPackage")?;
    let identity_key = session.identity.public_key_bytes();
    upload_key_package(client, &identity_key, &key_package).await?;
    if let Some(ref hybrid) = session.hybrid_kp {
        upload_hybrid_key(client, &identity_key, &hybrid.public_key()).await?;
    }
    Ok(())
}
/// Resolve the access token for the TUI, in priority order:
/// 1. an explicit `--access-token` from the CLI,
/// 2. a cached session stored next to the state file,
/// 3. interactive OPAQUE login (prompting for username/password as needed).
///
/// # Errors
/// Fails when prompting fails, the state file cannot be loaded, the server is
/// unreachable, the OPAQUE login is rejected, or the session cache cannot be
/// written.
async fn resolve_tui_access_token(
    state_path: &Path,
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    state_password: Option<&str>,
    username: Option<&str>,
    opaque_password: Option<&str>,
    cli_access_token: &str,
) -> anyhow::Result<String> {
    if !cli_access_token.is_empty() {
        return Ok(cli_access_token.to_string());
    }
    if let Some(cached) = load_cached_session(state_path, state_password) {
        return Ok(cached.token_hex);
    }
    // Interactive path: prompt for anything not supplied on the CLI.
    let username = match username {
        Some(u) => u.to_string(),
        None => {
            use std::io::Write;
            eprint!("Username: ");
            std::io::stderr().flush().ok();
            let mut input = String::new();
            std::io::stdin()
                .read_line(&mut input)
                .context("failed to read username")?;
            let trimmed = input.trim().to_string();
            anyhow::ensure!(!trimmed.is_empty(), "username is required");
            trimmed
        }
    };
    let opaque_password = match opaque_password {
        Some(p) => p.to_string(),
        None => {
            // Bug fix: show a prompt before the hidden read — previously the
            // terminal just sat silent, unlike the username prompt above.
            use std::io::Write;
            eprint!("Password: ");
            std::io::stderr().flush().ok();
            rpassword::read_password().context("failed to read password")?
        }
    };
    let state = load_or_init_state(state_path, state_password)?;
    let identity = IdentityKeypair::from_seed(state.identity_seed);
    let identity_key = identity.public_key_bytes().to_vec();
    let node_client = connect_node(server, ca_cert, server_name).await?;
    // Best-effort registration: an "already registered" error is expected and
    // ignored; a genuine failure will surface as a login failure below.
    let _ = opaque_register(&node_client, &username, &opaque_password, Some(&identity_key)).await;
    let token_bytes = opaque_login(&node_client, &username, &opaque_password, &identity_key)
        .await
        .context("OPAQUE login failed")?;
    let token_hex = hex::encode(&token_bytes);
    save_cached_session(state_path, &username, &token_hex, state_password)?;
    Ok(token_hex)
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,177 +0,0 @@
//! quicprochat CLI client library.
//!
//! # KeyPackage expiry and refresh
//!
//! KeyPackages are single-use (consumed when someone fetches them for an invite) and the server
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `qpc refresh-keypackage`
//! periodically (e.g. before the server TTL) or after your KeyPackage was consumed:
//!
//! ```bash
//! qpc refresh-keypackage --state qpc-state.bin --server 127.0.0.1:7000
//! ```
//!
//! Use the same `--access-token` (or `QPQ_ACCESS_TOKEN`) as for other authenticated
//! commands. See the [running-the-client](https://docs.quicprochat.dev/getting-started/running-the-client)
//! docs for details.
use std::sync::RwLock;
use std::sync::atomic::{AtomicBool, Ordering};
use zeroize::Zeroizing;
pub mod client;
#[cfg(feature = "v2")]
pub mod v2_commands;
pub use client::commands::{
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
cmd_fetch_key, cmd_health, cmd_health_json, cmd_invite, cmd_join, cmd_login, cmd_ping,
cmd_recv, cmd_register, cmd_register_state, cmd_refresh_keypackage, cmd_register_user,
cmd_send, cmd_whoami, opaque_login, receive_pending_plaintexts, whoami_json,
};
pub use client::command_engine::{Command, CommandRegistry, CommandResult};
#[cfg(feature = "playbook")]
pub use client::playbook::{Playbook, PlaybookReport, PlaybookRunner};
pub use client::repl::run_repl;
pub use client::rpc::{connect_node, connect_node_opt, create_channel, enqueue, fetch_wait, resolve_user};
// ── ClientContext: structured holder for session-scoped auth + TLS config ────
/// Holds the authentication credentials and TLS policy for a client session.
///
/// Prefer constructing a `ClientContext` and passing it explicitly where
/// possible. The global `AUTH_CONTEXT` / `INSECURE_SKIP_VERIFY` statics
/// delegate to a `ClientContext` under the hood and exist only for backward
/// compatibility with call-sites that have not yet been migrated.
pub struct ClientContext {
    // Session credentials; `None` until `set_auth` is called.
    auth: RwLock<Option<ClientAuth>>,
    // When true, outgoing TLS connections skip certificate verification.
    insecure_skip_verify: AtomicBool,
}
impl ClientContext {
    /// Create a context with no credentials and TLS verification turned on.
    pub fn new() -> Self {
        Self {
            auth: RwLock::new(None),
            insecure_skip_verify: AtomicBool::new(false),
        }
    }
    /// Create a context pre-loaded with auth credentials.
    pub fn with_auth(auth: ClientAuth) -> Self {
        let ctx = Self::new();
        ctx.set_auth(auth);
        ctx
    }
    /// Set (or replace) the auth credentials.
    ///
    /// # Panics
    /// Panics if the RwLock is poisoned (a thread panicked while holding it).
    /// A poisoned lock indicates unrecoverable state corruption.
    #[allow(clippy::expect_used)]
    pub fn set_auth(&self, ctx: ClientAuth) {
        *self.auth.write().expect("ClientContext auth lock poisoned") = Some(ctx);
    }
    /// Read the current auth snapshot (cloned).
    ///
    /// # Panics
    /// Panics if the RwLock is poisoned (a thread panicked while holding it).
    /// A poisoned lock indicates unrecoverable state corruption.
    #[allow(clippy::expect_used)]
    pub fn get_auth(&self) -> Option<ClientAuth> {
        self.auth
            .read()
            .expect("ClientContext auth lock poisoned")
            .clone()
    }
    /// Returns true if auth credentials have been set.
    ///
    /// # Panics
    /// Panics if the RwLock is poisoned (a thread panicked while holding it).
    /// A poisoned lock indicates unrecoverable state corruption.
    #[allow(clippy::expect_used)]
    pub fn is_authenticated(&self) -> bool {
        self.auth
            .read()
            .expect("ClientContext auth lock poisoned")
            .is_some()
    }
    /// Enable or disable insecure TLS mode.
    pub fn set_insecure_skip_verify(&self, enabled: bool) {
        self.insecure_skip_verify.store(enabled, Ordering::Relaxed);
    }
    /// Read the current insecure-skip-verify flag.
    pub fn insecure_skip_verify(&self) -> bool {
        self.insecure_skip_verify.load(Ordering::Relaxed)
    }
}
impl Default for ClientContext {
fn default() -> Self {
Self::new()
}
}
// ── Global statics (thin wrappers, kept for backward compat) ─────────────────
/// Global auth context — delegates to a process-wide `ClientContext`.
/// Prefer passing `&ClientContext` explicitly in new code. Written by
/// [`init_auth`]; starts out empty (`None`).
pub(crate) static AUTH_CONTEXT: RwLock<Option<ClientAuth>> = RwLock::new(None);
/// When `true`, [`connect_node`] skips TLS certificate verification.
/// Prefer `ClientContext::set_insecure_skip_verify` in new code.
/// Toggled by the free function [`set_insecure_skip_verify`]; off by default.
pub(crate) static INSECURE_SKIP_VERIFY: AtomicBool = AtomicBool::new(false);
/// Enable or disable insecure (no-verify) TLS mode globally.
///
/// **Development only.** When enabled, all outgoing connections skip certificate
/// verification, making them vulnerable to MITM attacks.
///
/// Uses `Ordering::Relaxed`: the flag is independent of any other shared data.
pub fn set_insecure_skip_verify(enabled: bool) {
    INSECURE_SKIP_VERIFY.store(enabled, Ordering::Relaxed);
}
/// Client authentication credentials: an access token plus an optional
/// device identifier.
#[derive(Clone, Debug)]
pub struct ClientAuth {
    // Credential format version; both constructors currently set 1.
    pub(crate) version: u16,
    /// Bearer or OPAQUE session token. Zeroized on drop. (M8)
    pub(crate) access_token: Zeroizing<Vec<u8>>,
    // Device id bytes; empty when no device id was supplied.
    pub(crate) device_id: Vec<u8>,
}
impl ClientAuth {
    /// Build a client auth context from a token string and optional device id.
    pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
        // A string token is just its UTF-8 bytes; delegate to `from_raw`.
        Self::from_raw(access_token.into_bytes(), device_id)
    }
    /// Build from raw token bytes (e.g. a 32-byte OPAQUE session token).
    pub fn from_raw(raw_token: Vec<u8>, device_id: Option<String>) -> Self {
        Self {
            version: 1,
            access_token: Zeroizing::new(raw_token),
            device_id: device_id.map(String::into_bytes).unwrap_or_default(),
        }
    }
}
/// Set (or replace) the global auth context.
///
/// # Panics
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
/// A poisoned lock indicates unrecoverable state corruption.
#[allow(clippy::expect_used)]
pub fn init_auth(ctx: ClientAuth) {
    *AUTH_CONTEXT.write().expect("AUTH_CONTEXT poisoned") = Some(ctx);
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,228 +0,0 @@
//! v2 CLI command implementations — thin wrappers over the SDK.
use quicprochat_sdk::client::QpqClient;
use quicprochat_sdk::error::SdkError;
/// Register a new user account via OPAQUE and print the resulting identity.
pub async fn cmd_register_user(
    client: &mut QpqClient,
    username: &str,
    password: &str,
) -> Result<(), SdkError> {
    client.register(username, password).await?;
    // Empty key falls back to an empty hex string.
    let identity = client.identity_key().unwrap_or_default();
    println!("registered user: {username}");
    println!("identity key : {}", hex::encode(identity));
    Ok(())
}
/// Log in via OPAQUE and print session info.
pub async fn cmd_login(
    client: &mut QpqClient,
    username: &str,
    password: &str,
) -> Result<(), SdkError> {
    client.login(username, password).await?;
    println!("logged in as: {username}");
    // The identity key line is only printed when the client has one.
    let Some(key) = client.identity_key() else {
        return Ok(());
    };
    println!("identity key: {}", hex::encode(key));
    Ok(())
}
/// Print local identity information.
pub fn cmd_whoami(client: &QpqClient) {
    if let Some(u) = client.username() {
        println!("username : {u}");
    } else {
        println!("username : (not logged in)");
    }
    if let Some(k) = client.identity_key() {
        println!("identity key: {}", hex::encode(k));
    } else {
        println!("identity key: (none)");
    }
    println!("connected : {}", client.is_connected());
    println!("authenticated: {}", client.is_authenticated());
}
/// Health check — connect to the server and report status.
///
/// The reported "rtt" is the time spent establishing the connection (near
/// zero when already connected), not a dedicated round-trip probe.
pub async fn cmd_health(client: &mut QpqClient) -> Result<(), SdkError> {
    let start = std::time::Instant::now();
    // The SDK connect() already establishes a QUIC connection; only connect
    // when we aren't connected yet.
    if !client.is_connected() {
        client.connect().await?;
    }
    let elapsed_ms = start.elapsed().as_millis();
    println!("status : ok");
    println!("rtt : {elapsed_ms}ms");
    Ok(())
}
/// Resolve a username to its identity key.
pub async fn cmd_resolve(client: &mut QpqClient, username: &str) -> Result<(), SdkError> {
    let rpc = client.rpc()?;
    let lookup = quicprochat_sdk::users::resolve_user(rpc, username).await?;
    if let Some(key) = lookup {
        println!("{username} -> {}", hex::encode(&key));
    } else {
        println!("{username}: not found");
    }
    Ok(())
}
/// List registered devices in a fixed-width table.
pub async fn cmd_devices_list(client: &mut QpqClient) -> Result<(), SdkError> {
    let rpc = client.rpc()?;
    let devices = quicprochat_sdk::devices::list_devices(rpc).await?;
    if devices.is_empty() {
        println!("no devices registered");
        return Ok(());
    }
    println!("{:<36} {:<20} {}", "DEVICE ID", "NAME", "REGISTERED AT");
    for device in &devices {
        println!(
            "{:<36} {:<20} {}",
            hex::encode(&device.device_id),
            device.device_name,
            device.registered_at,
        );
    }
    Ok(())
}
/// Register a new device, reporting whether it was newly added.
pub async fn cmd_devices_register(
    client: &mut QpqClient,
    device_id: &str,
    device_name: &str,
) -> Result<(), SdkError> {
    let rpc = client.rpc()?;
    let id_bytes = hex::decode(device_id)
        .map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
    let was_new = quicprochat_sdk::devices::register_device(rpc, &id_bytes, device_name).await?;
    let verb = if was_new { "registered" } else { "already registered" };
    println!("device {verb}: {device_name}");
    Ok(())
}
/// Revoke a device by its hex-encoded id.
pub async fn cmd_devices_revoke(
    client: &mut QpqClient,
    device_id: &str,
) -> Result<(), SdkError> {
    let rpc = client.rpc()?;
    let id_bytes = hex::decode(device_id)
        .map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
    let revoked = quicprochat_sdk::devices::revoke_device(rpc, &id_bytes).await?;
    let outcome = if revoked { "device revoked" } else { "device not found" };
    println!("{outcome}: {device_id}");
    Ok(())
}
/// Set up account recovery — generate codes and upload encrypted bundles.
///
/// The generated codes are printed exactly once and are not stored locally.
pub async fn cmd_recovery_setup(client: &mut QpqClient) -> Result<(), SdkError> {
    // Load identity seed from state file.
    // NOTE(review): loaded with password `None` — confirm this still works
    // when the state file was saved with a password.
    let state_path = client.config_state_path();
    let stored = quicprochat_sdk::state::load_state(&state_path, None)
        .map_err(|e| SdkError::Crypto(format!("load identity for recovery: {e}")))?;
    let rpc = client.rpc()?;
    let codes =
        quicprochat_sdk::recovery::setup_recovery(rpc, &stored.identity_seed, &[]).await?;
    println!("=== RECOVERY CODES ===");
    println!("Save these codes securely. They will NOT be shown again.");
    println!("Each code can independently recover your account.");
    println!();
    // Codes are 1-indexed for the user.
    for (i, code) in codes.iter().enumerate() {
        println!(" {}. {}", i + 1, code);
    }
    println!();
    println!("{} codes generated and uploaded.", codes.len());
    Ok(())
}
// ── Outbox commands ──────────────────────────────────────────────────────────
/// List pending outbox entries in a fixed-width table.
pub fn cmd_outbox_list(client: &QpqClient) -> Result<(), SdkError> {
    let store = client.conversations()?;
    let entries = quicprochat_sdk::outbox::list_pending(store)?;
    if entries.is_empty() {
        println!("outbox is empty — no pending messages");
        return Ok(());
    }
    println!("{:<6} {:<34} {:<8} PAYLOAD SIZE", "ID", "CONVERSATION", "RETRIES");
    for entry in &entries {
        println!(
            "{:<6} {:<34} {:<8} {} bytes",
            entry.id,
            entry.conversation_id.hex(),
            entry.retry_count,
            entry.payload.len(),
        );
    }
    println!("\n{} pending entries", entries.len());
    Ok(())
}
/// Retry sending all pending outbox entries and report the counts.
pub async fn cmd_outbox_retry(client: &mut QpqClient) -> Result<(), SdkError> {
    let rpc = client.rpc()?;
    let store = client.conversations()?;
    let (sent_count, failed_count) = quicprochat_sdk::outbox::flush_outbox(rpc, store).await?;
    println!("outbox flush: {sent_count} sent, {failed_count} permanently failed");
    Ok(())
}
/// Clear permanently failed outbox entries and report how many were removed.
pub fn cmd_outbox_clear(client: &QpqClient) -> Result<(), SdkError> {
    let store = client.conversations()?;
    let removed = quicprochat_sdk::outbox::clear_failed(store)?;
    println!("cleared {removed} failed outbox entries");
    Ok(())
}
/// Recover an account from a recovery code.
///
/// Fetches and decrypts the recovery bundle, restores the identity keypair
/// from the recovered seed, and writes a fresh state file. Conversation
/// (group) state is not recoverable; peers must re-invite this device.
pub async fn cmd_recovery_restore(
    client: &mut QpqClient,
    code: &str,
) -> Result<(), SdkError> {
    let rpc = client.rpc()?;
    let (identity_seed, conversation_ids) =
        quicprochat_sdk::recovery::recover_account(rpc, code).await?;
    // Restore identity.
    let keypair = quicprochat_core::IdentityKeypair::from_seed(identity_seed);
    client.set_identity_key(keypair.public_key_bytes().to_vec());
    println!("account recovered successfully");
    println!("identity key: {}", hex::encode(keypair.public_key_bytes()));
    if !conversation_ids.is_empty() {
        println!(
            "{} conversations need rejoin (peers must re-invite this device)",
            conversation_ids.len()
        );
    }
    // Save recovered state. Group and hybrid-key material start empty and
    // must be re-established.
    let state = quicprochat_sdk::state::StoredState {
        identity_seed,
        group: None,
        hybrid_key: None,
        member_keys: Vec::new(),
    };
    let state_path = client.config_state_path();
    // NOTE(review): saved with password `None` — the identity seed is written
    // without state-file encryption even if the user normally uses a
    // password; confirm this is intended.
    quicprochat_sdk::state::save_state(&state_path, &state, None)?;
    println!("state saved to {}", state_path.display());
    Ok(())
}

Some files were not shown because too many files have changed in this diff Show More