Compare commits
48 Commits
v2
...
50a63a6b96
| Author | SHA1 | Date | |
|---|---|---|---|
| 50a63a6b96 | |||
| a258f98a40 | |||
| 024b6c91d1 | |||
| ac36534063 | |||
| 7be7287ba2 | |||
| 3c6eebdb00 | |||
| eee1e9f278 | |||
| 5d1688d89f | |||
| 56331632fd | |||
| 12846bd2a0 | |||
| dd2041df20 | |||
| 65ce5aec18 | |||
| 0b3d5c5100 | |||
| cbfa7e16c4 | |||
| e2c04cf0c3 | |||
| bcde8b733c | |||
| 237f4360e4 | |||
| a055706236 | |||
| 9cbf824db6 | |||
| 3f81837112 | |||
| db49d83fda | |||
| 9b09f09892 | |||
| 92fefda41d | |||
| 84ec822823 | |||
| 01bc2a4273 | |||
| f9ac921a0c | |||
| d469999c2a | |||
| f0901f6597 | |||
| 543bd442a3 | |||
| 266bcfed59 | |||
| c256c38ffb | |||
| 416618f4cf | |||
| 872695e5f1 | |||
| e4c5868b31 | |||
| 66eca065e0 | |||
| a05da9b751 | |||
| 077f48f19c | |||
| 3708b8df41 | |||
| b98dcc27ae | |||
| 2e081ead8e | |||
| a710037dde | |||
| d8c1392587 | |||
| a9d1f535aa | |||
| aa29d3bc34 | |||
| 2a9f0b43ef | |||
| d073f614b3 | |||
| f7a7f672b4 | |||
| 189534c511 |
20
.env.example
20
.env.example
@@ -1,20 +1,20 @@
|
||||
# quicproquo Production Environment Variables
|
||||
# quicprochat Production Environment Variables
|
||||
# Copy this file to .env and fill in the values.
|
||||
|
||||
# Server auth token (required, >= 16 characters)
|
||||
QPQ_AUTH_TOKEN=
|
||||
QPC_AUTH_TOKEN=
|
||||
|
||||
# SQLCipher database encryption key (required for store_backend=sql)
|
||||
QPQ_DB_KEY=
|
||||
QPC_DB_KEY=
|
||||
|
||||
# Ports (defaults shown)
|
||||
QPQ_LISTEN_PORT=7000
|
||||
QPQ_WS_PORT=9000
|
||||
QPC_LISTEN_PORT=7000
|
||||
QPC_WS_PORT=9000
|
||||
|
||||
# Optional features
|
||||
QPQ_SEALED_SENDER=false
|
||||
QPQ_REDACT_LOGS=true
|
||||
QPQ_WS_LISTEN=
|
||||
QPC_SEALED_SENDER=false
|
||||
QPC_REDACT_LOGS=true
|
||||
QPC_WS_LISTEN=
|
||||
|
||||
# Grafana admin password
|
||||
GRAFANA_ADMIN_PASSWORD=changeme
|
||||
# Grafana admin password (required — must be strong, no default)
|
||||
GRAFANA_ADMIN_PASSWORD=
|
||||
|
||||
134
.gitea/workflows/claude.yml
Normal file
134
.gitea/workflows/claude.yml
Normal file
@@ -0,0 +1,134 @@
|
||||
name: Claude Code Assistant
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, labeled]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
concurrency:
|
||||
group: claude-${{ github.event.issue.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
claude-code:
|
||||
if: >-
|
||||
(github.event_name == 'issues' &&
|
||||
contains(toJSON(github.event.issue.labels), 'claude')) ||
|
||||
(github.event_name == 'issue_comment' &&
|
||||
contains(github.event.comment.body, '@claude') &&
|
||||
github.event.comment.user.login != 'admin')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run Claude on Issue
|
||||
env:
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
GIT_TOKEN: ${{ secrets.GIT_TOKEN }}
|
||||
run: |
|
||||
set +e
|
||||
|
||||
# Configure git
|
||||
git config user.name "Claude Bot"
|
||||
git config user.email "claude@localhost"
|
||||
git remote set-url origin "http://admin:${GIT_TOKEN}@localhost:3000/${{ github.repository }}.git"
|
||||
|
||||
ISSUE_NUMBER="${{ github.event.issue.number }}"
|
||||
ISSUE_TITLE="${{ github.event.issue.title }}"
|
||||
REPO="${{ github.repository }}"
|
||||
LABELS_JSON='${{ toJSON(github.event.issue.labels) }}'
|
||||
|
||||
# Determine model + cost limits from issue labels
|
||||
# Default: haiku (cheap). Add claude:sonnet or claude:opus for harder tasks.
|
||||
CLAUDE_MODEL="haiku"
|
||||
MAX_TURNS=15
|
||||
MAX_BUDGET="0.50"
|
||||
EFFORT="low"
|
||||
if echo "$LABELS_JSON" | grep -q '"claude:opus"'; then
|
||||
CLAUDE_MODEL="claude-opus-4-6"
|
||||
MAX_TURNS=40
|
||||
MAX_BUDGET="5.00"
|
||||
EFFORT="high"
|
||||
elif echo "$LABELS_JSON" | grep -q '"claude:sonnet"'; then
|
||||
CLAUDE_MODEL="claude-sonnet-4-6"
|
||||
MAX_TURNS=25
|
||||
MAX_BUDGET="2.00"
|
||||
EFFORT="medium"
|
||||
fi
|
||||
|
||||
ISSUE_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
|
||||
|
||||
COMMENT_BODY=""
|
||||
if [ "${{ github.event_name }}" = "issue_comment" ]; then
|
||||
COMMENT_ID="${{ github.event.comment.id }}"
|
||||
COMMENT_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/comments/${COMMENT_ID}" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
|
||||
fi
|
||||
|
||||
BRANCH="claude/issue-${ISSUE_NUMBER}"
|
||||
git checkout -b "${BRANCH}"
|
||||
|
||||
# Run Claude Code with cost controls
|
||||
claude -p "You are working on the repository ${REPO} (Gitea instance at http://localhost:3000).
|
||||
A Gitea issue needs your attention:
|
||||
|
||||
Issue #${ISSUE_NUMBER}: ${ISSUE_TITLE}
|
||||
Description: ${ISSUE_BODY}
|
||||
Additional context: ${COMMENT_BODY}
|
||||
|
||||
IMPORTANT RULES:
|
||||
- Do NOT retry failed commands more than once. If something fails twice, stop and report the error.
|
||||
- Do NOT loop on failing tests. Fix the obvious issue or report it. Never run the same failing command 3+ times.
|
||||
- If you cannot complete the task, push what you have, create the PR as draft, and explain what is blocked.
|
||||
- Be efficient: read only files you need, make targeted edits, avoid unnecessary exploration.
|
||||
|
||||
Steps:
|
||||
1. Read and understand the relevant parts of the codebase
|
||||
2. Implement the requested changes
|
||||
3. Commit your changes with a descriptive message
|
||||
4. Push branch ${BRANCH} to origin
|
||||
5. Create a pull request targeting main that references issue #${ISSUE_NUMBER}
|
||||
6. Post a comment on issue #${ISSUE_NUMBER} summarizing what you did
|
||||
|
||||
Git is configured. You are on branch ${BRANCH}. Work in the current directory.
|
||||
Use git commands to push, and curl to the Gitea API for PR creation and comments.
|
||||
Gitea API token is available as env var GIT_TOKEN." \
|
||||
--allowedTools "Bash,Read,Edit,Write,Glob,Grep" \
|
||||
--model "${CLAUDE_MODEL}" \
|
||||
--max-turns "${MAX_TURNS}" \
|
||||
--max-budget-usd "${MAX_BUDGET}" \
|
||||
--effort "${EFFORT}" \
|
||||
--permission-mode bypassPermissions \
|
||||
--output-format json 2>&1 > /tmp/claude-result.json
|
||||
|
||||
CLAUDE_EXIT=$?
|
||||
|
||||
# Extract cost from JSON output
|
||||
COST=$(python3 -c "
|
||||
import json
|
||||
with open('/tmp/claude-result.json') as f:
|
||||
data = json.load(f)
|
||||
cost = data.get('total_cost_usd', 0)
|
||||
print(f'\${cost:.4f}')
|
||||
" 2>/dev/null || echo "unknown")
|
||||
|
||||
# Amend the last commit to include cost and model
|
||||
if git log --oneline main..HEAD 2>/dev/null | head -1 | grep -q .; then
|
||||
LAST_MSG=$(git log -1 --format=%B)
|
||||
git commit --amend -m "${LAST_MSG}
|
||||
|
||||
Claude model: ${CLAUDE_MODEL} | API cost: ${COST}" --no-verify
|
||||
git push origin "${BRANCH}" --force
|
||||
fi
|
||||
|
||||
# Post cost as comment
|
||||
curl -s -X POST "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}/comments" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"body\": \"Done (model: **${CLAUDE_MODEL}**, effort: ${EFFORT}, budget cap: \$${MAX_BUDGET}). API cost: **${COST}**\"}" > /dev/null
|
||||
|
||||
exit ${CLAUDE_EXIT}
|
||||
20
.github/CODEOWNERS
vendored
20
.github/CODEOWNERS
vendored
@@ -1,4 +1,4 @@
|
||||
# Code owners for quicproquo. PRs require review from owners.
|
||||
# Code owners for quicprochat. PRs require review from owners.
|
||||
# See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||
# Replace 'maintainers' with your GitHub user/team handle.
|
||||
|
||||
@@ -6,32 +6,32 @@
|
||||
* @maintainers
|
||||
|
||||
# Security-critical: crypto primitives, MLS, hybrid KEM
|
||||
/crates/quicproquo-core/ @maintainers
|
||||
/crates/quicprochat-core/ @maintainers
|
||||
|
||||
# Wire format: protobuf definitions, Cap'n Proto schemas
|
||||
/crates/quicproquo-proto/ @maintainers
|
||||
/crates/quicprochat-proto/ @maintainers
|
||||
/proto/ @maintainers
|
||||
|
||||
# Auth and server-side domain logic
|
||||
/crates/quicproquo-server/ @maintainers
|
||||
/crates/quicprochat-server/ @maintainers
|
||||
|
||||
# Client SDK: auth, conversation store, messaging pipeline
|
||||
/crates/quicproquo-sdk/ @maintainers
|
||||
/crates/quicprochat-sdk/ @maintainers
|
||||
|
||||
# CLI/TUI client
|
||||
/crates/quicproquo-client/ @maintainers
|
||||
/crates/quicprochat-client/ @maintainers
|
||||
|
||||
# RPC framework: framing, middleware, QUIC transport
|
||||
/crates/quicproquo-rpc/ @maintainers
|
||||
/crates/quicprochat-rpc/ @maintainers
|
||||
|
||||
# Key transparency
|
||||
/crates/quicproquo-kt/ @maintainers
|
||||
/crates/quicprochat-kt/ @maintainers
|
||||
|
||||
# Plugin ABI (no_std C-ABI boundary)
|
||||
/crates/quicproquo-plugin-api/ @maintainers
|
||||
/crates/quicprochat-plugin-api/ @maintainers
|
||||
|
||||
# P2P transport
|
||||
/crates/quicproquo-p2p/ @maintainers
|
||||
/crates/quicprochat-p2p/ @maintainers
|
||||
|
||||
# CI and infrastructure
|
||||
/.github/ @maintainers
|
||||
|
||||
2
.github/workflows/bench.yml
vendored
2
.github/workflows/bench.yml
vendored
@@ -35,7 +35,7 @@ jobs:
|
||||
${{ runner.os }}-bench-
|
||||
|
||||
- name: Run benchmarks
|
||||
run: cargo bench --package quicproquo-core -- --output-format=bencher 2>&1 | tee bench-output.txt
|
||||
run: cargo bench --package quicprochat-core -- --output-format=bencher 2>&1 | tee bench-output.txt
|
||||
|
||||
- name: Upload HTML reports
|
||||
uses: actions/upload-artifact@v4
|
||||
|
||||
53
.github/workflows/ci.yml
vendored
53
.github/workflows/ci.yml
vendored
@@ -102,7 +102,7 @@ jobs:
|
||||
- name: Run coverage
|
||||
run: |
|
||||
cargo tarpaulin --workspace \
|
||||
--exclude quicproquo-p2p \
|
||||
--exclude quicprochat-p2p \
|
||||
--out xml \
|
||||
--output-dir coverage/ \
|
||||
-- --test-threads 1
|
||||
@@ -113,6 +113,57 @@ jobs:
|
||||
name: coverage-report
|
||||
path: coverage/cobertura.xml
|
||||
|
||||
msrv:
|
||||
name: MSRV Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install MSRV Rust (1.75)
|
||||
uses: dtolnay/rust-action@1.75
|
||||
with:
|
||||
components: clippy
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-msrv-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-msrv-
|
||||
|
||||
- name: Check MSRV
|
||||
run: cargo check --workspace
|
||||
|
||||
macos:
|
||||
name: macOS Build Check
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Check build
|
||||
run: cargo check --workspace
|
||||
|
||||
docker:
|
||||
name: Docker Build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
8
.github/workflows/openwrt.yml
vendored
8
.github/workflows/openwrt.yml
vendored
@@ -43,11 +43,11 @@ jobs:
|
||||
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: '1'
|
||||
CARGO_PROFILE_RELEASE_STRIP: symbols
|
||||
run: |
|
||||
cargo zigbuild --release --target ${{ matrix.target }} --bin qpq-server
|
||||
cargo zigbuild --release --target ${{ matrix.target }} --bin qpc-server
|
||||
|
||||
- name: Check binary size
|
||||
run: |
|
||||
BINARY="target/${{ matrix.target }}/release/qpq-server"
|
||||
BINARY="target/${{ matrix.target }}/release/qpc-server"
|
||||
SIZE=$(stat -c%s "$BINARY")
|
||||
SIZE_MB=$(echo "scale=2; $SIZE / 1048576" | bc)
|
||||
echo "Binary size: ${SIZE_MB} MB"
|
||||
@@ -60,6 +60,6 @@ jobs:
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: qpq-server-${{ matrix.target }}
|
||||
path: target/${{ matrix.target }}/release/qpq-server
|
||||
name: qpc-server-${{ matrix.target }}
|
||||
path: target/${{ matrix.target }}/release/qpc-server
|
||||
retention-days: 30
|
||||
|
||||
12
.gitignore
vendored
12
.gitignore
vendored
@@ -1,4 +1,6 @@
|
||||
/target
|
||||
**/target/
|
||||
node_modules/
|
||||
**/*.rs.bk
|
||||
.vscode/
|
||||
gitea-mcp.json
|
||||
@@ -16,4 +18,12 @@ data/
|
||||
*.convdb-shm
|
||||
*.convdb-wal
|
||||
*.pending.ks
|
||||
qpq-server.toml
|
||||
qpc-server.toml
|
||||
|
||||
# Internal planning docs (not for public distribution)
|
||||
docs/internal/
|
||||
|
||||
# AI development workflow files
|
||||
CLAUDE.md
|
||||
master-prompt.md
|
||||
scripts/ai_team.py
|
||||
|
||||
23
CLAUDE.md
23
CLAUDE.md
@@ -1,23 +0,0 @@
|
||||
# quicproquo — Claude Code Instructions
|
||||
|
||||
## Agent Team Workflow Rules
|
||||
|
||||
### NEVER delete worktrees before preserving changes
|
||||
When using agent teams with `isolation: "worktree"`:
|
||||
1. **Before calling `TeamDelete`**, always check each worktree for uncommitted or committed changes
|
||||
2. **Create a named branch** from each worktree's HEAD and push/preserve it before cleanup
|
||||
3. **Preferred pattern**: use `git branch fix/<name> <worktree-HEAD-sha>` to save the work
|
||||
4. If an agent reports changes, its worktree branch MUST be merged or saved before the team is deleted
|
||||
|
||||
### Agent team best practices
|
||||
- Always have agents **commit their changes** with descriptive messages before shutting them down
|
||||
- After all agents report, **list worktrees** (`git worktree list`) and **save branches** before cleanup
|
||||
- When using worktree isolation, the sequence must be: agents finish → save branches → merge → TeamDelete
|
||||
- Never call TeamDelete as a shortcut to kill zombie agents — use `rm -rf ~/.claude/teams/<name>` for the team metadata only, preserving worktree dirs
|
||||
|
||||
### Git workflow
|
||||
- Conventional commits: `feat:`, `fix:`, `chore:`, `docs:`, `test:`, `refactor:`
|
||||
- GPG-signed commits only
|
||||
- No `Co-authored-by` trailers
|
||||
- No `.unwrap()` on crypto or I/O in non-test paths
|
||||
- Secrets: zeroize on drop, never in logs
|
||||
40
CONTRIBUTING.md
Normal file
40
CONTRIBUTING.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Contributing to quicprochat
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Rust toolchain** (stable) via [rustup](https://rustup.rs/)
|
||||
- **protoc** is vendored via the `protobuf-src` crate -- no system installation needed
|
||||
- Git with GPG signing configured
|
||||
|
||||
## Building and Testing
|
||||
|
||||
```sh
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
```
|
||||
|
||||
A `justfile` is also available for common tasks (`just build`, `just test`, `just proto`, etc.).
|
||||
|
||||
## Code Standards
|
||||
|
||||
### Commits
|
||||
|
||||
- **Conventional commits**: `feat:`, `fix:`, `docs:`, `chore:`, `test:`, `refactor:`
|
||||
- Commits must be **GPG-signed**
|
||||
- Commit messages describe *why*, not just *what*
|
||||
- No `Co-authored-by` trailers
|
||||
|
||||
### Rust
|
||||
|
||||
- No `.unwrap()` on crypto or I/O operations outside of tests
|
||||
- Secrets must be zeroized on drop and never logged
|
||||
- No stubs, `todo!()`, or `unimplemented!()` in production code
|
||||
- Prefer clarity over cleverness; avoid unnecessary abstractions
|
||||
|
||||
## Security Vulnerabilities
|
||||
|
||||
Do not open public issues for security bugs. See [SECURITY.md](SECURITY.md) for responsible disclosure instructions.
|
||||
|
||||
## Licensing
|
||||
|
||||
The server crate (`quicprochat-server`) is licensed under **AGPL-3.0**. All other crates are dual-licensed under **Apache-2.0 / MIT**. By submitting a contribution, you agree to license your work under the applicable license(s).
|
||||
891
Cargo.lock
generated
891
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
42
Cargo.toml
42
Cargo.toml
@@ -1,29 +1,38 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/quicproquo-core",
|
||||
"crates/quicproquo-proto",
|
||||
"crates/quicproquo-plugin-api",
|
||||
"crates/quicproquo-kt",
|
||||
"crates/quicproquo-rpc",
|
||||
"crates/quicproquo-sdk",
|
||||
"crates/quicproquo-server",
|
||||
"crates/quicproquo-client",
|
||||
"crates/quicprochat-core",
|
||||
"crates/quicprochat-proto",
|
||||
"crates/quicprochat-plugin-api",
|
||||
"crates/quicprochat-kt",
|
||||
"crates/quicprochat-rpc",
|
||||
"crates/quicprochat-sdk",
|
||||
"crates/quicprochat-server",
|
||||
"crates/quicprochat-client",
|
||||
# P2P crate uses iroh (~90 extra deps). Only compiled when the `mesh`
|
||||
# feature is enabled on quicproquo-client.
|
||||
"crates/quicproquo-p2p",
|
||||
# feature is enabled on quicprochat-client.
|
||||
"crates/quicprochat-p2p",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
edition = "2021"
|
||||
rust-version = "1.75"
|
||||
repository = "https://github.com/quicprochat/quicprochat"
|
||||
description = "End-to-end encrypted group messaging over QUIC"
|
||||
keywords = ["encryption", "messaging", "quic", "mls", "post-quantum"]
|
||||
categories = ["cryptography", "network-programming"]
|
||||
|
||||
# Shared dependency versions — bump here to affect the whole workspace.
|
||||
[workspace.dependencies]
|
||||
|
||||
# ── Crypto ────────────────────────────────────────────────────────────────────
|
||||
openmls = { version = "0.5", default-features = false, features = ["crypto-subtle"] }
|
||||
openmls_rust_crypto = { version = "0.2" }
|
||||
openmls_traits = { version = "0.2" }
|
||||
# tls_codec must match the version used by openmls 0.5 (which uses 0.3) to avoid
|
||||
openmls = { version = "0.8" }
|
||||
openmls_rust_crypto = { version = "0.5" }
|
||||
openmls_traits = { version = "0.5" }
|
||||
openmls_memory_storage = { version = "0.5" }
|
||||
# tls_codec must match the version used by openmls 0.8 (which uses 0.4) to avoid
|
||||
# duplicate Serialize trait versions in the dependency graph.
|
||||
tls_codec = { version = "0.3", features = ["derive"] }
|
||||
tls_codec = { version = "0.4", features = ["derive"] }
|
||||
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
|
||||
ml-kem = { version = "0.2" }
|
||||
x25519-dalek = { version = "2", features = ["static_secrets"] }
|
||||
@@ -79,7 +88,8 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
anyhow = { version = "1" }
|
||||
thiserror = { version = "1" }
|
||||
|
||||
# ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
# ── Config / CLI ──────────────────────────────────────────────────────────────
|
||||
toml = { version = "0.8" }
|
||||
clap = { version = "4", features = ["derive", "env"] }
|
||||
rustyline = { version = "14" }
|
||||
|
||||
|
||||
30
LICENSE
Normal file
30
LICENSE
Normal file
@@ -0,0 +1,30 @@
|
||||
quicproquo — Split Licensing
|
||||
============================
|
||||
|
||||
This project uses a split license model similar to Signal:
|
||||
|
||||
Server (quicproquo-server)
|
||||
--------------------------
|
||||
Licensed under the GNU Affero General Public License v3.0 only.
|
||||
See LICENSE-AGPL-3.0 for the full text.
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
Libraries and SDKs (all other crates)
|
||||
--------------------------------------
|
||||
Licensed under either of
|
||||
|
||||
* Apache License, Version 2.0 (LICENSE-APACHE)
|
||||
* MIT License (LICENSE-MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||
|
||||
Contribution
|
||||
------------
|
||||
Unless you explicitly state otherwise, any contribution intentionally
|
||||
submitted for inclusion in this project by you, as defined in the
|
||||
Apache-2.0 license, shall be dual licensed as above (for library crates)
|
||||
or AGPL-3.0-only (for the server crate), without any additional terms or
|
||||
conditions.
|
||||
661
LICENSE-AGPL-3.0
Normal file
661
LICENSE-AGPL-3.0
Normal file
@@ -0,0 +1,661 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
199
LICENSE-APACHE
Normal file
199
LICENSE-APACHE
Normal file
@@ -0,0 +1,199 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to the Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by the Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding any notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
21
LICENSE-MIT
Normal file
21
LICENSE-MIT
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) quicprochat contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
522
README.md
522
README.md
@@ -1,340 +1,274 @@
|
||||
<p align="center">
|
||||
<img src="assets/logo.png" alt="QPQ logo" width="200">
|
||||
<img src="assets/logo.png" alt="quicprochat" width="160">
|
||||
</p>
|
||||
|
||||
# QPQ — quicproquo
|
||||
<h1 align="center">quicprochat</h1>
|
||||
|
||||
[](https://github.com/xorwell/quicproquo/actions/workflows/ci.yml)
|
||||
<p align="center">
|
||||
<strong>End-to-end encrypted group messaging over QUIC, powered by MLS and post-quantum cryptography.</strong>
|
||||
</p>
|
||||
|
||||
> End-to-end encrypted messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust.
|
||||
|
||||
The server never sees plaintext. Every byte on the wire is protected by a QUIC
|
||||
transport secured with TLS 1.3 (`quinn` + `rustls`). The inner **MLS** layer
|
||||
provides forward secrecy, post-compromise security, and ratcheted group key
|
||||
agreement across any number of participants. Messages are framed with
|
||||
**Cap'n Proto** for zero-copy, schema-versioned serialisation.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Application / MLS ciphertext │ <- group key ratchet (RFC 9420)
|
||||
├─────────────────────────────────────────────┤
|
||||
│ Cap'n Proto RPC │ <- typed, schema-versioned framing
|
||||
├─────────────────────────────────────────────┤
|
||||
│ QUIC + TLS 1.3 (quinn/rustls) │ <- mutual auth + transport secrecy
|
||||
└─────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
| Property | Mechanism |
|
||||
| ------------------------- | -------------------------------------------------- |
|
||||
| Transport confidentiality | TLS 1.3 over QUIC (rustls) |
|
||||
| Transport authentication | TLS 1.3 server cert (self-signed or CA) |
|
||||
| Group key agreement | MLS `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` |
|
||||
| Post-compromise security | MLS epoch ratchet |
|
||||
| Forward secrecy | Per-epoch key schedule |
|
||||
| Identity | Ed25519 (MLS credential + leaf node signature) |
|
||||
| Password auth | OPAQUE (password never sent to server) |
|
||||
| Post-quantum readiness | X25519 + ML-KEM-768 hybrid KEM envelope |
|
||||
| Local storage encryption | SQLCipher + Argon2id + ChaCha20-Poly1305 |
|
||||
| Message framing | Cap'n Proto (unpacked wire format) |
|
||||
<p align="center">
|
||||
<a href="docs/src/design-rationale/messenger-comparison.md">Why quicprochat?</a> ·
|
||||
<a href="ROADMAP.md">Roadmap</a> ·
|
||||
<a href="docs/sdk/index.md">SDK Docs</a> ·
|
||||
<a href="docs/operations/monitoring.md">Operations</a> ·
|
||||
<a href="#quick-start">Quick Start</a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
quicprochat is a production-grade messenger where the server **never sees plaintext**. All traffic flows over QUIC/TLS 1.3, group keys are negotiated with the [MLS protocol](https://www.rfc-editor.org/rfc/rfc9420) (RFC 9420), and a hybrid X25519 + ML-KEM-768 KEM provides post-quantum confidentiality. Written in Rust. 45,000 lines of code. 301 tests.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ Application / MLS ciphertext │ ← group key ratchet (RFC 9420)
|
||||
├─────────────────────────────────────────────────┤
|
||||
│ Protobuf RPC / Cap'n Proto (legacy) │ ← typed, schema-versioned framing
|
||||
├─────────────────────────────────────────────────┤
|
||||
│ QUIC + TLS 1.3 (quinn/rustls) │ ← mutual auth + transport secrecy
|
||||
└─────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Highlights
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| **Zero-knowledge server** | Routes opaque MLS ciphertexts by recipient key — never decrypts |
|
||||
| **Post-quantum ready** | Hybrid X25519 + ML-KEM-768 KEM on both MLS and Noise layers |
|
||||
| **Password auth** | OPAQUE — password never leaves the client, not even as a hash |
|
||||
| **Forward secrecy** | MLS epoch ratchet: compromise today can't decrypt yesterday |
|
||||
| **Multi-device** | Per-device keys, delivery fan-out, up to 5 devices per account |
|
||||
| **Federation** | Server-to-server relay over QUIC with mTLS |
|
||||
| **Offline-first** | Client-side outbox with idempotent retry and gap detection |
|
||||
| **Sealed sender** | Optional anonymous enqueue — server can't see who sent a message |
|
||||
| **7 SDKs** | Rust, Go, Python, TypeScript, Swift, Kotlin/Java, Ruby |
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Build (no system dependencies — protoc is vendored)
|
||||
cargo build --workspace
|
||||
|
||||
# Run tests
|
||||
cargo test --workspace
|
||||
|
||||
# Start the server (auto-generates self-signed TLS cert)
|
||||
cargo run --bin qpc-server -- --allow-insecure-auth
|
||||
|
||||
# Interactive REPL (registers + logs in automatically)
|
||||
cargo run --bin qpc -- repl --username alice --password secret
|
||||
```
|
||||
|
||||
**Two-terminal demo:**
|
||||
|
||||
```bash
|
||||
# Terminal 1 # Terminal 2
|
||||
qpc repl -u alice -p secretA qpc repl -u bob -p secretB
|
||||
|
||||
# Alice: # Bob sees:
|
||||
/dm bob [alice] Hello, Bob!
|
||||
Hello, Bob!
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
quicprochat/
|
||||
├── crates/
|
||||
│ ├── quicprochat-core # MLS, hybrid KEM, PQ Noise, OPAQUE, recovery, padding
|
||||
│ ├── quicprochat-proto # Protobuf (prost) + Cap'n Proto generated types
|
||||
│ ├── quicprochat-rpc # QUIC RPC framework (framing, dispatch, middleware)
|
||||
│ ├── quicprochat-sdk # Client SDK (QpqClient, conversation store, outbox)
|
||||
│ ├── quicprochat-server # QUIC server, 33 RPC methods, domain services, plugins
|
||||
│ ├── quicprochat-client # CLI + REPL + TUI (Ratatui)
|
||||
│ ├── quicprochat-kt # Key transparency (Merkle-log, revocation)
|
||||
│ ├── quicprochat-p2p # iroh P2P, mesh identity, store-and-forward
|
||||
│ ├── quicprochat-ffi # C FFI (libquicprochat_ffi.so)
|
||||
│ └── quicprochat-plugin-api # Dynamic plugin hooks (C ABI)
|
||||
├── proto/qpc/v1/ # 15 .proto schema files
|
||||
├── sdks/ # Go, Python, TypeScript, Swift, Kotlin, Java, Ruby
|
||||
├── docs/ # mdBook docs, SDK guides, operational runbooks
|
||||
└── packaging/ # OpenWrt, Docker, cross-compilation
|
||||
```
|
||||
|
||||
### Security Properties
|
||||
|
||||
| Property | Mechanism |
|
||||
|---|---|
|
||||
| Transport confidentiality | TLS 1.3 over QUIC (rustls) |
|
||||
| Group key agreement | MLS `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` |
|
||||
| Post-quantum confidentiality | X25519 + ML-KEM-768 hybrid KEM (MLS + Noise layers) |
|
||||
| Forward secrecy | MLS epoch ratchet + per-epoch key schedule |
|
||||
| Post-compromise security | MLS Update proposals rotate leaf material |
|
||||
| Identity | Ed25519 long-term keys (MLS credential + leaf signature) |
|
||||
| Password authentication | OPAQUE-ke (augmented PAKE, no password on wire) |
|
||||
| Local storage | SQLCipher + Argon2id + ChaCha20-Poly1305 |
|
||||
| Key transparency | Append-only Merkle log with inclusion proofs + revocation |
|
||||
| Traffic analysis resistance | Uniform padding + configurable decoy traffic |
|
||||
|
||||
## Features
|
||||
|
||||
### Core
|
||||
### Messaging
|
||||
- **1:1 DMs** and **N-party groups** with full MLS lifecycle (add, remove, key rotation)
|
||||
- **Rich messaging** — reactions, read receipts, typing indicators, edit, delete
|
||||
- **File transfer** — chunked upload/download, SHA-256 content addressing, 50 MB limit
|
||||
- **Disappearing messages** — per-conversation TTL with server-side GC
|
||||
- **Offline queue** — messages queued locally when disconnected, flushed on reconnect
|
||||
- **Delivery proofs** — server-signed Ed25519 receipts for cryptographic send confirmation
|
||||
- **Transcript export** — encrypted, tamper-evident archives with Merkle chain verification
|
||||
|
||||
- **Interactive REPL** — multi-conversation chat with auto-register, auto-login, 40+ slash commands, background polling, and message history
|
||||
- **1:1 DMs** — dedicated channels with server-enforced membership authorization
|
||||
- **Multi-party groups** — N-member MLS groups with Commit fan-out and epoch sync
|
||||
- **OPAQUE authentication** — password-authenticated key exchange (password never leaves the client)
|
||||
- **Encrypted local storage** — SQLCipher database + encrypted session tokens (Argon2id + ChaCha20-Poly1305)
|
||||
- **Persistent state** — server and client survive restarts; SQLite/SQLCipher or file-backed storage
|
||||
- **Rich messaging** — reactions, read receipts, typing indicators, message editing, message deletion
|
||||
- **File transfer** — chunked upload/download with SHA-256 content addressing, MIME detection, 50 MB limit
|
||||
- **Disappearing messages** — per-conversation TTL with server-side GC (`/disappear 30m`, `1h`, `1d`, `7d`)
|
||||
- **Account deletion** — transactional purge of all user data, sessions, and channel memberships (GDPR-ready)
|
||||
- **Self-DM notepad** — send messages to yourself (local-only, no server round-trip)
|
||||
- **Certificate pinning** — pass the server cert as `--ca-cert` to trust only that server
|
||||
- **Federation** — server-to-server message relay via Cap'n Proto RPC over QUIC with mTLS
|
||||
- **mDNS discovery** — servers announce `_quicproquo._udp.local.`; clients auto-discover nearby nodes
|
||||
- **Sealed sender mode** — optional anonymous enqueue (sender identity inside MLS ciphertext only)
|
||||
- **Prometheus metrics** — `--metrics-listen` exposes `/metrics` endpoint for monitoring
|
||||
- **Dynamic plugin system** — load `.so`/`.dylib` plugins at runtime via `--plugin-dir`
|
||||
- **Safety numbers** — `/verify <username>` for out-of-band key verification (60-digit numeric code)
|
||||
- **Transcript export** — encrypted, tamper-evident message archives with hash-chain integrity verification
|
||||
- **MLS key rotation** — `/update-key` rotates MLS leaf node material with epoch advancement
|
||||
### Infrastructure
|
||||
- **Multi-device** — per-device keys and delivery fan-out (up to 5 devices)
|
||||
- **Account recovery** — 8 recovery codes, encrypted bundles, zero-knowledge server
|
||||
- **Federation** — server-to-server relay with mTLS and cross-server user resolution
|
||||
- **Abuse prevention** — user blocking, message reporting, ban enforcement, admin tools
|
||||
- **Graceful shutdown** — SIGTERM drain with configurable timeout, health endpoint awareness
|
||||
- **Rate limiting** — sliding window algorithm, trait-based for Redis swap
|
||||
- **Observability** — request correlation IDs, per-endpoint latency histograms, structured audit log
|
||||
- **Dynamic plugins** — load `.so`/`.dylib` at runtime via `--plugin-dir` (6 hook points)
|
||||
- **Mesh networking** — iroh P2P, mDNS discovery, store-and-forward, broadcast channels
|
||||
|
||||
### Mesh & P2P Features
|
||||
|
||||
The `quicprochat-p2p` crate provides a full **serverless mesh networking stack**:
|
||||
|
||||
| Feature | Module | Description |
|
||||
|---------|--------|-------------|
|
||||
| **P2P Transport** | `P2pNode` | Direct QUIC connections via iroh with NAT traversal |
|
||||
| **Mesh Identity** | `MeshIdentity` | Ed25519 keypairs with 16-byte truncated addresses |
|
||||
| **Mesh Envelope** | `MeshEnvelope` | Encrypted, signed, TTL-aware message containers |
|
||||
| **Store-and-Forward** | `MeshStore` | Queue messages for offline recipients |
|
||||
| **Multi-Hop Routing** | `MeshRouter` | Distributed routing table, forward through intermediaries |
|
||||
| **Announce Protocol** | `MeshAnnounce` | Signed peer discovery with capability flags |
|
||||
| **Broadcast Channels** | `BroadcastManager` | Pub/sub with symmetric key encryption |
|
||||
| **Transport Abstraction** | `TransportManager` | Iroh, TCP, LoRa — route by address type |
|
||||
| **LoRa Transport** | `transport_lora` | Duty-cycle aware, fragmentation, SF12 support |
|
||||
| **MLS-Lite** | `mls_lite` | Lightweight symmetric mode for constrained links |
|
||||
| **FAPP** | `fapp` + `fapp_router` | Free Appointment Propagation Protocol (see below) |
|
||||
|
||||
#### FAPP — Decentralized Appointment Discovery
|
||||
|
||||
**Problem:** In Germany, finding a psychotherapist takes 3–6 months due to artificial slot visibility limits.
|
||||
|
||||
**Solution:** FAPP lets licensed therapists announce free slots into the mesh. Patients discover and reserve slots anonymously — no central registry.
|
||||
|
||||
```rust
|
||||
// Therapist publishes slots
|
||||
let announce = SlotAnnounce::new(
|
||||
&therapist_identity,
|
||||
vec![Fachrichtung::Verhaltenstherapie],
|
||||
vec![Modalitaet::Praxis, Modalitaet::Video],
|
||||
vec![Kostentraeger::GKV],
|
||||
"80331", // PLZ only, never exact address
|
||||
slots,
|
||||
approbation_hash,
|
||||
sequence,
|
||||
);
|
||||
fapp_router.broadcast_announce(announce)?;
|
||||
|
||||
// Patient queries anonymously
|
||||
let query = SlotQuery {
|
||||
fachrichtung: Some(Fachrichtung::Verhaltenstherapie),
|
||||
plz_prefix: Some("803".into()),
|
||||
kostentraeger: Some(Kostentraeger::GKV),
|
||||
..Default::default()
|
||||
};
|
||||
fapp_router.send_query(query)?;
|
||||
```
|
||||
|
||||
**Privacy model:**
|
||||
- Therapist identity is **public** (bound to Approbation hash)
|
||||
- Patient queries are **anonymous** (no identifying information)
|
||||
- Reservations use **E2E encryption** to therapist's key
|
||||
|
||||
See [`docs/specs/fapp-protocol.md`](docs/specs/fapp-protocol.md) for the full protocol spec.
|
||||
|
||||
### Client SDKs
|
||||
|
||||
- **Go SDK** (`sdks/go/`) — native QUIC transport via `quic-go`, Cap'n Proto RPC, full API: connect, OPAQUE auth, send/receive, disappearing messages, account deletion
|
||||
- **TypeScript SDK** (`sdks/typescript/`) — `@quicproquo/client` with WASM crypto (175 KB), WebSocket transport, offline crypto mode, browser demo
|
||||
- **Python FFI** (`examples/python/`) — `ctypes` wrapper over the C FFI library with CLI
|
||||
- **C FFI** (`crates/quicproquo-ffi/`) — `libquicproquo_ffi.so` with 7 extern functions: connect, login, send, receive, disconnect, last_error, free_string
|
||||
| Language | Location | Transport | Notes |
|
||||
|---|---|---|---|
|
||||
| **Rust** | `crates/quicprochat-sdk` | QUIC (quinn) | Reference implementation |
|
||||
| **Go** | `sdks/go/` | QUIC (quic-go) | Cap'n Proto RPC, full API |
|
||||
| **Python** | `sdks/python/` | QUIC (aioquic) + FFI | Async client, PyPI-ready |
|
||||
| **TypeScript** | `sdks/typescript/` | WebSocket + WASM crypto | 175 KB WASM bundle, browser demo |
|
||||
| **Swift** | `sdks/swift/` | C FFI wrapper | iOS 15+ / macOS 13+ |
|
||||
| **Kotlin/Java** | `sdks/kotlin/`, `sdks/java/` | JNI + C FFI | Android + JVM |
|
||||
| **Ruby** | `sdks/ruby/` | C FFI gem | Block-form auto-disconnect |
|
||||
|
||||
### REPL slash commands
|
||||
### REPL Commands
|
||||
|
||||
| Command | Description |
|
||||
| ----------------------------------- | --------------------------------------------------- |
|
||||
| `/dm <username>` | Start a 1:1 DM with a peer |
|
||||
| `/create-group <name>` (or `/cg`) | Create a new group |
|
||||
| `/invite <username>` | Add a member to the current group |
|
||||
| `/remove <username>` | Remove a member from the current group |
|
||||
| `/join` | Join a pending group invitation |
|
||||
| `/leave` | Leave the current group |
|
||||
| `/switch @user` or `/switch #group` | Switch active conversation |
|
||||
| `/list` or `/ls` | List all conversations |
|
||||
| `/members` | Show group members with resolved usernames |
|
||||
| `/group-info` (or `/gi`) | Show group type, members, MLS epoch |
|
||||
| `/rename <name>` | Rename the current conversation |
|
||||
| `/history [count]` (or `/hist`) | Show message history (default 20) |
|
||||
| `/react <emoji> [index]` | React to a message with an emoji |
|
||||
| `/typing` | Send a typing indicator |
|
||||
| `/typing-notify on\|off` | Toggle typing indicator display |
|
||||
| `/edit <index> <text>` | Edit one of your messages |
|
||||
| `/delete <index>` | Delete one of your messages |
|
||||
| `/send-file <path>` (or `/sf`) | Upload and send a file (chunked, SHA-256 verified) |
|
||||
| `/download <index>` (or `/dl`) | Download a received file |
|
||||
| `/disappear <duration>` | Set message TTL (`30m`, `1h`, `1d`, `7d`) |
|
||||
| `/verify <username>` | Compare safety numbers with a peer |
|
||||
| `/update-key` (or `/rotate-key`) | Rotate your MLS key material |
|
||||
| `/delete-account` | Permanently delete your account (with confirmation) |
|
||||
| `/whoami` | Show identity and group status |
|
||||
| `/help` | Command reference |
|
||||
| `/quit` | Exit |
|
||||
40+ slash commands including:
|
||||
|
||||
**Mesh commands** (requires `--features mesh`):
|
||||
|
||||
| Command | Description |
|
||||
| ------------------------------- | ---------------------------------- |
|
||||
| `/mesh peers` | Scan for nearby qpq nodes via mDNS |
|
||||
| `/mesh server <host:port>` | Note a discovered server address |
|
||||
| `/mesh send <peer_id> <msg>` | Direct P2P message via iroh |
|
||||
| `/mesh broadcast <topic> <msg>` | Publish to a broadcast channel |
|
||||
| `/mesh subscribe <topic>` | Join a broadcast channel |
|
||||
| `/mesh route` | Show routing table |
|
||||
| `/mesh identity` | Show mesh identity info |
|
||||
| `/mesh store` | Show store-and-forward stats |
|
||||
|
||||
### Mesh networking (feature-gated: `--features mesh`)
|
||||
|
||||
- **P2P transport** (`quicproquo-p2p`) — iroh-based direct peer-to-peer messaging with NAT traversal
|
||||
- **Self-sovereign identity** — Ed25519 keypair-based mesh identity, independent of server registration
|
||||
- **Store-and-forward** — TTL-based message buffering with hop counting and deduplication
|
||||
- **Broadcast channels** — ChaCha20-Poly1305 symmetric topic-based pub/sub (no MLS overhead)
|
||||
- **mDNS discovery** — servers announce `_quicproquo._udp.local.`; clients auto-discover nearby nodes
|
||||
- **Federation routing** — server-to-server message relay with mTLS
|
||||
|
||||
### Experimental / proof-of-concept
|
||||
|
||||
- **Tauri 2 GUI** (`quicproquo-gui`) — foundational desktop app shell; not feature-complete
|
||||
- **Mobile FFI** (`quicproquo-mobile`) — C API for QUIC connection migration (wifi to cellular)
|
||||
- **Bot framework** (`quicproquo-bot`) — programmable bot client
|
||||
|
||||
---
|
||||
|
||||
## Quick start
|
||||
|
||||
```bash
|
||||
# Prerequisites: Rust 1.77+, capnp CLI
|
||||
brew install capnp # macOS
|
||||
# apt-get install capnproto # Debian/Ubuntu
|
||||
|
||||
# Build (excludes GUI — requires GTK system libs)
|
||||
cargo build --bin qpq-server --bin qpq
|
||||
|
||||
# Run tests
|
||||
cargo test --workspace --exclude quicproquo-gui
|
||||
|
||||
# Start the server (port 7000 by default, auto-generates self-signed cert)
|
||||
cargo run --bin qpq-server -- --allow-insecure-auth
|
||||
|
||||
# Interactive REPL (auto-registers and logs in)
|
||||
cargo run --bin qpq -- repl --username alice --password mypass
|
||||
```
|
||||
```text
/dm <user>              Start a 1:1 DM
|
||||
/create-group <name> Create a group
|
||||
/invite <user> Add member to group
|
||||
/remove <user> Remove member
|
||||
/send-file <path> Upload and send a file
|
||||
/verify <user> Compare safety numbers
|
||||
/rotate-keys Rotate MLS key material
|
||||
/disappear <duration> Set message TTL
|
||||
/export <path> Export encrypted transcript
|
||||
/devices list|add|rm Manage devices
|
||||
/block <user> Block a user
|
||||
/recovery setup Generate recovery codes
|
||||
/help Full command reference
|
||||
```
|
||||
|
||||
### REPL quickstart (two terminals)
|
||||
## Deployment
|
||||
|
||||
### Docker
|
||||
|
||||
```bash
|
||||
# Terminal 1
|
||||
qpq repl --username alice --password secretA
|
||||
|
||||
# Terminal 2
|
||||
qpq repl --username bob --password secretB
|
||||
|
||||
# In Alice's REPL:
|
||||
/dm bob
|
||||
Hello from Alice!
|
||||
|
||||
# Bob sees: [alice] Hello from Alice!
|
||||
docker build -t quicprochat -f docker/Dockerfile .
|
||||
docker run -p 7000:7000 -v qpc-data:/data quicprochat
|
||||
```
|
||||
|
||||
### Server configuration (TOML)
|
||||
### Production (Docker Compose)
|
||||
|
||||
```bash
|
||||
cat > qpq-server.toml <<'EOF'
|
||||
listen = "0.0.0.0:7000"
|
||||
data_dir = "data"
|
||||
tls_cert = "data/server-cert.der"
|
||||
tls_key = "data/server-key.der"
|
||||
auth_token = "your-strong-token-here"
|
||||
store_backend = "sql" # or "file"
|
||||
db_path = "data/qpq.db"
|
||||
db_key = "your-db-encryption-key"
|
||||
metrics_listen = "0.0.0.0:9090"
|
||||
metrics_enabled = true
|
||||
# Federation (optional)
|
||||
# federation_enabled = true
|
||||
# federation_domain = "chat.example.com"
|
||||
# federation_listen = "0.0.0.0:7001"
|
||||
# Plugin loading (optional)
|
||||
# plugin_dir = "/etc/qpq/plugins"
|
||||
EOF
|
||||
cargo run --bin qpq-server -- --config qpq-server.toml
|
||||
# Includes server + Prometheus + Grafana with pre-configured dashboards
|
||||
docker compose -f docker-compose.prod.yml up -d
|
||||
```
|
||||
|
||||
> **Production:** use a strong `QPC_AUTH_TOKEN`, set `QPC_DB_KEY` when using `store_backend = "sql"`, and provide real TLS certificates (the server refuses to auto-generate certs in production mode).
|
||||
### OpenWrt
|
||||
|
||||
See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) for a step-by-step guide.
|
||||
|
||||
---
|
||||
|
||||
## Crate layout
|
||||
|
||||
| Crate | Purpose |
|
||||
| ----------------------- | ---------------------------------------------------------------------------------------------- |
|
||||
| `quicproquo-core` | MLS group operations, hybrid KEM, OPAQUE auth, crypto primitives, WASM-compatible modules |
|
||||
| `quicproquo-proto` | Cap'n Proto schemas and generated RPC code |
|
||||
| `quicproquo-server` | QUIC server, NodeService RPC (24 methods), storage backends, federation, plugins, blob storage |
|
||||
| `quicproquo-client` | CLI + REPL (40+ commands), session management, conversation store, file transfer |
|
||||
| `quicproquo-ffi` | C FFI bindings (`libquicproquo_ffi.so`) for cross-language integration |
|
||||
| `quicproquo-plugin-api` | C-compatible plugin hook API (`HookVTable`, 6 hooks) |
|
||||
| `quicproquo-kt` | Key transparency / Merkle-log identity bindings |
|
||||
| `quicproquo-bot` | Programmable bot client framework |
|
||||
| `quicproquo-gen` | Code generation utilities |
|
||||
| `quicproquo-gui` | Tauri 2 desktop app (experimental, requires GTK) |
|
||||
| `quicproquo-mobile` | C FFI for mobile connection migration (experimental) |
|
||||
| `quicproquo-p2p` | iroh-based P2P transport, mesh identity, store-and-forward, broadcast channels |
|
||||
|
||||
---
|
||||
|
||||
## CI pipeline
|
||||
|
||||
GitHub Actions runs on every push and PR:
|
||||
|
||||
- `cargo fmt --check` — formatting
|
||||
- `cargo build --workspace` — full build
|
||||
- `cargo test --workspace` — 130+ tests (core, server, client, E2E, P2P, doctests)
|
||||
- `cargo clippy --workspace` — lint
|
||||
- `cargo deny check` — license and advisory audit
|
||||
- `cargo audit` — vulnerability scan
|
||||
- `cargo tarpaulin` — code coverage (uploaded as artifact)
|
||||
- `docker build` — container image validation
|
||||
|
||||
---
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | Name | Status | What it adds |
|
||||
| --- | ----------------------------- | ----------- | ------------------------------------------------------------------------ |
|
||||
| M1 | QUIC/TLS transport | **Done** | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
|
||||
| M2 | Authentication Service | **Done** | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
||||
| M3 | Delivery Service + MLS groups | **Done** | DS relay, `GroupMember` create/join/add/send/recv |
|
||||
| M4 | Group CLI subcommands | **Done** | Persistent CLI, OPAQUE login, 20 subcommands |
|
||||
| M5 | Multi-party groups | **Done** | N > 2 members, Commit fan-out, `send --all`, epoch sync |
|
||||
| M6 | Persistence + REPL | **Done** | SQLite/SQLCipher, interactive REPL, DM channels, encrypted local storage |
|
||||
| M7 | Post-quantum MLS | **Planned** | Hybrid X25519 + ML-KEM-768 integrated into MLS ciphersuite |
|
||||
|
||||
M7 note: the hybrid KEM envelope is already implemented and tested (10 tests passing). What remains is integrating it into the OpenMLS CryptoProvider so all MLS key material gets post-quantum confidentiality.
|
||||
|
||||
---
|
||||
|
||||
## Roadmap
|
||||
|
||||
See [ROADMAP.md](ROADMAP.md) for the full phased plan. Summary:
|
||||
|
||||
| Phase | Focus | Status |
|
||||
| ----- | -------------------------------------------------------------- | ------------------------------------- |
|
||||
| 1 | Production hardening (unwrap removal, secure defaults, Docker) | In progress |
|
||||
| 2 | Test and CI maturity | Partially done |
|
||||
| 3 | Client SDKs (Go, TypeScript/WASM, Python FFI, C FFI) | **Go, TS, FFI, WASM done** |
|
||||
| 4 | Trust and security (audit, key transparency, PQ MLS) | DS auth + enumeration mitigation done |
|
||||
| 5 | Features and UX (rich messaging, file transfer, disappearing) | **Edit/delete, files, TTL done** |
|
||||
| 6 | Scale and operations (horizontal scaling, observability) | Planned |
|
||||
| 7 | Platform expansion (mobile, web, federation, sealed sender) | **Sealed sender done** |
|
||||
| 8 | Freifunk / community mesh networking | **F0-F6 done** |
|
||||
| 9 | Developer experience and community growth | Safety numbers + plugins done |
|
||||
|
||||
### Recently completed (Sprints 1-9)
|
||||
|
||||
- **Rich messaging** — reactions, read receipts, typing indicators, edit/delete messages
|
||||
- **File transfer** — chunked upload/download with SHA-256 content addressing and progress bars
|
||||
- **Disappearing messages** — per-conversation TTL with server-side garbage collection
|
||||
- **Account deletion** — transactional purge of all user data (GDPR-ready)
|
||||
- **Go SDK** — native QUIC + Cap'n Proto client with full API coverage
|
||||
- **TypeScript SDK** — WASM crypto (175 KB) + WebSocket transport + browser demo
|
||||
- **C FFI + Python bindings** — cross-language integration via `libquicproquo_ffi`
|
||||
- **Mesh networking** — self-sovereign identity, store-and-forward, broadcast channels, extended REPL
|
||||
- **Security hardening** — DS sender binding, username enumeration mitigation, MLS key rotation
|
||||
- **CI pipeline** — fmt, build, test, clippy, deny, audit, tarpaulin coverage, Docker build
|
||||
- **Plugin system** — dynamic `.so`/`.dylib` loading with 6 C-compatible hook points
|
||||
|
||||
---
|
||||
|
||||
## Building without the GUI
|
||||
|
||||
The GUI crate requires GTK system libraries. To build just the server and client:
|
||||
Cross-compiled static binaries for mesh/embedded deployments:
|
||||
|
||||
```bash
|
||||
cargo build --bin qpq-server --bin qpq
|
||||
./scripts/cross-compile.sh # builds for x86_64, armv7, aarch64 (musl)
|
||||
```
|
||||
|
||||
To build the client with mesh/P2P support:
|
||||
See [docs/openwrt.md](docs/openwrt.md) for `opkg` packaging and `procd` init scripts.
|
||||
|
||||
### Configuration
|
||||
|
||||
```bash
|
||||
cargo build -p quicproquo-client --features mesh
|
||||
# Environment variables (see .env.example for full list)
|
||||
QPC_LISTEN=0.0.0.0:7000
|
||||
QPC_AUTH_TOKEN=your-strong-token
|
||||
QPC_DB_KEY=your-db-encryption-key
|
||||
QPC_STORE_BACKEND=sql
|
||||
QPC_METRICS_LISTEN=0.0.0.0:9090
|
||||
QPC_DRAIN_TIMEOUT=30
|
||||
QPC_RPC_TIMEOUT=30
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
|
||||
Full documentation is available as an **mdBook** in [`docs/`](docs/):
|
||||
|
||||
```bash
|
||||
cargo install mdbook # once
|
||||
mdbook serve docs    # http://localhost:3000
|
||||
```
|
||||
|
||||
- **[Getting Started](docs/src/getting-started/prerequisites.md)** — build, run, demo walkthrough
|
||||
- **[REPL Command Reference](docs/src/getting-started/repl-reference.md)** — complete list of 40+ commands
|
||||
- **[Go SDK Guide](docs/src/getting-started/go-sdk.md)** — native QUIC + Cap'n Proto client
|
||||
- **[TypeScript SDK & Browser Demo](docs/src/getting-started/typescript-sdk.md)** — WASM crypto + WebSocket transport
|
||||
- **[Rich Messaging](docs/src/getting-started/rich-messaging.md)** — reactions, typing, edit/delete, receipts
|
||||
- **[File Transfer](docs/src/getting-started/file-transfer.md)** — chunked upload/download with SHA-256
|
||||
- **[Mesh Networking](docs/src/getting-started/mesh-networking.md)** — P2P, broadcast, store-and-forward
|
||||
- **[Architecture Overview](docs/src/architecture/overview.md)** — two-service model, dual-key design, crate layout
|
||||
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS 1.3, Cap'n Proto, MLS, Hybrid KEM
|
||||
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — forward secrecy, post-compromise security, PQ readiness, threat model
|
||||
- **[Design Rationale](docs/src/design-rationale/overview.md)** — why MLS over Signal/Matrix, ADRs for key decisions
|
||||
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — annotated Cap'n Proto schemas
|
||||
- **[Roadmap](docs/src/roadmap/milestones.md)** — milestones, production readiness, future research
|
||||
- **[Future Improvements](docs/FUTURE-IMPROVEMENTS.md)** — prioritised list of security, ops, and feature improvements
|
||||
- [SDK Integration Guide](docs/sdk/index.md) — wire format, per-language guides, "build your own SDK"
|
||||
- [Operational Runbook](docs/operations/backup-restore.md) — backup/restore, key rotation, incident response
|
||||
- [Scaling Guide](docs/operations/scaling-guide.md) — resource sizing, horizontal scaling, capacity planning
|
||||
- [Monitoring](docs/operations/monitoring.md) — Prometheus metrics, Grafana dashboards, alert rules
|
||||
|
||||
---
|
||||
## Security Status
|
||||
|
||||
## Security
|
||||
|
||||
This is a **research project** and has not undergone a formal third-party audit. See the [threat model](docs/src/cryptography/threat-model.md) and [security audit](docs/SECURITY-AUDIT.md) for details.
|
||||
|
||||
- The server only routes opaque ciphertexts by recipient key — it never sees plaintext.
|
||||
- OPAQUE ensures passwords never leave the client.
|
||||
- Local databases are encrypted with SQLCipher when a password is provided.
|
||||
- Session tokens are encrypted at rest (Argon2id key derivation + ChaCha20-Poly1305).
|
||||
- **Certificate pinning:** pass the server cert as `--ca-cert` so the client trusts only that server.
|
||||
- **Sealed sender:** optional mode where the server cannot see who sent a message.
|
||||
- **Dependency checks:** CI runs `cargo deny check` and `cargo audit` on every PR.
|
||||
|
||||
---
|
||||
> **This software has not undergone an independent security audit.** While it implements cryptographic best practices (MLS RFC 9420, OPAQUE, zeroization, constant-time comparisons), no third-party firm has reviewed the implementation. Do not rely on it for high-risk communications until an audit is completed. See [SECURITY.md](SECURITY.md) for our vulnerability disclosure policy.
|
||||
|
||||
## License
|
||||
|
||||
|
||||
88
ROADMAP.html
88
ROADMAP.html
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<!-- Book generated using mdBook -->
|
||||
<meta charset="UTF-8">
|
||||
<title>Full Roadmap (Phases 1–8) - quicproquo</title>
|
||||
<title>Full Roadmap (Phases 1-8) - quicproquo</title>
|
||||
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
@@ -35,10 +35,10 @@
|
||||
const path_to_root = "";
|
||||
const default_light_theme = "navy";
|
||||
const default_dark_theme = "navy";
|
||||
window.path_to_searchindex_js = "searchindex-92ce38c7.js";
|
||||
window.path_to_searchindex_js = "searchindex-1e4ee6e2.js";
|
||||
</script>
|
||||
<!-- Start loading toc.js asap -->
|
||||
<script src="toc-4c7c920d.js"></script>
|
||||
<script src="toc-69b0eb95.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div id="mdbook-help-container">
|
||||
@@ -185,7 +185,7 @@ can be parallelised. Check the box when done.</p>
|
||||
<p>Eliminate all crash paths, enforce secure defaults, fix deployment blockers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.1 Remove <code>.unwrap()</code> / <code>.expect()</code> from production paths</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.1 Remove <code>.unwrap()</code> / <code>.expect()</code> from production paths</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>AUTH_CONTEXT.read().expect()</code> in client RPC with proper <code>Result</code></li>
|
||||
<li>Replace <code>"0.0.0.0:0".parse().unwrap()</code> in client with fallible parse</li>
|
||||
@@ -194,7 +194,7 @@ can be parallelised. Check the box when done.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.2 Enforce secure defaults in production mode</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.2 Enforce secure defaults in production mode</strong></p>
|
||||
<ul>
|
||||
<li>Reject startup if <code>QPQ_PRODUCTION=true</code> and <code>auth_token</code> is empty or <code>"devtoken"</code></li>
|
||||
<li>Require non-empty <code>db_key</code> when using SQL backend in production</li>
|
||||
@@ -203,14 +203,14 @@ can be parallelised. Check the box when done.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.3 Fix <code>.gitignore</code></strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.3 Fix <code>.gitignore</code></strong></p>
|
||||
<ul>
|
||||
<li>Add <code>data/</code>, <code>*.der</code>, <code>*.pem</code>, <code>*.db</code>, <code>*.bin</code> (state files), <code>*.ks</code> (keystores)</li>
|
||||
<li>Verify no secrets are already tracked: <code>git ls-files data/ *.der *.db</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.4 Fix Dockerfile</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.4 Fix Dockerfile</strong></p>
|
||||
<ul>
|
||||
<li>Sync workspace members (handle excluded <code>p2p</code> crate)</li>
|
||||
<li>Create dedicated user/group instead of <code>nobody</code></li>
|
||||
@@ -219,7 +219,7 @@ can be parallelised. Check the box when done.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>1.5 TLS certificate lifecycle</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.5 TLS certificate lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Document CA-signed cert setup (Let’s Encrypt / custom CA)</li>
|
||||
<li>Add <code>--tls-required</code> flag that refuses to start without valid cert</li>
|
||||
@@ -233,7 +233,7 @@ can be parallelised. Check the box when done.</p>
|
||||
<p>Build confidence before adding features.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.1 Expand E2E test coverage</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.1 Expand E2E test coverage</strong></p>
|
||||
<ul>
|
||||
<li>Auth failure scenarios (wrong password, expired token, invalid token)</li>
|
||||
<li>Message ordering verification (send N messages, verify seq numbers)</li>
|
||||
@@ -246,7 +246,7 @@ can be parallelised. Check the box when done.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.2 Add unit tests for untested paths</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.2 Add unit tests for untested paths</strong></p>
|
||||
<ul>
|
||||
<li>Client retry logic (exponential backoff, jitter, retriable classification)</li>
|
||||
<li>REPL input parsing edge cases (empty input, special characters, <code>/</code> commands)</li>
|
||||
@@ -256,7 +256,7 @@ can be parallelised. Check the box when done.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.3 CI hardening</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.3 CI hardening</strong></p>
|
||||
<ul>
|
||||
<li>Add <code>.github/CODEOWNERS</code> (crypto, auth, wire-format require 2 reviewers)</li>
|
||||
<li>Ensure <code>cargo deny check</code> runs on every PR (already in CI — verify)</li>
|
||||
@@ -266,7 +266,7 @@ can be parallelised. Check the box when done.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>2.4 Clean up build warnings</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.4 Clean up build warnings</strong></p>
|
||||
<ul>
|
||||
<li>Fix Cap’n Proto generated <code>unused_parens</code> warnings</li>
|
||||
<li>Remove dead code / unused imports</li>
|
||||
@@ -328,7 +328,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>3.2 Python SDK (<code>quicproquo-py</code>)</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.2 Python SDK (<code>quicproquo-py</code>)</strong></p>
|
||||
<ul>
|
||||
<li>QUIC transport: <code>aioquic</code> with custom Cap’n Proto stream handler</li>
|
||||
<li>Cap’n Proto serialization: <code>pycapnp</code> for message types</li>
|
||||
@@ -357,7 +357,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>3.5 WebTransport server endpoint</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.5 WebTransport server endpoint</strong></p>
|
||||
<ul>
|
||||
<li>Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)</li>
|
||||
<li>Cap’n Proto RPC framed over WebTransport bidirectional streams</li>
|
||||
@@ -378,7 +378,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>3.7 SDK documentation and schema publishing</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.7 SDK documentation and schema publishing</strong></p>
|
||||
<ul>
|
||||
<li>Publish <code>.capnp</code> schemas as the canonical API contract</li>
|
||||
<li>Document the QUIC + Cap’n Proto connection pattern for each language</li>
|
||||
@@ -401,7 +401,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>4.2 Key Transparency / revocation</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.2 Key Transparency / revocation</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>BasicCredential</code> with X.509-based MLS credentials</li>
|
||||
<li>Or: verifiable key directory (Merkle tree, auditable log)</li>
|
||||
@@ -418,7 +418,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>4.4 M7 — Post-quantum MLS integration</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.4 M7 — Post-quantum MLS integration</strong></p>
|
||||
<ul>
|
||||
<li>Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider</li>
|
||||
<li>Group key material gets post-quantum confidentiality</li>
|
||||
@@ -439,7 +439,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
<p>Make it a product people want to use.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.1 Multi-device support</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.1 Multi-device support</strong></p>
|
||||
<ul>
|
||||
<li>Account → multiple devices, each with own Ed25519 key + MLS KeyPackages</li>
|
||||
<li>Device graph management (add device, remove device, list devices)</li>
|
||||
@@ -448,7 +448,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.2 Account recovery</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.2 Account recovery</strong></p>
|
||||
<ul>
|
||||
<li>Recovery codes or backup key (encrypted, stored by user)</li>
|
||||
<li>Option: server-assisted recovery with security questions (lower security)</li>
|
||||
@@ -456,7 +456,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.3 Full MLS lifecycle</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.3 Full MLS lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Member removal (Remove proposal → Commit → fan-out)</li>
|
||||
<li>Credential update (Update proposal for key rotation)</li>
|
||||
@@ -483,7 +483,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.6 Abuse prevention and moderation</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.6 Abuse prevention and moderation</strong></p>
|
||||
<ul>
|
||||
<li>Block user (client-side, suppress display)</li>
|
||||
<li>Report message (encrypted report to admin key)</li>
|
||||
@@ -491,7 +491,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>5.7 Offline message queue (client-side)</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.7 Offline message queue (client-side)</strong></p>
|
||||
<ul>
|
||||
<li>Queue messages when disconnected, send on reconnect</li>
|
||||
<li>Idempotent message IDs to prevent duplicates</li>
|
||||
@@ -504,7 +504,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
<p>Prepare for real traffic.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.1 Distributed rate limiting</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.1 Distributed rate limiting</strong></p>
|
||||
<ul>
|
||||
<li>Current: in-memory per-process, lost on restart</li>
|
||||
<li>Move to Redis or shared state for multi-node deployments</li>
|
||||
@@ -512,7 +512,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.2 Multi-node / horizontal scaling</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.2 Multi-node / horizontal scaling</strong></p>
|
||||
<ul>
|
||||
<li>Stateless server design (already mostly there — state is in storage backend)</li>
|
||||
<li>Shared PostgreSQL or CockroachDB backend (replace SQLite)</li>
|
||||
@@ -521,7 +521,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.3 Operational runbook</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.3 Operational runbook</strong></p>
|
||||
<ul>
|
||||
<li>Backup / restore procedures (SQLCipher, file backend)</li>
|
||||
<li>Key rotation (auth token, TLS cert, DB encryption key)</li>
|
||||
@@ -531,7 +531,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.4 Connection draining and graceful shutdown</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.4 Connection draining and graceful shutdown</strong></p>
|
||||
<ul>
|
||||
<li>Stop accepting new connections on SIGTERM</li>
|
||||
<li>Wait for in-flight RPCs (configurable timeout, default 30s)</li>
|
||||
@@ -540,7 +540,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.5 Request-level timeouts</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.5 Request-level timeouts</strong></p>
|
||||
<ul>
|
||||
<li>Per-RPC timeout (prevent slow clients from holding resources)</li>
|
||||
<li>Database query timeout</li>
|
||||
@@ -548,7 +548,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>6.6 Observability enhancements</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.6 Observability enhancements</strong></p>
|
||||
<ul>
|
||||
<li>Request correlation IDs (trace across RPC → storage)</li>
|
||||
<li>Storage operation latency metrics</li>
|
||||
@@ -563,7 +563,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
<p>Long-term vision for wide adoption.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.1 Mobile clients (iOS + Android)</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.1 Mobile clients (iOS + Android)</strong></p>
|
||||
<ul>
|
||||
<li>Use C FFI (Phase 3.3) for crypto + transport (single library)</li>
|
||||
<li>Push notifications via APNs / FCM (server sends notification on enqueue)</li>
|
||||
@@ -572,7 +572,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.2 Web client (browser)</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.2 Web client (browser)</strong></p>
|
||||
<ul>
|
||||
<li>Use WASM (Phase 3.4) for crypto</li>
|
||||
<li>Use WebTransport (Phase 3.5) for native QUIC transport</li>
|
||||
@@ -583,7 +583,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.3 Federation</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.3 Federation</strong></p>
|
||||
<ul>
|
||||
<li>Server-to-server protocol via Cap’n Proto RPC over QUIC (see <code>federation.capnp</code>)</li>
|
||||
<li><code>relayEnqueue</code>, <code>proxyFetchKeyPackage</code>, <code>federationHealth</code> methods</li>
|
||||
@@ -601,7 +601,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.5 Additional language SDKs</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.5 Additional language SDKs</strong></p>
|
||||
<ul>
|
||||
<li>Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)</li>
|
||||
<li>Swift: Swift wrapper over C FFI + Network.framework QUIC</li>
|
||||
@@ -610,7 +610,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.6 P2P / NAT traversal</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.6 P2P / NAT traversal</strong></p>
|
||||
<ul>
|
||||
<li>Direct peer-to-peer via iroh (foundation exists in <code>quicproquo-p2p</code>)</li>
|
||||
<li>Server as fallback relay only</li>
|
||||
@@ -619,7 +619,7 @@ WASM/FFI for the crypto layer.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>7.7 Traffic analysis resistance</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.7 Traffic analysis resistance</strong></p>
|
||||
<ul>
|
||||
<li>Padding messages to uniform size</li>
|
||||
<li>Decoy traffic to mask timing patterns</li>
|
||||
@@ -706,7 +706,7 @@ functions without any central infrastructure or internet uplink.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>F7 — OpenWrt cross-compilation guide</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F7 — OpenWrt cross-compilation guide</strong></p>
|
||||
<ul>
|
||||
<li>Musl static builds: <code>x86_64-unknown-linux-musl</code>, <code>armv7-unknown-linux-musleabihf</code>, <code>mips-unknown-linux-musl</code></li>
|
||||
<li>Strip binary: <code>--release</code> + <code>strip</code> → target size < 5 MB for flash storage</li>
|
||||
@@ -716,7 +716,7 @@ functions without any central infrastructure or internet uplink.</p>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>F8 — Traffic analysis resistance for mesh</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F8 — Traffic analysis resistance for mesh</strong></p>
|
||||
<ul>
|
||||
<li>Uniform message padding to nearest 256-byte boundary (hides message size)</li>
|
||||
<li>Configurable decoy traffic rate (fake messages to mask send timing)</li>
|
||||
@@ -731,7 +731,7 @@ functions without any central infrastructure or internet uplink.</p>
|
||||
and lower the barrier to entry for non-crypto developers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.1 Criterion Benchmark Suite (<code>qpq-bench</code>)</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.1 Criterion Benchmark Suite (<code>qpq-bench</code>)</strong></p>
|
||||
<ul>
|
||||
<li>Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake</li>
|
||||
@@ -748,7 +748,7 @@ MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.3 Full-Screen TUI (Ratatui + Crossterm)</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.3 Full-Screen TUI (Ratatui + Crossterm)</strong></p>
|
||||
<ul>
|
||||
<li><code>qpq tui</code> launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator</li>
|
||||
@@ -757,7 +757,7 @@ channel sidebar with unread counts, MLS epoch indicator</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.4 Delivery Proof Canary Tokens</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.4 Delivery Proof Canary Tokens</strong></p>
|
||||
<ul>
|
||||
<li>Server signs <code>Ed25519(SHA-256(message_id || recipient || timestamp))</code> on enqueue</li>
|
||||
<li>Sender stores proof locally — cryptographic evidence the server queued the message</li>
|
||||
@@ -765,7 +765,7 @@ channel sidebar with unread counts, MLS epoch indicator</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.5 Verifiable Transcript Archive</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.5 Verifiable Transcript Archive</strong></p>
|
||||
<ul>
|
||||
<li><code>GroupMember::export_transcript(path, password)</code> writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
@@ -774,7 +774,7 @@ message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.6 Key Transparency (Merkle-Log Identity Binding)</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.6 Key Transparency (Merkle-Log Identity Binding)</strong></p>
|
||||
<ul>
|
||||
<li>Append-only Merkle log of (username, identity_key) bindings in the AS</li>
|
||||
<li>Clients receive inclusion proofs alongside key fetches</li>
|
||||
@@ -792,7 +792,7 @@ message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>9.8 PQ Noise Transport Layer</strong></p>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.8 PQ Noise Transport Layer</strong></p>
|
||||
<ul>
|
||||
<li>Hybrid <code>Noise_XX + ML-KEM-768</code> handshake for post-quantum transport security</li>
|
||||
<li>Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)</li>
|
||||
@@ -840,7 +840,7 @@ message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="contributing/coding-standards.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<a rel="next prefetch" href="operations/monitoring.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
@@ -854,7 +854,7 @@ message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="contributing/coding-standards.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<a rel="next prefetch" href="operations/monitoring.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
</nav>
|
||||
|
||||
64
ROADMAP.md
64
ROADMAP.md
@@ -1,4 +1,4 @@
|
||||
# Roadmap — quicproquo
|
||||
# Roadmap — quicprochat
|
||||
|
||||
> From proof-of-concept to production-grade E2E encrypted messaging.
|
||||
>
|
||||
@@ -18,7 +18,7 @@ Eliminate all crash paths, enforce secure defaults, fix deployment blockers.
|
||||
- Audit: `grep -rn 'unwrap()\|expect(' crates/` outside `#[cfg(test)]`
|
||||
|
||||
- [x] **1.2 Enforce secure defaults in production mode**
|
||||
- Reject startup if `QPQ_PRODUCTION=true` and `auth_token` is empty or `"devtoken"`
|
||||
- Reject startup if `QPC_PRODUCTION=true` and `auth_token` is empty or `"devtoken"`
|
||||
- Require non-empty `db_key` when using SQL backend in production
|
||||
- Refuse to auto-generate TLS certs in production mode (require existing cert+key)
|
||||
- Already partially implemented — verify and harden the validation in `config.rs`
|
||||
@@ -30,8 +30,8 @@ Eliminate all crash paths, enforce secure defaults, fix deployment blockers.
|
||||
- [x] **1.4 Fix Dockerfile**
|
||||
- Sync workspace members (handle excluded `p2p` crate)
|
||||
- Create dedicated user/group instead of `nobody`
|
||||
- Set writable `QPQ_DATA_DIR` with correct permissions
|
||||
- Test: `docker build . && docker run --rm -it qpq-server --help`
|
||||
- Set writable `QPC_DATA_DIR` with correct permissions
|
||||
- Test: `docker build . && docker run --rm -it qpc-server --help`
|
||||
|
||||
- [x] **1.5 TLS certificate lifecycle**
|
||||
- Document CA-signed cert setup (Let's Encrypt / custom CA)
|
||||
@@ -121,27 +121,27 @@ WASM/FFI for the crypto layer.
|
||||
|
||||
### Implementation
|
||||
|
||||
- [x] **3.1 Go SDK (`quicproquo-go`)**
|
||||
- [x] **3.1 Go SDK (`quicprochat-go`)**
|
||||
- Generated Go types from `node.capnp` (6487-line codegen, all 24 RPC methods)
|
||||
- QUIC transport via `quic-go` with TLS 1.3 + ALPN `"capnp"`
|
||||
- High-level `qpq` package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth
|
||||
- High-level `qpc` package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth
|
||||
- Example CLI in `sdks/go/cmd/example/`
|
||||
|
||||
- [x] **3.2 Python SDK (`quicproquo-py`)**
|
||||
- [x] **3.2 Python SDK (`quicprochat-py`)**
|
||||
- QUIC transport: `aioquic` with custom Cap'n Proto stream handler
|
||||
- Cap'n Proto serialization: `pycapnp` for message types
|
||||
- Manual RPC framing: length-prefixed request/response over QUIC stream
|
||||
- Async/await API matching the Rust client patterns
|
||||
- Crypto: PyO3 bindings to `quicproquo-core` for MLS operations
|
||||
- Publish: PyPI `quicproquo`
|
||||
- Crypto: PyO3 bindings to `quicprochat-core` for MLS operations
|
||||
- Publish: PyPI `quicprochat`
|
||||
- Example: async bot client
|
||||
|
||||
- [x] **3.3 C FFI layer (`quicproquo-ffi`)**
|
||||
- `crates/quicproquo-ffi` with 7 extern "C" functions: connect, login, send, receive, disconnect, last_error, free_string
|
||||
- Builds as `libquicproquo_ffi.so` / `.dylib` / `.dll`
|
||||
- Python ctypes wrapper in `examples/python/qpq_client.py`
|
||||
- [x] **3.3 C FFI layer (`quicprochat-ffi`)**
|
||||
- `crates/quicprochat-ffi` with 7 extern "C" functions: connect, login, send, receive, disconnect, last_error, free_string
|
||||
- Builds as `libquicprochat_ffi.so` / `.dylib` / `.dll`
|
||||
- Python ctypes wrapper in `examples/python/qpc_client.py`
|
||||
|
||||
- [x] **3.4 WASM compilation of `quicproquo-core`**
|
||||
- [x] **3.4 WASM compilation of `quicprochat-core`**
|
||||
- `wasm-pack build` target producing 175 KB WASM bundle (LTO + opt-level=s)
|
||||
- 13 `wasm_bindgen` functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding
|
||||
- Browser-ready with `crypto.getRandomValues()` RNG
|
||||
@@ -156,7 +156,7 @@ WASM/FFI for the crypto layer.
|
||||
- Configurable port: `--webtransport-listen 0.0.0.0:7443`
|
||||
- Feature-flagged: `--features webtransport`
|
||||
|
||||
- [x] **3.6 TypeScript/JavaScript SDK (`@quicproquo/client`)**
|
||||
- [x] **3.6 TypeScript/JavaScript SDK (`@quicprochat/client`)**
|
||||
- `QpqClient` class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount
|
||||
- WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad
|
||||
- WebSocket transport with request/response correlation and reconnection
|
||||
@@ -317,17 +317,17 @@ Long-term vision for wide adoption.
|
||||
|
||||
- [x] **7.4 Sealed Sender**
|
||||
- Sender identity inside MLS ciphertext only (server can't see who sent)
|
||||
- `sealed_sender` module in quicproquo-core with seal/unseal API
|
||||
- `sealed_sender` module in quicprochat-core with seal/unseal API
|
||||
- WASM-accessible via `wasm_bindgen` for browser use
|
||||
|
||||
- [x] **7.5 Additional language SDKs**
|
||||
- Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)
|
||||
- Swift: Swift wrapper over C FFI + Network.framework QUIC
|
||||
- Ruby: FFI bindings via `quicproquo-ffi`
|
||||
- Ruby: FFI bindings via `quicprochat-ffi`
|
||||
- Evaluate demand-driven — only build SDKs people request
|
||||
|
||||
- [x] **7.6 P2P / NAT traversal**
|
||||
- Direct peer-to-peer via iroh (foundation exists in `quicproquo-p2p`)
|
||||
- Direct peer-to-peer via iroh (foundation exists in `quicprochat-p2p`)
|
||||
- Server as fallback relay only
|
||||
- Reduces latency and single-point-of-failure
|
||||
- Ref: `FUTURE-IMPROVEMENTS.md § 6.1`
|
||||
@@ -342,35 +342,35 @@ Long-term vision for wide adoption.
|
||||
|
||||
## Phase 8 — Freifunk / Community Mesh Networking
|
||||
|
||||
Make qpq a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpq nodes form
|
||||
Make qpc a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpc nodes form
|
||||
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
|
||||
functions without any central infrastructure or internet uplink.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Client A ─── mDNS discovery ──► nearby qpq node (LAN / mesh)
|
||||
Client A ─── mDNS discovery ──► nearby qpc node (LAN / mesh)
|
||||
│
|
||||
Cap'n Proto federation
|
||||
│
|
||||
remote qpq node (across mesh)
|
||||
remote qpc node (across mesh)
|
||||
```
|
||||
|
||||
- [x] **F0 — Re-include `quicproquo-p2p` in workspace; fix ALPN strings**
|
||||
- Moved `crates/quicproquo-p2p` from `exclude` back into `[workspace] members`
|
||||
- Fixed ALPN `b"quicnprotochat/p2p/1"` → `b"quicproquo/p2p/1"` (breaking wire change)
|
||||
- Fixed federation ALPN `b"qnpc-fed"` → `b"quicproquo/federation/1"`
|
||||
- [x] **F0 — Re-include `quicprochat-p2p` in workspace; fix ALPN strings**
|
||||
- Moved `crates/quicprochat-p2p` from `exclude` back into `[workspace] members`
|
||||
- Fixed ALPN `b"quicnprotochat/p2p/1"` → `b"quicprochat/p2p/1"` (breaking wire change)
|
||||
- Fixed federation ALPN `b"qnpc-fed"` → `b"quicprochat/federation/1"`
|
||||
- Feature-gated behind `--features mesh` on client (keeps iroh out of default builds)
|
||||
|
||||
- [x] **F1 — Federation routing in message delivery**
|
||||
- `handle_enqueue` and `handle_batch_enqueue` call `federation::routing::resolve_destination()`
|
||||
- Recipients with a remote home server are relayed via `FederationClient::relay_enqueue()`
|
||||
- mTLS mutual authentication between nodes (both present client certs, validated against shared CA)
|
||||
- Config: `QPQ_FEDERATION_LISTEN`, `QPQ_LOCAL_DOMAIN`, `QPQ_FEDERATION_CERT/KEY/CA`
|
||||
- Config: `QPC_FEDERATION_LISTEN`, `QPC_LOCAL_DOMAIN`, `QPC_FEDERATION_CERT/KEY/CA`
|
||||
|
||||
- [x] **F2 — mDNS local peer discovery**
|
||||
- Server announces `_quicproquo._udp.local.` on startup via `mdns-sd`
|
||||
- Server announces `_quicprochat._udp.local.` on startup via `mdns-sd`
|
||||
- Client: `MeshDiscovery::start()` browses for nearby nodes (feature-gated)
|
||||
- REPL commands: `/mesh peers` (scan + list), `/mesh server <host:port>` (note address)
|
||||
- Nodes announce: `ver=1`, `server=<host:port>`, `domain=<local_domain>` TXT records
|
||||
@@ -378,7 +378,7 @@ functions without any central infrastructure or internet uplink.
|
||||
- [x] **F3 — Self-sovereign mesh identity**
|
||||
- Ed25519 keypair-based identity independent of AS registration
|
||||
- JSON-persisted seed + known peers directory
|
||||
- Sign/verify operations for mesh authenticity (`crates/quicproquo-p2p/src/identity.rs`)
|
||||
- Sign/verify operations for mesh authenticity (`crates/quicprochat-p2p/src/identity.rs`)
|
||||
|
||||
- [x] **F4 — Store-and-forward with TTL**
|
||||
- `MeshEnvelope` with TTL-based expiry, hop_count tracking, max_hops routing limit
|
||||
@@ -419,7 +419,7 @@ functions without any central infrastructure or internet uplink.
|
||||
Features designed to attract contributors, create demo/showcase potential,
|
||||
and lower the barrier to entry for non-crypto developers.
|
||||
|
||||
- [x] **9.1 Criterion Benchmark Suite (`qpq-bench`)**
|
||||
- [x] **9.1 Criterion Benchmark Suite (`qpc-bench`)**
|
||||
- Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake
|
||||
- CI publishes HTML benchmark reports as GitHub Actions artifacts
|
||||
@@ -431,7 +431,7 @@ and lower the barrier to entry for non-crypto developers.
|
||||
- Available in WASM via `compute_safety_number` binding
|
||||
|
||||
- [x] **9.3 Full-Screen TUI (Ratatui + Crossterm)**
|
||||
- `qpq tui` launches a full-screen terminal UI: message pane, input bar,
|
||||
- `qpc tui` launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator
|
||||
- Feature-gated `--features tui` to keep ratatui/crossterm out of default builds
|
||||
- Existing REPL and CLI subcommands are unaffected
|
||||
@@ -444,7 +444,7 @@ and lower the barrier to entry for non-crypto developers.
|
||||
- [x] **9.5 Verifiable Transcript Archive**
|
||||
- `GroupMember::export_transcript(path, password)` writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)
|
||||
- `qpq export verify` CLI command independently verifies chain integrity
|
||||
- `qpc export verify` CLI command independently verifies chain integrity
|
||||
- Useful for legal discovery, audit, or personal backup
|
||||
|
||||
- [x] **9.6 Key Transparency (Merkle-Log Identity Binding)**
|
||||
|
||||
29
SECURITY.md
Normal file
29
SECURITY.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Only the current `main` branch is supported with security updates.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
**Do not use public GitHub issues to report security vulnerabilities.**
|
||||
|
||||
Instead, email **security@quicprochat.org** with:
|
||||
|
||||
- A description of the vulnerability
|
||||
- Steps to reproduce or a proof of concept
|
||||
- The affected component(s) and potential impact
|
||||
|
||||
We will acknowledge your report within **48 hours** and work with you on a fix under a **90-day coordinated disclosure** timeline.
|
||||
|
||||
## What Qualifies
|
||||
|
||||
- Cryptographic implementation bugs (MLS, Noise, hybrid KEM, key derivation)
|
||||
- Authentication or authorization bypass
|
||||
- Key material leakage (memory, logs, network)
|
||||
- Protocol-level flaws (replay, downgrade, impersonation)
|
||||
- Any issue that compromises message confidentiality or integrity
|
||||
|
||||
## Credit
|
||||
|
||||
Reporters are credited in published security advisories unless they prefer to remain anonymous. Let us know your preference when you report.
|
||||
229
SPRINTS.md
Normal file
229
SPRINTS.md
Normal file
@@ -0,0 +1,229 @@
|
||||
# quicprochat — Sprint Plan
|
||||
|
||||
> 7 sprints synthesized from code audit, architecture analysis, and ecosystem research.
|
||||
> Each sprint is ~1 week. Sprints are ordered by priority and dependency.
|
||||
|
||||
---
|
||||
|
||||
## Sprint 1 — Bug Fixes & Code Quality (Quick Wins)
|
||||
|
||||
Fix all known bugs, clippy warnings, and dead code before building on top.
|
||||
|
||||
- [x] **1.1 Fix boolean logic bug in TUI**
|
||||
- `crates/quicprochat-client/src/client/v2_tui.rs:832` — remove `|| true`
|
||||
- Cursor positioning always executes regardless of input state
|
||||
|
||||
- [x] **1.2 Fix unwrap violations in P2P router**
|
||||
- `crates/quicprochat-p2p/src/routing.rs:416,419` — `.lock().unwrap()` on Mutex
|
||||
- Replace with `.expect("lock poisoned")` or proper error handling
|
||||
|
||||
- [x] **1.3 Remove placeholder assertion in WebTransport**
|
||||
- `crates/quicprochat-server/src/webtransport.rs:418` — `assert!(true);`
|
||||
|
||||
- [x] **1.4 Wire up unused metrics**
|
||||
- `record_storage_latency()` — instrument storage layer calls
|
||||
- `record_uptime_seconds()` — add periodic heartbeat task in server main loop
|
||||
|
||||
- [x] **1.5 Wire up or remove unused config fields**
|
||||
- `EffectiveConfig::webtransport_listen` — connect to WebTransport listener
|
||||
- `EffectiveConfig::rpc_timeout_secs` — apply as per-RPC deadline
|
||||
- `EffectiveConfig::storage_timeout_secs` — apply as DB query timeout
|
||||
|
||||
- [x] **1.6 Fix remaining clippy warnings**
|
||||
- Reduce function arity (2 functions with 8-9 args → use config/param structs)
|
||||
- Remove useless `format!()` call
|
||||
- Collapse nested conditionals
|
||||
- Rename `from_str` method to avoid `FromStr` trait confusion
|
||||
|
||||
---
|
||||
|
||||
## Sprint 2 — OpenMLS 0.5 → 0.8 Migration
|
||||
|
||||
**CRITICAL**: OpenMLS 0.7.2 includes security patches. Staying on 0.5 is a risk.
|
||||
|
||||
- [x] **2.1 Migrate StorageProvider trait**
|
||||
- Old `OpenMlsKeyStore` → new `StorageProvider` (most invasive change)
|
||||
- Rework `DiskKeyStore` integration (must keep bincode serialization)
|
||||
- Update all `group.rs` calls that interact with the key store
|
||||
|
||||
- [x] **2.2 Update MLS API calls**
|
||||
- `self_update()` / `propose_self_update()` — add `LeafNodeParameters` arg
|
||||
- `join_by_external_commit()` — add optional LeafNode params
|
||||
- `Sender::NewMember` → split into `NewMemberProposal` / `NewMemberCommit`
|
||||
|
||||
- [x] **2.3 Handle GREASE support**
|
||||
- New variants in `ProposalType`, `ExtensionType`, `CredentialType`
|
||||
- Update match arms to handle unknown/GREASE values
|
||||
|
||||
- [x] **2.4 Update AAD handling**
|
||||
- AAD no longer persisted — set before every API call generating `MlsMessageOut`
|
||||
|
||||
- [x] **2.5 Verify FIPS 203 alignment**
|
||||
- Confirm ML-KEM-768 parameters match final FIPS 203 (not draft)
|
||||
- Review hybrid KEM against RFC 9794 combination methods
|
||||
|
||||
- [x] **2.6 Full test suite pass**
|
||||
- All 301 tests must pass with OpenMLS 0.8
|
||||
- Run crypto benchmarks to check for performance regressions
|
||||
|
||||
---
|
||||
|
||||
## Sprint 3 — Client Resilience
|
||||
|
||||
Currently, network glitches cause the client to hang. This blocks v2 launch.
|
||||
|
||||
- [x] **3.1 Auto-reconnect with backoff**
|
||||
- Integrate existing `retry.rs` into `RpcClient::call()` path
|
||||
- Exponential backoff with jitter (already implemented, not wired)
|
||||
- Configurable max retries and backoff ceiling
|
||||
|
||||
- [x] **3.2 Push subscription recovery**
|
||||
- Detect broken push stream and re-subscribe automatically
|
||||
- Buffer missed events during reconnection window
|
||||
|
||||
- [x] **3.3 Heartbeat / keepalive**
|
||||
- Periodic QUIC ping in TUI and REPL modes
|
||||
- Detect dead connections before user notices
|
||||
|
||||
- [x] **3.4 SDK disconnect lifecycle**
|
||||
- Add `QpcClient::disconnect()` for clean shutdown
|
||||
- Proper state machine: Connected → Reconnecting → Disconnected
|
||||
|
||||
- [x] **3.5 Connection status UI**
|
||||
- TUI: show connection state in status bar (Connected / Reconnecting / Offline)
|
||||
- REPL: print status change notifications
|
||||
|
||||
---
|
||||
|
||||
## Sprint 4 — Server Hardening
|
||||
|
||||
Fix graceful shutdown and wire up timeouts for production readiness.
|
||||
|
||||
- [x] **4.1 In-flight RPC tracking**
|
||||
- Replace fixed 30s shutdown delay with actual in-flight RPC counter
|
||||
- Drain when counter reaches zero (with configurable max wait)
|
||||
|
||||
- [x] **4.2 Apply request-level timeouts**
|
||||
- Wire `rpc_timeout_secs` config into per-RPC deadline enforcement
|
||||
- Wire `storage_timeout_secs` into DB query timeouts
|
||||
- Cancel long-running operations cleanly
|
||||
|
||||
- [x] **4.3 Plugin shutdown hooks**
|
||||
- Add `on_shutdown` hook to `HookVTable`
|
||||
- Call plugin shutdown before server exits
|
||||
|
||||
- [x] **4.4 Federation drain during shutdown**
|
||||
- Stop accepting federation relay requests on SIGTERM
|
||||
- Wait for in-flight federation RPCs before exit
|
||||
|
||||
- [x] **4.5 Connection draining improvements**
|
||||
- Send QUIC CONNECTION_CLOSE with application reason
|
||||
- WebTransport: send close frame before dropping sessions
|
||||
|
||||
---
|
||||
|
||||
## Sprint 5 — Test Coverage & CI Hardening
|
||||
|
||||
Address the major test coverage gaps identified in the audit.
|
||||
|
||||
- [x] **5.1 RPC framing unit tests**
|
||||
- `crates/quicprochat-rpc/src/framing.rs` — encode/decode edge cases
|
||||
- Malformed frames, truncated input, max-size payloads
|
||||
- Fuzzing harness for frame parser
|
||||
|
||||
- [x] **5.2 SDK state machine tests**
|
||||
- `crates/quicprochat-sdk/src/conversation.rs` — conversation lifecycle
|
||||
- `crates/quicprochat-sdk/src/groups.rs` — group join/leave/update
|
||||
- `crates/quicprochat-sdk/src/messaging.rs` — send/receive/queue
|
||||
|
||||
- [x] **5.3 Server domain service tests**
|
||||
- `crates/quicprochat-server/src/domain/` — all service modules
|
||||
- Test business logic without DB (mock storage trait)
|
||||
|
||||
- [x] **5.4 Integration tests**
|
||||
- Reconnection scenario (kill server, restart, verify client recovers)
|
||||
- Graceful shutdown (send SIGTERM during active RPCs, verify drain)
|
||||
- Multi-node federation relay (if federation wired in Sprint 6)
|
||||
|
||||
- [x] **5.5 CI hardening**
|
||||
- Add MSRV check (Rust 1.75 or declared minimum)
|
||||
- Add cross-platform CI (macOS, Windows — at least build check)
|
||||
- Add cargo-fuzz for crypto and parsing code
|
||||
- Add MIRI for unsafe code in plugin-api/FFI
|
||||
|
||||
---
|
||||
|
||||
## Sprint 6 — Federation & P2P Integration
|
||||
|
||||
Wire up the scaffolded federation and P2P code into working features.
|
||||
|
||||
- [x] **6.1 Federation message routing**
|
||||
- Wire `federation::routing::resolve_destination()` into `handle_enqueue`
|
||||
- Route messages to remote home servers via `FederationClient::relay_enqueue()`
|
||||
- Resolve protocol mismatch (Cap'n Proto federation vs Protobuf main RPC)
|
||||
|
||||
- [x] **6.2 Federation identity resolution**
|
||||
- Cross-server user lookup (`user@remote-server`)
|
||||
- KeyPackage fetching across federated nodes
|
||||
|
||||
- [x] **6.3 P2P client integration**
|
||||
- Wire iroh P2P into client as transport option
|
||||
- Fallback logic: prefer P2P direct → fall back to server relay
|
||||
- mDNS discovery in client (already scaffolded, needs activation)
|
||||
|
||||
- [x] **6.4 Multipath QUIC evaluation**
|
||||
- Research draft-ietf-quic-multipath (likely RFC in 2026)
|
||||
- Prototype: use multiple paths for mesh relay resilience
|
||||
- Decision: adopt or defer based on quinn support
|
||||
|
||||
- [x] **6.5 Federation integration tests**
|
||||
- Two-server test: register on A, send to user on B, verify delivery
|
||||
- mTLS mutual auth verification
|
||||
- Partition tolerance (one node goes down, messages queue)
|
||||
|
||||
---
|
||||
|
||||
## Sprint 7 — Documentation, Polish & Future Prep
|
||||
|
||||
Final polish and forward-looking improvements.
|
||||
|
||||
- [x] **7.1 Crate-level documentation**
|
||||
- Add module-level docs to `quicprochat-plugin-api`, `quicprochat-rpc`, `quicprochat-sdk`
|
||||
- Doc comments for all public APIs in domain services
|
||||
|
||||
- [x] **7.2 Refactor high-arity functions** (none found — already clean)
|
||||
- Consolidate 8-9 parameter functions into config/param structs
|
||||
- Improve builder patterns where appropriate
|
||||
|
||||
- [ ] **7.3 Review RFC 9750 (MLS Architecture)** (deferred — requires manual review)
|
||||
- Verify quicprochat's AS/DS split aligns with RFC 9750 recommendations
|
||||
- Document any deviations and rationale
|
||||
|
||||
- [ ] **7.4 Desktop client evaluation** (deferred — requires Tauri prototype)
|
||||
- Prototype Tauri v2 desktop shell wrapping the TUI or a web UI
|
||||
- Evaluate effort to ship cross-platform desktop client
|
||||
|
||||
- [x] **7.5 Security pre-audit prep**
|
||||
- Document all crypto boundaries and trust assumptions
|
||||
- Create threat model document
|
||||
- Prepare scope document for external auditors (Roadmap item 4.1)
|
||||
- Budget: NCC Group / Trail of Bits / Cure53 ($50K–$150K, 4-6 weeks)
|
||||
|
||||
- [ ] **7.6 Repository rename** (requires GitHub admin action)
|
||||
- Rename GitHub repository from `quicproquo` → `quicprochat`
|
||||
- Update all GitHub URLs, CI badge links, go.mod import paths
|
||||
- Set up redirect from old repo name
|
||||
|
||||
---
|
||||
|
||||
## Sprint Summary
|
||||
|
||||
| Sprint | Focus | Risk | Key Deliverable |
|
||||
|--------|-------|------|----------------|
|
||||
| **1** | Bug fixes & code quality | Low | Zero clippy warnings, metrics wired |
|
||||
| **2** | OpenMLS 0.5 → 0.8 | High | Security patches applied, FIPS 203 verified |
|
||||
| **3** | Client resilience | Medium | Auto-reconnect, heartbeat, status UI |
|
||||
| **4** | Server hardening | Medium | Real graceful shutdown, timeouts enforced |
|
||||
| **5** | Test coverage & CI | Low | Unit tests for SDK/RPC/domain, fuzzing |
|
||||
| **6** | Federation & P2P | High | Working cross-server messaging, P2P fallback |
|
||||
| **7** | Docs, polish & audit prep | Low | Audit-ready, desktop prototype |
|
||||
@@ -1,18 +1,19 @@
|
||||
[package]
|
||||
name = "quicproquo-client"
|
||||
name = "quicprochat-client"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "CLI client for quicproquo."
|
||||
license = "MIT"
|
||||
edition.workspace = true
|
||||
description = "CLI client for quicprochat."
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "qpq"
|
||||
name = "qpc"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicproquo-core = { path = "../quicproquo-core" }
|
||||
quicproquo-proto = { path = "../quicproquo-proto" }
|
||||
quicproquo-kt = { path = "../quicproquo-kt" }
|
||||
quicprochat-core = { path = "../quicprochat-core" }
|
||||
quicprochat-proto = { path = "../quicprochat-proto" }
|
||||
quicprochat-kt = { path = "../quicprochat-kt" }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
|
||||
# Serialisation + RPC
|
||||
@@ -49,8 +50,9 @@ rustls = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
# CLI
|
||||
# CLI + config
|
||||
clap = { workspace = true }
|
||||
toml = { workspace = true }
|
||||
|
||||
# Local message/conversation storage
|
||||
rusqlite = { workspace = true }
|
||||
@@ -65,7 +67,7 @@ rpassword = "5"
|
||||
mdns-sd = { version = "0.12", optional = true }
|
||||
|
||||
# Optional P2P transport for direct node-to-node messaging.
|
||||
quicproquo-p2p = { path = "../quicproquo-p2p", optional = true }
|
||||
quicprochat-p2p = { path = "../quicprochat-p2p", optional = true }
|
||||
|
||||
# Optional TUI dependencies (Ratatui full-screen interface).
|
||||
ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] }
|
||||
@@ -74,9 +76,9 @@ crossterm = { version = "0.28", optional = true }
|
||||
# YAML playbook parsing (only compiled with --features playbook).
|
||||
serde_yaml = { version = "0.9", optional = true }
|
||||
|
||||
# v2 SDK-based CLI (thin shell over quicproquo-sdk).
|
||||
quicproquo-sdk = { path = "../quicproquo-sdk", optional = true }
|
||||
quicproquo-rpc = { path = "../quicproquo-rpc", optional = true }
|
||||
# v2 SDK-based CLI (thin shell over quicprochat-sdk).
|
||||
quicprochat-sdk = { path = "../quicprochat-sdk", optional = true }
|
||||
quicprochat-rpc = { path = "../quicprochat-rpc", optional = true }
|
||||
rustyline = { workspace = true, optional = true }
|
||||
|
||||
[lints]
|
||||
@@ -84,15 +86,15 @@ workspace = true
|
||||
|
||||
[features]
|
||||
# Enable mesh-mode features: mDNS local peer discovery + P2P transport.
|
||||
# Build: cargo build -p quicproquo-client --features mesh
|
||||
mesh = ["dep:mdns-sd", "dep:quicproquo-p2p"]
|
||||
# Enable full-screen Ratatui TUI: cargo build -p quicproquo-client --features tui
|
||||
# Build: cargo build -p quicprochat-client --features mesh
|
||||
mesh = ["dep:mdns-sd", "dep:quicprochat-p2p"]
|
||||
# Enable full-screen Ratatui TUI: cargo build -p quicprochat-client --features tui
|
||||
tui = ["dep:ratatui", "dep:crossterm"]
|
||||
# Enable playbook (scripted command execution): YAML parser + serde derives.
|
||||
# Build: cargo build -p quicproquo-client --features playbook
|
||||
# Build: cargo build -p quicprochat-client --features playbook
|
||||
playbook = ["dep:serde_yaml"]
|
||||
# v2 CLI over SDK: cargo build -p quicproquo-client --features v2
|
||||
v2 = ["dep:quicproquo-sdk", "dep:quicproquo-rpc", "dep:rustyline"]
|
||||
# v2 CLI over SDK: cargo build -p quicprochat-client --features v2
|
||||
v2 = ["dep:quicprochat-sdk", "dep:quicprochat-rpc", "dep:rustyline"]
|
||||
|
||||
[dev-dependencies]
|
||||
dashmap = { workspace = true }
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use quicproquo_proto::node_capnp::node_service;
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
|
||||
use super::repl::{Input, SlashCommand, parse_input};
|
||||
use super::session::SessionState;
|
||||
@@ -109,6 +109,8 @@ pub enum Command {
|
||||
History { count: usize },
|
||||
|
||||
// Mesh
|
||||
MeshStart,
|
||||
MeshStop,
|
||||
MeshPeers,
|
||||
MeshServer { addr: String },
|
||||
MeshSend { peer_id: String, message: String },
|
||||
@@ -171,6 +173,8 @@ impl Command {
|
||||
Command::GroupInfo => Some(SlashCommand::GroupInfo),
|
||||
Command::Rename { name } => Some(SlashCommand::Rename { name }),
|
||||
Command::History { count } => Some(SlashCommand::History { count }),
|
||||
Command::MeshStart => Some(SlashCommand::MeshStart),
|
||||
Command::MeshStop => Some(SlashCommand::MeshStop),
|
||||
Command::MeshPeers => Some(SlashCommand::MeshPeers),
|
||||
Command::MeshServer { addr } => Some(SlashCommand::MeshServer { addr }),
|
||||
Command::MeshSend { peer_id, message } => {
|
||||
@@ -332,6 +336,8 @@ fn slash_to_command(sc: SlashCommand) -> Command {
|
||||
SlashCommand::GroupInfo => Command::GroupInfo,
|
||||
SlashCommand::Rename { name } => Command::Rename { name },
|
||||
SlashCommand::History { count } => Command::History { count },
|
||||
SlashCommand::MeshStart => Command::MeshStart,
|
||||
SlashCommand::MeshStop => Command::MeshStop,
|
||||
SlashCommand::MeshPeers => Command::MeshPeers,
|
||||
SlashCommand::MeshServer { addr } => Command::MeshServer { addr },
|
||||
SlashCommand::MeshSend { peer_id, message } => Command::MeshSend { peer_id, message },
|
||||
@@ -394,6 +400,8 @@ async fn execute_slash(
|
||||
SlashCommand::GroupInfo => cmd_group_info(session, client).await,
|
||||
SlashCommand::Rename { name } => cmd_rename(session, &name),
|
||||
SlashCommand::History { count } => cmd_history(session, count),
|
||||
SlashCommand::MeshStart => cmd_mesh_start(session).await,
|
||||
SlashCommand::MeshStop => cmd_mesh_stop(session).await,
|
||||
SlashCommand::MeshPeers => cmd_mesh_peers(),
|
||||
SlashCommand::MeshServer { addr } => {
|
||||
super::display::print_status(&format!(
|
||||
@@ -401,9 +409,9 @@ async fn execute_slash(
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(&peer_id, &message),
|
||||
SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(&topic, &message),
|
||||
SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(&topic),
|
||||
SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(session, &peer_id, &message).await,
|
||||
SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(session, &topic, &message).await,
|
||||
SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(session, &topic),
|
||||
SlashCommand::MeshRoute => cmd_mesh_route(session),
|
||||
SlashCommand::MeshIdentity => cmd_mesh_identity(session),
|
||||
SlashCommand::MeshStore => cmd_mesh_store(session),
|
||||
@@ -5,7 +5,7 @@ use opaque_ke::{
|
||||
ClientLogin, ClientLoginFinishParameters, ClientRegistration,
|
||||
ClientRegistrationFinishParameters, CredentialResponse, RegistrationResponse,
|
||||
};
|
||||
use quicproquo_core::{
|
||||
use quicprochat_core::{
|
||||
generate_key_package, hybrid_decrypt, hybrid_encrypt, opaque_auth::OpaqueSuite,
|
||||
GroupMember, HybridKeypair, IdentityKeypair, ReceivedMessage,
|
||||
};
|
||||
@@ -317,7 +317,7 @@ fn derive_identity_for_login(
|
||||
/// The error message contains "E018" if the user already exists.
|
||||
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
|
||||
pub(crate) async fn opaque_register(
|
||||
client: &quicproquo_proto::node_capnp::node_service::Client,
|
||||
client: &quicprochat_proto::node_capnp::node_service::Client,
|
||||
username: &str,
|
||||
password: &str,
|
||||
identity_key: Option<&[u8]>,
|
||||
@@ -378,7 +378,7 @@ pub(crate) async fn opaque_register(
|
||||
/// Perform OPAQUE login and return the raw session token bytes.
|
||||
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
|
||||
pub async fn opaque_login(
|
||||
client: &quicproquo_proto::node_capnp::node_service::Client,
|
||||
client: &quicprochat_proto::node_capnp::node_service::Client,
|
||||
username: &str,
|
||||
password: &str,
|
||||
identity_key: &[u8],
|
||||
@@ -647,8 +647,8 @@ pub async fn cmd_fetch_key(
|
||||
|
||||
/// Run a two-party MLS demo against the unified server.
|
||||
pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
|
||||
let creator_state_path = PathBuf::from("qpq-demo-creator.bin");
|
||||
let joiner_state_path = PathBuf::from("qpq-demo-joiner.bin");
|
||||
let creator_state_path = PathBuf::from("qpc-demo-creator.bin");
|
||||
let joiner_state_path = PathBuf::from("qpc-demo-joiner.bin");
|
||||
|
||||
let (mut creator, creator_hybrid_opt) =
|
||||
load_or_init_state(&creator_state_path, None)?.into_parts(&creator_state_path)?;
|
||||
@@ -1298,7 +1298,7 @@ pub async fn cmd_chat(
|
||||
///
|
||||
/// `conv_db` is the path to the conversation SQLite database (`.convdb` file).
|
||||
/// `conv_id_hex` is the 32-hex-character conversation ID to export.
|
||||
/// `output` is the path for the `.qpqt` transcript file to write.
|
||||
/// `output` is the path for the `.qpct` transcript file to write.
|
||||
/// `transcript_password` is used to derive the encryption key (Argon2id).
|
||||
/// `db_password` is the optional SQLCipher password for the conversation database.
|
||||
pub fn cmd_export(
|
||||
@@ -1308,7 +1308,7 @@ pub fn cmd_export(
|
||||
transcript_password: &str,
|
||||
db_password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
use quicproquo_core::{TranscriptRecord, TranscriptWriter};
|
||||
use quicprochat_core::{TranscriptRecord, TranscriptWriter};
|
||||
use super::conversation::{ConversationId, ConversationStore};
|
||||
|
||||
// Decode conversation ID from hex.
|
||||
@@ -1367,7 +1367,7 @@ pub fn cmd_export(
|
||||
conv.display_name,
|
||||
output.display()
|
||||
);
|
||||
println!("Decrypt with: qpq export verify --input <file> --password <password>");
|
||||
println!("Decrypt with: qpc export verify --input <file> --password <password>");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1376,7 +1376,7 @@ pub fn cmd_export(
|
||||
///
|
||||
/// Prints a summary. Does not require the encryption password (structural check only).
|
||||
pub fn cmd_export_verify(input: &Path) -> anyhow::Result<()> {
|
||||
use quicproquo_core::{validate_transcript_structure, ChainVerdict};
|
||||
use quicprochat_core::{validate_transcript_structure, ChainVerdict};
|
||||
|
||||
let data = std::fs::read(input)
|
||||
.with_context(|| format!("read transcript file '{}'", input.display()))?;
|
||||
@@ -1,6 +1,6 @@
|
||||
//! mDNS-based peer discovery for Freifunk / community mesh deployments.
|
||||
//!
|
||||
//! Browse for `_quicproquo._udp.local.` services on the local network and
|
||||
//! Browse for `_quicprochat._udp.local.` services on the local network and
|
||||
//! surface them as [`DiscoveredPeer`] structs. Servers announce themselves
|
||||
//! automatically on startup; this module lets clients find them without manual
|
||||
//! configuration.
|
||||
@@ -8,7 +8,7 @@
|
||||
//! # Usage
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use quicproquo_client::client::mesh_discovery::MeshDiscovery;
|
||||
//! use quicprochat_client::client::mesh_discovery::MeshDiscovery;
|
||||
//!
|
||||
//! let disc = MeshDiscovery::start()?;
|
||||
//! // Give mDNS time to collect announcements before reading.
|
||||
@@ -16,7 +16,7 @@
|
||||
//! for peer in disc.peers() {
|
||||
//! println!("found: {} at {}", peer.domain, peer.server_addr);
|
||||
//! }
|
||||
//! # Ok::<(), quicproquo_client::client::mesh_discovery::MeshDiscoveryError>(())
|
||||
//! # Ok::<(), quicprochat_client::client::mesh_discovery::MeshDiscoveryError>(())
|
||||
//! ```
|
||||
|
||||
#[cfg(feature = "mesh")]
|
||||
@@ -27,7 +27,7 @@ use std::sync::{Arc, Mutex};
|
||||
#[cfg(feature = "mesh")]
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// A qpq server discovered on the local network via mDNS.
|
||||
/// A qpc server discovered on the local network via mDNS.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DiscoveredPeer {
|
||||
/// Federation domain of the remote server (e.g. `"node1.freifunk.net"`).
|
||||
@@ -57,7 +57,7 @@ pub enum MeshDiscoveryError {
|
||||
}
|
||||
|
||||
impl MeshDiscovery {
|
||||
/// Start browsing for `_quicproquo._udp.local.` services.
|
||||
/// Start browsing for `_quicprochat._udp.local.` services.
|
||||
///
|
||||
/// Returns immediately; peers are collected in the background.
|
||||
/// Returns [`MeshDiscoveryError::FeatureDisabled`] when built without the
|
||||
@@ -79,7 +79,7 @@ impl MeshDiscovery {
|
||||
.map_err(|e| MeshDiscoveryError::DaemonError(e.to_string()))?;
|
||||
|
||||
let receiver = daemon
|
||||
.browse("_quicproquo._udp.local.")
|
||||
.browse("_quicprochat._udp.local.")
|
||||
.map_err(|e| MeshDiscoveryError::BrowseError(e.to_string()))?;
|
||||
|
||||
let peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>> =
|
||||
@@ -91,7 +91,7 @@ impl MeshDiscovery {
|
||||
for event in receiver {
|
||||
match event {
|
||||
ServiceEvent::ServiceResolved(info) => {
|
||||
// Extract the qpq server address from TXT records.
|
||||
// Extract the qpc server address from TXT records.
|
||||
let server_addr_str = info
|
||||
.get_property_val_str("server")
|
||||
.map(|s| s.to_string());
|
||||
@@ -24,7 +24,7 @@ use std::path::Path;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::{Context, bail};
|
||||
use quicproquo_proto::node_capnp::node_service;
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::command_engine::{AssertCondition, CmpOp, Command, CommandRegistry};
|
||||
@@ -9,13 +9,13 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use quicproquo_core::{
|
||||
use quicprochat_core::{
|
||||
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
|
||||
compute_safety_number, hybrid_encrypt, parse as parse_app_msg, serialize_chat,
|
||||
serialize_delete, serialize_dummy, serialize_edit, serialize_file_ref, serialize_reaction,
|
||||
serialize_read_receipt, serialize_typing,
|
||||
};
|
||||
use quicproquo_proto::node_capnp::node_service;
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::interval;
|
||||
|
||||
@@ -60,6 +60,8 @@ pub(crate) enum SlashCommand {
|
||||
Rename { name: String },
|
||||
History { count: usize },
|
||||
/// Mesh subcommands: /mesh peers, /mesh server <addr>, etc.
|
||||
MeshStart,
|
||||
MeshStop,
|
||||
MeshPeers,
|
||||
MeshServer { addr: String },
|
||||
MeshSend { peer_id: String, message: String },
|
||||
@@ -68,6 +70,8 @@ pub(crate) enum SlashCommand {
|
||||
MeshRoute,
|
||||
MeshIdentity,
|
||||
MeshStore,
|
||||
MeshTrace { address: String },
|
||||
MeshStats,
|
||||
/// Display safety number for out-of-band key verification with a contact.
|
||||
Verify { username: String },
|
||||
/// Rotate own MLS leaf key in the active group.
|
||||
@@ -173,6 +177,8 @@ pub(crate) fn parse_input(line: &str) -> Input {
|
||||
Input::Slash(SlashCommand::History { count })
|
||||
}
|
||||
"/mesh" => match arg.as_deref() {
|
||||
Some("start") => Input::Slash(SlashCommand::MeshStart),
|
||||
Some("stop") => Input::Slash(SlashCommand::MeshStop),
|
||||
Some("peers") => Input::Slash(SlashCommand::MeshPeers),
|
||||
Some(rest) if rest.starts_with("server ") => {
|
||||
let addr = rest.trim_start_matches("server ").trim().to_string();
|
||||
@@ -216,12 +222,22 @@ pub(crate) fn parse_input(line: &str) -> Input {
|
||||
Input::Slash(SlashCommand::MeshSubscribe { topic: topic.into() })
|
||||
}
|
||||
}
|
||||
Some("route") => Input::Slash(SlashCommand::MeshRoute),
|
||||
Some("route") | Some("routes") => Input::Slash(SlashCommand::MeshRoute),
|
||||
Some("identity") | Some("id") => Input::Slash(SlashCommand::MeshIdentity),
|
||||
Some("store") => Input::Slash(SlashCommand::MeshStore),
|
||||
Some("stats") => Input::Slash(SlashCommand::MeshStats),
|
||||
Some(rest) if rest.starts_with("trace ") => {
|
||||
let address = rest[6..].trim();
|
||||
if address.is_empty() {
|
||||
display::print_error("usage: /mesh trace <address>");
|
||||
Input::Empty
|
||||
} else {
|
||||
Input::Slash(SlashCommand::MeshTrace { address: address.into() })
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
display::print_error(
|
||||
"usage: /mesh peers|server|send|broadcast|subscribe|route|identity|store"
|
||||
"usage: /mesh start|stop|peers|server|send|broadcast|subscribe|route|identity|store|trace|stats"
|
||||
);
|
||||
Input::Empty
|
||||
}
|
||||
@@ -355,10 +371,10 @@ fn derive_key_path(cert_path: &Path) -> PathBuf {
|
||||
cert_path.with_file_name(key_name)
|
||||
}
|
||||
|
||||
/// Find the `qpq-server` binary: same directory as current exe, then PATH.
|
||||
/// Find the `qpc-server` binary: same directory as current exe, then PATH.
|
||||
fn find_server_binary() -> Option<PathBuf> {
|
||||
if let Ok(exe) = std::env::current_exe() {
|
||||
let sibling = exe.with_file_name("qpq-server");
|
||||
let sibling = exe.with_file_name("qpc-server");
|
||||
if sibling.exists() {
|
||||
return Some(sibling);
|
||||
}
|
||||
@@ -366,7 +382,7 @@ fn find_server_binary() -> Option<PathBuf> {
|
||||
// Fall back to PATH lookup.
|
||||
std::env::var_os("PATH").and_then(|paths| {
|
||||
std::env::split_paths(&paths)
|
||||
.map(|dir| dir.join("qpq-server"))
|
||||
.map(|dir| dir.join("qpc-server"))
|
||||
.find(|p| p.exists())
|
||||
})
|
||||
}
|
||||
@@ -400,13 +416,13 @@ async fn ensure_server(
|
||||
if ca_cert.exists() {
|
||||
// Cert exists but connection failed and no binary found.
|
||||
anyhow::bail!(
|
||||
"server at {server} is not reachable and qpq-server binary not found; \
|
||||
start a server manually or install qpq-server"
|
||||
"server at {server} is not reachable and qpc-server binary not found; \
|
||||
start a server manually or install qpc-server"
|
||||
);
|
||||
} else {
|
||||
anyhow::bail!(
|
||||
"no server running and qpq-server binary not found; \
|
||||
start a server manually or install qpq-server"
|
||||
"no server running and qpc-server binary not found; \
|
||||
start a server manually or install qpc-server"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -445,7 +461,7 @@ async fn ensure_server(
|
||||
|
||||
if start.elapsed() > max_wait {
|
||||
anyhow::bail!(
|
||||
"auto-started qpq-server but it did not become ready within {max_wait:?}"
|
||||
"auto-started qpc-server but it did not become ready within {max_wait:?}"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -804,6 +820,8 @@ async fn handle_slash(
|
||||
SlashCommand::GroupInfo => cmd_group_info(session, client).await,
|
||||
SlashCommand::Rename { name } => cmd_rename(session, &name),
|
||||
SlashCommand::History { count } => cmd_history(session, count),
|
||||
SlashCommand::MeshStart => cmd_mesh_start(session).await,
|
||||
SlashCommand::MeshStop => cmd_mesh_stop(session).await,
|
||||
SlashCommand::MeshPeers => cmd_mesh_peers(),
|
||||
SlashCommand::MeshServer { addr } => {
|
||||
display::print_status(&format!(
|
||||
@@ -811,12 +829,14 @@ async fn handle_slash(
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(&peer_id, &message),
|
||||
SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(&topic, &message),
|
||||
SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(&topic),
|
||||
SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(session, &peer_id, &message).await,
|
||||
SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(session, &topic, &message).await,
|
||||
SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(session, &topic),
|
||||
SlashCommand::MeshRoute => cmd_mesh_route(session),
|
||||
SlashCommand::MeshIdentity => cmd_mesh_identity(session),
|
||||
SlashCommand::MeshStore => cmd_mesh_store(session),
|
||||
SlashCommand::MeshTrace { address } => cmd_mesh_trace(session, &address),
|
||||
SlashCommand::MeshStats => cmd_mesh_stats(session),
|
||||
SlashCommand::Verify { username } => cmd_verify(session, client, &username).await,
|
||||
SlashCommand::UpdateKey => cmd_update_key(session, client).await,
|
||||
SlashCommand::Typing => cmd_typing(session, client).await,
|
||||
@@ -862,7 +882,9 @@ pub(crate) fn print_help() {
|
||||
display::print_status(" /rename <name> - Rename the current conversation");
|
||||
display::print_status(" /history [N] - Show last N messages (default: 20)");
|
||||
display::print_status(" /whoami - Show your identity");
|
||||
display::print_status(" /mesh peers - Discover nearby qpq nodes via mDNS");
|
||||
display::print_status(" /mesh start - Start the P2P node for direct messaging");
|
||||
display::print_status(" /mesh stop - Stop the P2P node");
|
||||
display::print_status(" /mesh peers - Discover nearby qpc nodes via mDNS");
|
||||
display::print_status(" /mesh server <host:port> - Show how to reconnect to a mesh node");
|
||||
display::print_status(" /mesh send <peer> <msg> - Send a P2P message to a mesh peer");
|
||||
display::print_status(" /mesh broadcast <topic> <m> - Broadcast an encrypted message on a topic");
|
||||
@@ -870,6 +892,8 @@ pub(crate) fn print_help() {
|
||||
display::print_status(" /mesh route - Show known mesh peers and routes");
|
||||
display::print_status(" /mesh identity - Show mesh node identity info");
|
||||
display::print_status(" /mesh store - Show mesh store-and-forward stats");
|
||||
display::print_status(" /mesh trace <address> - Show route to a mesh address");
|
||||
display::print_status(" /mesh stats - Show delivery statistics per destination");
|
||||
display::print_status(" /update-key - Rotate your MLS leaf key in the active group");
|
||||
display::print_status(" /verify <username> - Show safety number for key verification");
|
||||
display::print_status(" /react <emoji> [index] - React to last message (or message at index)");
|
||||
@@ -1099,7 +1123,7 @@ pub(crate) async fn cmd_rotate_all_keys(
|
||||
cmd_update_key(session, client).await?;
|
||||
|
||||
// Step 2: Generate new hybrid KEM keypair and upload.
|
||||
let new_kp = quicproquo_core::HybridKeypair::generate();
|
||||
let new_kp = quicprochat_core::HybridKeypair::generate();
|
||||
let id_key = session.identity.public_key_bytes();
|
||||
upload_hybrid_key(client, &id_key, &new_kp.public_key()).await?;
|
||||
session.hybrid_kp = Some(new_kp);
|
||||
@@ -1108,7 +1132,95 @@ pub(crate) async fn cmd_rotate_all_keys(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Discover nearby qpq servers via mDNS (requires `--features mesh` build).
|
||||
/// Start the P2P node for mesh messaging.
|
||||
pub(crate) async fn cmd_mesh_start(session: &mut SessionState) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
if session.p2p_node.is_some() {
|
||||
display::print_status("P2P node is already running");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
display::print_status("starting P2P node...");
|
||||
|
||||
// Try to load a persisted mesh identity or generate a new one.
|
||||
let mesh_state_path = session.state_path.with_extension("mesh.json");
|
||||
let mesh_id = if mesh_state_path.exists() {
|
||||
match quicprochat_p2p::identity::MeshIdentity::load(&mesh_state_path) {
|
||||
Ok(id) => {
|
||||
display::print_status("loaded existing mesh identity");
|
||||
Some(id)
|
||||
}
|
||||
Err(e) => {
|
||||
display::print_status(&format!("could not load mesh identity: {e}, generating new"));
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let node = if let Some(id) = mesh_id {
|
||||
match quicprochat_p2p::P2pNode::start_with_mesh(None, id, 1000).await {
|
||||
Ok(n) => n,
|
||||
Err(e) => {
|
||||
display::print_error(&format!("failed to start P2P node: {e}"));
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match quicprochat_p2p::P2pNode::start(None).await {
|
||||
Ok(n) => n,
|
||||
Err(e) => {
|
||||
display::print_error(&format!("failed to start P2P node: {e}"));
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let node_id = node.node_id();
|
||||
session.p2p_node = Some(Arc::new(node));
|
||||
display::print_status(&format!("P2P node started: {}", node_id.fmt_short()));
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
{
|
||||
let _ = session;
|
||||
display::print_error("requires --features mesh");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Stop the P2P node.
|
||||
pub(crate) async fn cmd_mesh_stop(session: &mut SessionState) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
match session.p2p_node.take() {
|
||||
Some(node) => {
|
||||
// Try to unwrap the Arc; if there are other references, just drop our handle.
|
||||
match Arc::try_unwrap(node) {
|
||||
Ok(owned) => {
|
||||
owned.close().await;
|
||||
display::print_status("P2P node stopped");
|
||||
}
|
||||
Err(_arc) => {
|
||||
display::print_status("P2P node reference released (other tasks may still hold it)");
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
display::print_status("P2P node is not running");
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
{
|
||||
let _ = session;
|
||||
display::print_error("requires --features mesh");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Discover nearby qpc servers via mDNS (requires `--features mesh` build).
|
||||
pub(crate) fn cmd_mesh_peers() -> anyhow::Result<()> {
|
||||
use super::mesh_discovery::MeshDiscovery;
|
||||
|
||||
@@ -1118,65 +1230,117 @@ pub(crate) fn cmd_mesh_peers() -> anyhow::Result<()> {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(disc) => {
|
||||
display::print_status("scanning for nearby qpq nodes (2s)...");
|
||||
display::print_status("scanning for nearby qpc nodes (2s)...");
|
||||
// Block briefly to collect mDNS announcements from the local network.
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
let peers = disc.peers();
|
||||
if peers.is_empty() {
|
||||
display::print_status("no qpq nodes found on the local network");
|
||||
display::print_status("no qpc nodes found on the local network");
|
||||
} else {
|
||||
display::print_status(&format!("found {} node(s):", peers.len()));
|
||||
for p in &peers {
|
||||
display::print_status(&format!(" {} at {}", p.domain, p.server_addr));
|
||||
}
|
||||
display::print_status("use: /mesh server <host:port> to note the address,");
|
||||
display::print_status("then reconnect with: qpq --server <host:port>");
|
||||
display::print_status("then reconnect with: qpc --server <host:port>");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Send a direct P2P mesh message (stub — P2pNode not yet wired into session).
|
||||
pub(crate) fn cmd_mesh_send(peer_id: &str, message: &str) -> anyhow::Result<()> {
|
||||
/// Send a direct P2P mesh message via the session's P2P node.
|
||||
pub(crate) async fn cmd_mesh_send(session: &SessionState, peer_id: &str, message: &str) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
display::print_status(&format!("mesh send: would send to {peer_id}: {message}"));
|
||||
display::print_status("(P2P node integration pending — message not actually sent)");
|
||||
match &session.p2p_node {
|
||||
Some(node) => {
|
||||
// Parse the peer_id as an iroh PublicKey hex string and create an EndpointAddr.
|
||||
let pk_bytes = hex::decode(peer_id)
|
||||
.map_err(|e| anyhow::anyhow!("invalid peer_id hex: {e}"))?;
|
||||
let pk_array: [u8; 32] = pk_bytes
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.map_err(|_| anyhow::anyhow!("peer_id must be 32 bytes (64 hex chars)"))?;
|
||||
let pk = iroh::PublicKey::from_bytes(&pk_array);
|
||||
let addr = iroh::EndpointAddr::from(pk);
|
||||
|
||||
match node.send(addr, message.as_bytes()).await {
|
||||
Ok(()) => {
|
||||
display::print_status(&format!("sent to {}: {message}", &peer_id[..8.min(peer_id.len())]));
|
||||
}
|
||||
Err(e) => {
|
||||
display::print_error(&format!("P2P send failed: {e}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
display::print_error("P2P node not started. Use /mesh start to initialize.");
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
{
|
||||
let _ = (peer_id, message);
|
||||
let _ = (session, peer_id, message);
|
||||
display::print_error("requires --features mesh");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Broadcast an encrypted message on a topic (stub — P2pNode not yet wired into session).
|
||||
pub(crate) fn cmd_mesh_broadcast(topic: &str, message: &str) -> anyhow::Result<()> {
|
||||
/// Broadcast an encrypted message on a topic via the session's P2P node.
|
||||
pub(crate) async fn cmd_mesh_broadcast(session: &SessionState, topic: &str, message: &str) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
display::print_status(&format!("mesh broadcast to {topic}: {message}"));
|
||||
display::print_status("(P2P node integration pending — message not actually sent)");
|
||||
match &session.p2p_node {
|
||||
Some(node) => {
|
||||
match node.broadcast(topic, message.as_bytes()).await {
|
||||
Ok(()) => {
|
||||
display::print_status(&format!("broadcast to {topic}: {message}"));
|
||||
}
|
||||
Err(e) => {
|
||||
display::print_error(&format!("broadcast failed: {e}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
display::print_error("P2P node not started. Use /mesh start to initialize.");
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
{
|
||||
let _ = (topic, message);
|
||||
let _ = (session, topic, message);
|
||||
display::print_error("requires --features mesh");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Subscribe to a broadcast topic (stub — P2pNode not yet wired into session).
|
||||
pub(crate) fn cmd_mesh_subscribe(topic: &str) -> anyhow::Result<()> {
|
||||
/// Subscribe to a broadcast topic on the session's P2P node.
|
||||
pub(crate) fn cmd_mesh_subscribe(session: &SessionState, topic: &str) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
display::print_status(&format!("subscribed to topic: {topic}"));
|
||||
display::print_status("(P2P node integration pending — subscription is not persisted)");
|
||||
match &session.p2p_node {
|
||||
Some(node) => {
|
||||
// Generate a random key for the subscription.
|
||||
let key: [u8; 32] = rand::random();
|
||||
match node.subscribe(topic, key) {
|
||||
Ok(()) => {
|
||||
display::print_status(&format!("subscribed to topic: {topic}"));
|
||||
display::print_status(&format!("share this key to let others join: {}", hex::encode(key)));
|
||||
}
|
||||
Err(e) => {
|
||||
display::print_error(&format!("subscribe failed: {e}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
display::print_error("P2P node not started. Use /mesh start to initialize.");
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
{
|
||||
let _ = topic;
|
||||
let _ = (session, topic);
|
||||
display::print_error("requires --features mesh");
|
||||
}
|
||||
Ok(())
|
||||
@@ -1188,7 +1352,7 @@ pub(crate) fn cmd_mesh_route(session: &SessionState) -> anyhow::Result<()> {
|
||||
{
|
||||
let mesh_state_path = session.state_path.with_extension("mesh.json");
|
||||
if mesh_state_path.exists() {
|
||||
let id = quicproquo_p2p::identity::MeshIdentity::load(&mesh_state_path)?;
|
||||
let id = quicprochat_p2p::identity::MeshIdentity::load(&mesh_state_path)?;
|
||||
let peers = id.known_peers();
|
||||
if peers.is_empty() {
|
||||
display::print_status("no known mesh peers");
|
||||
@@ -1222,7 +1386,7 @@ pub(crate) fn cmd_mesh_identity(session: &SessionState) -> anyhow::Result<()> {
|
||||
{
|
||||
let mesh_state_path = session.state_path.with_extension("mesh.json");
|
||||
if mesh_state_path.exists() {
|
||||
let id = quicproquo_p2p::identity::MeshIdentity::load(&mesh_state_path)?;
|
||||
let id = quicprochat_p2p::identity::MeshIdentity::load(&mesh_state_path)?;
|
||||
display::print_status(&format!("mesh public key: {}", hex::encode(id.public_key())));
|
||||
display::print_status(&format!("known peers: {}", id.known_peers().len()));
|
||||
} else {
|
||||
@@ -1242,10 +1406,74 @@ pub(crate) fn cmd_mesh_identity(session: &SessionState) -> anyhow::Result<()> {
|
||||
pub(crate) fn cmd_mesh_store(session: &SessionState) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
// Without a live P2pNode in the session, we can only report that the store
|
||||
// is not active. Once P2pNode is wired in, this will show real stats.
|
||||
display::print_status("mesh store: not active (P2P node not started in this session)");
|
||||
display::print_status("start mesh mode to enable store-and-forward");
|
||||
match &session.p2p_node {
|
||||
Some(node) => {
|
||||
let store = node.mesh_store();
|
||||
let guard = store.lock().map_err(|e| anyhow::anyhow!("store lock: {e}"))?;
|
||||
let (total_messages, unique_recipients) = guard.stats();
|
||||
display::print_status(&format!("mesh store: {} messages for {} recipients", total_messages, unique_recipients));
|
||||
}
|
||||
None => {
|
||||
display::print_status("mesh store: not active (P2P node not started)");
|
||||
display::print_status("use /mesh start to enable store-and-forward");
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
{
|
||||
let _ = session;
|
||||
display::print_error("requires --features mesh");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Show route to a mesh address.
|
||||
pub(crate) fn cmd_mesh_trace(session: &SessionState, address: &str) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
// Parse the address (hex string to 16 bytes)
|
||||
let addr_bytes = match hex::decode(address) {
|
||||
Ok(b) if b.len() == 16 => {
|
||||
let mut arr = [0u8; 16];
|
||||
arr.copy_from_slice(&b);
|
||||
arr
|
||||
}
|
||||
Ok(b) if b.len() == 32 => {
|
||||
// Full public key — compute truncated address
|
||||
quicprochat_p2p::announce::compute_address(&b)
|
||||
}
|
||||
_ => {
|
||||
display::print_error("invalid address: expected 16-byte hex (32 chars) or 32-byte key (64 chars)");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
display::print_status(&format!("tracing route to {}", hex::encode(addr_bytes)));
|
||||
|
||||
// For now, show the route from the routing table if we had one
|
||||
// In a full implementation, this would query the MeshRouter
|
||||
display::print_status(" (routing table not yet wired to REPL session)");
|
||||
display::print_status(" this will show hop-by-hop path once MeshRouter is integrated");
|
||||
|
||||
let _ = session;
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
{
|
||||
let _ = (session, address);
|
||||
display::print_error("requires --features mesh");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Show delivery statistics per destination.
|
||||
pub(crate) fn cmd_mesh_stats(session: &SessionState) -> anyhow::Result<()> {
|
||||
#[cfg(feature = "mesh")]
|
||||
{
|
||||
// For now, report that stats are not available without MeshRouter
|
||||
display::print_status("mesh delivery statistics:");
|
||||
display::print_status(" (MeshRouter not yet wired to REPL session)");
|
||||
display::print_status(" stats will show per-destination delivery counts once integrated");
|
||||
|
||||
let _ = session;
|
||||
}
|
||||
#[cfg(not(feature = "mesh"))]
|
||||
@@ -1449,10 +1677,8 @@ pub(crate) async fn cmd_dm(
|
||||
},
|
||||
display_name: format!("@{username}"),
|
||||
mls_group_blob: member
|
||||
.group_ref()
|
||||
.map(bincode::serialize)
|
||||
.transpose()
|
||||
.context("serialize group")?,
|
||||
.serialize_mls_state()
|
||||
.context("serialize MLS state")?,
|
||||
keystore_blob: None,
|
||||
member_keys,
|
||||
unread_count: 0,
|
||||
@@ -1493,10 +1719,8 @@ pub(crate) fn cmd_create_group(session: &mut SessionState, name: &str) -> anyhow
|
||||
kind: ConversationKind::Group { name: name.to_string() },
|
||||
display_name: format!("#{name}"),
|
||||
mls_group_blob: member
|
||||
.group_ref()
|
||||
.map(bincode::serialize)
|
||||
.transpose()
|
||||
.context("serialize group")?,
|
||||
.serialize_mls_state()
|
||||
.context("serialize MLS state")?,
|
||||
keystore_blob: None,
|
||||
member_keys,
|
||||
unread_count: 0,
|
||||
@@ -1780,9 +2004,7 @@ pub(crate) async fn cmd_join(
|
||||
kind: ConversationKind::Group { name: display.clone() },
|
||||
display_name: format!("#{display}"),
|
||||
mls_group_blob: new_member
|
||||
.group_ref()
|
||||
.map(bincode::serialize)
|
||||
.transpose()
|
||||
.serialize_mls_state()
|
||||
.context("serialize joined group")?,
|
||||
keystore_blob: None,
|
||||
member_keys,
|
||||
@@ -2005,8 +2227,8 @@ pub(crate) async fn cmd_typing(
|
||||
);
|
||||
|
||||
let app_payload = serialize_typing(1);
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = member
|
||||
.send_message(&padded)
|
||||
@@ -2082,8 +2304,8 @@ pub(crate) async fn cmd_react(
|
||||
let app_payload = serialize_reaction(ref_msg_id, emoji.as_bytes())
|
||||
.context("serialize reaction")?;
|
||||
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = member
|
||||
.send_message(&padded)
|
||||
@@ -2167,8 +2389,8 @@ pub(crate) async fn cmd_edit(
|
||||
|
||||
let app_payload = serialize_edit(&msg_id, new_text.as_bytes())
|
||||
.context("serialize edit message")?;
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = member
|
||||
.send_message(&padded)
|
||||
@@ -2238,8 +2460,8 @@ pub(crate) async fn cmd_delete(
|
||||
);
|
||||
|
||||
let app_payload = serialize_delete(&msg_id);
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = member
|
||||
.send_message(&padded)
|
||||
@@ -2394,8 +2616,8 @@ pub(crate) async fn cmd_send_file(
|
||||
"cannot send files in a local-only conversation"
|
||||
);
|
||||
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = member
|
||||
.send_message(&padded)
|
||||
@@ -2672,8 +2894,8 @@ pub(crate) async fn do_send(
|
||||
.context("serialize app message")?;
|
||||
|
||||
// Metadata protection: seal sender identity inside payload + pad to bucket size.
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = member
|
||||
.send_message(&padded)
|
||||
@@ -2762,8 +2984,8 @@ async fn send_dummy_message(
|
||||
}
|
||||
|
||||
let dummy_payload = serialize_dummy();
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &dummy_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &dummy_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = match member.send_message(&padded) {
|
||||
Ok(ct) => ct,
|
||||
@@ -2845,12 +3067,12 @@ async fn poll_messages(
|
||||
// Falls back gracefully for messages from older clients.
|
||||
let (sender_key, app_bytes) = {
|
||||
// Step 1: try unpad
|
||||
let after_unpad = quicproquo_core::padding::unpad(&plaintext)
|
||||
let after_unpad = quicprochat_core::padding::unpad(&plaintext)
|
||||
.unwrap_or_else(|_| plaintext.clone());
|
||||
|
||||
// Step 2: try unseal
|
||||
if quicproquo_core::sealed_sender::is_sealed(&after_unpad) {
|
||||
match quicproquo_core::sealed_sender::unseal(&after_unpad) {
|
||||
if quicprochat_core::sealed_sender::is_sealed(&after_unpad) {
|
||||
match quicprochat_core::sealed_sender::unseal(&after_unpad) {
|
||||
Ok((sk, inner)) => (sk.to_vec(), inner),
|
||||
Err(_) => (my_key.clone(), after_unpad),
|
||||
}
|
||||
@@ -3048,8 +3270,8 @@ async fn poll_messages(
|
||||
if let Some(mid) = msg_id {
|
||||
let receipt_bytes = serialize_read_receipt(mid);
|
||||
let identity = Arc::clone(&session.identity);
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &receipt_bytes);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &receipt_bytes);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
if let Some(m) = session.members.get_mut(conv_id) {
|
||||
if let Ok(ct) = m.send_message(&padded) {
|
||||
let _ = enqueue(client, &sender_key, &ct).await;
|
||||
@@ -3186,8 +3408,9 @@ async fn try_auto_join(
|
||||
};
|
||||
|
||||
let mls_blob = member
|
||||
.group_ref()
|
||||
.and_then(|g| bincode::serialize(g).ok());
|
||||
.serialize_mls_state()
|
||||
.ok()
|
||||
.flatten();
|
||||
|
||||
let conv = Conversation {
|
||||
id: conv_id.clone(),
|
||||
@@ -10,8 +10,8 @@ use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
|
||||
use quicproquo_core::HybridPublicKey;
|
||||
use quicproquo_proto::node_capnp::{auth, node_service};
|
||||
use quicprochat_core::HybridPublicKey;
|
||||
use quicprochat_proto::node_capnp::{auth, node_service};
|
||||
|
||||
use crate::{AUTH_CONTEXT, INSECURE_SKIP_VERIFY};
|
||||
|
||||
@@ -440,11 +440,11 @@ pub async fn fetch_hybrid_key(
|
||||
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
pub fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&quicproquo_core::HybridKeypair>,
|
||||
hybrid_kp: Option<&quicprochat_core::HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
quicproquo_core::hybrid_decrypt(kp, payload, b"", b"").map_err(|e| anyhow::anyhow!("{e}"))
|
||||
quicprochat_core::hybrid_decrypt(kp, payload, b"", b"").map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
/// Peek at queued payloads without removing them.
|
||||
@@ -701,9 +701,9 @@ pub async fn resolve_user(
|
||||
.to_vec();
|
||||
|
||||
if !proof_bytes.is_empty() {
|
||||
let proof = quicproquo_kt::InclusionProof::from_bytes(&proof_bytes)
|
||||
let proof = quicprochat_kt::InclusionProof::from_bytes(&proof_bytes)
|
||||
.context("resolve_user: inclusion proof deserialise failed")?;
|
||||
quicproquo_kt::verify_inclusion(&proof, username, &key)
|
||||
quicprochat_kt::verify_inclusion(&proof, username, &key)
|
||||
.context("resolve_user: KT inclusion proof verification FAILED — possible key mislabelling")?;
|
||||
}
|
||||
|
||||
@@ -11,12 +11,12 @@ use std::time::Instant;
|
||||
use anyhow::Context;
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use quicproquo_core::{DiskKeyStore, GroupMember, HybridKeypair, IdentityKeypair};
|
||||
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, IdentityKeypair};
|
||||
|
||||
use super::conversation::{
|
||||
now_ms, Conversation, ConversationId, ConversationKind, ConversationStore,
|
||||
};
|
||||
use super::state::{load_or_init_state, keystore_path};
|
||||
use super::state::load_or_init_state;
|
||||
|
||||
/// Runtime state for an interactive REPL session.
|
||||
pub struct SessionState {
|
||||
@@ -53,6 +53,9 @@ pub struct SessionState {
|
||||
pub padding_enabled: bool,
|
||||
/// Last epoch at which we sent a message (for /verify-fs).
|
||||
pub last_send_epoch: Option<u64>,
|
||||
/// P2P node for direct mesh messaging (requires `--features mesh`).
|
||||
#[cfg(feature = "mesh")]
|
||||
pub p2p_node: Option<Arc<quicprochat_p2p::P2pNode>>,
|
||||
}
|
||||
|
||||
impl SessionState {
|
||||
@@ -93,6 +96,8 @@ impl SessionState {
|
||||
auto_clear_secs: None,
|
||||
padding_enabled: false,
|
||||
last_send_epoch: None,
|
||||
#[cfg(feature = "mesh")]
|
||||
p2p_node: None,
|
||||
};
|
||||
|
||||
// Migrate legacy single-group into conversations if present and not yet migrated.
|
||||
@@ -109,7 +114,7 @@ impl SessionState {
|
||||
/// Migrate the legacy single-group from StoredState into the conversation DB.
|
||||
fn migrate_legacy_group(
|
||||
&mut self,
|
||||
state_path: &Path,
|
||||
_state_path: &Path,
|
||||
group_blob: &Option<Vec<u8>>,
|
||||
) -> anyhow::Result<()> {
|
||||
let blob = match group_blob {
|
||||
@@ -117,16 +122,22 @@ impl SessionState {
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Reconstruct GroupMember using the legacy keystore and group blob.
|
||||
let ks_path = keystore_path(state_path);
|
||||
let ks = DiskKeyStore::persistent(&ks_path)?;
|
||||
let group = bincode::deserialize(blob).context("decode legacy group")?;
|
||||
let member = GroupMember::new_with_state(
|
||||
// Legacy group blobs used openmls 0.5 serde format. After the 0.8
|
||||
// upgrade the blob format changed to storage-provider state. Attempt
|
||||
// to load from the new format; if that fails, skip the legacy group.
|
||||
let group_id_guess = &blob[..blob.len().min(16)];
|
||||
let member = match GroupMember::new_from_storage_bytes(
|
||||
Arc::clone(&self.identity),
|
||||
ks,
|
||||
Some(group),
|
||||
blob,
|
||||
group_id_guess,
|
||||
false, // legacy groups are classical
|
||||
);
|
||||
) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "skipping incompatible legacy group blob (openmls version mismatch)");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let group_id_bytes = member.group_id().unwrap_or_default();
|
||||
|
||||
@@ -182,26 +193,31 @@ impl SessionState {
|
||||
|
||||
/// Create a GroupMember from a stored conversation.
|
||||
fn create_member_from_conv(&self, conv: &Conversation) -> anyhow::Result<GroupMember> {
|
||||
let ks_path = self.keystore_path_for(&conv.id);
|
||||
let ks = DiskKeyStore::persistent(&ks_path)
|
||||
.unwrap_or_else(|e| {
|
||||
tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral");
|
||||
DiskKeyStore::ephemeral()
|
||||
});
|
||||
|
||||
let group = conv
|
||||
.mls_group_blob
|
||||
.as_ref()
|
||||
.map(|b| bincode::deserialize(b))
|
||||
.transpose()
|
||||
.context("decode MLS group from conversation db")?;
|
||||
|
||||
Ok(GroupMember::new_with_state(
|
||||
Arc::clone(&self.identity),
|
||||
ks,
|
||||
group,
|
||||
conv.is_hybrid,
|
||||
))
|
||||
if let Some(blob) = conv.mls_group_blob.as_ref() {
|
||||
let group_id = conv.id.0.as_slice();
|
||||
let member = GroupMember::new_from_storage_bytes(
|
||||
Arc::clone(&self.identity),
|
||||
blob,
|
||||
group_id,
|
||||
conv.is_hybrid,
|
||||
)
|
||||
.context("restore MLS state from conversation db")?;
|
||||
Ok(member)
|
||||
} else {
|
||||
// No MLS state — create an empty member.
|
||||
let ks_path = self.keystore_path_for(&conv.id);
|
||||
let ks = DiskKeyStore::persistent(&ks_path)
|
||||
.unwrap_or_else(|e| {
|
||||
tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral");
|
||||
DiskKeyStore::ephemeral()
|
||||
});
|
||||
Ok(GroupMember::new_with_state(
|
||||
Arc::clone(&self.identity),
|
||||
ks,
|
||||
None,
|
||||
conv.is_hybrid,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Path for a per-conversation keystore file.
|
||||
@@ -214,10 +230,8 @@ impl SessionState {
|
||||
pub fn save_member(&self, conv_id: &ConversationId) -> anyhow::Result<()> {
|
||||
let member = self.members.get(conv_id).context("no such conversation")?;
|
||||
let blob = member
|
||||
.group_ref()
|
||||
.map(bincode::serialize)
|
||||
.transpose()
|
||||
.context("serialize MLS group")?;
|
||||
.serialize_mls_state()
|
||||
.context("serialize MLS state")?;
|
||||
|
||||
let member_keys = member.member_identities();
|
||||
|
||||
@@ -10,7 +10,7 @@ use chacha20poly1305::{
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use quicproquo_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
|
||||
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
|
||||
|
||||
/// Magic bytes for encrypted client state files.
|
||||
const STATE_MAGIC: &[u8; 4] = b"QPCE";
|
||||
@@ -27,18 +27,31 @@ pub struct StoredState {
|
||||
/// Cached member public keys for group participants.
|
||||
#[serde(default)]
|
||||
pub member_keys: Vec<Vec<u8>>,
|
||||
/// MLS group ID bytes, needed to reload the group from StorageProvider state.
|
||||
#[serde(default)]
|
||||
pub group_id: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl StoredState {
|
||||
pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> {
|
||||
let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
|
||||
let group = self
|
||||
.group
|
||||
.map(|bytes| bincode::deserialize(&bytes).context("decode group"))
|
||||
.transpose()?;
|
||||
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
|
||||
let hybrid = self.hybrid_key.is_some();
|
||||
let member = GroupMember::new_with_state(identity, key_store, group, hybrid);
|
||||
|
||||
let member = match (self.group.as_ref(), self.group_id.as_ref()) {
|
||||
(Some(storage_bytes), Some(gid)) => {
|
||||
GroupMember::new_from_storage_bytes(
|
||||
identity,
|
||||
storage_bytes,
|
||||
gid,
|
||||
hybrid,
|
||||
)
|
||||
.context("restore MLS state from stored state")?
|
||||
}
|
||||
_ => {
|
||||
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
|
||||
GroupMember::new_with_state(identity, key_store, None, hybrid)
|
||||
}
|
||||
};
|
||||
|
||||
let hybrid_kp = self
|
||||
.hybrid_key
|
||||
@@ -50,15 +63,15 @@ impl StoredState {
|
||||
|
||||
pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
|
||||
let group = member
|
||||
.group_ref()
|
||||
.map(|g| bincode::serialize(g).context("serialize group"))
|
||||
.transpose()?;
|
||||
.serialize_mls_state()
|
||||
.context("serialize MLS state")?;
|
||||
|
||||
Ok(Self {
|
||||
identity_seed: *member.identity_seed(),
|
||||
group,
|
||||
hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
|
||||
member_keys: Vec::new(),
|
||||
group_id: member.group_id(),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -245,6 +258,7 @@ mod tests {
|
||||
hybrid_key: None,
|
||||
group: None,
|
||||
member_keys: Vec::new(),
|
||||
group_id: None,
|
||||
};
|
||||
let password = "test-password";
|
||||
let plaintext = bincode::serialize(&state).unwrap();
|
||||
@@ -268,6 +282,7 @@ mod tests {
|
||||
}),
|
||||
group: None,
|
||||
member_keys: Vec::new(),
|
||||
group_id: None,
|
||||
};
|
||||
let password = "another-password";
|
||||
let plaintext = bincode::serialize(&state).unwrap();
|
||||
@@ -285,6 +300,7 @@ mod tests {
|
||||
hybrid_key: None,
|
||||
group: None,
|
||||
member_keys: Vec::new(),
|
||||
group_id: None,
|
||||
};
|
||||
let plaintext = bincode::serialize(&state).unwrap();
|
||||
let encrypted = encrypt_state("correct", &plaintext).unwrap();
|
||||
@@ -1,4 +1,4 @@
|
||||
//! Full-screen Ratatui TUI for quicproquo.
|
||||
//! Full-screen Ratatui TUI for quicprochat.
|
||||
//!
|
||||
//! Layout:
|
||||
//! ┌──────────────┬──────────────────────────────────────────┐
|
||||
@@ -48,11 +48,11 @@ use super::session::SessionState;
|
||||
use super::state::load_or_init_state;
|
||||
use super::token_cache::{load_cached_session, save_cached_session};
|
||||
|
||||
use quicproquo_core::{
|
||||
use quicprochat_core::{
|
||||
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
|
||||
hybrid_encrypt, parse as parse_app_msg, serialize_chat,
|
||||
};
|
||||
use quicproquo_proto::node_capnp::node_service;
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
|
||||
// ── App events ───────────────────────────────────────────────────────────────
|
||||
|
||||
@@ -393,11 +393,11 @@ async fn poll_task(
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(ReceivedMessage::Application(plaintext)) => {
|
||||
let (sender_key, app_bytes) = {
|
||||
let after_unpad = quicproquo_core::padding::unpad(&plaintext)
|
||||
let after_unpad = quicprochat_core::padding::unpad(&plaintext)
|
||||
.unwrap_or_else(|_| plaintext.clone());
|
||||
|
||||
if quicproquo_core::sealed_sender::is_sealed(&after_unpad) {
|
||||
match quicproquo_core::sealed_sender::unseal(&after_unpad) {
|
||||
if quicprochat_core::sealed_sender::is_sealed(&after_unpad) {
|
||||
match quicprochat_core::sealed_sender::unseal(&after_unpad) {
|
||||
Ok((sk, inner)) => (sk.to_vec(), inner),
|
||||
Err(_) => (my_key.clone(), after_unpad),
|
||||
}
|
||||
@@ -493,8 +493,8 @@ async fn send_message(
|
||||
.context("serialize app message")?;
|
||||
|
||||
// Metadata protection: seal + pad.
|
||||
let sealed = quicproquo_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicproquo_core::padding::pad(&sealed);
|
||||
let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
|
||||
let padded = quicprochat_core::padding::pad(&sealed);
|
||||
|
||||
let ct = member.send_message(&padded).context("MLS encrypt")?;
|
||||
|
||||
@@ -543,7 +543,7 @@ async fn send_message(
|
||||
|
||||
// ── TUI entry point ───────────────────────────────────────────────────────────
|
||||
|
||||
/// Entry point for `qpq tui`. Sets up the terminal, runs the event loop, and
|
||||
/// Entry point for `qpc tui`. Sets up the terminal, runs the event loop, and
|
||||
/// restores the terminal on exit.
|
||||
pub async fn run_tui(
|
||||
state_path: &Path,
|
||||
@@ -1,10 +1,10 @@
|
||||
//! v2 REPL — thin shell over `quicproquo_sdk::QpqClient`.
|
||||
//! v2 REPL — thin shell over `quicprochat_sdk::QpqClient`.
|
||||
//!
|
||||
//! Provides an interactive command-line interface with categorized `/help`,
|
||||
//! tab-completion, and a background event listener. Delegates all crypto,
|
||||
//! MLS, and RPC work to the SDK.
|
||||
//!
|
||||
//! Build: `cargo build -p quicproquo-client --features v2`
|
||||
//! Build: `cargo build -p quicprochat-client --features v2`
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::process::{Child, Command as ProcessCommand};
|
||||
@@ -12,10 +12,10 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use quicproquo_core::{GroupMember, IdentityKeypair};
|
||||
use quicproquo_sdk::client::QpqClient;
|
||||
use quicproquo_sdk::conversation::{ConversationId, ConversationKind, StoredMessage};
|
||||
use quicproquo_sdk::events::ClientEvent;
|
||||
use quicprochat_core::{GroupMember, IdentityKeypair};
|
||||
use quicprochat_sdk::client::QpqClient;
|
||||
use quicprochat_sdk::conversation::{ConversationId, ConversationKind, StoredMessage};
|
||||
use quicprochat_sdk::events::ClientEvent;
|
||||
use rustyline::completion::{Completer, Pair};
|
||||
use rustyline::error::ReadlineError;
|
||||
use rustyline::highlight::Highlighter;
|
||||
@@ -216,14 +216,14 @@ impl Drop for ServerGuard {
|
||||
|
||||
fn find_server_binary() -> Option<PathBuf> {
|
||||
if let Ok(exe) = std::env::current_exe() {
|
||||
let sibling = exe.with_file_name("qpq-server");
|
||||
let sibling = exe.with_file_name("qpc-server");
|
||||
if sibling.exists() {
|
||||
return Some(sibling);
|
||||
}
|
||||
}
|
||||
std::env::var_os("PATH").and_then(|paths| {
|
||||
std::env::split_paths(&paths)
|
||||
.map(|dir| dir.join("qpq-server"))
|
||||
.map(|dir| dir.join("qpc-server"))
|
||||
.find(|p| p.exists())
|
||||
})
|
||||
}
|
||||
@@ -235,7 +235,7 @@ async fn auto_start_server(addr: &str) -> ServerGuard {
|
||||
let binary = match find_server_binary() {
|
||||
Some(b) => b,
|
||||
None => {
|
||||
display::print_status("server not reachable and qpq-server binary not found");
|
||||
display::print_status("server not reachable and qpc-server binary not found");
|
||||
return ServerGuard(None);
|
||||
}
|
||||
};
|
||||
@@ -294,6 +294,15 @@ fn show_event(event: &ClientEvent) {
|
||||
};
|
||||
display::print_incoming(&sender, body);
|
||||
}
|
||||
ClientEvent::Connected => {
|
||||
display::print_status("connected to server");
|
||||
}
|
||||
ClientEvent::Disconnected { reason } => {
|
||||
display::print_error(&format!("disconnected: {reason}"));
|
||||
}
|
||||
ClientEvent::Reconnecting { attempt } => {
|
||||
display::print_status(&format!("reconnecting... (attempt {attempt})"));
|
||||
}
|
||||
ClientEvent::ConversationCreated { display_name, .. } => {
|
||||
display::print_status(&format!("new conversation: {display_name}"));
|
||||
}
|
||||
@@ -311,7 +320,7 @@ fn show_event(event: &ClientEvent) {
|
||||
// ── Help ────────────────────────────────────────────────────────────────────
|
||||
|
||||
fn print_help() {
|
||||
println!("\n{BOLD}quicproquo v2 REPL{RESET}\n");
|
||||
println!("\n{BOLD}quicprochat v2 REPL{RESET}\n");
|
||||
for cat in Category::all() {
|
||||
println!("{BOLD}{}{RESET}", cat.label());
|
||||
for cmd in COMMANDS.iter().filter(|c| c.category == *cat) {
|
||||
@@ -397,7 +406,7 @@ async fn dispatch(
|
||||
|
||||
fn do_status(client: &QpqClient, st: &ReplState) {
|
||||
println!("{BOLD}Status{RESET}");
|
||||
println!(" connected: {}", if client.is_connected() { "yes" } else { "no" });
|
||||
println!(" connection: {}", client.connection_state());
|
||||
println!(" authenticated: {}", if client.is_authenticated() { "yes" } else { "no" });
|
||||
println!(" username: {}", client.username().unwrap_or("(none)"));
|
||||
println!(" conversation: {}", st.current_display_name.as_deref().unwrap_or("(none)"));
|
||||
@@ -462,14 +471,14 @@ async fn do_login(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
// Try to load identity keypair from state file.
|
||||
let state_path = &client.config_state_path();
|
||||
if state_path.exists() {
|
||||
match quicproquo_sdk::state::load_state(state_path, Some(pass)) {
|
||||
match quicprochat_sdk::state::load_state(state_path, Some(pass)) {
|
||||
Ok(stored) => {
|
||||
let kp = IdentityKeypair::from_seed(stored.identity_seed);
|
||||
st.identity = Some(Arc::new(kp));
|
||||
}
|
||||
Err(_) => {
|
||||
// Try without password (unencrypted state).
|
||||
if let Ok(stored) = quicproquo_sdk::state::load_state(state_path, None) {
|
||||
if let Ok(stored) = quicprochat_sdk::state::load_state(state_path, None) {
|
||||
let kp = IdentityKeypair::from_seed(stored.identity_seed);
|
||||
st.identity = Some(Arc::new(kp));
|
||||
}
|
||||
@@ -493,7 +502,7 @@ async fn do_resolve(client: &QpqClient, args: &str) -> anyhow::Result<()> {
|
||||
return Ok(());
|
||||
}
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
match quicproquo_sdk::users::resolve_user(rpc, name).await? {
|
||||
match quicprochat_sdk::users::resolve_user(rpc, name).await? {
|
||||
Some(key) => println!(" {name} -> {}", hex::encode(&key)),
|
||||
None => display::print_error(&format!("user '{name}' not found")),
|
||||
}
|
||||
@@ -510,7 +519,7 @@ async fn do_safety(client: &QpqClient, st: &ReplState, args: &str) -> anyhow::Re
|
||||
let my_key = identity.public_key_bytes();
|
||||
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let peer_key = quicproquo_sdk::users::resolve_user(rpc, name)
|
||||
let peer_key = quicprochat_sdk::users::resolve_user(rpc, name)
|
||||
.await?
|
||||
.ok_or_else(|| anyhow::anyhow!("user '{name}' not found"))?;
|
||||
if peer_key.len() != 32 {
|
||||
@@ -519,7 +528,7 @@ async fn do_safety(client: &QpqClient, st: &ReplState, args: &str) -> anyhow::Re
|
||||
let mut peer_arr = [0u8; 32];
|
||||
peer_arr.copy_from_slice(&peer_key);
|
||||
|
||||
let sn = quicproquo_core::compute_safety_number(&my_key, &peer_arr);
|
||||
let sn = quicprochat_core::compute_safety_number(&my_key, &peer_arr);
|
||||
println!("\n{BOLD}Safety number with {name}:{RESET}");
|
||||
println!(" {sn}\n");
|
||||
println!("{DIM}Compare with {name} over a trusted channel.{RESET}");
|
||||
@@ -536,7 +545,7 @@ async fn do_refresh_key(client: &QpqClient, st: &ReplState) -> anyhow::Result<()
|
||||
.map_err(|e| anyhow::anyhow!("generate key package: {e}"))?;
|
||||
|
||||
let pub_key = identity.public_key_bytes();
|
||||
let fp = quicproquo_sdk::keys::upload_key_package(rpc, &pub_key, &kp_bytes).await?;
|
||||
let fp = quicprochat_sdk::keys::upload_key_package(rpc, &pub_key, &kp_bytes).await?;
|
||||
display::print_status(&format!(
|
||||
"KeyPackage uploaded (fp: {})",
|
||||
hex::encode(&fp[..8.min(fp.len())])
|
||||
@@ -554,7 +563,7 @@ async fn do_dm(client: &mut QpqClient, st: &mut ReplState, args: &str) -> anyhow
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let conv_store = client.conversations().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
let peer_key = quicproquo_sdk::users::resolve_user(rpc, username)
|
||||
let peer_key = quicprochat_sdk::users::resolve_user(rpc, username)
|
||||
.await?
|
||||
.ok_or_else(|| anyhow::anyhow!("user '{username}' not found"))?;
|
||||
|
||||
@@ -565,13 +574,13 @@ async fn do_dm(client: &mut QpqClient, st: &mut ReplState, args: &str) -> anyhow
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let peer_kp = quicproquo_sdk::keys::fetch_key_package(rpc, &peer_key)
|
||||
let peer_kp = quicprochat_sdk::keys::fetch_key_package(rpc, &peer_key)
|
||||
.await?
|
||||
.ok_or_else(|| anyhow::anyhow!("peer has no available KeyPackage"))?;
|
||||
|
||||
let mut member = GroupMember::new(Arc::clone(&identity));
|
||||
|
||||
let (conv_id, was_new) = quicproquo_sdk::groups::create_dm(
|
||||
let (conv_id, was_new) = quicprochat_sdk::groups::create_dm(
|
||||
rpc, conv_store, &mut member, &identity,
|
||||
&peer_key, &peer_kp, None, None,
|
||||
).await?;
|
||||
@@ -599,7 +608,7 @@ async fn do_send(client: &QpqClient, st: &ReplState, msg: &str) -> anyhow::Resul
|
||||
.load_conversation(conv_id)?
|
||||
.ok_or_else(|| anyhow::anyhow!("conversation not found"))?;
|
||||
|
||||
let mut member = quicproquo_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
let mut member = quicprochat_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
|
||||
let my_pub = identity.public_key_bytes();
|
||||
let recipients: Vec<Vec<u8>> = conv
|
||||
@@ -614,13 +623,13 @@ async fn do_send(client: &QpqClient, st: &ReplState, msg: &str) -> anyhow::Resul
|
||||
}
|
||||
|
||||
let hybrid_keys = vec![None; recipients.len()];
|
||||
quicproquo_sdk::messaging::send_message(
|
||||
quicprochat_sdk::messaging::send_message(
|
||||
rpc, &mut member, &identity, msg, &recipients, &hybrid_keys, conv_id.0.as_slice(),
|
||||
).await?;
|
||||
|
||||
quicproquo_sdk::groups::save_mls_state(conv_store, conv_id, &member)?;
|
||||
quicprochat_sdk::groups::save_mls_state(conv_store, conv_id, &member)?;
|
||||
|
||||
let now = quicproquo_sdk::conversation::now_ms();
|
||||
let now = quicprochat_sdk::conversation::now_ms();
|
||||
conv_store.save_message(&StoredMessage {
|
||||
conversation_id: conv_id.clone(),
|
||||
message_id: None,
|
||||
@@ -647,10 +656,10 @@ async fn do_recv(client: &QpqClient, st: &ReplState) -> anyhow::Result<()> {
|
||||
.load_conversation(conv_id)?
|
||||
.ok_or_else(|| anyhow::anyhow!("conversation not found"))?;
|
||||
|
||||
let mut member = quicproquo_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
let mut member = quicprochat_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
let my_pub = identity.public_key_bytes();
|
||||
|
||||
let messages = quicproquo_sdk::messaging::receive_messages(
|
||||
let messages = quicprochat_sdk::messaging::receive_messages(
|
||||
rpc, &mut member, &my_pub, None, conv_id.0.as_slice(), &[],
|
||||
).await?;
|
||||
|
||||
@@ -659,10 +668,10 @@ async fn do_recv(client: &QpqClient, st: &ReplState) -> anyhow::Result<()> {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
quicproquo_sdk::groups::save_mls_state(conv_store, conv_id, &member)?;
|
||||
quicprochat_sdk::groups::save_mls_state(conv_store, conv_id, &member)?;
|
||||
|
||||
for m in &messages {
|
||||
let sender_name = quicproquo_sdk::users::resolve_identity(rpc, &m.sender_key)
|
||||
let sender_name = quicprochat_sdk::users::resolve_identity(rpc, &m.sender_key)
|
||||
.await
|
||||
.ok()
|
||||
.flatten();
|
||||
@@ -670,13 +679,13 @@ async fn do_recv(client: &QpqClient, st: &ReplState) -> anyhow::Result<()> {
|
||||
let sender = sender_name.as_deref().unwrap_or(&sender_hex);
|
||||
|
||||
let body = match &m.message {
|
||||
quicproquo_core::AppMessage::Chat { body, .. } => {
|
||||
quicprochat_core::AppMessage::Chat { body, .. } => {
|
||||
String::from_utf8_lossy(body).to_string()
|
||||
}
|
||||
other => format!("{other:?}"),
|
||||
};
|
||||
|
||||
let now = quicproquo_sdk::conversation::now_ms();
|
||||
let now = quicprochat_sdk::conversation::now_ms();
|
||||
println!("{DIM}[{}]{RESET} {CYAN}{BOLD}{sender}{RESET}: {body}", ts(now));
|
||||
|
||||
conv_store.save_message(&StoredMessage {
|
||||
@@ -772,7 +781,7 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
let identity = st.require_identity()?;
|
||||
let conv_store = client.conversations().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let mut member = GroupMember::new(Arc::clone(&identity));
|
||||
let conv_id = quicproquo_sdk::groups::create_group(conv_store, &mut member, name)?;
|
||||
let conv_id = quicprochat_sdk::groups::create_group(conv_store, &mut member, name)?;
|
||||
st.set_conversation(conv_id, format!("#{name}"));
|
||||
display::print_status(&format!("group #{name} created"));
|
||||
}
|
||||
@@ -788,10 +797,10 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let conv_store = client.conversations().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
let peer_key = quicproquo_sdk::users::resolve_user(rpc, user)
|
||||
let peer_key = quicprochat_sdk::users::resolve_user(rpc, user)
|
||||
.await?
|
||||
.ok_or_else(|| anyhow::anyhow!("user '{user}' not found"))?;
|
||||
let peer_kp = quicproquo_sdk::keys::fetch_key_package(rpc, &peer_key)
|
||||
let peer_kp = quicprochat_sdk::keys::fetch_key_package(rpc, &peer_key)
|
||||
.await?
|
||||
.ok_or_else(|| anyhow::anyhow!("peer has no KeyPackage"))?;
|
||||
|
||||
@@ -799,9 +808,9 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
let conv = conv_store
|
||||
.load_conversation(&conv_id)?
|
||||
.ok_or_else(|| anyhow::anyhow!("group '{group}' not found"))?;
|
||||
let mut member = quicproquo_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
let mut member = quicprochat_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
|
||||
quicproquo_sdk::groups::invite_to_group(
|
||||
quicprochat_sdk::groups::invite_to_group(
|
||||
rpc, conv_store, &mut member, &identity,
|
||||
&conv_id, &peer_key, &peer_kp, None, None,
|
||||
).await?;
|
||||
@@ -816,8 +825,8 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
let conv = conv_store
|
||||
.load_conversation(&conv_id)?
|
||||
.ok_or_else(|| anyhow::anyhow!("conversation not found"))?;
|
||||
let mut member = quicproquo_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
quicproquo_sdk::groups::leave_group(rpc, conv_store, &mut member, &conv_id).await?;
|
||||
let mut member = quicprochat_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
quicprochat_sdk::groups::leave_group(rpc, conv_store, &mut member, &conv_id).await?;
|
||||
display::print_status("left group");
|
||||
}
|
||||
|
||||
@@ -834,7 +843,7 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
for key in &conv.member_keys {
|
||||
let short = hex::encode(&key[..4.min(key.len())]);
|
||||
if let Ok(rpc) = client.rpc() {
|
||||
if let Ok(Some(n)) = quicproquo_sdk::users::resolve_identity(rpc, key).await {
|
||||
if let Ok(Some(n)) = quicprochat_sdk::users::resolve_identity(rpc, key).await {
|
||||
println!(" @{n} {DIM}({short}){RESET}");
|
||||
continue;
|
||||
}
|
||||
@@ -855,14 +864,14 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let conv_store = client.conversations().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
let peer_key = quicproquo_sdk::users::resolve_user(rpc, user)
|
||||
let peer_key = quicprochat_sdk::users::resolve_user(rpc, user)
|
||||
.await?
|
||||
.ok_or_else(|| anyhow::anyhow!("user '{user}' not found"))?;
|
||||
let conv = conv_store
|
||||
.load_conversation(&conv_id)?
|
||||
.ok_or_else(|| anyhow::anyhow!("conversation not found"))?;
|
||||
let mut member = quicproquo_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
quicproquo_sdk::groups::remove_member_from_group(
|
||||
let mut member = quicprochat_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
quicprochat_sdk::groups::remove_member_from_group(
|
||||
rpc, conv_store, &mut member, &conv_id, &peer_key,
|
||||
).await?;
|
||||
display::print_status(&format!("removed @{user} from group"));
|
||||
@@ -877,7 +886,7 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
let conv_id = st.require_conversation()?.clone();
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let conv_store = client.conversations().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
quicproquo_sdk::groups::set_group_metadata(
|
||||
quicprochat_sdk::groups::set_group_metadata(
|
||||
rpc, conv_store, &conv_id, new_name, "", &[],
|
||||
).await?;
|
||||
st.set_conversation(conv_id, format!("#{new_name}"));
|
||||
@@ -892,8 +901,8 @@ async fn do_group(client: &mut QpqClient, st: &mut ReplState, args: &str) -> any
|
||||
let conv = conv_store
|
||||
.load_conversation(&conv_id)?
|
||||
.ok_or_else(|| anyhow::anyhow!("conversation not found"))?;
|
||||
let mut member = quicproquo_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
quicproquo_sdk::groups::rotate_group_keys(rpc, conv_store, &mut member, &conv_id).await?;
|
||||
let mut member = quicprochat_sdk::groups::restore_mls_state(&conv, &identity)?;
|
||||
quicprochat_sdk::groups::rotate_group_keys(rpc, conv_store, &mut member, &conv_id).await?;
|
||||
display::print_status("group keys rotated");
|
||||
}
|
||||
|
||||
@@ -911,7 +920,7 @@ async fn do_devices(client: &mut QpqClient, args: &str) -> anyhow::Result<()> {
|
||||
match sub {
|
||||
"list" => {
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let devices = quicproquo_sdk::devices::list_devices(rpc).await?;
|
||||
let devices = quicprochat_sdk::devices::list_devices(rpc).await?;
|
||||
if devices.is_empty() {
|
||||
display::print_status("no devices registered");
|
||||
} else {
|
||||
@@ -941,7 +950,7 @@ async fn do_devices(client: &mut QpqClient, args: &str) -> anyhow::Result<()> {
|
||||
let mut dev_id = vec![0u8; 16];
|
||||
rand::rngs::OsRng.fill_bytes(&mut dev_id);
|
||||
let was_new =
|
||||
quicproquo_sdk::devices::register_device(rpc, &dev_id, name).await?;
|
||||
quicprochat_sdk::devices::register_device(rpc, &dev_id, name).await?;
|
||||
if was_new {
|
||||
display::print_status(&format!(
|
||||
"device registered: {name} (id: {})",
|
||||
@@ -961,7 +970,7 @@ async fn do_devices(client: &mut QpqClient, args: &str) -> anyhow::Result<()> {
|
||||
let id_bytes = hex::decode(id_hex)
|
||||
.map_err(|e| anyhow::anyhow!("invalid device_id hex: {e}"))?;
|
||||
let rpc = client.rpc().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let revoked = quicproquo_sdk::devices::revoke_device(rpc, &id_bytes).await?;
|
||||
let revoked = quicprochat_sdk::devices::revoke_device(rpc, &id_bytes).await?;
|
||||
if revoked {
|
||||
display::print_status(&format!("device revoked: {id_hex}"));
|
||||
} else {
|
||||
@@ -990,6 +999,9 @@ pub async fn run_v2_repl(
|
||||
// Connect to server.
|
||||
client.connect().await.context("connect to server")?;
|
||||
|
||||
// Start heartbeat for proactive dead-connection detection.
|
||||
client.start_heartbeat();
|
||||
|
||||
// Background event listener.
|
||||
let rx = client.subscribe();
|
||||
spawn_event_listener(rx);
|
||||
@@ -1004,8 +1016,8 @@ pub async fn run_v2_repl(
|
||||
// Load identity from state.
|
||||
let state_path = client.config_state_path();
|
||||
if state_path.exists() {
|
||||
if let Ok(stored) = quicproquo_sdk::state::load_state(&state_path, Some(pass))
|
||||
.or_else(|_| quicproquo_sdk::state::load_state(&state_path, None))
|
||||
if let Ok(stored) = quicprochat_sdk::state::load_state(&state_path, Some(pass))
|
||||
.or_else(|_| quicprochat_sdk::state::load_state(&state_path, None))
|
||||
{
|
||||
let kp = IdentityKeypair::from_seed(stored.identity_seed);
|
||||
st.identity = Some(Arc::new(kp));
|
||||
@@ -1016,7 +1028,7 @@ pub async fn run_v2_repl(
|
||||
}
|
||||
}
|
||||
|
||||
println!("\n{BOLD}quicproquo v2 REPL{RESET}");
|
||||
println!("\n{BOLD}quicprochat v2 REPL{RESET}");
|
||||
println!("{DIM}Type /help for commands, /quit to exit.{RESET}\n");
|
||||
if let Some(u) = client.username() {
|
||||
display::print_status(&format!("authenticated as {u}"));
|
||||
@@ -1,4 +1,4 @@
|
||||
//! Full-screen Ratatui TUI for quicproquo v2, driven by the SDK event system.
|
||||
//! Full-screen Ratatui TUI for quicprochat v2, driven by the SDK event system.
|
||||
//!
|
||||
//! Layout:
|
||||
//! +-- Conversations -+-- Messages ------------------------------+
|
||||
@@ -20,6 +20,9 @@
|
||||
//! Ctrl+C / Ctrl+Q -- quit
|
||||
//!
|
||||
//! Feature gate: requires both `v2` and `tui` features.
|
||||
//!
|
||||
//! **Note:** Message display is currently local-only. Use the REPL client for
|
||||
//! end-to-end encrypted delivery. See `quicprochat-sdk::messaging` for the full pipeline.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -38,9 +41,9 @@ use ratatui::{
|
||||
};
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
use quicproquo_sdk::client::QpqClient;
|
||||
use quicproquo_sdk::conversation::ConversationStore;
|
||||
use quicproquo_sdk::events::ClientEvent;
|
||||
use quicprochat_sdk::client::{ConnectionState, QpqClient};
|
||||
use quicprochat_sdk::conversation::ConversationStore;
|
||||
use quicprochat_sdk::events::ClientEvent;
|
||||
|
||||
// ── Data Types ──────────────────────────────────────────────────────────────
|
||||
|
||||
@@ -84,8 +87,8 @@ pub struct TuiApp {
|
||||
focus: Focus,
|
||||
/// Notification line (shown briefly, e.g. "Message sent", "Error: ...").
|
||||
notification: Option<String>,
|
||||
/// Whether the client is currently connected.
|
||||
connected: bool,
|
||||
/// Current connection state.
|
||||
conn_state: quicprochat_sdk::client::ConnectionState,
|
||||
/// Current MLS epoch for the active conversation (if available).
|
||||
mls_epoch: Option<u64>,
|
||||
}
|
||||
@@ -105,7 +108,7 @@ impl TuiApp {
|
||||
server_addr: server_addr.to_string(),
|
||||
focus: Focus::Input,
|
||||
notification: None,
|
||||
connected: false,
|
||||
conn_state: ConnectionState::Disconnected,
|
||||
mls_epoch: None,
|
||||
}
|
||||
}
|
||||
@@ -146,7 +149,15 @@ impl TuiApp {
|
||||
}
|
||||
|
||||
fn update_status(&mut self) {
|
||||
let conn_indicator = if self.connected { "Online" } else { "Offline" };
|
||||
let conn_indicator = match self.conn_state {
|
||||
ConnectionState::Connected => "Connected",
|
||||
ConnectionState::Reconnecting { attempt } => {
|
||||
// We can't use format! in a match arm and return &str,
|
||||
// so we'll handle this below.
|
||||
return self.update_status_reconnecting(attempt);
|
||||
}
|
||||
ConnectionState::Disconnected => "Offline",
|
||||
};
|
||||
let user = self
|
||||
.username
|
||||
.as_deref()
|
||||
@@ -164,6 +175,25 @@ impl TuiApp {
|
||||
if conv_count == 1 { "" } else { "s" }
|
||||
);
|
||||
}
|
||||
|
||||
fn update_status_reconnecting(&mut self, attempt: u32) {
|
||||
let user = self
|
||||
.username
|
||||
.as_deref()
|
||||
.unwrap_or("not logged in");
|
||||
let conv_count = self.conversations.len();
|
||||
let epoch_str = match self.mls_epoch {
|
||||
Some(e) => format!("epoch {e}"),
|
||||
None => "epoch --".to_string(),
|
||||
};
|
||||
self.status_line = format!(
|
||||
"Reconnecting... (attempt {attempt}) | {} | {} | {} conversation{} | MLS {epoch_str}",
|
||||
self.server_addr,
|
||||
user,
|
||||
conv_count,
|
||||
if conv_count == 1 { "" } else { "s" }
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ── Terminal Drop Guard ─────────────────────────────────────────────────────
|
||||
@@ -195,7 +225,7 @@ pub async fn run_v2_tui(client: &mut QpqClient) -> anyhow::Result<()> {
|
||||
"disconnected"
|
||||
};
|
||||
let mut app = TuiApp::new(server_addr);
|
||||
app.connected = client.is_connected();
|
||||
app.conn_state = client.connection_state();
|
||||
|
||||
// Populate initial state from client.
|
||||
if let Some(name) = client.username() {
|
||||
@@ -222,6 +252,9 @@ pub async fn run_v2_tui(client: &mut QpqClient) -> anyhow::Result<()> {
|
||||
|
||||
app.update_status();
|
||||
|
||||
// Start heartbeat for proactive dead-connection detection.
|
||||
client.start_heartbeat();
|
||||
|
||||
// Subscribe to SDK events.
|
||||
let mut event_rx = client.subscribe();
|
||||
|
||||
@@ -275,15 +308,20 @@ pub async fn run_v2_tui(client: &mut QpqClient) -> anyhow::Result<()> {
|
||||
fn handle_sdk_event(app: &mut TuiApp, event: ClientEvent) {
|
||||
match event {
|
||||
ClientEvent::Connected => {
|
||||
app.connected = true;
|
||||
app.conn_state = ConnectionState::Connected;
|
||||
app.notification = Some("Connected to server".to_string());
|
||||
app.update_status();
|
||||
}
|
||||
ClientEvent::Disconnected { reason } => {
|
||||
app.connected = false;
|
||||
app.conn_state = ConnectionState::Disconnected;
|
||||
app.notification = Some(format!("Disconnected: {reason}"));
|
||||
app.update_status();
|
||||
}
|
||||
ClientEvent::Reconnecting { attempt } => {
|
||||
app.conn_state = ConnectionState::Reconnecting { attempt };
|
||||
app.notification = Some(format!("Reconnecting... (attempt {attempt})"));
|
||||
app.update_status();
|
||||
}
|
||||
ClientEvent::Registered { username } => {
|
||||
app.notification = Some(format!("Registered as {username}"));
|
||||
}
|
||||
@@ -535,9 +573,11 @@ async fn handle_input(app: &mut TuiApp, client: &mut QpqClient, text: &str) {
|
||||
// Snap to bottom.
|
||||
app.scroll_offset = 0;
|
||||
|
||||
// TODO: actually send via SDK when the send pipeline is wired up.
|
||||
// For now, emit a notification.
|
||||
app.notification = Some(format!("Sent: {text}"));
|
||||
// NOTE: TUI message display is local-only. The full MLS encryption
|
||||
// pipeline (sealed sender + hybrid wrap + enqueue) is implemented in
|
||||
// quicprochat-sdk/src/messaging.rs but is not yet wired into the TUI.
|
||||
// Use the REPL client (`qpc repl`) for end-to-end message delivery.
|
||||
app.notification = Some("Message queued locally (TUI send not yet wired to SDK)".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -824,7 +864,7 @@ fn draw_input(frame: &mut Frame, app: &TuiApp, area: Rect) {
|
||||
frame.render_widget(input_text, area);
|
||||
|
||||
// Position cursor in the input area.
|
||||
if !app.input.is_empty() || true {
|
||||
if !app.input.is_empty() {
|
||||
let cursor_x = area.x + 1 + app.input_cursor as u16;
|
||||
let cursor_y = area.y + 1;
|
||||
if cursor_x < area.x + area.width - 1 {
|
||||
@@ -834,12 +874,11 @@ fn draw_input(frame: &mut Frame, app: &TuiApp, area: Rect) {
|
||||
}
|
||||
|
||||
fn draw_status(frame: &mut Frame, app: &TuiApp, area: Rect) {
|
||||
let conn_color = if app.connected {
|
||||
Color::Green
|
||||
} else {
|
||||
Color::Red
|
||||
let (conn_color, conn_indicator) = match app.conn_state {
|
||||
ConnectionState::Connected => (Color::Green, " ON "),
|
||||
ConnectionState::Reconnecting { .. } => (Color::Yellow, " ... "),
|
||||
ConnectionState::Disconnected => (Color::Red, " OFF "),
|
||||
};
|
||||
let conn_indicator = if app.connected { " ON " } else { " OFF " };
|
||||
|
||||
let spans = vec![
|
||||
Span::styled(
|
||||
@@ -859,7 +898,7 @@ fn draw_status(frame: &mut Frame, app: &TuiApp, area: Rect) {
|
||||
fn draw_help(frame: &mut Frame, area: Rect) {
|
||||
let help_text = vec![
|
||||
Line::from(Span::styled(
|
||||
" quicproquo TUI -- Help",
|
||||
" quicprochat TUI -- Help",
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
@@ -954,7 +993,7 @@ fn load_messages_for_selected(app: &mut TuiApp, client: &QpqClient) {
|
||||
};
|
||||
|
||||
let sdk_conv_id =
|
||||
quicproquo_sdk::conversation::ConversationId::from_slice(&conv_id);
|
||||
quicprochat_sdk::conversation::ConversationId::from_slice(&conv_id);
|
||||
let sdk_conv_id = match sdk_conv_id {
|
||||
Some(id) => id,
|
||||
None => return,
|
||||
@@ -1014,7 +1053,7 @@ mod tests {
|
||||
|
||||
fn make_app() -> TuiApp {
|
||||
let mut app = TuiApp::new("127.0.0.1:7000");
|
||||
app.connected = true;
|
||||
app.conn_state = ConnectionState::Connected;
|
||||
app.username = Some("alice".to_string());
|
||||
app.conversations.push(ConversationItem {
|
||||
id: [1u8; 16],
|
||||
@@ -1062,12 +1101,12 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn status_bar_shows_online() {
|
||||
fn status_bar_shows_connected() {
|
||||
let mut app = TuiApp::new("127.0.0.1:7000");
|
||||
app.connected = true;
|
||||
app.conn_state = ConnectionState::Connected;
|
||||
app.username = Some("alice".to_string());
|
||||
app.update_status();
|
||||
assert!(app.status_line.contains("Online"));
|
||||
assert!(app.status_line.contains("Connected"));
|
||||
assert!(app.status_line.contains("alice"));
|
||||
assert!(app.status_line.contains("MLS epoch --"));
|
||||
}
|
||||
@@ -1075,15 +1114,32 @@ mod tests {
|
||||
#[test]
|
||||
fn status_bar_shows_offline() {
|
||||
let mut app = TuiApp::new("127.0.0.1:7000");
|
||||
app.connected = false;
|
||||
app.conn_state = ConnectionState::Disconnected;
|
||||
app.update_status();
|
||||
assert!(app.status_line.contains("Offline"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn status_bar_shows_reconnecting() {
|
||||
let mut app = TuiApp::new("127.0.0.1:7000");
|
||||
app.conn_state = ConnectionState::Reconnecting { attempt: 2 };
|
||||
app.update_status();
|
||||
assert!(
|
||||
app.status_line.contains("Reconnecting"),
|
||||
"expected Reconnecting in: {}",
|
||||
app.status_line
|
||||
);
|
||||
assert!(
|
||||
app.status_line.contains("attempt 2"),
|
||||
"expected attempt count in: {}",
|
||||
app.status_line
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn status_bar_shows_epoch() {
|
||||
let mut app = TuiApp::new("127.0.0.1:7000");
|
||||
app.connected = true;
|
||||
app.conn_state = ConnectionState::Connected;
|
||||
app.mls_epoch = Some(42);
|
||||
app.update_status();
|
||||
assert!(app.status_line.contains("MLS epoch 42"));
|
||||
@@ -1,17 +1,17 @@
|
||||
//! quicproquo CLI client library.
|
||||
//! quicprochat CLI client library.
|
||||
//!
|
||||
//! # KeyPackage expiry and refresh
|
||||
//!
|
||||
//! KeyPackages are single-use (consumed when someone fetches them for an invite) and the server
|
||||
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `qpq refresh-keypackage`
|
||||
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `qpc refresh-keypackage`
|
||||
//! periodically (e.g. before the server TTL) or after your KeyPackage was consumed:
|
||||
//!
|
||||
//! ```bash
|
||||
//! qpq refresh-keypackage --state qpq-state.bin --server 127.0.0.1:7000
|
||||
//! qpc refresh-keypackage --state qpc-state.bin --server 127.0.0.1:7000
|
||||
//! ```
|
||||
//!
|
||||
//! Use the same `--access-token` (or `QPQ_ACCESS_TOKEN`) as for other authenticated
|
||||
//! commands. See the [running-the-client](https://docs.quicproquo.dev/getting-started/running-the-client)
|
||||
//! commands. See the [running-the-client](https://docs.quicprochat.dev/getting-started/running-the-client)
|
||||
//! docs for details.
|
||||
|
||||
use std::sync::RwLock;
|
||||
@@ -1,4 +1,4 @@
|
||||
//! quicproquo CLI client.
|
||||
//! quicprochat CLI client.
|
||||
|
||||
// ── v2 feature gate: when compiled with --features v2, use the SDK-based CLI.
|
||||
#[cfg(feature = "v2")]
|
||||
@@ -19,21 +19,168 @@ use anyhow::Context;
|
||||
#[cfg(not(feature = "v2"))]
|
||||
use clap::{Parser, Subcommand};
|
||||
#[cfg(not(feature = "v2"))]
|
||||
use quicproquo_client::{
|
||||
use quicprochat_client::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
|
||||
cmd_fetch_key, cmd_health, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register,
|
||||
cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami,
|
||||
init_auth, run_repl, set_insecure_skip_verify, ClientAuth,
|
||||
};
|
||||
#[cfg(all(feature = "tui", not(feature = "v2")))]
|
||||
use quicproquo_client::client::tui::run_tui;
|
||||
use quicprochat_client::client::tui::run_tui;
|
||||
|
||||
// ── Config file loading ──────────────────────────────────────────────────────
|
||||
//
|
||||
// Loads a TOML config file and sets QPQ_* environment variables for values
|
||||
// not already set. This runs BEFORE clap parses, so the natural precedence is:
|
||||
// CLI flags > environment variables > config file > compiled defaults.
|
||||
//
|
||||
// Config file search order:
|
||||
// 1. --config <path> (parsed manually from argv)
|
||||
// 2. $QPC_CONFIG env var
|
||||
// 3. $XDG_CONFIG_HOME/qpc/config.toml (usually ~/.config/qpc/config.toml)
|
||||
// 4. ~/.qpc.toml
|
||||
#[cfg(not(feature = "v2"))]
|
||||
mod client_config {
|
||||
use serde::Deserialize;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Debug, Default, Deserialize)]
|
||||
pub struct ClientFileConfig {
|
||||
pub server: Option<String>,
|
||||
pub server_name: Option<String>,
|
||||
pub ca_cert: Option<String>,
|
||||
pub username: Option<String>,
|
||||
pub password: Option<String>,
|
||||
pub access_token: Option<String>,
|
||||
pub device_id: Option<String>,
|
||||
pub state_password: Option<String>,
|
||||
pub state: Option<String>,
|
||||
pub danger_accept_invalid_certs: Option<bool>,
|
||||
pub no_server: Option<bool>,
|
||||
}
|
||||
|
||||
/// Find and load the config file. Returns the parsed config (or default if
|
||||
/// no file is found).
|
||||
pub fn load_client_config() -> ClientFileConfig {
|
||||
let path = find_config_path();
|
||||
let path = match path {
|
||||
Some(p) if p.exists() => p,
|
||||
_ => return ClientFileConfig::default(),
|
||||
};
|
||||
|
||||
match std::fs::read_to_string(&path) {
|
||||
Ok(contents) => match toml::from_str(&contents) {
|
||||
Ok(cfg) => {
|
||||
eprintln!("Loaded config: {}", path.display());
|
||||
cfg
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Warning: failed to parse {}: {e}", path.display());
|
||||
ClientFileConfig::default()
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
eprintln!("Warning: failed to read {}: {e}", path.display());
|
||||
ClientFileConfig::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_config_path() -> Option<PathBuf> {
|
||||
// 1. --config <path> from argv (before clap parses).
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
for i in 0..args.len().saturating_sub(1) {
|
||||
if args[i] == "--config" || args[i] == "-c" {
|
||||
return Some(PathBuf::from(&args[i + 1]));
|
||||
}
|
||||
}
|
||||
|
||||
// 2. $QPC_CONFIG env var.
|
||||
if let Ok(p) = std::env::var("QPC_CONFIG") {
|
||||
return Some(PathBuf::from(p));
|
||||
}
|
||||
|
||||
// 3. $XDG_CONFIG_HOME/qpc/config.toml
|
||||
let xdg = std::env::var("XDG_CONFIG_HOME")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| {
|
||||
let home = std::env::var("HOME").unwrap_or_else(|_| ".".to_string());
|
||||
PathBuf::from(home).join(".config")
|
||||
});
|
||||
let xdg_path = xdg.join("qpc").join("config.toml");
|
||||
if xdg_path.exists() {
|
||||
return Some(xdg_path);
|
||||
}
|
||||
|
||||
// 4. ~/.qpc.toml
|
||||
if let Ok(home) = std::env::var("HOME") {
|
||||
let home_path = PathBuf::from(home).join(".qpc.toml");
|
||||
if home_path.exists() {
|
||||
return Some(home_path);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Set QPQ_* env vars from config values, but only if they're not already set.
|
||||
pub fn apply_config_to_env(cfg: &ClientFileConfig) {
|
||||
fn set_if_empty(key: &str, val: &str) {
|
||||
if std::env::var(key).is_err() {
|
||||
std::env::set_var(key, val);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref v) = cfg.server {
|
||||
set_if_empty("QPQ_SERVER", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.server_name {
|
||||
set_if_empty("QPQ_SERVER_NAME", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.ca_cert {
|
||||
set_if_empty("QPQ_CA_CERT", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.username {
|
||||
set_if_empty("QPQ_USERNAME", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.password {
|
||||
set_if_empty("QPQ_PASSWORD", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.access_token {
|
||||
set_if_empty("QPQ_ACCESS_TOKEN", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.device_id {
|
||||
set_if_empty("QPQ_DEVICE_ID", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.state_password {
|
||||
set_if_empty("QPQ_STATE_PASSWORD", v);
|
||||
}
|
||||
if let Some(ref v) = cfg.state {
|
||||
set_if_empty("QPQ_STATE", v);
|
||||
}
|
||||
if let Some(v) = cfg.danger_accept_invalid_certs {
|
||||
if v {
|
||||
set_if_empty("QPQ_DANGER_ACCEPT_INVALID_CERTS", "true");
|
||||
}
|
||||
}
|
||||
if let Some(v) = cfg.no_server {
|
||||
if v {
|
||||
set_if_empty("QPQ_NO_SERVER", "true");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
#[cfg(not(feature = "v2"))]
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(name = "qpq", about = "quicproquo CLI client", version)]
|
||||
#[command(name = "qpc", about = "quicprochat CLI client", version)]
|
||||
struct Args {
|
||||
/// Path to a TOML config file (auto-detected from ~/.config/qpc/config.toml or ~/.qpc.toml).
|
||||
#[arg(long, short = 'c', global = true, env = "QPC_CONFIG")]
|
||||
config: Option<PathBuf>,
|
||||
|
||||
/// Path to the server's TLS certificate (self-signed by default).
|
||||
#[arg(
|
||||
long,
|
||||
@@ -82,7 +229,7 @@ struct Args {
|
||||
|
||||
// ── Default-repl args (used when no subcommand is given) ─────────
|
||||
/// State file path (identity + MLS state). Used when running the default REPL.
|
||||
#[arg(long, default_value = "qpq-state.bin", env = "QPQ_STATE")]
|
||||
#[arg(long, default_value = "qpc-state.bin", env = "QPQ_STATE")]
|
||||
state: PathBuf,
|
||||
|
||||
/// Server address (host:port). Used when running the default REPL.
|
||||
@@ -97,7 +244,7 @@ struct Args {
|
||||
#[arg(long, env = "QPQ_PASSWORD")]
|
||||
password: Option<String>,
|
||||
|
||||
/// Do not auto-start a local qpq-server (useful when connecting to a remote server).
|
||||
/// Do not auto-start a local qpc-server (useful when connecting to a remote server).
|
||||
#[arg(long, env = "QPQ_NO_SERVER")]
|
||||
no_server: bool,
|
||||
|
||||
@@ -144,7 +291,7 @@ enum Command {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -203,7 +350,7 @@ enum Command {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -219,7 +366,7 @@ enum Command {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -234,7 +381,7 @@ enum Command {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -252,7 +399,7 @@ enum Command {
|
||||
Invite {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -267,7 +414,7 @@ enum Command {
|
||||
Join {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -279,7 +426,7 @@ enum Command {
|
||||
Send {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -300,7 +447,7 @@ enum Command {
|
||||
Recv {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -321,7 +468,7 @@ enum Command {
|
||||
Repl {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -333,7 +480,7 @@ enum Command {
|
||||
/// OPAQUE password (prompted securely if --username is set but --password is not).
|
||||
#[arg(long, env = "QPQ_PASSWORD")]
|
||||
password: Option<String>,
|
||||
/// Do not auto-start a local qpq-server.
|
||||
/// Do not auto-start a local qpc-server.
|
||||
#[arg(long, env = "QPQ_NO_SERVER")]
|
||||
no_server: bool,
|
||||
},
|
||||
@@ -344,7 +491,7 @@ enum Command {
|
||||
Tui {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -358,12 +505,12 @@ enum Command {
|
||||
password: Option<String>,
|
||||
},
|
||||
|
||||
/// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
|
||||
/// Interactive 1:1 chat: type to send, incoming messages printed as \[peer\] msg. Ctrl+D to exit.
|
||||
/// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
|
||||
Chat {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "qpq-state.bin",
|
||||
default_value = "qpc-state.bin",
|
||||
env = "QPQ_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
@@ -380,18 +527,18 @@ enum Command {
|
||||
/// Export a conversation's message history to an encrypted, tamper-evident transcript file.
|
||||
///
|
||||
/// The output file uses Argon2id + ChaCha20-Poly1305 encryption with a SHA-256 hash chain
|
||||
/// linking every record. Use `qpq export verify` to check chain integrity without decrypting.
|
||||
/// linking every record. Use `qpc export verify` to check chain integrity without decrypting.
|
||||
Export {
|
||||
/// Path to the conversation database (.convdb file).
|
||||
#[arg(long, default_value = "qpq-convdb.sqlite", env = "QPQ_CONV_DB")]
|
||||
#[arg(long, default_value = "qpc-convdb.sqlite", env = "QPQ_CONV_DB")]
|
||||
conv_db: PathBuf,
|
||||
|
||||
/// Conversation ID to export (32 hex chars = 16 bytes).
|
||||
#[arg(long)]
|
||||
conv_id: String,
|
||||
|
||||
/// Output path for the .qpqt transcript file.
|
||||
#[arg(long, default_value = "transcript.qpqt")]
|
||||
/// Output path for the .qpct transcript file.
|
||||
#[arg(long, default_value = "transcript.qpct")]
|
||||
output: PathBuf,
|
||||
|
||||
/// Password used to encrypt the transcript (separate from the state/DB password).
|
||||
@@ -405,7 +552,7 @@ enum Command {
|
||||
|
||||
/// Verify the hash-chain integrity of a transcript file without decrypting content.
|
||||
ExportVerify {
|
||||
/// Path to the .qpqt transcript file to verify.
|
||||
/// Path to the .qpct transcript file to verify.
|
||||
#[arg(long)]
|
||||
input: PathBuf,
|
||||
},
|
||||
@@ -418,7 +565,7 @@ enum Command {
|
||||
playbook: PathBuf,
|
||||
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(long, default_value = "qpq-state.bin", env = "QPQ_STATE")]
|
||||
#[arg(long, default_value = "qpc-state.bin", env = "QPQ_STATE")]
|
||||
state: PathBuf,
|
||||
|
||||
/// Server address (host:port).
|
||||
@@ -441,14 +588,14 @@ enum Command {
|
||||
|
||||
// ── Helpers ───────────────────────────────────────────────────────────────────
|
||||
#[cfg(not(feature = "v2"))]
|
||||
/// Returns `qpq-{username}.bin` when `state` is still at the default
|
||||
/// (`qpq-state.bin`) and a username has been provided. Otherwise returns
|
||||
/// `state` unchanged. This lets `qpq --username alice` automatically isolate
|
||||
/// Returns `qpc-{username}.bin` when `state` is still at the default
|
||||
/// (`qpc-state.bin`) and a username has been provided. Otherwise returns
|
||||
/// `state` unchanged. This lets `qpc --username alice` automatically isolate
|
||||
/// Alice's state without requiring a manual `--state` flag.
|
||||
fn derive_state_path(state: PathBuf, username: Option<&str>) -> PathBuf {
|
||||
if state == Path::new("qpq-state.bin") {
|
||||
if state == Path::new("qpc-state.bin") {
|
||||
if let Some(uname) = username {
|
||||
return PathBuf::from(format!("qpq-{uname}.bin"));
|
||||
return PathBuf::from(format!("qpc-{uname}.bin"));
|
||||
}
|
||||
}
|
||||
state
|
||||
@@ -470,24 +617,24 @@ async fn run_playbook(
|
||||
device_id: Option<&str>,
|
||||
extra_vars: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
use quicproquo_client::PlaybookRunner;
|
||||
use quicprochat_client::PlaybookRunner;
|
||||
|
||||
let insecure = std::env::var("QPQ_DANGER_ACCEPT_INVALID_CERTS").is_ok();
|
||||
|
||||
// Connect to server.
|
||||
let client =
|
||||
quicproquo_client::connect_node_opt(server, ca_cert, server_name, insecure)
|
||||
quicprochat_client::connect_node_opt(server, ca_cert, server_name, insecure)
|
||||
.await
|
||||
.context("connect to server")?;
|
||||
|
||||
// Build session state.
|
||||
let mut session = quicproquo_client::client::session::SessionState::load(state, state_pw)
|
||||
let mut session = quicprochat_client::client::session::SessionState::load(state, state_pw)
|
||||
.context("load session state")?;
|
||||
|
||||
// If username/password provided, do OPAQUE login.
|
||||
if let (Some(uname), Some(pw)) = (username, password) {
|
||||
if let Err(e) =
|
||||
quicproquo_client::opaque_login(&client, uname, pw, &session.identity.public_key_bytes()).await
|
||||
quicprochat_client::opaque_login(&client, uname, pw, &session.identity.public_key_bytes()).await
|
||||
{
|
||||
eprintln!("OPAQUE login failed: {e:#}");
|
||||
}
|
||||
@@ -540,6 +687,13 @@ async fn main() -> anyhow::Result<()> {
|
||||
)
|
||||
.init();
|
||||
|
||||
// Load config file and apply to env BEFORE clap parses (so config values
|
||||
// act as defaults that env vars and CLI flags can override).
|
||||
{
|
||||
let cfg = client_config::load_client_config();
|
||||
client_config::apply_config_to_env(&cfg);
|
||||
}
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
if args.danger_accept_invalid_certs {
|
||||
@@ -1,7 +1,7 @@
|
||||
//! v2 CLI command implementations — thin wrappers over the SDK.
|
||||
|
||||
use quicproquo_sdk::client::QpqClient;
|
||||
use quicproquo_sdk::error::SdkError;
|
||||
use quicprochat_sdk::client::QpqClient;
|
||||
use quicprochat_sdk::error::SdkError;
|
||||
|
||||
/// Register a new user account via OPAQUE.
|
||||
pub async fn cmd_register_user(
|
||||
@@ -61,7 +61,7 @@ pub async fn cmd_health(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
/// Resolve a username to its identity key.
|
||||
pub async fn cmd_resolve(client: &mut QpqClient, username: &str) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
match quicproquo_sdk::users::resolve_user(rpc, username).await? {
|
||||
match quicprochat_sdk::users::resolve_user(rpc, username).await? {
|
||||
Some(key) => {
|
||||
println!("{username} -> {}", hex::encode(&key));
|
||||
}
|
||||
@@ -75,7 +75,7 @@ pub async fn cmd_resolve(client: &mut QpqClient, username: &str) -> Result<(), S
|
||||
/// List registered devices.
|
||||
pub async fn cmd_devices_list(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let devices = quicproquo_sdk::devices::list_devices(rpc).await?;
|
||||
let devices = quicprochat_sdk::devices::list_devices(rpc).await?;
|
||||
if devices.is_empty() {
|
||||
println!("no devices registered");
|
||||
} else {
|
||||
@@ -101,7 +101,7 @@ pub async fn cmd_devices_register(
|
||||
let rpc = client.rpc()?;
|
||||
let id_bytes = hex::decode(device_id)
|
||||
.map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
|
||||
let was_new = quicproquo_sdk::devices::register_device(rpc, &id_bytes, device_name).await?;
|
||||
let was_new = quicprochat_sdk::devices::register_device(rpc, &id_bytes, device_name).await?;
|
||||
if was_new {
|
||||
println!("device registered: {device_name}");
|
||||
} else {
|
||||
@@ -118,7 +118,7 @@ pub async fn cmd_devices_revoke(
|
||||
let rpc = client.rpc()?;
|
||||
let id_bytes = hex::decode(device_id)
|
||||
.map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
|
||||
let revoked = quicproquo_sdk::devices::revoke_device(rpc, &id_bytes).await?;
|
||||
let revoked = quicprochat_sdk::devices::revoke_device(rpc, &id_bytes).await?;
|
||||
if revoked {
|
||||
println!("device revoked: {device_id}");
|
||||
} else {
|
||||
@@ -131,12 +131,12 @@ pub async fn cmd_devices_revoke(
|
||||
pub async fn cmd_recovery_setup(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
// Load identity seed from state file.
|
||||
let state_path = client.config_state_path();
|
||||
let stored = quicproquo_sdk::state::load_state(&state_path, None)
|
||||
let stored = quicprochat_sdk::state::load_state(&state_path, None)
|
||||
.map_err(|e| SdkError::Crypto(format!("load identity for recovery: {e}")))?;
|
||||
|
||||
let rpc = client.rpc()?;
|
||||
let codes =
|
||||
quicproquo_sdk::recovery::setup_recovery(rpc, &stored.identity_seed, &[]).await?;
|
||||
quicprochat_sdk::recovery::setup_recovery(rpc, &stored.identity_seed, &[]).await?;
|
||||
|
||||
println!("=== RECOVERY CODES ===");
|
||||
println!("Save these codes securely. They will NOT be shown again.");
|
||||
@@ -155,7 +155,7 @@ pub async fn cmd_recovery_setup(client: &mut QpqClient) -> Result<(), SdkError>
|
||||
/// List pending outbox entries.
|
||||
pub fn cmd_outbox_list(client: &QpqClient) -> Result<(), SdkError> {
|
||||
let store = client.conversations()?;
|
||||
let entries = quicproquo_sdk::outbox::list_pending(store)?;
|
||||
let entries = quicprochat_sdk::outbox::list_pending(store)?;
|
||||
if entries.is_empty() {
|
||||
println!("outbox is empty — no pending messages");
|
||||
} else {
|
||||
@@ -178,7 +178,7 @@ pub fn cmd_outbox_list(client: &QpqClient) -> Result<(), SdkError> {
|
||||
pub async fn cmd_outbox_retry(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let store = client.conversations()?;
|
||||
let (sent, failed) = quicproquo_sdk::outbox::flush_outbox(rpc, store).await?;
|
||||
let (sent, failed) = quicprochat_sdk::outbox::flush_outbox(rpc, store).await?;
|
||||
println!("outbox flush: {sent} sent, {failed} permanently failed");
|
||||
Ok(())
|
||||
}
|
||||
@@ -186,7 +186,7 @@ pub async fn cmd_outbox_retry(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
/// Clear permanently failed outbox entries.
|
||||
pub fn cmd_outbox_clear(client: &QpqClient) -> Result<(), SdkError> {
|
||||
let store = client.conversations()?;
|
||||
let cleared = quicproquo_sdk::outbox::clear_failed(store)?;
|
||||
let cleared = quicprochat_sdk::outbox::clear_failed(store)?;
|
||||
println!("cleared {cleared} failed outbox entries");
|
||||
Ok(())
|
||||
}
|
||||
@@ -198,10 +198,10 @@ pub async fn cmd_recovery_restore(
|
||||
) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let (identity_seed, conversation_ids) =
|
||||
quicproquo_sdk::recovery::recover_account(rpc, code).await?;
|
||||
quicprochat_sdk::recovery::recover_account(rpc, code).await?;
|
||||
|
||||
// Restore identity.
|
||||
let keypair = quicproquo_core::IdentityKeypair::from_seed(identity_seed);
|
||||
let keypair = quicprochat_core::IdentityKeypair::from_seed(identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
|
||||
println!("account recovered successfully");
|
||||
@@ -214,14 +214,14 @@ pub async fn cmd_recovery_restore(
|
||||
}
|
||||
|
||||
// Save recovered state.
|
||||
let state = quicproquo_sdk::state::StoredState {
|
||||
let state = quicprochat_sdk::state::StoredState {
|
||||
identity_seed,
|
||||
group: None,
|
||||
hybrid_key: None,
|
||||
member_keys: Vec::new(),
|
||||
};
|
||||
let state_path = client.config_state_path();
|
||||
quicproquo_sdk::state::save_state(&state_path, &state, None)?;
|
||||
quicprochat_sdk::state::save_state(&state_path, &state, None)?;
|
||||
println!("state saved to {}", state_path.display());
|
||||
|
||||
Ok(())
|
||||
@@ -1,4 +1,4 @@
|
||||
//! v2 CLI entry point — thin shell over `quicproquo_sdk::QpqClient`.
|
||||
//! v2 CLI entry point — thin shell over `quicprochat_sdk::QpqClient`.
|
||||
//!
|
||||
//! Activated via `--features v2`. Replaces the v1 Cap'n Proto RPC main
|
||||
//! with a simplified command surface backed by the SDK.
|
||||
@@ -10,15 +10,15 @@ use std::time::Duration;
|
||||
use anyhow::Context;
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use quicproquo_sdk::client::QpqClient;
|
||||
use quicproquo_sdk::config::ClientConfig;
|
||||
use quicprochat_sdk::client::QpqClient;
|
||||
use quicprochat_sdk::config::ClientConfig;
|
||||
|
||||
use crate::v2_commands;
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(name = "qpq", about = "quicproquo CLI client (v2)", version)]
|
||||
#[command(name = "qpc", about = "quicprochat CLI client (v2)", version)]
|
||||
struct Args {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, global = true, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
|
||||
@@ -37,7 +37,7 @@ struct Args {
|
||||
db_password: Option<String>,
|
||||
|
||||
/// Path to the client state file (identity key, MLS state).
|
||||
#[arg(long, global = true, default_value = "qpq-state.bin", env = "QPQ_STATE")]
|
||||
#[arg(long, global = true, default_value = "qpc-state.bin", env = "QPQ_STATE")]
|
||||
state: PathBuf,
|
||||
|
||||
/// DANGER: Skip TLS certificate verification. Development only.
|
||||
@@ -48,7 +48,7 @@ struct Args {
|
||||
)]
|
||||
danger_accept_invalid_certs: bool,
|
||||
|
||||
/// Do not auto-start a local qpq-server.
|
||||
/// Do not auto-start a local qpc-server.
|
||||
#[arg(long, global = true, env = "QPQ_NO_SERVER")]
|
||||
no_server: bool,
|
||||
|
||||
@@ -210,17 +210,17 @@ impl Drop for ServerGuard {
|
||||
}
|
||||
}
|
||||
|
||||
/// Find the `qpq-server` binary: same directory as current exe, then PATH.
|
||||
/// Find the `qpc-server` binary: same directory as current exe, then PATH.
|
||||
fn find_server_binary() -> Option<PathBuf> {
|
||||
if let Ok(exe) = std::env::current_exe() {
|
||||
let sibling = exe.with_file_name("qpq-server");
|
||||
let sibling = exe.with_file_name("qpc-server");
|
||||
if sibling.exists() {
|
||||
return Some(sibling);
|
||||
}
|
||||
}
|
||||
std::env::var_os("PATH").and_then(|paths| {
|
||||
std::env::split_paths(&paths)
|
||||
.map(|dir| dir.join("qpq-server"))
|
||||
.map(|dir| dir.join("qpc-server"))
|
||||
.find(|p| p.exists())
|
||||
})
|
||||
}
|
||||
@@ -241,7 +241,7 @@ async fn probe_server(server_addr: &str) -> bool {
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// Start a local qpq-server if one isn't already running.
|
||||
/// Start a local qpc-server if one isn't already running.
|
||||
/// Returns a guard that kills the child on drop (if we started one).
|
||||
async fn ensure_server_running(
|
||||
server_addr: &str,
|
||||
@@ -258,8 +258,8 @@ async fn ensure_server_running(
|
||||
|
||||
let binary = find_server_binary().ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"server at {server_addr} is not reachable and qpq-server binary not found; \
|
||||
start a server manually or install qpq-server"
|
||||
"server at {server_addr} is not reachable and qpc-server binary not found; \
|
||||
start a server manually or install qpc-server"
|
||||
)
|
||||
})?;
|
||||
|
||||
@@ -300,7 +300,7 @@ async fn ensure_server_running(
|
||||
|
||||
if start.elapsed() > max_wait {
|
||||
anyhow::bail!(
|
||||
"auto-started qpq-server but it did not become ready within {max_wait:?}"
|
||||
"auto-started qpc-server but it did not become ready within {max_wait:?}"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -336,9 +336,9 @@ async fn connect_client(args: &Args) -> anyhow::Result<QpqClient> {
|
||||
|
||||
// Try loading identity from state file.
|
||||
if args.state.exists() {
|
||||
match quicproquo_sdk::state::load_state(&args.state, args.db_password.as_deref()) {
|
||||
match quicprochat_sdk::state::load_state(&args.state, args.db_password.as_deref()) {
|
||||
Ok(stored) => {
|
||||
let keypair = quicproquo_core::IdentityKeypair::from_seed(stored.identity_seed);
|
||||
let keypair = quicprochat_core::IdentityKeypair::from_seed(stored.identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -414,13 +414,13 @@ async fn run(args: Args) -> anyhow::Result<()> {
|
||||
let config = build_config(&args)?;
|
||||
let mut client = QpqClient::new(config);
|
||||
if args.state.exists() {
|
||||
match quicproquo_sdk::state::load_state(
|
||||
match quicprochat_sdk::state::load_state(
|
||||
&args.state,
|
||||
args.db_password.as_deref(),
|
||||
) {
|
||||
Ok(stored) => {
|
||||
let keypair =
|
||||
quicproquo_core::IdentityKeypair::from_seed(stored.identity_seed);
|
||||
quicprochat_core::IdentityKeypair::from_seed(stored.identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -1,4 +1,4 @@
|
||||
// cargo_bin! only works for current package's binary; we spawn qpq-server from another package.
|
||||
// cargo_bin! only works for current package's binary; we spawn qpc-server from another package.
|
||||
#![allow(deprecated)]
|
||||
#![allow(clippy::unwrap_used)]
|
||||
#![allow(clippy::await_holding_lock)] // AUTH_LOCK intentionally held across await to serialize tests
|
||||
@@ -18,12 +18,12 @@ fn ensure_rustls_provider() {
|
||||
|
||||
use sha2::{Sha256, Digest};
|
||||
|
||||
use quicproquo_client::{
|
||||
use quicprochat_client::{
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_register_state,
|
||||
cmd_register_user, cmd_send, connect_node, create_channel, enqueue, fetch_wait, init_auth,
|
||||
opaque_login, receive_pending_plaintexts, resolve_user, ClientAuth,
|
||||
};
|
||||
use quicproquo_core::{GroupMember, HybridKeypair, IdentityKeypair, ReceivedMessage};
|
||||
use quicprochat_core::{GroupMember, HybridKeypair, IdentityKeypair, ReceivedMessage};
|
||||
|
||||
/// Serialises ALL tests that call `init_auth` to prevent the global `AUTH_CONTEXT`
|
||||
/// from being overwritten by concurrent tests. Every test that mutates auth state
|
||||
@@ -71,7 +71,7 @@ fn spawn_server(base: &std::path::Path, extra_args: &[&str]) -> (String, PathBuf
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
|
||||
let server_bin = cargo_bin("qpq-server");
|
||||
let server_bin = cargo_bin("qpc-server");
|
||||
let mut cmd = Command::new(server_bin);
|
||||
cmd.arg("--listen")
|
||||
.arg(&listen)
|
||||
@@ -948,14 +948,14 @@ async fn e2e_dm_multi_message_epoch_synchronized() -> anyhow::Result<()> {
|
||||
/// Helper: load a state file and reconstruct a GroupMember with its keystore.
|
||||
fn load_member(state_path: &std::path::Path) -> (GroupMember, Option<HybridKeypair>) {
|
||||
let bytes = std::fs::read(state_path).expect("read state");
|
||||
let state: quicproquo_client::client::state::StoredState =
|
||||
let state: quicprochat_client::client::state::StoredState =
|
||||
bincode::deserialize(&bytes).expect("decode state");
|
||||
state.into_parts(state_path).expect("into_parts")
|
||||
}
|
||||
|
||||
/// Helper: save a GroupMember back to its state file.
|
||||
fn save_member(state_path: &std::path::Path, member: &GroupMember, hybrid: Option<&HybridKeypair>) {
|
||||
quicproquo_client::client::state::save_state(state_path, member, hybrid, None)
|
||||
quicprochat_client::client::state::save_state(state_path, member, hybrid, None)
|
||||
.expect("save state");
|
||||
}
|
||||
|
||||
@@ -1394,7 +1394,7 @@ async fn e2e_file_upload_download() -> anyhow::Result<()> {
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
quicproquo_client::client::rpc::set_auth(&mut auth)?;
|
||||
quicprochat_client::client::rpc::set_auth(&mut auth)?;
|
||||
p.set_blob_hash(&hash);
|
||||
p.set_chunk(file_data);
|
||||
p.set_offset(0);
|
||||
@@ -1426,7 +1426,7 @@ async fn e2e_file_upload_download() -> anyhow::Result<()> {
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
quicproquo_client::client::rpc::set_auth(&mut auth)?;
|
||||
quicprochat_client::client::rpc::set_auth(&mut auth)?;
|
||||
p.set_blob_id(&blob_id);
|
||||
p.set_offset(0);
|
||||
p.set_length(file_data.len() as u32);
|
||||
@@ -1463,7 +1463,7 @@ async fn e2e_file_upload_download() -> anyhow::Result<()> {
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
quicproquo_client::client::rpc::set_auth(&mut auth)?;
|
||||
quicprochat_client::client::rpc::set_auth(&mut auth)?;
|
||||
p.set_blob_id(&blob_id);
|
||||
p.set_offset(100);
|
||||
p.set_length(200);
|
||||
@@ -1521,7 +1521,7 @@ async fn e2e_blob_hash_mismatch() -> anyhow::Result<()> {
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
quicproquo_client::client::rpc::set_auth(&mut auth)?;
|
||||
quicprochat_client::client::rpc::set_auth(&mut auth)?;
|
||||
p.set_blob_hash(&wrong_hash);
|
||||
p.set_chunk(&chunk_data[..]);
|
||||
p.set_offset(0);
|
||||
@@ -1560,7 +1560,7 @@ fn spawn_server_custom(base: &std::path::Path, args: &[&str]) -> (String, PathBu
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
|
||||
let server_bin = cargo_bin("qpq-server");
|
||||
let server_bin = cargo_bin("qpc-server");
|
||||
let mut cmd = Command::new(server_bin);
|
||||
cmd.arg("--listen")
|
||||
.arg(&listen)
|
||||
@@ -1888,7 +1888,7 @@ async fn e2e_keypackage_exhaustion_graceful() -> anyhow::Result<()> {
|
||||
// Now try to fetch A's KeyPackage again — it should be exhausted.
|
||||
let client = local.run_until(connect_node(&server, &ca_cert, "localhost")).await?;
|
||||
let pkg = local
|
||||
.run_until(quicproquo_client::client::rpc::fetch_key_package(&client, &a_pk))
|
||||
.run_until(quicprochat_client::client::rpc::fetch_key_package(&client, &a_pk))
|
||||
.await?;
|
||||
|
||||
// Graceful: either empty (no package available) or an error — but NOT a panic.
|
||||
@@ -1,9 +1,10 @@
|
||||
[package]
|
||||
name = "quicproquo-core"
|
||||
name = "quicprochat-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicproquo."
|
||||
license = "MIT"
|
||||
edition.workspace = true
|
||||
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicprochat."
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["native"]
|
||||
@@ -14,11 +15,12 @@ native = [
|
||||
"dep:openmls",
|
||||
"dep:openmls_rust_crypto",
|
||||
"dep:openmls_traits",
|
||||
"dep:openmls_memory_storage",
|
||||
"dep:tls_codec",
|
||||
"dep:opaque-ke",
|
||||
"dep:bincode",
|
||||
"dep:capnp",
|
||||
"dep:quicproquo-proto",
|
||||
"dep:quicprochat-proto",
|
||||
"dep:tokio",
|
||||
]
|
||||
|
||||
@@ -48,12 +50,13 @@ opaque-ke = { workspace = true, optional = true }
|
||||
openmls = { workspace = true, optional = true }
|
||||
openmls_rust_crypto = { workspace = true, optional = true }
|
||||
openmls_traits = { workspace = true, optional = true }
|
||||
openmls_memory_storage = { workspace = true, optional = true }
|
||||
tls_codec = { workspace = true, optional = true }
|
||||
bincode = { workspace = true, optional = true }
|
||||
|
||||
# Serialisation (native only)
|
||||
capnp = { workspace = true, optional = true }
|
||||
quicproquo-proto = { path = "../quicproquo-proto", optional = true }
|
||||
quicprochat-proto = { path = "../quicprochat-proto", optional = true }
|
||||
|
||||
# Async runtime (native only)
|
||||
tokio = { workspace = true, optional = true }
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use quicproquo_core::{compute_safety_number, IdentityKeypair, padding};
|
||||
use quicprochat_core::{compute_safety_number, IdentityKeypair, padding};
|
||||
|
||||
// ── Identity keypair benchmarks ──────────────────────────────────────────────
|
||||
|
||||
@@ -48,7 +48,7 @@ fn bench_identity_verify(c: &mut Criterion) {
|
||||
// ── Sealed sender benchmarks ─────────────────────────────────────────────────
|
||||
|
||||
fn bench_sealed_sender(c: &mut Criterion) {
|
||||
use quicproquo_core::sealed_sender::{seal, unseal};
|
||||
use quicprochat_core::sealed_sender::{seal, unseal};
|
||||
|
||||
let sizes: &[(&str, usize)] = &[
|
||||
("32B", 32),
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use quicproquo_core::{hybrid_encrypt, hybrid_decrypt, HybridKeypair};
|
||||
use quicprochat_core::{hybrid_encrypt, hybrid_decrypt, HybridKeypair};
|
||||
|
||||
// ── Classical baseline (X25519 + ChaCha20-Poly1305) ─────────────────────────
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
|
||||
use quicproquo_core::{GroupMember, IdentityKeypair};
|
||||
use quicprochat_core::{GroupMember, IdentityKeypair};
|
||||
|
||||
/// Create identities and a group of the given size.
|
||||
/// Returns (creator, Vec<members>).
|
||||
@@ -11,17 +11,17 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criteri
|
||||
fn capnp_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
|
||||
let mut msg = capnp::message::Builder::new_default();
|
||||
{
|
||||
let mut envelope = msg.init_root::<quicproquo_proto::node_capnp::envelope::Builder>();
|
||||
let mut envelope = msg.init_root::<quicprochat_proto::node_capnp::envelope::Builder>();
|
||||
envelope.set_seq(seq);
|
||||
envelope.set_data(data);
|
||||
}
|
||||
quicproquo_proto::to_bytes(&msg).unwrap()
|
||||
quicprochat_proto::to_bytes(&msg).unwrap()
|
||||
}
|
||||
|
||||
fn capnp_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
|
||||
let reader = quicproquo_proto::from_bytes(bytes).unwrap();
|
||||
let reader = quicprochat_proto::from_bytes(bytes).unwrap();
|
||||
let envelope = reader
|
||||
.get_root::<quicproquo_proto::node_capnp::envelope::Reader>()
|
||||
.get_root::<quicprochat_proto::node_capnp::envelope::Reader>()
|
||||
.unwrap();
|
||||
(envelope.get_seq(), envelope.get_data().unwrap().to_vec())
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
syntax = "proto3";
|
||||
package quicproquo.bench;
|
||||
package quicprochat.bench;
|
||||
|
||||
// Equivalent to the Envelope struct in delivery.capnp
|
||||
message Envelope {
|
||||
@@ -1,4 +1,4 @@
|
||||
//! Error types for `quicproquo-core`.
|
||||
//! Error types for `quicprochat-core`.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
//! # Ratchet tree
|
||||
//!
|
||||
//! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded
|
||||
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
|
||||
//! in Welcome messages. `new_from_welcome` is called without a ratchet_tree;
|
||||
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
|
||||
|
||||
use std::{path::Path, sync::Arc};
|
||||
@@ -37,12 +37,13 @@ use std::{path::Path, sync::Arc};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
||||
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
|
||||
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
|
||||
TlsSerializeTrait,
|
||||
BasicCredential, Ciphersuite, Credential, CredentialWithKey, GroupId, KeyPackage,
|
||||
KeyPackageIn, LeafNodeParameters, MlsGroup, MlsGroupCreateConfig, MlsGroupJoinConfig,
|
||||
MlsMessageBodyIn, MlsMessageOut, ProcessedMessageContent, ProtocolMessage,
|
||||
ProtocolVersion, StagedWelcome,
|
||||
};
|
||||
use openmls_traits::OpenMlsCryptoProvider;
|
||||
use openmls_traits::OpenMlsProvider;
|
||||
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
|
||||
|
||||
use crate::{
|
||||
error::CoreError,
|
||||
@@ -102,8 +103,10 @@ pub struct GroupMember {
|
||||
identity: Arc<IdentityKeypair>,
|
||||
/// Active MLS group, if any.
|
||||
group: Option<MlsGroup>,
|
||||
/// Shared group configuration (wire format, ratchet tree extension, etc.).
|
||||
config: MlsGroupConfig,
|
||||
/// Shared group creation configuration (wire format, ratchet tree extension, etc.).
|
||||
create_config: MlsGroupCreateConfig,
|
||||
/// Shared group join configuration (wire format, ratchet tree extension, etc.).
|
||||
join_config: MlsGroupJoinConfig,
|
||||
/// Whether this member uses hybrid (X25519 + ML-KEM-768) HPKE keys.
|
||||
hybrid: bool,
|
||||
}
|
||||
@@ -139,7 +142,11 @@ impl GroupMember {
|
||||
group: Option<MlsGroup>,
|
||||
hybrid: bool,
|
||||
) -> Self {
|
||||
let config = MlsGroupConfig::builder()
|
||||
let create_config = MlsGroupCreateConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
let join_config = MlsGroupJoinConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
@@ -153,7 +160,8 @@ impl GroupMember {
|
||||
backend,
|
||||
identity,
|
||||
group,
|
||||
config,
|
||||
create_config,
|
||||
join_config,
|
||||
hybrid,
|
||||
}
|
||||
}
|
||||
@@ -175,18 +183,19 @@ impl GroupMember {
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage.
|
||||
pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
|
||||
let credential_with_key = self.make_credential_with_key()?;
|
||||
let credential_with_key = self.make_credential_with_key();
|
||||
|
||||
let key_package = KeyPackage::builder()
|
||||
let key_package_bundle = KeyPackage::builder()
|
||||
.build(
|
||||
CryptoConfig::with_default_version(CIPHERSUITE),
|
||||
CIPHERSUITE,
|
||||
&self.backend,
|
||||
self.identity.as_ref(),
|
||||
credential_with_key,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
key_package
|
||||
key_package_bundle
|
||||
.key_package()
|
||||
.tls_serialize_detached()
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))
|
||||
}
|
||||
@@ -205,13 +214,13 @@ impl GroupMember {
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the group already exists or openmls fails.
|
||||
pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
|
||||
let credential_with_key = self.make_credential_with_key()?;
|
||||
let credential_with_key = self.make_credential_with_key();
|
||||
let mls_id = GroupId::from_slice(group_id);
|
||||
|
||||
let group = MlsGroup::new_with_group_id(
|
||||
&self.backend,
|
||||
self.identity.as_ref(),
|
||||
&self.config,
|
||||
&self.create_config,
|
||||
mls_id,
|
||||
credential_with_key,
|
||||
)
|
||||
@@ -303,7 +312,7 @@ impl GroupMember {
|
||||
|
||||
let leaf_index = group
|
||||
.members()
|
||||
.find(|m| m.credential.identity() == member_identity)
|
||||
.find(|m| m.credential.serialized_content() == member_identity)
|
||||
.map(|m| m.index)
|
||||
.ok_or_else(|| CoreError::Mls("member not found in group".into()))?;
|
||||
|
||||
@@ -384,7 +393,11 @@ impl GroupMember {
|
||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||
|
||||
let (proposal_out, _ref) = group
|
||||
.propose_self_update(&self.backend, self.identity.as_ref(), None)
|
||||
.propose_self_update(
|
||||
&self.backend,
|
||||
self.identity.as_ref(),
|
||||
LeafNodeParameters::default(),
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("propose_self_update: {e:?}")))?;
|
||||
|
||||
proposal_out
|
||||
@@ -396,7 +409,7 @@ impl GroupMember {
|
||||
pub fn has_pending_proposals(&self) -> bool {
|
||||
self.group
|
||||
.as_ref()
|
||||
.map(|g| g.pending_proposals().next().is_some())
|
||||
.map(|g| g.has_pending_proposals())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
@@ -417,16 +430,22 @@ impl GroupMember {
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
|
||||
|
||||
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
|
||||
let welcome = match msg_in.extract() {
|
||||
MlsMessageInBody::Welcome(w) => w,
|
||||
MlsMessageBodyIn::Welcome(w) => w,
|
||||
_ => return Err(CoreError::Mls("expected a Welcome message".into())),
|
||||
};
|
||||
|
||||
// ratchet_tree = None because use_ratchet_tree_extension = true embeds
|
||||
// the tree inside the Welcome's GroupInfo extension.
|
||||
let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
|
||||
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
|
||||
let staged = StagedWelcome::new_from_welcome(
|
||||
&self.backend,
|
||||
&self.join_config,
|
||||
welcome,
|
||||
None, // ratchet tree extracted from the Welcome's GroupInfo extension
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
|
||||
|
||||
let group = staged
|
||||
.into_group(&self.backend)
|
||||
.map_err(|e| CoreError::Mls(format!("into_group: {e:?}")))?;
|
||||
|
||||
self.group = Some(group);
|
||||
Ok(())
|
||||
@@ -508,10 +527,9 @@ impl GroupMember {
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
||||
|
||||
// into_protocol_message() is feature-gated; extract() + manual construction is not.
|
||||
let protocol_message = match msg_in.extract() {
|
||||
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
|
||||
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
|
||||
let protocol_message: ProtocolMessage = match msg_in.extract() {
|
||||
MlsMessageBodyIn::PrivateMessage(m) => m.into(),
|
||||
MlsMessageBodyIn::PublicMessage(m) => m.into(),
|
||||
_ => return Err(CoreError::Mls("not a protocol message".into())),
|
||||
};
|
||||
|
||||
@@ -519,7 +537,7 @@ impl GroupMember {
|
||||
.process_message(&self.backend, protocol_message)
|
||||
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
|
||||
|
||||
let sender_identity = processed.credential().identity().to_vec();
|
||||
let sender_identity = processed.credential().serialized_content().to_vec();
|
||||
|
||||
match processed.into_content() {
|
||||
ProcessedMessageContent::ApplicationMessage(app) => {
|
||||
@@ -545,11 +563,15 @@ impl GroupMember {
|
||||
}
|
||||
// Proposals are stored for a later Commit; nothing to return yet.
|
||||
ProcessedMessageContent::ProposalMessage(proposal) => {
|
||||
group.store_pending_proposal(*proposal);
|
||||
group
|
||||
.store_pending_proposal(self.backend.storage(), *proposal)
|
||||
.map_err(|e| CoreError::Mls(format!("store_pending_proposal: {e:?}")))?;
|
||||
Ok((sender_identity, ReceivedMessage::StateChanged))
|
||||
}
|
||||
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
|
||||
group.store_pending_proposal(*proposal);
|
||||
group
|
||||
.store_pending_proposal(self.backend.storage(), *proposal)
|
||||
.map_err(|e| CoreError::Mls(format!("store_pending_proposal: {e:?}")))?;
|
||||
Ok((sender_identity, ReceivedMessage::StateChanged))
|
||||
}
|
||||
}
|
||||
@@ -597,6 +619,69 @@ impl GroupMember {
|
||||
self.group.as_ref()
|
||||
}
|
||||
|
||||
/// Serialize the MLS group state (via the backing `StorageProvider`).
|
||||
///
|
||||
/// In openmls 0.8 the `MlsGroup` is no longer `Serialize`; its state is
|
||||
/// held inside the `StorageProvider`. This method serializes the full
|
||||
/// provider storage to bytes, which can later be restored with
|
||||
/// [`new_from_storage_bytes`].
|
||||
///
|
||||
/// Returns `None` if no active group exists.
|
||||
///
|
||||
/// [`new_from_storage_bytes`]: Self::new_from_storage_bytes
|
||||
pub fn serialize_mls_state(&self) -> Result<Option<Vec<u8>>, CoreError> {
|
||||
if self.group.is_none() {
|
||||
return Ok(None);
|
||||
}
|
||||
let bytes = self
|
||||
.backend
|
||||
.storage()
|
||||
.to_bytes()
|
||||
.map_err(|e| CoreError::Mls(format!("serialize storage: {e}")))?;
|
||||
Ok(Some(bytes))
|
||||
}
|
||||
|
||||
/// Create a `GroupMember` from previously serialized storage bytes.
|
||||
///
|
||||
/// Reconstructs the `DiskKeyStore` from the blob, then loads the
|
||||
/// `MlsGroup` from the storage provider using the given `group_id`.
|
||||
pub fn new_from_storage_bytes(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
storage_bytes: &[u8],
|
||||
group_id: &[u8],
|
||||
hybrid: bool,
|
||||
) -> Result<Self, CoreError> {
|
||||
let key_store = DiskKeyStore::from_bytes(storage_bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("deserialize storage: {e}")))?;
|
||||
|
||||
let create_config = MlsGroupCreateConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
let join_config = MlsGroupJoinConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
let backend = if hybrid {
|
||||
HybridCryptoProvider::new_hybrid(key_store)
|
||||
} else {
|
||||
HybridCryptoProvider::new_classical(key_store)
|
||||
};
|
||||
|
||||
let mls_group_id = GroupId::from_slice(group_id);
|
||||
let group = MlsGroup::load(backend.storage(), &mls_group_id)
|
||||
.map_err(|e| CoreError::Mls(format!("load group from storage: {e}")))?;
|
||||
|
||||
Ok(Self {
|
||||
backend,
|
||||
identity,
|
||||
group,
|
||||
create_config,
|
||||
join_config,
|
||||
hybrid,
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the identity (credential) bytes of all current group members.
|
||||
///
|
||||
/// Each entry is the raw credential payload (Ed25519 public key bytes)
|
||||
@@ -608,23 +693,20 @@ impl GroupMember {
|
||||
};
|
||||
group
|
||||
.members()
|
||||
.map(|m| m.credential.identity().to_vec())
|
||||
.map(|m| m.credential.serialized_content().to_vec())
|
||||
.collect()
|
||||
}
|
||||
|
||||
// ── Private helpers ───────────────────────────────────────────────────────
|
||||
|
||||
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
|
||||
let credential = Credential::new(
|
||||
self.identity.public_key_bytes().to_vec(),
|
||||
CredentialType::Basic,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
fn make_credential_with_key(&self) -> CredentialWithKey {
|
||||
let credential: Credential =
|
||||
BasicCredential::new(self.identity.public_key_bytes().to_vec()).into();
|
||||
|
||||
Ok(CredentialWithKey {
|
||||
CredentialWithKey {
|
||||
credential,
|
||||
signature_key: self.identity.public_key_bytes().to_vec().into(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -758,11 +840,6 @@ mod tests {
|
||||
let (_commit_a, welcome_a) = creator.add_member(&a_kp).expect("add A");
|
||||
a.join_group(&welcome_a).expect("A join");
|
||||
|
||||
// A must process the commit that added them (it's a StateChanged for A since
|
||||
// the commit itself is what brought them in — but actually A joined via Welcome,
|
||||
// so A doesn't process the add-commit). The creator already merged the pending
|
||||
// commit in add_member, so creator is at epoch 2.
|
||||
|
||||
// Add B — at this point creator is at epoch 2 (after adding A).
|
||||
let (commit_b, welcome_b) = creator.add_member(&b_kp).expect("add B");
|
||||
b.join_group(&welcome_b).expect("B join");
|
||||
@@ -958,7 +1035,7 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
/// 10 messages alternating Alice→Bob and Bob→Alice all decrypt successfully.
|
||||
/// 10 messages alternating Alice->Bob and Bob->Alice all decrypt successfully.
|
||||
/// Verifies that epoch state stays in sync across multiple application messages.
|
||||
#[test]
|
||||
fn multi_message_roundtrip_epoch_stays_in_sync() {
|
||||
@@ -1002,4 +1079,96 @@ mod tests {
|
||||
"send_message before join must return an error"
|
||||
);
|
||||
}
|
||||
|
||||
/// Measure actual MLS artifact sizes for mesh planning.
|
||||
/// These numbers inform the MLS-Lite design and constrained link feasibility.
|
||||
#[test]
|
||||
fn measure_mls_wire_sizes() {
|
||||
let creator_id = Arc::new(IdentityKeypair::generate());
|
||||
let joiner_id = Arc::new(IdentityKeypair::generate());
|
||||
|
||||
let mut creator = GroupMember::new(Arc::clone(&creator_id));
|
||||
let mut joiner = GroupMember::new(Arc::clone(&joiner_id));
|
||||
|
||||
// 1. KeyPackage size
|
||||
let kp_bytes = joiner.generate_key_package().expect("generate KP");
|
||||
println!("=== MLS Wire Format Sizes ===");
|
||||
println!("KeyPackage: {} bytes", kp_bytes.len());
|
||||
|
||||
// 2. Create group (no wire message, just local state)
|
||||
creator.create_group(b"size-test").expect("create group");
|
||||
|
||||
// 3. Add member -> Commit + Welcome
|
||||
let (commit_bytes, welcome_bytes) = creator.add_member(&kp_bytes).expect("add member");
|
||||
println!("Commit (add): {} bytes", commit_bytes.len());
|
||||
println!("Welcome: {} bytes", welcome_bytes.len());
|
||||
|
||||
// Join the group
|
||||
joiner.join_group(&welcome_bytes).expect("join");
|
||||
|
||||
// 4. Application message (short payload)
|
||||
let short_msg = creator.send_message(b"hello").expect("short msg");
|
||||
println!("AppMessage (5B): {} bytes", short_msg.len());
|
||||
|
||||
// 5. Application message (medium payload ~100 bytes)
|
||||
let medium_payload = vec![0x42u8; 100];
|
||||
let medium_msg = creator.send_message(&medium_payload).expect("medium msg");
|
||||
println!("AppMessage (100B): {} bytes", medium_msg.len());
|
||||
|
||||
// 6. Self-update proposal
|
||||
let update_proposal = creator.propose_self_update().expect("update proposal");
|
||||
println!("UpdateProposal: {} bytes", update_proposal.len());
|
||||
|
||||
// Joiner processes the proposal
|
||||
joiner.receive_message(&update_proposal).expect("recv proposal");
|
||||
|
||||
// 7. Commit (update only, no welcome)
|
||||
let (update_commit, _) = joiner.commit_pending_proposals().expect("commit update");
|
||||
println!("Commit (update): {} bytes", update_commit.len());
|
||||
|
||||
// Summary for LoRa feasibility
|
||||
println!("\n=== LoRa Feasibility (SF12/BW125, MTU=51 bytes) ===");
|
||||
println!("KeyPackage: {} fragments ({:.0}s at 1% duty)",
|
||||
(kp_bytes.len() + 50) / 51,
|
||||
(kp_bytes.len() as f64 / 51.0).ceil() * 36.0 / 60.0);
|
||||
println!("Welcome: {} fragments ({:.0}s at 1% duty)",
|
||||
(welcome_bytes.len() + 50) / 51,
|
||||
(welcome_bytes.len() as f64 / 51.0).ceil() * 36.0 / 60.0);
|
||||
println!("AppMessage (5B): {} fragments",
|
||||
(short_msg.len() + 50) / 51);
|
||||
|
||||
// Assertions to catch regressions / validate estimates
|
||||
assert!(kp_bytes.len() < 1000, "KeyPackage should be under 1KB");
|
||||
assert!(welcome_bytes.len() < 3000, "Welcome should be under 3KB");
|
||||
assert!(short_msg.len() < 300, "Short AppMessage should be under 300B");
|
||||
}
|
||||
|
||||
/// Measure MLS sizes with hybrid (post-quantum) mode enabled.
|
||||
#[test]
|
||||
fn measure_mls_wire_sizes_hybrid() {
|
||||
let creator_id = Arc::new(IdentityKeypair::generate());
|
||||
let joiner_id = Arc::new(IdentityKeypair::generate());
|
||||
|
||||
let mut creator = GroupMember::new_hybrid(Arc::clone(&creator_id));
|
||||
let mut joiner = GroupMember::new_hybrid(Arc::clone(&joiner_id));
|
||||
|
||||
// KeyPackage with hybrid (X25519 + ML-KEM-768) init key
|
||||
let kp_bytes = joiner.generate_key_package().expect("generate hybrid KP");
|
||||
println!("=== MLS Wire Format Sizes (Hybrid PQ Mode) ===");
|
||||
println!("KeyPackage (PQ): {} bytes", kp_bytes.len());
|
||||
|
||||
creator.create_group(b"hybrid-size-test").expect("create group");
|
||||
let (commit_bytes, welcome_bytes) = creator.add_member(&kp_bytes).expect("add member");
|
||||
println!("Commit (add, PQ): {} bytes", commit_bytes.len());
|
||||
println!("Welcome (PQ): {} bytes", welcome_bytes.len());
|
||||
|
||||
joiner.join_group(&welcome_bytes).expect("join");
|
||||
|
||||
let short_msg = creator.send_message(b"hello").expect("short msg");
|
||||
println!("AppMessage (PQ): {} bytes", short_msg.len());
|
||||
|
||||
// PQ KeyPackages are larger due to ML-KEM-768 public key (1184 bytes)
|
||||
assert!(kp_bytes.len() > 1000, "Hybrid KeyPackage should be >1KB due to ML-KEM");
|
||||
assert!(kp_bytes.len() < 3000, "Hybrid KeyPackage should be <3KB");
|
||||
}
|
||||
}
|
||||
@@ -27,8 +27,9 @@ use openmls_traits::{
|
||||
crypto::OpenMlsCrypto,
|
||||
types::{
|
||||
CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType,
|
||||
KemOutput,
|
||||
},
|
||||
OpenMlsCryptoProvider,
|
||||
OpenMlsProvider,
|
||||
};
|
||||
use tls_codec::SecretVLBytes;
|
||||
|
||||
@@ -128,6 +129,15 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
self.rust_crypto.hkdf_extract(hash_type, salt, ikm)
|
||||
}
|
||||
|
||||
fn hmac(
|
||||
&self,
|
||||
hash_type: HashType,
|
||||
key: &[u8],
|
||||
message: &[u8],
|
||||
) -> Result<SecretVLBytes, CryptoError> {
|
||||
self.rust_crypto.hmac(hash_type, key, message)
|
||||
}
|
||||
|
||||
fn hkdf_expand(
|
||||
&self,
|
||||
hash_type: HashType,
|
||||
@@ -189,25 +199,18 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
info: &[u8],
|
||||
aad: &[u8],
|
||||
ptxt: &[u8],
|
||||
) -> HpkeCiphertext {
|
||||
) -> Result<HpkeCiphertext, CryptoError> {
|
||||
if Self::is_hybrid_public_key(pk_r) {
|
||||
// The trait `OpenMlsCrypto::hpke_seal` returns `HpkeCiphertext` (not
|
||||
// `Result`), so we cannot propagate errors through the return type.
|
||||
// Returning an empty ciphertext would silently cause data loss.
|
||||
// Instead, panic on failure — a hybrid key that passes the length
|
||||
// check but fails deserialization or encryption indicates a critical
|
||||
// bug (corrupted key material), not a recoverable condition.
|
||||
let recipient_pk = HybridPublicKey::from_bytes(pk_r)
|
||||
.expect("hybrid public key deserialization failed — key material is corrupted");
|
||||
// Pass HPKE info and aad through for proper context binding (RFC 9180).
|
||||
.map_err(|_| CryptoError::CryptoLibraryError)?;
|
||||
let envelope = hybrid_encrypt(&recipient_pk, ptxt, info, aad)
|
||||
.expect("hybrid HPKE encryption failed — critical crypto error");
|
||||
.map_err(|_| CryptoError::CryptoLibraryError)?;
|
||||
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
|
||||
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
|
||||
HpkeCiphertext {
|
||||
Ok(HpkeCiphertext {
|
||||
kem_output: kem_output.into(),
|
||||
ciphertext: ciphertext.into(),
|
||||
}
|
||||
})
|
||||
} else {
|
||||
self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
|
||||
}
|
||||
@@ -245,7 +248,7 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
info: &[u8],
|
||||
exporter_context: &[u8],
|
||||
exporter_length: usize,
|
||||
) -> Result<(Vec<u8>, ExporterSecret), CryptoError> {
|
||||
) -> Result<(KemOutput, ExporterSecret), CryptoError> {
|
||||
if Self::is_hybrid_public_key(pk_r) {
|
||||
// A key that passes the hybrid length check but fails deserialization
|
||||
// is corrupted — return an error instead of silently downgrading to
|
||||
@@ -286,14 +289,14 @@ impl OpenMlsCrypto for HybridCrypto {
|
||||
}
|
||||
}
|
||||
|
||||
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> HpkeKeyPair {
|
||||
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> Result<HpkeKeyPair, CryptoError> {
|
||||
if self.hybrid_enabled && config.0 == HpkeKemType::DhKem25519 {
|
||||
let kp = HybridKeypair::derive_from_ikm(ikm);
|
||||
let private_bytes = kp.private_to_bytes();
|
||||
HpkeKeyPair {
|
||||
Ok(HpkeKeyPair {
|
||||
private: private_bytes.as_slice().into(),
|
||||
public: kp.public_key().to_bytes(),
|
||||
}
|
||||
})
|
||||
} else {
|
||||
self.rust_crypto.derive_hpke_keypair(config, ikm)
|
||||
}
|
||||
@@ -343,10 +346,10 @@ impl Default for HybridCryptoProvider {
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenMlsCryptoProvider for HybridCryptoProvider {
|
||||
impl OpenMlsProvider for HybridCryptoProvider {
|
||||
type CryptoProvider = HybridCrypto;
|
||||
type RandProvider = RustCrypto;
|
||||
type KeyStoreProvider = DiskKeyStore;
|
||||
type StorageProvider = DiskKeyStore;
|
||||
|
||||
fn crypto(&self) -> &Self::CryptoProvider {
|
||||
&self.crypto
|
||||
@@ -356,7 +359,7 @@ impl OpenMlsCryptoProvider for HybridCryptoProvider {
|
||||
self.crypto.rust_crypto()
|
||||
}
|
||||
|
||||
fn key_store(&self) -> &Self::KeyStoreProvider {
|
||||
fn storage(&self) -> &Self::StorageProvider {
|
||||
&self.key_store
|
||||
}
|
||||
}
|
||||
@@ -383,7 +386,7 @@ mod tests {
|
||||
let crypto = HybridCrypto::new();
|
||||
let ikm = b"test-ikm-for-hybrid-hpke-keypair";
|
||||
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
|
||||
assert_eq!(keypair.public.len(), HYBRID_PUBLIC_KEY_LEN);
|
||||
assert_eq!(keypair.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN);
|
||||
|
||||
@@ -397,7 +400,7 @@ mod tests {
|
||||
info,
|
||||
aad,
|
||||
plaintext,
|
||||
);
|
||||
).unwrap();
|
||||
assert!(!ct.kem_output.as_slice().is_empty());
|
||||
assert!(!ct.ciphertext.as_slice().is_empty());
|
||||
|
||||
@@ -419,7 +422,7 @@ mod tests {
|
||||
let crypto = HybridCrypto::new();
|
||||
let ikm = b"exporter-ikm";
|
||||
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
|
||||
let info = b"";
|
||||
let exporter_context = b"MLS 1.0 external init";
|
||||
let exporter_length = 32;
|
||||
@@ -457,7 +460,7 @@ mod tests {
|
||||
let crypto = HybridCrypto::new_classical();
|
||||
let ikm = b"test-ikm-for-classical-hpke";
|
||||
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
|
||||
// Classical X25519 keys are 32 bytes
|
||||
assert_eq!(keypair.public.len(), 32);
|
||||
assert_eq!(keypair.private.as_ref().len(), 32);
|
||||
@@ -469,7 +472,7 @@ mod tests {
|
||||
let crypto = HybridCrypto::new_classical();
|
||||
let ikm = b"test-ikm-for-classical-round-trip";
|
||||
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
|
||||
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
|
||||
assert_eq!(keypair.public.len(), 32); // classical key
|
||||
|
||||
let plaintext = b"hello classical MLS";
|
||||
@@ -482,7 +485,7 @@ mod tests {
|
||||
info,
|
||||
aad,
|
||||
plaintext,
|
||||
);
|
||||
).unwrap();
|
||||
assert!(!ct.kem_output.as_slice().is_empty());
|
||||
|
||||
let decrypted = crypto
|
||||
@@ -501,7 +504,7 @@ mod tests {
|
||||
#[test]
|
||||
fn key_package_generation_with_hybrid_provider() {
|
||||
use openmls::prelude::{
|
||||
Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||
BasicCredential, CredentialWithKey, KeyPackage,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tls_codec::Serialize;
|
||||
@@ -514,26 +517,24 @@ mod tests {
|
||||
let provider = HybridCryptoProvider::default();
|
||||
let identity = Arc::new(IdentityKeypair::generate());
|
||||
|
||||
let credential = Credential::new(
|
||||
identity.public_key_bytes().to_vec(),
|
||||
CredentialType::Basic,
|
||||
)
|
||||
.unwrap();
|
||||
let credential: openmls::prelude::Credential =
|
||||
BasicCredential::new(identity.public_key_bytes().to_vec()).into();
|
||||
let credential_with_key = CredentialWithKey {
|
||||
credential,
|
||||
signature_key: identity.public_key_bytes().to_vec().into(),
|
||||
};
|
||||
|
||||
let key_package = KeyPackage::builder()
|
||||
let key_package_bundle = KeyPackage::builder()
|
||||
.build(
|
||||
CryptoConfig::with_default_version(CIPHERSUITE),
|
||||
CIPHERSUITE,
|
||||
&provider,
|
||||
identity.as_ref(),
|
||||
credential_with_key,
|
||||
)
|
||||
.expect("KeyPackage with hybrid HPKE");
|
||||
|
||||
let bytes = key_package
|
||||
let bytes = key_package_bundle
|
||||
.key_package()
|
||||
.tls_serialize_detached()
|
||||
.expect("serialize KeyPackage");
|
||||
assert!(!bytes.is_empty());
|
||||
@@ -90,7 +90,7 @@ impl IdentityKeypair {
|
||||
/// `openmls_basic_credential` crate.
|
||||
#[cfg(feature = "native")]
|
||||
impl openmls_traits::signatures::Signer for IdentityKeypair {
|
||||
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::types::Error> {
|
||||
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::signatures::SignerError> {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
Ok(sig.to_bytes().to_vec())
|
||||
@@ -14,18 +14,18 @@
|
||||
//! # Wire format
|
||||
//!
|
||||
//! KeyPackages are TLS-encoded using `tls_codec` (same version as openmls).
|
||||
//! The resulting bytes are opaque to the quicproquo transport layer.
|
||||
//! The resulting bytes are opaque to the quicprochat transport layer.
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||
KeyPackageIn, TlsDeserializeTrait, TlsSerializeTrait,
|
||||
BasicCredential, Ciphersuite, CredentialWithKey, KeyPackage, KeyPackageIn,
|
||||
};
|
||||
use openmls_rust_crypto::OpenMlsRustCrypto;
|
||||
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::{error::CoreError, identity::IdentityKeypair};
|
||||
|
||||
/// The MLS ciphersuite used throughout quicproquo (RFC 9420 §17.1).
|
||||
/// The MLS ciphersuite used throughout quicprochat (RFC 9420 §17.1).
|
||||
pub const ALLOWED_CIPHERSUITE: Ciphersuite =
|
||||
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
@@ -74,8 +74,8 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
|
||||
|
||||
// Build a BasicCredential using the raw Ed25519 public key bytes as the
|
||||
// MLS identity. Per RFC 9420, any byte string may serve as the identity.
|
||||
let credential = Credential::new(identity.public_key_bytes().to_vec(), CredentialType::Basic)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
let credential: openmls::prelude::Credential =
|
||||
BasicCredential::new(identity.public_key_bytes().to_vec()).into();
|
||||
|
||||
// The `signature_key` in CredentialWithKey is the Ed25519 public key that
|
||||
// will be used to verify the KeyPackage's leaf node signature.
|
||||
@@ -87,19 +87,13 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
|
||||
|
||||
// `IdentityKeypair` implements `openmls_traits::signatures::Signer`
|
||||
// so it can be passed directly to the builder.
|
||||
let key_package = KeyPackage::builder()
|
||||
.build(
|
||||
CryptoConfig::with_default_version(CIPHERSUITE),
|
||||
&backend,
|
||||
identity,
|
||||
credential_with_key,
|
||||
)
|
||||
let key_package_bundle = KeyPackage::builder()
|
||||
.build(CIPHERSUITE, &backend, identity, credential_with_key)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
// TLS-encode the KeyPackage using the trait from the openmls prelude.
|
||||
// This uses tls_codec 0.3 (the same version openmls uses internally),
|
||||
// avoiding a duplicate-trait conflict with tls_codec 0.4.
|
||||
let tls_bytes = key_package
|
||||
// TLS-encode the KeyPackage.
|
||||
let tls_bytes = key_package_bundle
|
||||
.key_package()
|
||||
.tls_serialize_detached()
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
713
crates/quicprochat-core/src/keystore.rs
Normal file
713
crates/quicprochat-core/src/keystore.rs
Normal file
@@ -0,0 +1,713 @@
|
||||
use std::{
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use openmls_memory_storage::MemoryStorage;
|
||||
use openmls_traits::storage::{traits, StorageProvider, CURRENT_VERSION};
|
||||
|
||||
/// A disk-backed storage provider implementing `StorageProvider`.
|
||||
///
|
||||
/// Wraps `openmls_memory_storage::MemoryStorage` and flushes to disk on every
|
||||
/// write so that HPKE init keys and group state survive process restarts.
|
||||
///
|
||||
/// # Serialization
|
||||
///
|
||||
/// Uses bincode for the outer `HashMap<Vec<u8>, Vec<u8>>` container when
|
||||
/// persisting to disk. The inner values use serde_json (matching
|
||||
/// `MemoryStorage`'s serialization format).
|
||||
///
|
||||
/// # Persistence security
|
||||
///
|
||||
/// When `path` is set, file permissions are restricted to owner-only (0o600)
|
||||
/// on Unix platforms, since the store may contain HPKE private keys.
|
||||
#[derive(Debug)]
|
||||
pub struct DiskKeyStore {
|
||||
path: Option<PathBuf>,
|
||||
storage: MemoryStorage,
|
||||
}
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum DiskKeyStoreError {
|
||||
#[error("serialization error")]
|
||||
Serialization,
|
||||
#[error("io error: {0}")]
|
||||
Io(String),
|
||||
#[error("memory storage error: {0}")]
|
||||
MemoryStorage(#[from] openmls_memory_storage::MemoryStorageError),
|
||||
}
|
||||
|
||||
impl DiskKeyStore {
|
||||
/// In-memory keystore (no persistence).
|
||||
pub fn ephemeral() -> Self {
|
||||
Self {
|
||||
path: None,
|
||||
storage: MemoryStorage::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Persistent keystore backed by `path`. Creates an empty store if missing.
|
||||
pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
|
||||
let path = path.as_ref().to_path_buf();
|
||||
let storage = if path.exists() {
|
||||
let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
MemoryStorage::default()
|
||||
} else {
|
||||
let map: std::collections::HashMap<Vec<u8>, Vec<u8>> =
|
||||
bincode::deserialize(&bytes)
|
||||
.map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
let storage = MemoryStorage::default();
|
||||
let mut values = storage.values.write()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
*values = map;
|
||||
drop(values);
|
||||
storage
|
||||
}
|
||||
} else {
|
||||
MemoryStorage::default()
|
||||
};
|
||||
|
||||
let store = Self {
|
||||
path: Some(path),
|
||||
storage,
|
||||
};
|
||||
|
||||
// Set restrictive file permissions on the keystore file.
|
||||
store.set_file_permissions()?;
|
||||
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
fn flush(&self) -> Result<(), DiskKeyStoreError> {
|
||||
let Some(path) = &self.path else {
|
||||
return Ok(());
|
||||
};
|
||||
let values = self.storage.values.read()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
let bytes = bincode::serialize(&*values)
|
||||
.map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, &bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
self.set_file_permissions()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Serialize the backing storage to bytes (bincode).
|
||||
///
|
||||
/// This captures all key material *and* MLS group state held by the
|
||||
/// `StorageProvider`, allowing the caller to persist it in a database
|
||||
/// column instead of (or in addition to) on-disk files.
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, DiskKeyStoreError> {
|
||||
let values = self.storage.values.read()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)
|
||||
}
|
||||
|
||||
/// Restore a `DiskKeyStore` from bytes previously produced by [`to_bytes`].
|
||||
pub fn from_bytes(bytes: &[u8]) -> Result<Self, DiskKeyStoreError> {
|
||||
let map: std::collections::HashMap<Vec<u8>, Vec<u8>> =
|
||||
bincode::deserialize(bytes).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
let storage = MemoryStorage::default();
|
||||
let mut values = storage.values.write()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
*values = map;
|
||||
drop(values);
|
||||
Ok(Self {
|
||||
path: None,
|
||||
storage,
|
||||
})
|
||||
}
|
||||
|
||||
/// Restrict file permissions to owner-only (0o600) on Unix.
|
||||
#[cfg(unix)]
|
||||
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
if let Some(path) = &self.path {
|
||||
if path.exists() {
|
||||
let perms = std::fs::Permissions::from_mode(0o600);
|
||||
fs::set_permissions(path, perms)
|
||||
.map_err(|e| DiskKeyStoreError::Io(format!("set permissions: {e}")))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DiskKeyStore {
|
||||
fn default() -> Self {
|
||||
Self::ephemeral()
|
||||
}
|
||||
}
|
||||
|
||||
/// Delegate all `StorageProvider` methods to the inner `MemoryStorage`,
|
||||
/// flushing to disk after every write/delete operation.
|
||||
///
|
||||
/// The flush errors are mapped to `DiskKeyStoreError` via the
|
||||
/// `MemoryStorageError` conversion. If a flush fails, the in-memory state
|
||||
/// is still updated (matching the old DiskKeyStore behavior).
|
||||
impl StorageProvider<CURRENT_VERSION> for DiskKeyStore {
|
||||
type Error = DiskKeyStoreError;
|
||||
|
||||
fn write_mls_join_config<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
config: &MlsGroupJoinConfig,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_mls_join_config(group_id, config)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn append_own_leaf_node<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
LeafNode: traits::LeafNode<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
leaf_node: &LeafNode,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.append_own_leaf_node(group_id, leaf_node)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn queue_proposal<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
|
||||
QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
proposal_ref: &ProposalRef,
|
||||
proposal: &QueuedProposal,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.queue_proposal(group_id, proposal_ref, proposal)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_tree<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
TreeSync: traits::TreeSync<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
tree: &TreeSync,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_tree(group_id, tree)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_interim_transcript_hash<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
interim_transcript_hash: &InterimTranscriptHash,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_interim_transcript_hash(group_id, interim_transcript_hash)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_context<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
GroupContext: traits::GroupContext<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
group_context: &GroupContext,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_context(group_id, group_context)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_confirmation_tag<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
confirmation_tag: &ConfirmationTag,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_confirmation_tag(group_id, confirmation_tag)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_group_state<
|
||||
GroupState: traits::GroupState<CURRENT_VERSION>,
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
group_state: &GroupState,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_group_state(group_id, group_state)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_message_secrets<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
message_secrets: &MessageSecrets,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_message_secrets(group_id, message_secrets)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_resumption_psk_store<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
resumption_psk_store: &ResumptionPskStore,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_resumption_psk_store(group_id, resumption_psk_store)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_own_leaf_index<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
own_leaf_index: &LeafNodeIndex,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_own_leaf_index(group_id, own_leaf_index)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_group_epoch_secrets<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
group_epoch_secrets: &GroupEpochSecrets,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_group_epoch_secrets(group_id, group_epoch_secrets)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_signature_key_pair<
|
||||
SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
|
||||
SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
public_key: &SignaturePublicKey,
|
||||
signature_key_pair: &SignatureKeyPair,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_signature_key_pair(public_key, signature_key_pair)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_encryption_key_pair<
|
||||
EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
|
||||
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
public_key: &EncryptionKey,
|
||||
key_pair: &HpkeKeyPair,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_encryption_key_pair(public_key, key_pair)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_encryption_epoch_key_pairs<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
EpochKey: traits::EpochKey<CURRENT_VERSION>,
|
||||
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
epoch: &EpochKey,
|
||||
leaf_index: u32,
|
||||
key_pairs: &[HpkeKeyPair],
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_encryption_epoch_key_pairs(group_id, epoch, leaf_index, key_pairs)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_key_package<
|
||||
HashReference: traits::HashReference<CURRENT_VERSION>,
|
||||
KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
hash_ref: &HashReference,
|
||||
key_package: &KeyPackage,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_key_package(hash_ref, key_package)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn write_psk<
|
||||
PskId: traits::PskId<CURRENT_VERSION>,
|
||||
PskBundle: traits::PskBundle<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
psk_id: &PskId,
|
||||
psk: &PskBundle,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.write_psk(psk_id, psk)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
// --- getters (no flush needed) ---
|
||||
|
||||
fn mls_group_join_config<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<MlsGroupJoinConfig>, Self::Error> {
|
||||
Ok(self.storage.mls_group_join_config(group_id)?)
|
||||
}
|
||||
|
||||
fn own_leaf_nodes<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
LeafNode: traits::LeafNode<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Vec<LeafNode>, Self::Error> {
|
||||
Ok(self.storage.own_leaf_nodes(group_id)?)
|
||||
}
|
||||
|
||||
fn queued_proposal_refs<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Vec<ProposalRef>, Self::Error> {
|
||||
Ok(self.storage.queued_proposal_refs(group_id)?)
|
||||
}
|
||||
|
||||
fn queued_proposals<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
|
||||
QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Vec<(ProposalRef, QueuedProposal)>, Self::Error> {
|
||||
Ok(self.storage.queued_proposals(group_id)?)
|
||||
}
|
||||
|
||||
fn tree<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
TreeSync: traits::TreeSync<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<TreeSync>, Self::Error> {
|
||||
Ok(self.storage.tree(group_id)?)
|
||||
}
|
||||
|
||||
fn group_context<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
GroupContext: traits::GroupContext<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<GroupContext>, Self::Error> {
|
||||
Ok(self.storage.group_context(group_id)?)
|
||||
}
|
||||
|
||||
fn interim_transcript_hash<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<InterimTranscriptHash>, Self::Error> {
|
||||
Ok(self.storage.interim_transcript_hash(group_id)?)
|
||||
}
|
||||
|
||||
fn confirmation_tag<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<ConfirmationTag>, Self::Error> {
|
||||
Ok(self.storage.confirmation_tag(group_id)?)
|
||||
}
|
||||
|
||||
fn group_state<
|
||||
GroupState: traits::GroupState<CURRENT_VERSION>,
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<GroupState>, Self::Error> {
|
||||
Ok(self.storage.group_state(group_id)?)
|
||||
}
|
||||
|
||||
fn message_secrets<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<MessageSecrets>, Self::Error> {
|
||||
Ok(self.storage.message_secrets(group_id)?)
|
||||
}
|
||||
|
||||
fn resumption_psk_store<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<ResumptionPskStore>, Self::Error> {
|
||||
Ok(self.storage.resumption_psk_store(group_id)?)
|
||||
}
|
||||
|
||||
fn own_leaf_index<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<LeafNodeIndex>, Self::Error> {
|
||||
Ok(self.storage.own_leaf_index(group_id)?)
|
||||
}
|
||||
|
||||
fn group_epoch_secrets<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<Option<GroupEpochSecrets>, Self::Error> {
|
||||
Ok(self.storage.group_epoch_secrets(group_id)?)
|
||||
}
|
||||
|
||||
fn signature_key_pair<
|
||||
SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
|
||||
SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
public_key: &SignaturePublicKey,
|
||||
) -> Result<Option<SignatureKeyPair>, Self::Error> {
|
||||
Ok(self.storage.signature_key_pair(public_key)?)
|
||||
}
|
||||
|
||||
fn encryption_key_pair<
|
||||
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
|
||||
EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
public_key: &EncryptionKey,
|
||||
) -> Result<Option<HpkeKeyPair>, Self::Error> {
|
||||
Ok(self.storage.encryption_key_pair(public_key)?)
|
||||
}
|
||||
|
||||
fn encryption_epoch_key_pairs<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
EpochKey: traits::EpochKey<CURRENT_VERSION>,
|
||||
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
epoch: &EpochKey,
|
||||
leaf_index: u32,
|
||||
) -> Result<Vec<HpkeKeyPair>, Self::Error> {
|
||||
Ok(self.storage.encryption_epoch_key_pairs(group_id, epoch, leaf_index)?)
|
||||
}
|
||||
|
||||
fn key_package<
|
||||
KeyPackageRef: traits::HashReference<CURRENT_VERSION>,
|
||||
KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
hash_ref: &KeyPackageRef,
|
||||
) -> Result<Option<KeyPackage>, Self::Error> {
|
||||
Ok(self.storage.key_package(hash_ref)?)
|
||||
}
|
||||
|
||||
fn psk<
|
||||
PskBundle: traits::PskBundle<CURRENT_VERSION>,
|
||||
PskId: traits::PskId<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
psk_id: &PskId,
|
||||
) -> Result<Option<PskBundle>, Self::Error> {
|
||||
Ok(self.storage.psk(psk_id)?)
|
||||
}
|
||||
|
||||
// --- deleters (flush needed) ---
|
||||
|
||||
fn remove_proposal<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
proposal_ref: &ProposalRef,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.remove_proposal(group_id, proposal_ref)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_own_leaf_nodes<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_own_leaf_nodes(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_group_config<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_group_config(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_tree<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_tree(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_confirmation_tag<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_confirmation_tag(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_group_state<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_group_state(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_context<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_context(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_interim_transcript_hash<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_interim_transcript_hash(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_message_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_message_secrets(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_all_resumption_psk_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_all_resumption_psk_secrets(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_own_leaf_index<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_own_leaf_index(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_group_epoch_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_group_epoch_secrets(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn clear_proposal_queue<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.clear_proposal_queue::<GroupId, ProposalRef>(group_id)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_signature_key_pair<
|
||||
SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
public_key: &SignaturePublicKey,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_signature_key_pair(public_key)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_encryption_key_pair<EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>>(
|
||||
&self,
|
||||
public_key: &EncryptionKey,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_encryption_key_pair(public_key)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_encryption_epoch_key_pairs<
|
||||
GroupId: traits::GroupId<CURRENT_VERSION>,
|
||||
EpochKey: traits::EpochKey<CURRENT_VERSION>,
|
||||
>(
|
||||
&self,
|
||||
group_id: &GroupId,
|
||||
epoch: &EpochKey,
|
||||
leaf_index: u32,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_encryption_epoch_key_pairs(group_id, epoch, leaf_index)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_key_package<KeyPackageRef: traits::HashReference<CURRENT_VERSION>>(
|
||||
&self,
|
||||
hash_ref: &KeyPackageRef,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_key_package(hash_ref)?;
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn delete_psk<PskKey: traits::PskId<CURRENT_VERSION>>(
|
||||
&self,
|
||||
psk_id: &PskKey,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.storage.delete_psk(psk_id)?;
|
||||
self.flush()
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
//! Core cryptographic primitives, MLS group state machine, and hybrid
|
||||
//! post-quantum KEM for quicproquo.
|
||||
//! post-quantum KEM for quicprochat.
|
||||
//!
|
||||
//! # WASM support
|
||||
//!
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
use opaque_ke::CipherSuite;
|
||||
|
||||
/// OPAQUE cipher suite for quicproquo.
|
||||
/// OPAQUE cipher suite for quicprochat.
|
||||
///
|
||||
/// - **OPRF**: Ristretto255 (curve25519-based, ~128-bit security)
|
||||
/// - **Key exchange**: Triple-DH (3DH) over Ristretto255 with SHA-512
|
||||
@@ -48,7 +48,7 @@ use zeroize::Zeroizing;
|
||||
use crate::error::CoreError;
|
||||
|
||||
/// Domain separation label for the hybrid Noise handshake.
|
||||
const PROTOCOL_NAME: &[u8] = b"quicproquo-pq-noise-v1";
|
||||
const PROTOCOL_NAME: &[u8] = b"quicprochat-pq-noise-v1";
|
||||
|
||||
/// ML-KEM-768 encapsulation key length.
|
||||
const MLKEM_EK_LEN: usize = 1184;
|
||||
@@ -91,10 +91,10 @@ fn generate_code(rng: &mut impl RngCore) -> String {
|
||||
}
|
||||
|
||||
/// Derive a 32-byte recovery token from a code (used for server-side lookup).
|
||||
/// The token is `SHA-256("qpq-recovery-token:" || code)`.
|
||||
/// The token is `SHA-256("qpc-recovery-token:" || code)`.
|
||||
fn derive_recovery_token(code: &str) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(b"qpq-recovery-token:");
|
||||
hasher.update(b"qpc-recovery-token:");
|
||||
hasher.update(code.as_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
@@ -206,7 +206,7 @@ pub fn recover_from_bundle(
|
||||
|
||||
/// Compute the token hash for a recovery code (for server-side lookup).
|
||||
///
|
||||
/// This is `SHA-256(SHA-256("qpq-recovery-token:" || code))`.
|
||||
/// This is `SHA-256(SHA-256("qpc-recovery-token:" || code))`.
|
||||
pub fn recovery_token_hash(code: &str) -> Vec<u8> {
|
||||
let token = derive_recovery_token(code);
|
||||
Sha256::digest(token).to_vec()
|
||||
@@ -7,7 +7,7 @@
|
||||
//! 1. Sort the keys lexicographically so the result is symmetric.
|
||||
//! 2. Concatenate: `input = key_lo || key_hi` (64 bytes).
|
||||
//! 3. Compute HMAC-SHA256(key=info, data=input) where
|
||||
//! `info = b"quicproquo-safety-number-v1"`.
|
||||
//! `info = b"quicprochat-safety-number-v1"`.
|
||||
//! 4. Iterate the HMAC 5200 times: `hash = HMAC-SHA256(key=info, data=hash)`.
|
||||
//! 5. Interpret the 32-byte result as 4× 64-bit big-endian integers
|
||||
//! (= 256 bits → 4 groups of 64 bits). Extract 3 decimal groups per
|
||||
@@ -23,7 +23,7 @@ use sha2::Sha256;
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
/// Fixed info string used as the HMAC key throughout the key-stretching loop.
|
||||
const INFO: &[u8] = b"quicproquo-safety-number-v1";
|
||||
const INFO: &[u8] = b"quicprochat-safety-number-v1";
|
||||
|
||||
/// Compute a 60-digit safety number from two 32-byte Ed25519 public keys.
|
||||
///
|
||||
@@ -1,9 +1,10 @@
|
||||
[package]
|
||||
name = "quicproquo-kt"
|
||||
name = "quicprochat-kt"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
edition.workspace = true
|
||||
description = "Key Transparency: append-only SHA-256 Merkle log for (username, identity_key) bindings."
|
||||
license = "MIT"
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
49
crates/quicprochat-p2p/Cargo.toml
Normal file
49
crates/quicprochat-p2p/Cargo.toml
Normal file
@@ -0,0 +1,49 @@
|
||||
[package]
|
||||
name = "quicprochat-p2p"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
description = "P2P transport layer for quicprochat using iroh."
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository.workspace = true
|
||||
|
||||
[features]
|
||||
traffic-resistance = []
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
iroh = "0.96"
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "net", "io-util"] }
|
||||
async-trait = "0.1"
|
||||
tracing = "0.1"
|
||||
anyhow = "1"
|
||||
|
||||
# Mesh identity & store-and-forward
|
||||
quicprochat-core = { path = "../quicprochat-core", default-features = false }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
ciborium = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
|
||||
# Broadcast channels (ChaCha20-Poly1305 symmetric encryption)
|
||||
chacha20poly1305 = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
|
||||
# Lightweight mesh link handshake (X25519 ECDH + HKDF)
|
||||
x25519-dalek = { workspace = true }
|
||||
hkdf = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Configuration
|
||||
toml = "0.8"
|
||||
humantime-serde = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
|
||||
[[example]]
|
||||
name = "fapp_demo"
|
||||
path = "../../examples/fapp_demo.rs"
|
||||
96
crates/quicprochat-p2p/examples/mesh_lora_relay_demo.rs
Normal file
96
crates/quicprochat-p2p/examples/mesh_lora_relay_demo.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
//! Simulated mesh leg: **A (LoRa)** → **B (LoRa + TCP relay)** → **C (TCP)** → zurück über B → **A**.
|
||||
//!
|
||||
//! Uses [`quicprochat_p2p::transport_lora::LoRaMockMedium`] — keine Hardware.
|
||||
//!
|
||||
//! ```text
|
||||
//! Node A Node B Node C
|
||||
//! LoRa addr 0x01 LoRa 0x02 + TCP listen TCP (WiFi / LAN)
|
||||
//! │ │ │
|
||||
//! └──── LoRa ───────┘ │
|
||||
//! └──────── TCP ──────────────┘
|
||||
//! ```
|
||||
//!
|
||||
//! Run: `cargo run -p quicprochat-p2p --example mesh_lora_relay_demo`
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use quicprochat_p2p::transport::{MeshTransport, TransportAddr};
|
||||
use quicprochat_p2p::transport_lora::{DutyCycleTracker, LoRaConfig, LoRaMockMedium};
|
||||
use quicprochat_p2p::transport_tcp::TcpTransport;
|
||||
|
||||
const ADDR_A: [u8; 4] = [0x01, 0, 0, 0];
|
||||
const ADDR_B: [u8; 4] = [0x02, 0, 0, 0];
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let medium = LoRaMockMedium::new();
|
||||
let duty = Arc::new(DutyCycleTracker::new(3_600_000));
|
||||
|
||||
let lora_a = medium
|
||||
.connect(ADDR_A, LoRaConfig::default(), Arc::clone(&duty))
|
||||
.await?;
|
||||
let lora_b = medium
|
||||
.connect(ADDR_B, LoRaConfig::default(), Arc::clone(&duty))
|
||||
.await?;
|
||||
|
||||
let tcp_b = TcpTransport::bind("127.0.0.1:0").await?;
|
||||
let tcp_c = TcpTransport::bind("127.0.0.1:0").await?;
|
||||
|
||||
let c_listen = tcp_c.local_addr();
|
||||
let b_listen = tcp_b.local_addr();
|
||||
let c_addr = TransportAddr::Socket(c_listen);
|
||||
let b_addr = TransportAddr::Socket(b_listen);
|
||||
|
||||
println!(
|
||||
"LoRa mock mesh demo: B relays LoRa <-> TCP (B TCP {}, C TCP {})",
|
||||
b_listen, c_listen
|
||||
);
|
||||
|
||||
let relay = tokio::spawn(async move {
|
||||
for _ in 0..2 {
|
||||
tokio::select! {
|
||||
p = lora_b.recv() => {
|
||||
let p = p.expect("B LoRa recv");
|
||||
println!("B: LoRa from {} -> TCP ({} bytes)", p.from, p.data.len());
|
||||
tcp_b.send(&c_addr, &p.data).await.expect("B TCP send to C");
|
||||
}
|
||||
p = tcp_b.recv() => {
|
||||
let p = p.expect("B TCP recv");
|
||||
println!("B: TCP -> LoRa A ({} bytes)", p.data.len());
|
||||
lora_b
|
||||
.send(&TransportAddr::LoRa(ADDR_A), &p.data)
|
||||
.await
|
||||
.expect("B LoRa send to A");
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let c_task = tokio::spawn(async move {
|
||||
let pkt = tcp_c.recv().await.expect("C TCP recv");
|
||||
println!("C: got {} bytes from B relay", pkt.data.len());
|
||||
assert_eq!(pkt.data, b"hello via mesh");
|
||||
tcp_c
|
||||
.send(&b_addr, b"ack from C")
|
||||
.await
|
||||
.expect("C TCP send");
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
|
||||
lora_a
|
||||
.send(&TransportAddr::LoRa(ADDR_B), b"hello via mesh")
|
||||
.await?;
|
||||
|
||||
let reply = lora_a.recv().await?;
|
||||
println!("A: LoRa reply {} bytes", reply.data.len());
|
||||
assert_eq!(reply.data, b"ack from C");
|
||||
|
||||
c_task.await.expect("node C task panicked");
|
||||
relay.await.expect("relay task panicked");
|
||||
|
||||
lora_a.close().await.ok();
|
||||
println!("Done: LoRa + TCP relay path OK.");
|
||||
Ok(())
|
||||
}
|
||||
135
crates/quicprochat-p2p/src/address.rs
Normal file
135
crates/quicprochat-p2p/src/address.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
//! Truncated mesh addresses for bandwidth-efficient routing.
|
||||
//!
|
||||
//! A [`MeshAddress`] is derived from an Ed25519 public key by taking the first
|
||||
//! 16 bytes of its SHA-256 hash. This provides globally unique addressing
|
||||
//! (birthday collision at ~2^64) while saving 16 bytes per packet compared to
|
||||
//! full 32-byte public keys.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::fmt;
|
||||
|
||||
/// 16-byte truncated mesh address.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct MeshAddress([u8; 16]);
|
||||
|
||||
impl MeshAddress {
|
||||
/// Derive from a 32-byte Ed25519 public key.
|
||||
pub fn from_public_key(key: &[u8; 32]) -> Self {
|
||||
let hash = Sha256::digest(key);
|
||||
let mut addr = [0u8; 16];
|
||||
addr.copy_from_slice(&hash[..16]);
|
||||
Self(addr)
|
||||
}
|
||||
|
||||
/// Create from raw 16-byte array.
|
||||
pub fn from_bytes(bytes: [u8; 16]) -> Self {
|
||||
Self(bytes)
|
||||
}
|
||||
|
||||
/// Get the raw 16-byte address.
|
||||
pub fn as_bytes(&self) -> &[u8; 16] {
|
||||
&self.0
|
||||
}
|
||||
|
||||
/// Check if a 32-byte public key matches this address.
|
||||
pub fn matches_key(&self, key: &[u8; 32]) -> bool {
|
||||
Self::from_public_key(key) == *self
|
||||
}
|
||||
|
||||
/// The broadcast address (all zeros).
|
||||
pub const BROADCAST: Self = Self([0u8; 16]);
|
||||
|
||||
/// Check if this is the broadcast address.
|
||||
pub fn is_broadcast(&self) -> bool {
|
||||
self.0 == [0u8; 16]
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for MeshAddress {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "MeshAddress({})", hex::encode(self.0))
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for MeshAddress {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", hex::encode(&self.0[..8]))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<[u8; 16]> for MeshAddress {
|
||||
fn from(bytes: [u8; 16]) -> Self {
|
||||
Self(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8; 16]> for MeshAddress {
|
||||
fn as_ref(&self) -> &[u8; 16] {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn from_key_deterministic() {
|
||||
let key = [42u8; 32];
|
||||
let addr1 = MeshAddress::from_public_key(&key);
|
||||
let addr2 = MeshAddress::from_public_key(&key);
|
||||
assert_eq!(addr1, addr2, "same key must produce same address");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn different_keys_different_addresses() {
|
||||
let key_a = [1u8; 32];
|
||||
let key_b = [2u8; 32];
|
||||
let addr_a = MeshAddress::from_public_key(&key_a);
|
||||
let addr_b = MeshAddress::from_public_key(&key_b);
|
||||
assert_ne!(addr_a, addr_b, "different keys must produce different addresses");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn matches_key_works() {
|
||||
let key = [99u8; 32];
|
||||
let addr = MeshAddress::from_public_key(&key);
|
||||
assert!(addr.matches_key(&key), "correct key must match");
|
||||
|
||||
let wrong_key = [100u8; 32];
|
||||
assert!(!addr.matches_key(&wrong_key), "wrong key must not match");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn broadcast_address() {
|
||||
assert_eq!(*MeshAddress::BROADCAST.as_bytes(), [0u8; 16]);
|
||||
assert!(MeshAddress::BROADCAST.is_broadcast());
|
||||
|
||||
let non_broadcast = MeshAddress::from_bytes([1u8; 16]);
|
||||
assert!(!non_broadcast.is_broadcast());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn display_formatting() {
|
||||
let key = [0xAB; 32];
|
||||
let addr = MeshAddress::from_public_key(&key);
|
||||
let display = format!("{addr}");
|
||||
// Display shows first 8 bytes as hex = 16 hex chars.
|
||||
assert_eq!(display.len(), 16, "display should show 8 bytes = 16 hex chars");
|
||||
|
||||
let debug = format!("{addr:?}");
|
||||
// Debug shows all 16 bytes as hex = 32 hex chars, plus wrapper.
|
||||
assert!(debug.starts_with("MeshAddress("));
|
||||
assert!(debug.ends_with(')'));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serde_roundtrip() {
|
||||
let key = [77u8; 32];
|
||||
let addr = MeshAddress::from_public_key(&key);
|
||||
let json = serde_json::to_string(&addr).expect("serialize");
|
||||
let restored: MeshAddress = serde_json::from_str(&json).expect("deserialize");
|
||||
assert_eq!(addr, restored);
|
||||
}
|
||||
}
|
||||
316
crates/quicprochat-p2p/src/announce.rs
Normal file
316
crates/quicprochat-p2p/src/announce.rs
Normal file
@@ -0,0 +1,316 @@
|
||||
//! Mesh announce protocol for self-organizing network discovery.
|
||||
//!
|
||||
//! Nodes periodically broadcast signed [`MeshAnnounce`] packets. These propagate
|
||||
//! through the mesh, building each node's [`RoutingTable`](crate::routing_table::RoutingTable).
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::identity::MeshIdentity;
|
||||
|
||||
/// Capability flag: node can relay messages for others.
|
||||
pub const CAP_RELAY: u16 = 0x0001;
|
||||
/// Capability flag: node has store-and-forward.
|
||||
pub const CAP_STORE: u16 = 0x0002;
|
||||
/// Capability flag: node is connected to Internet/server.
|
||||
pub const CAP_GATEWAY: u16 = 0x0004;
|
||||
/// Capability flag: node is on a low-bandwidth transport only.
|
||||
pub const CAP_CONSTRAINED: u16 = 0x0008;
|
||||
/// Capability flag: node has KeyPackages available for MLS group invites.
|
||||
pub const CAP_MLS_READY: u16 = 0x0010;
|
||||
|
||||
/// A signed mesh node announcement.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct MeshAnnounce {
|
||||
/// Ed25519 public key of the announcing node (32 bytes).
|
||||
pub identity_key: Vec<u8>,
|
||||
/// Truncated address: SHA-256(identity_key)[0..16] — used for routing.
|
||||
pub address: [u8; 16],
|
||||
/// Capability bitfield.
|
||||
pub capabilities: u16,
|
||||
/// Monotonically increasing sequence number (per node).
|
||||
pub sequence: u64,
|
||||
/// Unix timestamp of creation.
|
||||
pub timestamp: u64,
|
||||
/// Transports this node is reachable on: Vec<(transport_name, serialized_addr)>.
|
||||
pub reachable_via: Vec<(String, Vec<u8>)>,
|
||||
/// Current hop count (incremented on re-broadcast).
|
||||
pub hop_count: u8,
|
||||
/// Maximum propagation hops.
|
||||
pub max_hops: u8,
|
||||
/// Optional hash of current KeyPackage (SHA-256, truncated to 8 bytes).
|
||||
/// Present when CAP_MLS_READY is set. Peers can request the full KeyPackage.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub keypackage_hash: Option<[u8; 8]>,
|
||||
/// Ed25519 signature over all fields except signature and hop_count.
|
||||
pub signature: Vec<u8>,
|
||||
}
|
||||
|
||||
/// Compute the 16-byte mesh address from an Ed25519 public key.
|
||||
///
|
||||
/// The address is the first 16 bytes of SHA-256(identity_key).
|
||||
pub fn compute_address(identity_key: &[u8]) -> [u8; 16] {
|
||||
let hash = Sha256::digest(identity_key);
|
||||
let mut addr = [0u8; 16];
|
||||
addr.copy_from_slice(&hash[..16]);
|
||||
addr
|
||||
}
|
||||
|
||||
/// Compute the 8-byte truncated hash of a KeyPackage for announce inclusion.
|
||||
///
|
||||
/// This hash is used to identify which KeyPackage version a node has available.
|
||||
pub fn compute_keypackage_hash(keypackage_bytes: &[u8]) -> [u8; 8] {
|
||||
let hash = Sha256::digest(keypackage_bytes);
|
||||
let mut kp_hash = [0u8; 8];
|
||||
kp_hash.copy_from_slice(&hash[..8]);
|
||||
kp_hash
|
||||
}
|
||||
|
||||
impl MeshAnnounce {
|
||||
/// Create and sign a new mesh announcement.
|
||||
pub fn new(
|
||||
identity: &MeshIdentity,
|
||||
capabilities: u16,
|
||||
reachable_via: Vec<(String, Vec<u8>)>,
|
||||
max_hops: u8,
|
||||
) -> Self {
|
||||
Self::with_keypackage(identity, capabilities, reachable_via, max_hops, None)
|
||||
}
|
||||
|
||||
/// Create announcement with an optional KeyPackage hash.
|
||||
pub fn with_keypackage(
|
||||
identity: &MeshIdentity,
|
||||
capabilities: u16,
|
||||
reachable_via: Vec<(String, Vec<u8>)>,
|
||||
max_hops: u8,
|
||||
keypackage_hash: Option<[u8; 8]>,
|
||||
) -> Self {
|
||||
let identity_key = identity.public_key().to_vec();
|
||||
let address = compute_address(&identity_key);
|
||||
let timestamp = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
|
||||
let mut announce = Self {
|
||||
identity_key,
|
||||
address,
|
||||
capabilities,
|
||||
sequence: 0,
|
||||
timestamp,
|
||||
reachable_via,
|
||||
hop_count: 0,
|
||||
max_hops,
|
||||
keypackage_hash,
|
||||
signature: Vec::new(),
|
||||
};
|
||||
|
||||
let signable = announce.signable_bytes();
|
||||
announce.signature = identity.sign(&signable).to_vec();
|
||||
announce
|
||||
}
|
||||
|
||||
/// Create and sign with a specific sequence number.
|
||||
pub fn with_sequence(
|
||||
identity: &MeshIdentity,
|
||||
capabilities: u16,
|
||||
reachable_via: Vec<(String, Vec<u8>)>,
|
||||
max_hops: u8,
|
||||
sequence: u64,
|
||||
) -> Self {
|
||||
let mut announce = Self::new(identity, capabilities, reachable_via, max_hops);
|
||||
announce.sequence = sequence;
|
||||
// Re-sign with the correct sequence number.
|
||||
let signable = announce.signable_bytes();
|
||||
announce.signature = identity.sign(&signable).to_vec();
|
||||
announce
|
||||
}
|
||||
|
||||
/// Assemble the byte string that is signed / verified.
|
||||
///
|
||||
/// `hop_count` and `signature` are excluded: forwarding nodes increment
|
||||
/// hop_count without re-signing (same design as [`MeshEnvelope`]).
|
||||
fn signable_bytes(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::with_capacity(
|
||||
self.identity_key.len() + 16 + 2 + 8 + 8 + self.reachable_via.len() * 32 + 1 + 9,
|
||||
);
|
||||
buf.extend_from_slice(&self.identity_key);
|
||||
buf.extend_from_slice(&self.address);
|
||||
buf.extend_from_slice(&self.capabilities.to_le_bytes());
|
||||
buf.extend_from_slice(&self.sequence.to_le_bytes());
|
||||
buf.extend_from_slice(&self.timestamp.to_le_bytes());
|
||||
for (name, addr) in &self.reachable_via {
|
||||
buf.extend_from_slice(name.as_bytes());
|
||||
buf.extend_from_slice(addr);
|
||||
}
|
||||
buf.push(self.max_hops);
|
||||
// Include keypackage_hash in signature if present
|
||||
if let Some(kp_hash) = &self.keypackage_hash {
|
||||
buf.push(1); // presence marker
|
||||
buf.extend_from_slice(kp_hash);
|
||||
} else {
|
||||
buf.push(0); // absence marker
|
||||
}
|
||||
buf
|
||||
}
|
||||
|
||||
/// Verify the Ed25519 signature on this announcement.
|
||||
pub fn verify(&self) -> bool {
|
||||
let identity_key: [u8; 32] = match self.identity_key.as_slice().try_into() {
|
||||
Ok(k) => k,
|
||||
Err(_) => return false,
|
||||
};
|
||||
let sig: [u8; 64] = match self.signature.as_slice().try_into() {
|
||||
Ok(s) => s,
|
||||
Err(_) => return false,
|
||||
};
|
||||
let signable = self.signable_bytes();
|
||||
quicprochat_core::IdentityKeypair::verify_raw(&identity_key, &signable, &sig).is_ok()
|
||||
}
|
||||
|
||||
/// Check whether this announce has expired relative to a maximum age.
|
||||
pub fn is_expired(&self, max_age_secs: u64) -> bool {
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
now.saturating_sub(self.timestamp) > max_age_secs
|
||||
}
|
||||
|
||||
/// Create a forwarded copy with `hop_count` incremented by one.
|
||||
///
|
||||
/// The signature remains the original — forwarding nodes do not re-sign.
|
||||
pub fn forwarded(&self) -> Self {
|
||||
let mut copy = self.clone();
|
||||
copy.hop_count = copy.hop_count.saturating_add(1);
|
||||
copy
|
||||
}
|
||||
|
||||
/// Whether this announce can still propagate (under hop limit and not expired).
|
||||
///
|
||||
/// Uses a generous default max age of 1800 seconds (30 minutes) for the
|
||||
/// expiry check. Callers that need a different max age should check
|
||||
/// [`is_expired`](Self::is_expired) separately.
|
||||
pub fn can_propagate(&self) -> bool {
|
||||
self.hop_count < self.max_hops && !self.is_expired(1800)
|
||||
}
|
||||
|
||||
/// Serialize to compact CBOR binary format (for wire transmission).
|
||||
pub fn to_wire(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(self, &mut buf).expect("CBOR serialization should not fail");
|
||||
buf
|
||||
}
|
||||
|
||||
/// Deserialize from CBOR binary format.
|
||||
pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
|
||||
let announce: Self = ciborium::from_reader(bytes)?;
|
||||
Ok(announce)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A fresh random identity for each test case.
    fn test_identity() -> MeshIdentity {
        MeshIdentity::generate()
    }

    #[test]
    fn create_and_verify() {
        let identity = test_identity();
        let routes = vec![("tcp".into(), b"127.0.0.1:9000".to_vec())];
        let ann = MeshAnnounce::new(&identity, CAP_RELAY | CAP_STORE, routes, 8);

        assert!(ann.verify(), "freshly created announce must verify");
        assert_eq!(ann.identity_key, identity.public_key().to_vec());
        assert_eq!(ann.capabilities, CAP_RELAY | CAP_STORE);
        assert_eq!(ann.hop_count, 0);
        assert_eq!(ann.max_hops, 8);
    }

    #[test]
    fn tampered_fails_verify() {
        let identity = test_identity();
        let mut ann = MeshAnnounce::new(&identity, CAP_RELAY, vec![], 4);
        // Flip a signed field after signing.
        ann.capabilities = CAP_GATEWAY;
        assert!(
            !ann.verify(),
            "tampered announce must fail verification"
        );
    }

    #[test]
    fn forwarded_still_verifies() {
        let identity = test_identity();
        let ann = MeshAnnounce::new(&identity, CAP_RELAY, vec![], 8);
        assert!(ann.verify());

        let once = ann.forwarded();
        assert_eq!(once.hop_count, 1);
        assert!(
            once.verify(),
            "forwarded announce must still verify (hop_count excluded from signature)"
        );

        let twice = once.forwarded();
        assert_eq!(twice.hop_count, 2);
        assert!(twice.verify(), "double-forwarded must still verify");
    }

    #[test]
    fn expired_announce() {
        let identity = test_identity();
        let mut ann = MeshAnnounce::new(&identity, 0, vec![], 4);
        // Epoch timestamp => maximally old announce.
        ann.timestamp = 0;
        assert!(ann.is_expired(60), "announce from epoch should be expired with 60s max age");
    }

    #[test]
    fn address_from_key_deterministic() {
        let key = [42u8; 32];
        assert_eq!(
            compute_address(&key),
            compute_address(&key),
            "same key must produce same address"
        );
        // A different key must map to a different address.
        assert_ne!(compute_address(&key), compute_address(&[99u8; 32]));
    }

    #[test]
    fn cbor_roundtrip() {
        let identity = test_identity();
        let routes = vec![
            ("tcp".into(), b"127.0.0.1:9000".to_vec()),
            ("lora".into(), vec![0x01, 0x02, 0x03, 0x04]),
        ];
        let ann = MeshAnnounce::new(&identity, CAP_RELAY | CAP_GATEWAY, routes, 6);

        let restored = MeshAnnounce::from_wire(&ann.to_wire()).expect("CBOR deserialize");

        assert_eq!(ann.identity_key, restored.identity_key);
        assert_eq!(ann.address, restored.address);
        assert_eq!(ann.capabilities, restored.capabilities);
        assert_eq!(ann.sequence, restored.sequence);
        assert_eq!(ann.timestamp, restored.timestamp);
        assert_eq!(ann.reachable_via, restored.reachable_via);
        assert_eq!(ann.hop_count, restored.hop_count);
        assert_eq!(ann.max_hops, restored.max_hops);
        assert_eq!(ann.signature, restored.signature);
        assert!(restored.verify());
    }
}
|
||||
302
crates/quicprochat-p2p/src/announce_protocol.rs
Normal file
302
crates/quicprochat-p2p/src/announce_protocol.rs
Normal file
@@ -0,0 +1,302 @@
|
||||
//! Announce protocol engine — sends, receives, and propagates mesh announcements.
|
||||
//!
|
||||
//! This module ties together [`MeshAnnounce`], [`RoutingTable`], and
|
||||
//! deduplication logic to form a complete announce processing pipeline.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::announce::MeshAnnounce;
|
||||
use crate::identity::MeshIdentity;
|
||||
use crate::routing_table::RoutingTable;
|
||||
use crate::transport::TransportAddr;
|
||||
|
||||
/// Configuration for the announce protocol.
#[derive(Clone, Debug)]
pub struct AnnounceConfig {
    /// Interval between periodic re-announcements.
    pub announce_interval: Duration,
    /// Maximum age before an announce is considered expired.
    pub max_announce_age: Duration,
    /// Maximum hops for announce propagation.
    pub max_hops: u8,
    /// This node's capabilities.
    pub capabilities: u16,
    /// Interval for routing table garbage collection.
    pub gc_interval: Duration,
}

impl Default for AnnounceConfig {
    fn default() -> Self {
        AnnounceConfig {
            announce_interval: Duration::from_secs(10 * 60), // 10 minutes
            max_announce_age: Duration::from_secs(30 * 60),  // 30 minutes
            max_hops: 8,
            capabilities: 0,
            gc_interval: Duration::from_secs(60),
        }
    }
}
|
||||
|
||||
/// Tracks which announces we've already seen (to prevent re-broadcast loops).
pub struct AnnounceDedup {
    /// Set of (address, sequence) pairs we've seen.
    seen: HashSet<([u8; 16], u64)>,
    /// Maximum entries before pruning.
    max_entries: usize,
}

impl AnnounceDedup {
    /// Create a new dedup tracker with the given capacity.
    pub fn new(max_entries: usize) -> Self {
        AnnounceDedup {
            seen: HashSet::new(),
            max_entries,
        }
    }

    /// Record an (address, sequence) pair, returning `true` when it has not
    /// been seen before and `false` when it is a duplicate.
    ///
    /// When the set is at capacity it is pruned (cleared) before the insert.
    pub fn is_new(&mut self, address: &[u8; 16], sequence: u64) -> bool {
        if self.seen.len() >= self.max_entries {
            self.prune();
        }
        // `HashSet::insert` returns false when the pair was already present.
        self.seen.insert((*address, sequence))
    }

    /// Drop all tracked entries.
    ///
    /// Uses a simple clear-all strategy; a more sophisticated implementation
    /// could track insertion order and evict oldest entries.
    pub fn prune(&mut self) {
        self.seen.clear();
    }
}
|
||||
|
||||
/// Create this node's own mesh announcement.
|
||||
pub fn create_announce(
|
||||
identity: &MeshIdentity,
|
||||
config: &AnnounceConfig,
|
||||
sequence: u64,
|
||||
reachable_via: Vec<(String, Vec<u8>)>,
|
||||
) -> MeshAnnounce {
|
||||
MeshAnnounce::with_sequence(
|
||||
identity,
|
||||
config.capabilities,
|
||||
reachable_via,
|
||||
config.max_hops,
|
||||
sequence,
|
||||
)
|
||||
}
|
||||
|
||||
/// Process a received mesh announcement.
|
||||
///
|
||||
/// Steps:
|
||||
/// 1. Verify signature — return `None` if invalid.
|
||||
/// 2. Check if expired — return `None` if stale.
|
||||
/// 3. Check dedup — return `None` if already seen.
|
||||
/// 4. Update routing table.
|
||||
/// 5. If `can_propagate` — return `Some(forwarded)` for re-broadcast.
|
||||
/// 6. Otherwise return `None`.
|
||||
pub fn process_received_announce(
|
||||
announce: &MeshAnnounce,
|
||||
routing_table: &mut RoutingTable,
|
||||
dedup: &mut AnnounceDedup,
|
||||
received_via: &str,
|
||||
received_from: TransportAddr,
|
||||
max_age: Duration,
|
||||
) -> Option<MeshAnnounce> {
|
||||
// 1. Verify signature.
|
||||
if !announce.verify() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// 2. Check expiry.
|
||||
if announce.is_expired(max_age.as_secs()) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// 3. Dedup check.
|
||||
if !dedup.is_new(&announce.address, announce.sequence) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// 4. Update routing table.
|
||||
routing_table.update(announce, received_via, received_from);
|
||||
|
||||
// 5. Check if the announce can propagate further.
|
||||
if announce.hop_count < announce.max_hops && !announce.is_expired(max_age.as_secs()) {
|
||||
Some(announce.forwarded())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::announce::CAP_RELAY;
    use crate::identity::MeshIdentity;

    fn test_identity() -> MeshIdentity {
        MeshIdentity::generate()
    }

    fn default_config() -> AnnounceConfig {
        AnnounceConfig {
            capabilities: CAP_RELAY,
            ..AnnounceConfig::default()
        }
    }

    /// Fresh routing table, dedup tracker, and peer address for a test run.
    fn fixture() -> (RoutingTable, AnnounceDedup, TransportAddr) {
        (
            RoutingTable::new(Duration::from_secs(300)),
            AnnounceDedup::new(1000),
            TransportAddr::Socket("127.0.0.1:9000".parse().unwrap()),
        )
    }

    #[test]
    fn create_announce_is_valid() {
        let identity = test_identity();
        let cfg = default_config();
        let routes = vec![("tcp".into(), b"127.0.0.1:9000".to_vec())];
        let ann = create_announce(&identity, &cfg, 1, routes);

        assert!(ann.verify());
        assert_eq!(ann.sequence, 1);
        assert_eq!(ann.capabilities, CAP_RELAY);
        assert_eq!(ann.max_hops, 8);
        assert_eq!(ann.hop_count, 0);
    }

    #[test]
    fn process_valid_announce_updates_table() {
        let identity = test_identity();
        let ann = create_announce(&identity, &default_config(), 1, vec![]);
        let (mut table, mut dedup, addr) = fixture();

        let result = process_received_announce(
            &ann,
            &mut table,
            &mut dedup,
            "tcp",
            addr,
            Duration::from_secs(1800),
        );

        // hop_count 0 < max_hops 8, so the announce should propagate.
        assert!(result.is_some());
        // And the routing table should have learned the route.
        assert_eq!(table.len(), 1);
    }

    #[test]
    fn process_duplicate_ignored() {
        let identity = test_identity();
        let ann = create_announce(&identity, &default_config(), 1, vec![]);
        let (mut table, mut dedup, addr) = fixture();

        // First delivery is accepted.
        let first = process_received_announce(
            &ann,
            &mut table,
            &mut dedup,
            "tcp",
            addr.clone(),
            Duration::from_secs(1800),
        );
        assert!(first.is_some());

        // Second delivery of the same (address, sequence) is dropped.
        let second = process_received_announce(
            &ann,
            &mut table,
            &mut dedup,
            "tcp",
            addr,
            Duration::from_secs(1800),
        );
        assert!(second.is_none());
    }

    #[test]
    fn process_expired_ignored() {
        let identity = test_identity();
        let mut ann = create_announce(&identity, &default_config(), 1, vec![]);
        // Epoch timestamp => far in the past.
        ann.timestamp = 0;
        let (mut table, mut dedup, addr) = fixture();

        let result = process_received_announce(
            &ann,
            &mut table,
            &mut dedup,
            "tcp",
            addr,
            Duration::from_secs(60),
        );
        assert!(result.is_none(), "expired announce must be ignored");
        assert!(table.is_empty());
    }

    #[test]
    fn process_invalid_sig_ignored() {
        let identity = test_identity();
        let mut ann = create_announce(&identity, &default_config(), 1, vec![]);
        // Tamper with a signed field to invalidate the signature.
        ann.capabilities = 0xFFFF;
        let (mut table, mut dedup, addr) = fixture();

        let result = process_received_announce(
            &ann,
            &mut table,
            &mut dedup,
            "tcp",
            addr,
            Duration::from_secs(1800),
        );
        assert!(result.is_none(), "tampered announce must be ignored");
        assert!(table.is_empty());
    }

    #[test]
    fn process_returns_forwarded_for_propagation() {
        let identity = test_identity();
        let ann = create_announce(&identity, &default_config(), 1, vec![]);
        assert_eq!(ann.hop_count, 0);
        let (mut table, mut dedup, addr) = fixture();

        let result = process_received_announce(
            &ann,
            &mut table,
            &mut dedup,
            "tcp",
            addr,
            Duration::from_secs(1800),
        );

        let forwarded = result.expect("should return forwarded announce");
        assert_eq!(forwarded.hop_count, 1);
        assert!(forwarded.verify(), "forwarded announce must still verify");
    }
}
|
||||
460
crates/quicprochat-p2p/src/config.rs
Normal file
460
crates/quicprochat-p2p/src/config.rs
Normal file
@@ -0,0 +1,460 @@
|
||||
//! Runtime configuration for mesh networking.
|
||||
//!
|
||||
//! This module provides centralized configuration with sensible defaults
|
||||
//! and validation. Configuration can be loaded from files, environment
|
||||
//! variables, or set programmatically.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::{ConfigError, MeshResult};
|
||||
use crate::transport::CryptoMode;
|
||||
|
||||
/// Top-level mesh node configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct MeshConfig {
|
||||
/// Node identity configuration.
|
||||
pub identity: IdentityConfig,
|
||||
/// Announce protocol configuration.
|
||||
pub announce: AnnounceConfig,
|
||||
/// Routing configuration.
|
||||
pub routing: RoutingConfig,
|
||||
/// Store-and-forward configuration.
|
||||
pub store: StoreConfig,
|
||||
/// Transport configuration.
|
||||
pub transport: TransportConfig,
|
||||
/// Crypto configuration.
|
||||
pub crypto: CryptoConfig,
|
||||
/// Rate limiting configuration.
|
||||
pub rate_limit: RateLimitConfig,
|
||||
/// Logging configuration.
|
||||
pub logging: LoggingConfig,
|
||||
}
|
||||
|
||||
impl Default for MeshConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
identity: IdentityConfig::default(),
|
||||
announce: AnnounceConfig::default(),
|
||||
routing: RoutingConfig::default(),
|
||||
store: StoreConfig::default(),
|
||||
transport: TransportConfig::default(),
|
||||
crypto: CryptoConfig::default(),
|
||||
rate_limit: RateLimitConfig::default(),
|
||||
logging: LoggingConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MeshConfig {
|
||||
/// Load configuration from a TOML file.
|
||||
pub fn from_file(path: &PathBuf) -> MeshResult<Self> {
|
||||
let content = std::fs::read_to_string(path).map_err(|e| {
|
||||
ConfigError::Parse(format!("failed to read config file: {}", e))
|
||||
})?;
|
||||
Self::from_toml(&content)
|
||||
}
|
||||
|
||||
/// Parse configuration from TOML string.
|
||||
pub fn from_toml(toml: &str) -> MeshResult<Self> {
|
||||
let config: Self = toml::from_str(toml).map_err(|e| {
|
||||
ConfigError::Parse(format!("TOML parse error: {}", e))
|
||||
})?;
|
||||
config.validate()?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Serialize to TOML string.
|
||||
pub fn to_toml(&self) -> MeshResult<String> {
|
||||
toml::to_string_pretty(self).map_err(|e| {
|
||||
ConfigError::Parse(format!("TOML serialize error: {}", e)).into()
|
||||
})
|
||||
}
|
||||
|
||||
/// Validate configuration values.
|
||||
pub fn validate(&self) -> MeshResult<()> {
|
||||
self.announce.validate()?;
|
||||
self.routing.validate()?;
|
||||
self.store.validate()?;
|
||||
self.rate_limit.validate()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a minimal config for constrained devices.
|
||||
pub fn constrained() -> Self {
|
||||
Self {
|
||||
store: StoreConfig {
|
||||
max_messages: 100,
|
||||
max_keypackages: 50,
|
||||
..Default::default()
|
||||
},
|
||||
routing: RoutingConfig {
|
||||
max_entries: 100,
|
||||
..Default::default()
|
||||
},
|
||||
announce: AnnounceConfig {
|
||||
interval: Duration::from_secs(1800), // 30 min
|
||||
..Default::default()
|
||||
},
|
||||
crypto: CryptoConfig {
|
||||
default_mode: CryptoMode::MlsLiteUnsigned,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Identity configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct IdentityConfig {
|
||||
/// Path to persist identity keypair.
|
||||
pub keypair_path: Option<PathBuf>,
|
||||
/// Whether to auto-generate keypair if missing.
|
||||
pub auto_generate: bool,
|
||||
}
|
||||
|
||||
impl Default for IdentityConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
keypair_path: None,
|
||||
auto_generate: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Announce protocol configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct AnnounceConfig {
|
||||
/// Interval between periodic announcements.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub interval: Duration,
|
||||
/// Maximum age before announce is considered stale.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub max_age: Duration,
|
||||
/// Maximum propagation hops.
|
||||
pub max_hops: u8,
|
||||
/// Capabilities to advertise.
|
||||
pub capabilities: u16,
|
||||
/// Whether to include KeyPackage hash in announces.
|
||||
pub include_keypackage: bool,
|
||||
}
|
||||
|
||||
impl Default for AnnounceConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
interval: Duration::from_secs(600), // 10 min
|
||||
max_age: Duration::from_secs(1800), // 30 min
|
||||
max_hops: 8,
|
||||
capabilities: 0x0003, // CAP_RELAY | CAP_STORE
|
||||
include_keypackage: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AnnounceConfig {
|
||||
fn validate(&self) -> MeshResult<()> {
|
||||
if self.interval < Duration::from_secs(10) {
|
||||
return Err(ConfigError::InvalidValue {
|
||||
key: "announce.interval".to_string(),
|
||||
reason: "must be at least 10 seconds".to_string(),
|
||||
}.into());
|
||||
}
|
||||
if self.max_hops == 0 || self.max_hops > 32 {
|
||||
return Err(ConfigError::InvalidValue {
|
||||
key: "announce.max_hops".to_string(),
|
||||
reason: "must be between 1 and 32".to_string(),
|
||||
}.into());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Routing configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct RoutingConfig {
|
||||
/// Maximum routing table entries.
|
||||
pub max_entries: usize,
|
||||
/// Default route TTL.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub default_ttl: Duration,
|
||||
/// How often to garbage collect expired routes.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub gc_interval: Duration,
|
||||
}
|
||||
|
||||
impl Default for RoutingConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_entries: 10_000,
|
||||
default_ttl: Duration::from_secs(1800), // 30 min
|
||||
gc_interval: Duration::from_secs(60),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RoutingConfig {
|
||||
fn validate(&self) -> MeshResult<()> {
|
||||
if self.max_entries == 0 {
|
||||
return Err(ConfigError::InvalidValue {
|
||||
key: "routing.max_entries".to_string(),
|
||||
reason: "must be at least 1".to_string(),
|
||||
}.into());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Store-and-forward configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct StoreConfig {
|
||||
/// Maximum messages in store.
|
||||
pub max_messages: usize,
|
||||
/// Maximum messages per recipient.
|
||||
pub max_per_recipient: usize,
|
||||
/// Maximum cached KeyPackages.
|
||||
pub max_keypackages: usize,
|
||||
/// Maximum KeyPackages per address.
|
||||
pub max_keypackages_per_addr: usize,
|
||||
/// Default message TTL.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub default_ttl: Duration,
|
||||
/// Path for persistent storage (None = in-memory only).
|
||||
pub persistence_path: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl Default for StoreConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_messages: 10_000,
|
||||
max_per_recipient: 100,
|
||||
max_keypackages: 1_000,
|
||||
max_keypackages_per_addr: 3,
|
||||
default_ttl: Duration::from_secs(24 * 3600), // 24 hours
|
||||
persistence_path: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StoreConfig {
|
||||
fn validate(&self) -> MeshResult<()> {
|
||||
if self.max_messages == 0 {
|
||||
return Err(ConfigError::InvalidValue {
|
||||
key: "store.max_messages".to_string(),
|
||||
reason: "must be at least 1".to_string(),
|
||||
}.into());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Transport configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct TransportConfig {
|
||||
/// Enable iroh/QUIC transport.
|
||||
pub enable_iroh: bool,
|
||||
/// Enable TCP transport.
|
||||
pub enable_tcp: bool,
|
||||
/// TCP listen address.
|
||||
pub tcp_listen: Option<String>,
|
||||
/// Enable LoRa transport.
|
||||
pub enable_lora: bool,
|
||||
/// LoRa device path (e.g., /dev/ttyUSB0).
|
||||
pub lora_device: Option<String>,
|
||||
/// LoRa spreading factor (7-12).
|
||||
pub lora_sf: u8,
|
||||
/// LoRa bandwidth in kHz.
|
||||
pub lora_bw: u32,
|
||||
/// Connection timeout.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub connect_timeout: Duration,
|
||||
/// Send timeout.
|
||||
#[serde(with = "humantime_serde")]
|
||||
pub send_timeout: Duration,
|
||||
}
|
||||
|
||||
impl Default for TransportConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enable_iroh: true,
|
||||
enable_tcp: true,
|
||||
tcp_listen: None,
|
||||
enable_lora: false,
|
||||
lora_device: None,
|
||||
lora_sf: 10,
|
||||
lora_bw: 125,
|
||||
connect_timeout: Duration::from_secs(10),
|
||||
send_timeout: Duration::from_secs(30),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Crypto configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct CryptoConfig {
|
||||
/// Default crypto mode.
|
||||
pub default_mode: CryptoMode,
|
||||
/// Whether to auto-upgrade to better crypto when available.
|
||||
pub auto_upgrade: bool,
|
||||
/// Whether to sign MLS-Lite messages.
|
||||
pub mls_lite_sign: bool,
|
||||
/// Enable post-quantum hybrid mode.
|
||||
pub enable_pq: bool,
|
||||
}
|
||||
|
||||
impl Default for CryptoConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
default_mode: CryptoMode::MlsClassical,
|
||||
auto_upgrade: true,
|
||||
mls_lite_sign: true,
|
||||
enable_pq: false, // PQ is large, opt-in
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Rate limiting configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct RateLimitConfig {
|
||||
/// Maximum announces per peer per minute.
|
||||
pub announce_per_peer_per_min: u32,
|
||||
/// Maximum messages per peer per minute.
|
||||
pub message_per_peer_per_min: u32,
|
||||
/// Maximum KeyPackage requests per minute.
|
||||
pub keypackage_requests_per_min: u32,
|
||||
/// LoRa duty cycle limit (0.0-1.0, e.g., 0.01 = 1%).
|
||||
pub lora_duty_cycle: f32,
|
||||
}
|
||||
|
||||
impl Default for RateLimitConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
announce_per_peer_per_min: 10,
|
||||
message_per_peer_per_min: 60,
|
||||
keypackage_requests_per_min: 20,
|
||||
lora_duty_cycle: 0.01, // EU868 1% default
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RateLimitConfig {
|
||||
fn validate(&self) -> MeshResult<()> {
|
||||
if self.lora_duty_cycle < 0.0 || self.lora_duty_cycle > 1.0 {
|
||||
return Err(ConfigError::InvalidValue {
|
||||
key: "rate_limit.lora_duty_cycle".to_string(),
|
||||
reason: "must be between 0.0 and 1.0".to_string(),
|
||||
}.into());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Logging configuration.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct LoggingConfig {
|
||||
/// Log level (trace, debug, info, warn, error).
|
||||
pub level: String,
|
||||
/// Whether to log to file.
|
||||
pub file: Option<PathBuf>,
|
||||
/// Whether to include timestamps.
|
||||
pub timestamps: bool,
|
||||
/// Whether to include span context.
|
||||
pub spans: bool,
|
||||
}
|
||||
|
||||
impl Default for LoggingConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
level: "info".to_string(),
|
||||
file: None,
|
||||
timestamps: true,
|
||||
spans: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Serde helper for CryptoMode
|
||||
impl Serialize for CryptoMode {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
let s = match self {
|
||||
CryptoMode::MlsHybrid => "mls-hybrid",
|
||||
CryptoMode::MlsClassical => "mls-classical",
|
||||
CryptoMode::MlsLiteSigned => "mls-lite-signed",
|
||||
CryptoMode::MlsLiteUnsigned => "mls-lite-unsigned",
|
||||
};
|
||||
serializer.serialize_str(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for CryptoMode {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let s = String::deserialize(deserializer)?;
|
||||
match s.as_str() {
|
||||
"mls-hybrid" => Ok(CryptoMode::MlsHybrid),
|
||||
"mls-classical" => Ok(CryptoMode::MlsClassical),
|
||||
"mls-lite-signed" => Ok(CryptoMode::MlsLiteSigned),
|
||||
"mls-lite-unsigned" => Ok(CryptoMode::MlsLiteUnsigned),
|
||||
_ => Err(serde::de::Error::unknown_variant(
|
||||
&s,
|
||||
&["mls-hybrid", "mls-classical", "mls-lite-signed", "mls-lite-unsigned"],
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn default_config_is_valid() {
        assert!(MeshConfig::default().validate().is_ok());
    }

    #[test]
    fn constrained_config_is_valid() {
        let cfg = MeshConfig::constrained();
        assert!(cfg.validate().is_ok());
        assert_eq!(cfg.store.max_messages, 100);
    }

    #[test]
    fn toml_roundtrip() {
        let cfg = MeshConfig::default();
        let text = cfg.to_toml().expect("serialize");
        let restored = MeshConfig::from_toml(&text).expect("parse");
        assert_eq!(cfg.announce.max_hops, restored.announce.max_hops);
    }

    #[test]
    fn invalid_announce_interval() {
        let mut cfg = MeshConfig::default();
        cfg.announce.interval = Duration::from_secs(1); // below the 10s floor
        assert!(cfg.validate().is_err());
    }

    #[test]
    fn invalid_duty_cycle() {
        let mut cfg = MeshConfig::default();
        cfg.rate_limit.lora_duty_cycle = 2.0; // outside 0.0..=1.0
        assert!(cfg.validate().is_err());
    }
}
|
||||
337
crates/quicprochat-p2p/src/crypto_negotiation.rs
Normal file
337
crates/quicprochat-p2p/src/crypto_negotiation.rs
Normal file
@@ -0,0 +1,337 @@
|
||||
//! Crypto mode negotiation and upgrade path.
|
||||
//!
|
||||
//! This module handles transitions between crypto modes based on transport
|
||||
//! capability. Groups can upgrade from MLS-Lite to full MLS when a
|
||||
//! higher-bandwidth transport becomes available.
|
||||
//!
|
||||
//! # Upgrade Path
|
||||
//!
|
||||
//! ```text
|
||||
//! MLS-Lite (constrained) → Full MLS (when high-bandwidth available)
|
||||
//!
|
||||
//! 1. Group running MLS-Lite over LoRa
|
||||
//! 2. Member connects via WiFi/QUIC
|
||||
//! 3. Member sends MLS KeyPackage over fast link
|
||||
//! 4. Creator imports MLS-Lite members into MLS group
|
||||
//! 5. Sends MLS Welcome + epoch secret derivation
|
||||
//! 6. Group transitions to full MLS (can still use LoRa for app messages)
|
||||
//! ```
|
||||
//!
|
||||
//! # Security Considerations
|
||||
//!
|
||||
//! - Upgrade requires re-keying (new epoch in MLS)
|
||||
//! - Cannot downgrade without explicit action (security property)
|
||||
//! - MLS-Lite epoch secret can be derived from MLS export
|
||||
|
||||
use crate::mls_lite::MlsLiteGroup;
|
||||
use crate::transport::{CryptoMode, TransportCapability};
|
||||
|
||||
/// State of a group's crypto negotiation.
///
/// Tracks which end-to-end encryption scheme a group currently uses and
/// whether a transition between schemes is in progress. See the module
/// docs for the MLS-Lite → full-MLS upgrade path.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum GroupCryptoState {
    /// Group uses MLS-Lite with pre-shared key.
    MlsLite {
        // Compact 8-byte ID, sized for constrained links.
        group_id: [u8; 8],
        // u16 epoch keeps the wire representation small.
        epoch: u16,
        // When true the group runs in the signed MLS-Lite mode.
        signed: bool,
    },
    /// Group uses full MLS.
    FullMls {
        // Full MLS group ID is variable-length.
        group_id: Vec<u8>,
        epoch: u64,
        // True when the hybrid post-quantum ciphersuite is active.
        hybrid_pq: bool,
    },
    /// Group is upgrading from MLS-Lite to full MLS.
    Upgrading {
        // The MLS-Lite group being migrated away from.
        lite_group_id: [u8; 8],
        // MLS-Lite epoch at the time the upgrade started.
        lite_epoch: u16,
        // Target full-MLS group ID.
        mls_group_id: Vec<u8>,
    },
}
|
||||
|
||||
impl GroupCryptoState {
|
||||
/// Current crypto mode.
|
||||
pub fn mode(&self) -> CryptoMode {
|
||||
match self {
|
||||
Self::MlsLite { signed: true, .. } => CryptoMode::MlsLiteSigned,
|
||||
Self::MlsLite { signed: false, .. } => CryptoMode::MlsLiteUnsigned,
|
||||
Self::FullMls { hybrid_pq: true, .. } => CryptoMode::MlsHybrid,
|
||||
Self::FullMls { hybrid_pq: false, .. } => CryptoMode::MlsClassical,
|
||||
Self::Upgrading { .. } => CryptoMode::MlsClassical, // Upgrading assumes MLS available
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if upgrade to full MLS is possible.
|
||||
pub fn can_upgrade(&self, available_capability: TransportCapability) -> bool {
|
||||
match self {
|
||||
Self::MlsLite { .. } => available_capability.supports_mls(),
|
||||
Self::FullMls { hybrid_pq: false, .. } => {
|
||||
// Can upgrade from classical MLS to hybrid if unconstrained
|
||||
available_capability == TransportCapability::Unconstrained
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this state supports the given transport capability.
|
||||
pub fn compatible_with(&self, capability: TransportCapability) -> bool {
|
||||
match self {
|
||||
Self::MlsLite { .. } => true, // MLS-Lite works on all transports
|
||||
Self::FullMls { hybrid_pq: true, .. } => {
|
||||
capability == TransportCapability::Unconstrained
|
||||
}
|
||||
Self::FullMls { hybrid_pq: false, .. } => capability.supports_mls(),
|
||||
Self::Upgrading { .. } => capability.supports_mls(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parameters for deriving MLS-Lite key from MLS epoch secret.
///
/// This enables bootstrapping MLS-Lite from an existing MLS group.
/// The group ID and epoch are mixed into the HKDF `info` input for
/// domain separation (see [`MlsLiteBootstrap::derive_lite_secret`]).
#[derive(Clone, Debug)]
pub struct MlsLiteBootstrap {
    /// MLS group ID (for domain separation).
    pub mls_group_id: Vec<u8>,
    /// MLS epoch from which to derive.
    pub mls_epoch: u64,
    /// Label for HKDF derivation.
    // Normally [`MlsLiteBootstrap::LABEL`]; `new` always uses it.
    pub label: &'static str,
}
|
||||
|
||||
impl MlsLiteBootstrap {
    /// Standard label for MLS-Lite derivation.
    pub const LABEL: &'static str = "quicprochat-mls-lite-from-mls";

    /// Create bootstrap parameters from MLS group state.
    ///
    /// Always uses the standard [`Self::LABEL`].
    pub fn new(mls_group_id: Vec<u8>, mls_epoch: u64) -> Self {
        Self {
            mls_group_id,
            mls_epoch,
            label: Self::LABEL,
        }
    }

    /// Derive an MLS-Lite group secret from MLS epoch secret.
    ///
    /// Uses HKDF with the epoch secret as input keying material.
    ///
    /// Deterministic: the same (group ID, epoch, epoch secret) triple
    /// always yields the same 32-byte secret, so every member can derive
    /// it independently. A fixed salt provides protocol-level domain
    /// separation; group ID and epoch are bound via the HKDF `info`.
    pub fn derive_lite_secret(&self, mls_epoch_secret: &[u8]) -> [u8; 32] {
        use hkdf::Hkdf;
        use sha2::Sha256;

        let salt = b"quicprochat-mls-lite-bootstrap-v1";
        let hk = Hkdf::<Sha256>::new(Some(salt), mls_epoch_secret);

        // info = group_id || epoch (8 bytes, big-endian) || label.
        // Unambiguous because epoch and label have fixed lengths.
        let mut info = Vec::with_capacity(self.mls_group_id.len() + 8 + self.label.len());
        info.extend_from_slice(&self.mls_group_id);
        info.extend_from_slice(&self.mls_epoch.to_be_bytes());
        info.extend_from_slice(self.label.as_bytes());

        let mut secret = [0u8; 32];
        hk.expand(&info, &mut secret)
            // 32 bytes is far below the HKDF-SHA256 output limit (8160).
            .expect("HKDF expand should not fail");
        secret
    }

    /// Derive MLS-Lite group ID from MLS group ID.
    ///
    /// Deterministic 8-byte truncation of SHA-256 over a fixed prefix,
    /// the MLS group ID, and the epoch, so the derived ID changes with
    /// each MLS epoch.
    pub fn derive_lite_group_id(&self) -> [u8; 8] {
        use sha2::{Digest, Sha256};

        let mut hasher = Sha256::new();
        // Fixed prefix for domain separation from other hash uses.
        hasher.update(b"mls-lite-group-id:");
        hasher.update(&self.mls_group_id);
        hasher.update(&self.mls_epoch.to_be_bytes());
        let hash = hasher.finalize();

        let mut id = [0u8; 8];
        id.copy_from_slice(&hash[..8]);
        id
    }
}
|
||||
|
||||
/// Create an MLS-Lite group derived from MLS epoch secret.
|
||||
///
|
||||
/// This enables constrained-link fallback for established MLS groups.
|
||||
pub fn create_lite_from_mls(
|
||||
mls_group_id: &[u8],
|
||||
mls_epoch: u64,
|
||||
mls_epoch_secret: &[u8],
|
||||
) -> MlsLiteGroup {
|
||||
let bootstrap = MlsLiteBootstrap::new(mls_group_id.to_vec(), mls_epoch);
|
||||
let lite_secret = bootstrap.derive_lite_secret(mls_epoch_secret);
|
||||
let lite_group_id = bootstrap.derive_lite_group_id();
|
||||
|
||||
MlsLiteGroup::new(lite_group_id, &lite_secret, 0)
|
||||
}
|
||||
|
||||
/// Upgrade request message sent when initiating MLS upgrade.
///
/// Sent by an MLS-Lite member over a high-bandwidth link to ask the
/// group creator to import it into a full MLS group (step 3 of the
/// upgrade path in the module docs).
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct UpgradeRequest {
    /// MLS-Lite group being upgraded.
    pub lite_group_id: [u8; 8],
    /// Current MLS-Lite epoch.
    pub lite_epoch: u16,
    /// Requester's MLS KeyPackage.
    // Opaque serialized bytes; parsed by the MLS layer, not here.
    pub keypackage: Vec<u8>,
}
|
||||
|
||||
/// Upgrade response with MLS Welcome for the upgrading member.
///
/// Answers an [`UpgradeRequest`] (step 5 of the upgrade path in the
/// module docs).
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct UpgradeResponse {
    /// MLS-Lite group being upgraded.
    pub lite_group_id: [u8; 8],
    /// New MLS group ID.
    pub mls_group_id: Vec<u8>,
    /// MLS Welcome message for the requesting member.
    // Opaque serialized bytes; parsed by the MLS layer, not here.
    pub mls_welcome: Vec<u8>,
    /// Derived MLS-Lite secret for constrained links (optional).
    /// Allows continued MLS-Lite operation alongside full MLS.
    pub derived_lite_secret: Option<[u8; 32]>,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Each GroupCryptoState variant must map to the matching CryptoMode.
    #[test]
    fn crypto_state_modes() {
        let lite_unsigned = GroupCryptoState::MlsLite {
            group_id: [0; 8],
            epoch: 0,
            signed: false,
        };
        assert_eq!(lite_unsigned.mode(), CryptoMode::MlsLiteUnsigned);

        let lite_signed = GroupCryptoState::MlsLite {
            group_id: [0; 8],
            epoch: 0,
            signed: true,
        };
        assert_eq!(lite_signed.mode(), CryptoMode::MlsLiteSigned);

        let mls_classical = GroupCryptoState::FullMls {
            group_id: vec![1, 2, 3],
            epoch: 5,
            hybrid_pq: false,
        };
        assert_eq!(mls_classical.mode(), CryptoMode::MlsClassical);

        let mls_hybrid = GroupCryptoState::FullMls {
            group_id: vec![1, 2, 3],
            epoch: 5,
            hybrid_pq: true,
        };
        assert_eq!(mls_hybrid.mode(), CryptoMode::MlsHybrid);
    }

    // MLS-Lite → full MLS upgrade is gated on transport capability.
    #[test]
    fn can_upgrade_from_lite() {
        let lite = GroupCryptoState::MlsLite {
            group_id: [0; 8],
            epoch: 0,
            signed: true,
        };

        // Can upgrade with unconstrained transport
        assert!(lite.can_upgrade(TransportCapability::Unconstrained));
        assert!(lite.can_upgrade(TransportCapability::Medium));

        // Cannot upgrade with constrained transport
        assert!(!lite.can_upgrade(TransportCapability::Constrained));
        assert!(!lite.can_upgrade(TransportCapability::SeverelyConstrained));
    }

    // Classical → PQ-hybrid only on an unconstrained link.
    #[test]
    fn can_upgrade_classical_to_hybrid() {
        let classical = GroupCryptoState::FullMls {
            group_id: vec![1, 2, 3],
            epoch: 5,
            hybrid_pq: false,
        };

        assert!(classical.can_upgrade(TransportCapability::Unconstrained));
        assert!(!classical.can_upgrade(TransportCapability::Medium));
    }

    // HKDF derivation must be deterministic and epoch-sensitive.
    #[test]
    fn bootstrap_derivation() {
        let mls_group_id = b"test-mls-group".to_vec();
        let mls_epoch = 42u64;
        let mls_secret = [0x42u8; 32];

        let bootstrap = MlsLiteBootstrap::new(mls_group_id.clone(), mls_epoch);

        // Secret derivation should be deterministic
        let secret1 = bootstrap.derive_lite_secret(&mls_secret);
        let secret2 = bootstrap.derive_lite_secret(&mls_secret);
        assert_eq!(secret1, secret2);

        // Different epoch should give different secret
        let bootstrap2 = MlsLiteBootstrap::new(mls_group_id, mls_epoch + 1);
        let secret3 = bootstrap2.derive_lite_secret(&mls_secret);
        assert_ne!(secret1, secret3);

        // Group ID derivation
        let lite_id = bootstrap.derive_lite_group_id();
        assert_eq!(lite_id.len(), 8);
    }

    // Two peers deriving from the same MLS state must be able to
    // encrypt/decrypt to each other without any key exchange.
    #[test]
    fn create_lite_from_mls_works() {
        let mls_group_id = b"mls-group-123".to_vec();
        let mls_epoch = 10;
        let mls_secret = [0xABu8; 32];

        let lite_group = create_lite_from_mls(&mls_group_id, mls_epoch, &mls_secret);

        // Should be able to encrypt/decrypt
        let mut alice = lite_group;
        let mut bob = create_lite_from_mls(&mls_group_id, mls_epoch, &mls_secret);

        let (ct, nonce, _seq) = alice.encrypt(b"hello from alice").expect("encrypt");

        use crate::address::MeshAddress;
        let alice_addr = MeshAddress::from_bytes([0xAA; 16]);

        match bob.decrypt(&ct, &nonce, alice_addr) {
            crate::mls_lite::DecryptResult::Success(pt) => {
                assert_eq!(pt, b"hello from alice");
            }
            other => panic!("expected Success, got {other:?}"),
        }
    }

    // Transport compatibility matrix for each crypto state.
    #[test]
    fn compatibility_check() {
        let lite = GroupCryptoState::MlsLite {
            group_id: [0; 8],
            epoch: 0,
            signed: true,
        };

        // MLS-Lite works on all transports
        assert!(lite.compatible_with(TransportCapability::Unconstrained));
        assert!(lite.compatible_with(TransportCapability::SeverelyConstrained));

        let mls_hybrid = GroupCryptoState::FullMls {
            group_id: vec![1],
            epoch: 1,
            hybrid_pq: true,
        };

        // PQ-hybrid only works on unconstrained
        assert!(mls_hybrid.compatible_with(TransportCapability::Unconstrained));
        assert!(!mls_hybrid.compatible_with(TransportCapability::Medium));

        let mls_classical = GroupCryptoState::FullMls {
            group_id: vec![1],
            epoch: 1,
            hybrid_pq: false,
        };

        // Classical MLS works on medium+
        assert!(mls_classical.compatible_with(TransportCapability::Unconstrained));
        assert!(mls_classical.compatible_with(TransportCapability::Medium));
        assert!(!mls_classical.compatible_with(TransportCapability::Constrained));
    }
}
|
||||
@@ -149,7 +149,7 @@ impl MeshEnvelope {
|
||||
self.max_hops,
|
||||
self.timestamp,
|
||||
);
|
||||
quicproquo_core::IdentityKeypair::verify_raw(&sender_key, &signable, &sig).is_ok()
|
||||
quicprochat_core::IdentityKeypair::verify_raw(&sender_key, &signable, &sig).is_ok()
|
||||
}
|
||||
|
||||
/// Check whether this envelope has expired (TTL elapsed since timestamp).
|
||||
@@ -176,13 +176,31 @@ impl MeshEnvelope {
|
||||
copy
|
||||
}
|
||||
|
||||
/// Serialize to bytes (JSON).
|
||||
/// Serialize to compact CBOR binary format (for wire transmission).
///
/// Counterpart of [`MeshEnvelope::from_wire`]. Infallible: a
/// well-formed envelope always serializes.
pub fn to_wire(&self) -> Vec<u8> {
    let mut buf = Vec::new();
    // Writing to an in-memory Vec cannot fail for serializable data.
    ciborium::into_writer(self, &mut buf).expect("CBOR serialization should not fail");
    buf
}
|
||||
|
||||
/// Deserialize from CBOR binary format.
///
/// # Errors
///
/// Returns an error when `bytes` is not valid CBOR for this type
/// (never panics on garbage input).
pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
    let env: Self = ciborium::from_reader(bytes)?;
    Ok(env)
}
|
||||
|
||||
/// Deserialize from wire format, trying CBOR first then JSON fallback.
///
/// Keeps compatibility with peers still sending the legacy JSON
/// encoding (see [`MeshEnvelope::to_bytes`]).
pub fn from_wire_or_json(bytes: &[u8]) -> anyhow::Result<Self> {
    Self::from_wire(bytes).or_else(|_| Self::from_bytes(bytes))
}
|
||||
|
||||
/// Serialize to bytes (JSON). Kept for backward compatibility and debugging.
///
/// New code should prefer the compact [`MeshEnvelope::to_wire`] CBOR
/// encoding.
pub fn to_bytes(&self) -> Vec<u8> {
    // serde_json::to_vec should not fail on a well-formed envelope.
    serde_json::to_vec(self).expect("envelope serialization should not fail")
}
|
||||
|
||||
/// Deserialize from bytes (JSON).
|
||||
/// Deserialize from bytes (JSON). Kept for backward compatibility and debugging.
|
||||
pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
|
||||
let env: Self = serde_json::from_slice(bytes)?;
|
||||
Ok(env)
|
||||
@@ -293,4 +311,128 @@ mod tests {
|
||||
assert!(env.recipient_key.is_empty());
|
||||
assert!(env.verify());
|
||||
}
|
||||
|
||||
// CBOR encode/decode must preserve every field, and the restored
// envelope must still pass signature verification.
#[test]
fn cbor_roundtrip() {
    let id = test_identity();
    let recipient = [0xABu8; 32];
    let env = MeshEnvelope::new(&id, &recipient, b"cbor roundtrip".to_vec(), 3600, 5);

    let wire = env.to_wire();
    let restored = MeshEnvelope::from_wire(&wire).expect("CBOR deserialize");

    assert_eq!(env.id, restored.id);
    assert_eq!(env.sender_key, restored.sender_key);
    assert_eq!(env.recipient_key, restored.recipient_key);
    assert_eq!(env.payload, restored.payload);
    assert_eq!(env.ttl_secs, restored.ttl_secs);
    assert_eq!(env.hop_count, restored.hop_count);
    assert_eq!(env.max_hops, restored.max_hops);
    assert_eq!(env.timestamp, restored.timestamp);
    assert_eq!(env.signature, restored.signature);
    assert!(restored.verify());
}
|
||||
|
||||
// CBOR is the point of the wire format: it must be materially smaller
// than the legacy JSON encoding for a typical message.
#[test]
fn cbor_smaller_than_json() {
    let id = test_identity();
    let recipient = [0xCCu8; 32];
    let payload = b"a typical chat message for size comparison testing".to_vec();
    let env = MeshEnvelope::new(&id, &recipient, payload, 3600, 5);

    let wire_len = env.to_wire().len();
    let json_len = env.to_bytes().len();

    println!("CBOR wire size: {wire_len} bytes");
    println!("JSON size: {json_len} bytes");
    println!("Ratio: {:.1}x smaller", json_len as f64 / wire_len as f64);

    // Equivalent to json_len > 1.5 * wire_len without float math.
    assert!(
        json_len * 2 > wire_len * 3,
        "CBOR ({wire_len}B) should be materially smaller than JSON ({json_len}B)"
    );
}
|
||||
|
||||
// A legacy JSON-encoded envelope must still parse through the
// CBOR-first entry point (from_wire_or_json's fallback path).
#[test]
fn cbor_backward_compat() {
    let id = test_identity();
    let env = MeshEnvelope::new(&id, &[0xDD; 32], b"json compat".to_vec(), 60, 3);

    // Serialize as JSON (old format).
    let json_bytes = env.to_bytes();

    // from_wire_or_json should fall back to JSON parsing.
    let restored = MeshEnvelope::from_wire_or_json(&json_bytes)
        .expect("from_wire_or_json should handle JSON");
    assert_eq!(env.id, restored.id);
    assert_eq!(env.payload, restored.payload);
    assert!(restored.verify());
}
|
||||
|
||||
#[test]
fn cbor_from_wire_rejects_garbage() {
    // Arbitrary non-CBOR bytes must produce a clean Err from the
    // decoder — never a panic.
    let junk: [u8; 8] = [0xFF, 0xFE, 0x00, 0x42, 0x99, 0x01, 0x02, 0x03];
    assert!(
        MeshEnvelope::from_wire(&junk).is_err(),
        "garbage input must return Err, not panic"
    );
}
|
||||
|
||||
/// Measure MeshEnvelope overhead for various payload sizes.
/// This informs constrained link feasibility planning.
// Primarily a diagnostic: prints sizes, then sanity-checks that the
// fixed overhead lands in the expected 100..500 byte band.
#[test]
fn measure_mesh_envelope_overhead() {
    let id = test_identity();
    let recipient = [0xAAu8; 32];

    println!("=== MeshEnvelope Wire Overhead (CBOR) ===");

    // Empty payload
    let env_empty = MeshEnvelope::new(&id, &recipient, vec![], 3600, 5);
    let wire_empty = env_empty.to_wire();
    println!("Payload 0B: wire {} bytes (overhead: {} bytes)", wire_empty.len(), wire_empty.len());
    let base_overhead = wire_empty.len();

    // 1-byte payload
    let env_1 = MeshEnvelope::new(&id, &recipient, vec![0x42], 3600, 5);
    let wire_1 = env_1.to_wire();
    println!("Payload 1B: wire {} bytes (overhead: {} bytes)", wire_1.len(), wire_1.len() - 1);

    // 10-byte payload ("hello mesh")
    let env_10 = MeshEnvelope::new(&id, &recipient, b"hello mesh".to_vec(), 3600, 5);
    let wire_10 = env_10.to_wire();
    println!("Payload 10B: wire {} bytes (overhead: {} bytes)", wire_10.len(), wire_10.len() - 10);

    // 50-byte payload
    let env_50 = MeshEnvelope::new(&id, &recipient, vec![0x42; 50], 3600, 5);
    let wire_50 = env_50.to_wire();
    println!("Payload 50B: wire {} bytes (overhead: {} bytes)", wire_50.len(), wire_50.len() - 50);

    // 100-byte payload (typical short message)
    let env_100 = MeshEnvelope::new(&id, &recipient, vec![0x42; 100], 3600, 5);
    let wire_100 = env_100.to_wire();
    println!("Payload 100B: wire {} bytes (overhead: {} bytes)", wire_100.len(), wire_100.len() - 100);

    // Broadcast (empty recipient) - saves 32 bytes
    let env_bc = MeshEnvelope::new(&id, &[], b"broadcast".to_vec(), 3600, 5);
    let wire_bc = env_bc.to_wire();
    println!("Broadcast 9B: wire {} bytes (no recipient)", wire_bc.len());

    // Ceiling division by the 51-byte LoRa fragment MTU.
    println!("\n=== LoRa Feasibility (SF12/BW125, MTU=51 bytes) ===");
    println!("Empty envelope: {} fragments", (wire_empty.len() + 50) / 51);
    println!("10B payload: {} fragments", (wire_10.len() + 50) / 51);
    println!("100B payload: {} fragments", (wire_100.len() + 50) / 51);

    // Baseline overhead is fixed fields:
    // - id: 32 bytes
    // - sender_key: 32 bytes
    // - recipient_key: 32 bytes (or 0 for broadcast)
    // - signature: 64 bytes
    // - ttl_secs: 4 bytes
    // - hop_count: 1 byte
    // - max_hops: 1 byte
    // - timestamp: 8 bytes
    // Total fixed: ~174 bytes raw, CBOR adds overhead for field names/types
    // Actual measured: ~400+ bytes with CBOR (field names add significant overhead)
    assert!(base_overhead < 500, "Base overhead should be under 500 bytes");
    assert!(base_overhead > 100, "Base overhead should be over 100 bytes (sanity check)");
}
|
||||
}
|
||||
440
crates/quicprochat-p2p/src/envelope_v2.rs
Normal file
440
crates/quicprochat-p2p/src/envelope_v2.rs
Normal file
@@ -0,0 +1,440 @@
|
||||
//! Compact mesh envelope using truncated 16-byte addresses.
|
||||
//!
|
||||
//! [`MeshEnvelopeV2`] is a bandwidth-optimized envelope format for constrained
|
||||
//! links (LoRa, serial). It uses [`MeshAddress`] (16 bytes) instead of full
|
||||
//! 32-byte public keys, saving 32 bytes per envelope.
|
||||
//!
|
||||
//! Full public keys are exchanged during the announce phase and cached in the
|
||||
//! routing table. The envelope only needs addresses for routing.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
use crate::identity::MeshIdentity;
|
||||
|
||||
/// Default maximum hops for mesh forwarding.
///
/// Substituted by [`MeshEnvelopeV2::new`] when the caller passes
/// `max_hops == 0`.
const DEFAULT_MAX_HOPS: u8 = 5;

/// Version byte for envelope format detection.
///
/// Checked by [`MeshEnvelopeV2::from_wire`], which rejects any other
/// version value.
const ENVELOPE_V2_VERSION: u8 = 0x02;
|
||||
|
||||
/// Priority levels for mesh routing.
///
/// Values 0..=3 fit in the low two bits of the envelope flags byte
/// (see [`MeshEnvelopeV2::priority`]).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[repr(u8)]
pub enum Priority {
    /// Lowest priority (announce, telemetry).
    Low = 0,
    /// Normal priority (regular messages).
    Normal = 1,
    /// High priority (important messages).
    High = 2,
    /// Emergency priority (always forwarded first).
    Emergency = 3,
}
|
||||
|
||||
impl Default for Priority {
|
||||
fn default() -> Self {
|
||||
Self::Normal
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u8> for Priority {
|
||||
fn from(v: u8) -> Self {
|
||||
match v {
|
||||
0 => Self::Low,
|
||||
1 => Self::Normal,
|
||||
2 => Self::High,
|
||||
3 => Self::Emergency,
|
||||
_ => Self::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Compact mesh envelope with 16-byte truncated addresses.
///
/// # Wire overhead
///
/// - Version: 1 byte
/// - Flags: 1 byte (priority: 2 bits, reserved: 6 bits)
/// - ID: 16 bytes (truncated from 32)
/// - Sender: 16 bytes
/// - Recipient: 16 bytes (or 0 for broadcast)
/// - TTL: 2 bytes (u16, max ~18 hours)
/// - Hop count: 1 byte
/// - Max hops: 1 byte
/// - Timestamp: 4 bytes (u32, seconds since epoch mod 2^32)
/// - Signature: 64 bytes
/// - Payload: variable
///
/// **Total fixed overhead: ~122 bytes** (vs ~174 for V1 with full keys)
/// Savings: ~52 bytes per envelope
///
/// The signature covers every field except `hop_count` and `signature`
/// itself, so intermediate hops can increment `hop_count` without
/// re-signing (see `signable_bytes`).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MeshEnvelopeV2 {
    /// Format version (0x02 for V2).
    pub version: u8,
    /// Flags byte: bits 0-1 = priority, bits 2-7 reserved.
    pub flags: u8,
    /// 16-byte truncated content ID (for deduplication).
    pub id: [u8; 16],
    /// 16-byte truncated sender address.
    pub sender_addr: MeshAddress,
    /// 16-byte truncated recipient address (BROADCAST for all).
    pub recipient_addr: MeshAddress,
    /// Encrypted payload (opaque to mesh layer).
    pub payload: Vec<u8>,
    /// Time-to-live in seconds (u16, max 65535 = ~18 hours).
    pub ttl_secs: u16,
    /// Current hop count.
    pub hop_count: u8,
    /// Maximum hops before drop.
    pub max_hops: u8,
    /// Unix timestamp (seconds, truncated to u32).
    pub timestamp: u32,
    /// Ed25519 signature (64 bytes, stored as Vec for serde compatibility).
    pub signature: Vec<u8>,
}
|
||||
|
||||
impl MeshEnvelopeV2 {
    /// Create and sign a new compact mesh envelope.
    ///
    /// `max_hops == 0` is replaced with [`DEFAULT_MAX_HOPS`]. The
    /// content ID is computed over the pre-signature fields, then the
    /// whole envelope (minus `hop_count`/`signature`) is signed with
    /// the caller's identity key.
    pub fn new(
        identity: &MeshIdentity,
        recipient_addr: MeshAddress,
        payload: Vec<u8>,
        ttl_secs: u16,
        max_hops: u8,
        priority: Priority,
    ) -> Self {
        let sender_addr = MeshAddress::from_public_key(&identity.public_key());
        let hop_count = 0u8;
        let max_hops = if max_hops == 0 { DEFAULT_MAX_HOPS } else { max_hops };
        // Truncate the Unix time to u32 to save 4 wire bytes; the mask
        // makes the intentional truncation explicit.
        let timestamp = (SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs() & 0xFFFF_FFFF) as u32;

        let id = Self::compute_id(
            &sender_addr,
            &recipient_addr,
            &payload,
            ttl_secs,
            max_hops,
            timestamp,
        );

        // Priority occupies the low two bits of the flags byte.
        let flags = (priority as u8) & 0x03;

        let mut envelope = Self {
            version: ENVELOPE_V2_VERSION,
            flags,
            id,
            sender_addr,
            recipient_addr,
            payload,
            ttl_secs,
            hop_count,
            max_hops,
            timestamp,
            signature: Vec::new(),
        };

        // Sign after all other fields are final; signature is the last
        // field filled in.
        let signable = envelope.signable_bytes();
        let sig = identity.sign(&signable);
        envelope.signature = sig.to_vec();

        envelope
    }

    /// Create for broadcast (recipient = all zeros).
    pub fn broadcast(
        identity: &MeshIdentity,
        payload: Vec<u8>,
        ttl_secs: u16,
        max_hops: u8,
        priority: Priority,
    ) -> Self {
        Self::new(identity, MeshAddress::BROADCAST, payload, ttl_secs, max_hops, priority)
    }

    /// Compute the 16-byte truncated content ID.
    ///
    /// SHA-256 over the routing-relevant fields, truncated to 16 bytes.
    /// `hop_count` is deliberately excluded so forwarded copies keep the
    /// same ID and deduplicate correctly.
    fn compute_id(
        sender_addr: &MeshAddress,
        recipient_addr: &MeshAddress,
        payload: &[u8],
        ttl_secs: u16,
        max_hops: u8,
        timestamp: u32,
    ) -> [u8; 16] {
        let mut hasher = Sha256::new();
        hasher.update(sender_addr.as_bytes());
        hasher.update(recipient_addr.as_bytes());
        hasher.update(payload);
        hasher.update(ttl_secs.to_le_bytes());
        hasher.update([max_hops]);
        hasher.update(timestamp.to_le_bytes());
        let hash = hasher.finalize();
        let mut id = [0u8; 16];
        id.copy_from_slice(&hash[..16]);
        id
    }

    /// Bytes to sign/verify (excludes signature and hop_count).
    ///
    /// Excluding `hop_count` lets relays increment it without
    /// invalidating the signature. The field order here is the wire
    /// signing contract — do not reorder.
    fn signable_bytes(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(64 + self.payload.len());
        buf.push(self.version);
        buf.push(self.flags);
        buf.extend_from_slice(&self.id);
        buf.extend_from_slice(self.sender_addr.as_bytes());
        buf.extend_from_slice(self.recipient_addr.as_bytes());
        buf.extend_from_slice(&self.payload);
        buf.extend_from_slice(&self.ttl_secs.to_le_bytes());
        buf.push(self.max_hops);
        buf.extend_from_slice(&self.timestamp.to_le_bytes());
        buf
    }

    /// Verify the signature using the sender's full public key.
    ///
    /// The caller must have the sender's full key (from announce/routing table).
    ///
    /// Returns `false` (never panics) when the address does not match
    /// the key, the signature is not 64 bytes, or verification fails.
    pub fn verify_with_key(&self, sender_public_key: &[u8; 32]) -> bool {
        // First check that the address matches the key
        if !self.sender_addr.matches_key(sender_public_key) {
            return false;
        }
        // Signature must be exactly 64 bytes
        let sig: [u8; 64] = match self.signature.as_slice().try_into() {
            Ok(s) => s,
            Err(_) => return false,
        };
        let signable = self.signable_bytes();
        quicprochat_core::IdentityKeypair::verify_raw(sender_public_key, &signable, &sig).is_ok()
    }

    /// Get the priority level.
    pub fn priority(&self) -> Priority {
        // Only the low two flag bits carry priority.
        Priority::from(self.flags & 0x03)
    }

    /// Check if broadcast (recipient is all zeros).
    pub fn is_broadcast(&self) -> bool {
        self.recipient_addr.is_broadcast()
    }

    /// Check if expired.
    ///
    /// NOTE(review): with wrapping arithmetic, a timestamp ahead of the
    /// local clock (peer clock skew) wraps to a huge `elapsed` and reads
    /// as expired — confirm this is the intended skew behavior.
    pub fn is_expired(&self) -> bool {
        let now = (SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs() & 0xFFFF_FFFF) as u32;
        // Handle u32 wraparound (every ~136 years)
        let elapsed = now.wrapping_sub(self.timestamp);
        elapsed > self.ttl_secs as u32
    }

    /// Can this envelope be forwarded?
    pub fn can_forward(&self) -> bool {
        self.hop_count < self.max_hops && !self.is_expired()
    }

    /// Create a forwarded copy with hop_count incremented.
    ///
    /// The signature stays valid because `hop_count` is excluded from
    /// the signable bytes.
    pub fn forwarded(&self) -> Self {
        let mut copy = self.clone();
        copy.hop_count = copy.hop_count.saturating_add(1);
        copy
    }

    /// Serialize to compact CBOR.
    pub fn to_wire(&self) -> Vec<u8> {
        let mut buf = Vec::new();
        // Writing to an in-memory Vec cannot fail for serializable data.
        ciborium::into_writer(self, &mut buf).expect("CBOR serialization should not fail");
        buf
    }

    /// Deserialize from CBOR.
    ///
    /// # Errors
    ///
    /// Fails on malformed CBOR or on a version byte other than
    /// [`ENVELOPE_V2_VERSION`].
    pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
        let env: Self = ciborium::from_reader(bytes)?;
        if env.version != ENVELOPE_V2_VERSION {
            anyhow::bail!("unexpected envelope version: {}", env.version);
        }
        Ok(env)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Fresh random identity per test; no shared state.
    fn test_identity() -> MeshIdentity {
        MeshIdentity::generate()
    }

    // A freshly built envelope must be well-formed, unexpired,
    // forwardable, and verify against its creator's key.
    #[test]
    fn create_and_verify() {
        let id = test_identity();
        let recipient_key = [0xBBu8; 32];
        let recipient_addr = MeshAddress::from_public_key(&recipient_key);

        let env = MeshEnvelopeV2::new(
            &id,
            recipient_addr,
            b"hello compact".to_vec(),
            3600,
            5,
            Priority::Normal,
        );

        assert_eq!(env.version, ENVELOPE_V2_VERSION);
        assert_eq!(env.hop_count, 0);
        assert!(env.verify_with_key(&id.public_key()));
        assert!(!env.is_expired());
        assert!(env.can_forward());
    }

    // Broadcast envelopes use the all-zero address and keep their
    // priority and signature intact.
    #[test]
    fn broadcast_envelope() {
        let id = test_identity();
        let env = MeshEnvelopeV2::broadcast(
            &id,
            b"announcement".to_vec(),
            300,
            8,
            Priority::Low,
        );

        assert!(env.is_broadcast());
        assert_eq!(env.priority(), Priority::Low);
        assert!(env.verify_with_key(&id.public_key()));
    }

    // Incrementing hop_count must not invalidate the signature,
    // since hop_count is excluded from the signable bytes.
    #[test]
    fn forwarded_still_verifies() {
        let id = test_identity();
        let env = MeshEnvelopeV2::new(
            &id,
            MeshAddress::from_bytes([0xCC; 16]),
            b"forward me".to_vec(),
            3600,
            5,
            Priority::High,
        );

        let fwd = env.forwarded();
        assert_eq!(fwd.hop_count, 1);
        assert!(fwd.verify_with_key(&id.public_key()));

        let fwd2 = fwd.forwarded();
        assert_eq!(fwd2.hop_count, 2);
        assert!(fwd2.verify_with_key(&id.public_key()));
    }

    // CBOR encode/decode must preserve every field bit-for-bit.
    #[test]
    fn cbor_roundtrip() {
        let id = test_identity();
        let env = MeshEnvelopeV2::new(
            &id,
            MeshAddress::from_bytes([0xDD; 16]),
            b"roundtrip test".to_vec(),
            1800,
            4,
            Priority::Emergency,
        );

        let wire = env.to_wire();
        let restored = MeshEnvelopeV2::from_wire(&wire).expect("deserialize");

        assert_eq!(env.id, restored.id);
        assert_eq!(env.sender_addr, restored.sender_addr);
        assert_eq!(env.recipient_addr, restored.recipient_addr);
        assert_eq!(env.payload, restored.payload);
        assert_eq!(env.ttl_secs, restored.ttl_secs);
        assert_eq!(env.hop_count, restored.hop_count);
        assert_eq!(env.max_hops, restored.max_hops);
        assert_eq!(env.timestamp, restored.timestamp);
        assert_eq!(env.signature, restored.signature);
        assert_eq!(env.priority(), Priority::Emergency);
    }

    /// Measure V2 wire overhead and compare against V1.
    /// Informs constrained-link feasibility planning.
    #[test]
    fn measure_v2_overhead() {
        let id = test_identity();
        let recipient_addr = MeshAddress::from_bytes([0xEE; 16]);

        println!("=== MeshEnvelopeV2 Wire Overhead (CBOR) ===");

        // Empty payload
        let env_empty = MeshEnvelopeV2::new(&id, recipient_addr, vec![], 3600, 5, Priority::Normal);
        let wire_empty = env_empty.to_wire();
        println!("Payload 0B: wire {} bytes (overhead: {} bytes)", wire_empty.len(), wire_empty.len());
        let v2_overhead = wire_empty.len();

        // Compare to V1
        let v1_env = crate::envelope::MeshEnvelope::new(
            &id,
            &[0xEE; 32],
            vec![],
            3600,
            5,
        );
        let v1_wire = v1_env.to_wire();
        println!("V1 empty: {} bytes", v1_wire.len());

        // Assert BEFORE computing savings: if V2 ever regresses to be
        // larger than V1, `v1_wire.len() - v2_overhead` would panic with
        // a usize underflow instead of a clear assertion message.
        assert!(v2_overhead < v1_wire.len(), "V2 should be smaller than V1");

        let savings = v1_wire.len() - v2_overhead; // safe: asserted above
        let savings_pct = (savings as f64 / v1_wire.len() as f64) * 100.0;
        println!("V2 savings: {savings} bytes ({savings_pct:.1}%)");

        // 10-byte payload
        let env_10 = MeshEnvelopeV2::new(&id, recipient_addr, b"hello mesh".to_vec(), 3600, 5, Priority::Normal);
        let wire_10 = env_10.to_wire();
        println!("Payload 10B: wire {} bytes", wire_10.len());

        // 100-byte payload
        let env_100 = MeshEnvelopeV2::new(&id, recipient_addr, vec![0x42; 100], 3600, 5, Priority::Normal);
        let wire_100 = env_100.to_wire();
        println!("Payload 100B: wire {} bytes", wire_100.len());

        // V2 should be smaller than V1 due to truncated addresses.
        // With CBOR field names, actual overhead is higher than the
        // theoretical minimum (~336 bytes for V2 vs ~410 for V1 = ~18%).
        assert!(savings_pct > 10.0, "V2 should save at least 10% vs V1");
        println!("Actual V2 savings: {savings_pct:.1}%");
    }

    // Verification must fail for a mismatched key and succeed for the
    // creator's key.
    #[test]
    fn wrong_key_fails_verification() {
        let id = test_identity();
        let env = MeshEnvelopeV2::new(
            &id,
            MeshAddress::from_bytes([0xFF; 16]),
            b"verify me".to_vec(),
            3600,
            5,
            Priority::Normal,
        );

        // Wrong key should fail
        let wrong_key = [0x42u8; 32];
        assert!(!env.verify_with_key(&wrong_key));

        // Correct key should pass
        assert!(env.verify_with_key(&id.public_key()));
    }

    // Every priority level must round-trip through the 2-bit flags
    // encoding.
    #[test]
    fn priority_levels() {
        let id = test_identity();

        for prio in [Priority::Low, Priority::Normal, Priority::High, Priority::Emergency] {
            let env = MeshEnvelopeV2::new(
                &id,
                MeshAddress::BROADCAST,
                b"prio test".to_vec(),
                60,
                3,
                prio,
            );
            assert_eq!(env.priority(), prio);
        }
    }
}
|
||||
354
crates/quicprochat-p2p/src/error.rs
Normal file
354
crates/quicprochat-p2p/src/error.rs
Normal file
@@ -0,0 +1,354 @@
|
||||
//! Production-ready error types for the mesh P2P layer.
|
||||
//!
|
||||
//! This module provides structured error types with context for debugging
|
||||
//! and recovery. Errors are categorized by subsystem for easier handling.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
use crate::transport::TransportAddr;
|
||||
|
||||
/// Top-level mesh error type.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum MeshError {
|
||||
/// Transport layer errors.
|
||||
#[error("transport error: {0}")]
|
||||
Transport(#[from] TransportError),
|
||||
|
||||
/// Routing errors.
|
||||
#[error("routing error: {0}")]
|
||||
Routing(#[from] RoutingError),
|
||||
|
||||
/// Crypto/encryption errors.
|
||||
#[error("crypto error: {0}")]
|
||||
Crypto(#[from] CryptoError),
|
||||
|
||||
/// Protocol errors (malformed messages, version mismatch).
|
||||
#[error("protocol error: {0}")]
|
||||
Protocol(#[from] ProtocolError),
|
||||
|
||||
/// Store/cache errors.
|
||||
#[error("store error: {0}")]
|
||||
Store(#[from] StoreError),
|
||||
|
||||
/// Configuration errors.
|
||||
#[error("config error: {0}")]
|
||||
Config(#[from] ConfigError),
|
||||
|
||||
/// Internal errors (bugs, invariant violations).
|
||||
#[error("internal error: {0}")]
|
||||
Internal(String),
|
||||
}
|
||||
|
||||
/// Transport layer errors.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum TransportError {
|
||||
/// Failed to send data.
|
||||
#[error("send failed to {dest}: {reason}")]
|
||||
SendFailed { dest: String, reason: String },
|
||||
|
||||
/// Failed to receive data.
|
||||
#[error("receive failed: {0}")]
|
||||
ReceiveFailed(String),
|
||||
|
||||
/// Connection failed or lost.
|
||||
#[error("connection to {dest} failed: {reason}")]
|
||||
ConnectionFailed { dest: String, reason: String },
|
||||
|
||||
/// Transport not available.
|
||||
#[error("transport '{name}' not available")]
|
||||
NotAvailable { name: String },
|
||||
|
||||
/// No transports registered.
|
||||
#[error("no transports registered")]
|
||||
NoTransports,
|
||||
|
||||
/// MTU exceeded.
|
||||
#[error("payload {size} bytes exceeds MTU {mtu} bytes")]
|
||||
MtuExceeded { size: usize, mtu: usize },
|
||||
|
||||
/// Duty cycle limit reached.
|
||||
#[error("duty cycle limit reached: {used_ms}ms used of {limit_ms}ms allowed")]
|
||||
DutyCycleExceeded { used_ms: u64, limit_ms: u64 },
|
||||
|
||||
/// Timeout waiting for response.
|
||||
#[error("timeout waiting for response from {dest}")]
|
||||
Timeout { dest: String },
|
||||
|
||||
/// I/O error.
|
||||
#[error("I/O error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
}
|
||||
|
||||
/// Routing errors.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum RoutingError {
|
||||
/// No route to destination.
|
||||
#[error("no route to {0}")]
|
||||
NoRoute(String),
|
||||
|
||||
/// Route expired.
|
||||
#[error("route to {dest} expired (last seen {age_secs}s ago)")]
|
||||
RouteExpired { dest: String, age_secs: u64 },
|
||||
|
||||
/// Too many hops.
|
||||
#[error("max hops ({max}) exceeded for message to {dest}")]
|
||||
MaxHopsExceeded { dest: String, max: u8 },
|
||||
|
||||
/// Message expired.
|
||||
#[error("message expired (TTL {ttl_secs}s, age {age_secs}s)")]
|
||||
MessageExpired { ttl_secs: u32, age_secs: u64 },
|
||||
|
||||
/// Duplicate message (dedup).
|
||||
#[error("duplicate message ID {0}")]
|
||||
Duplicate(String),
|
||||
|
||||
/// Routing table full.
|
||||
#[error("routing table full ({capacity} entries)")]
|
||||
TableFull { capacity: usize },
|
||||
}
|
||||
|
||||
/// Crypto/encryption errors.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum CryptoError {
|
||||
/// Signature verification failed.
|
||||
#[error("signature verification failed for {context}")]
|
||||
SignatureInvalid { context: String },
|
||||
|
||||
/// Decryption failed.
|
||||
#[error("decryption failed: {0}")]
|
||||
DecryptionFailed(String),
|
||||
|
||||
/// Key not found.
|
||||
#[error("key not found for {0}")]
|
||||
KeyNotFound(String),
|
||||
|
||||
/// KeyPackage invalid or expired.
|
||||
#[error("KeyPackage invalid: {0}")]
|
||||
KeyPackageInvalid(String),
|
||||
|
||||
/// Replay attack detected.
|
||||
#[error("replay detected: sequence {seq} already seen from {sender}")]
|
||||
ReplayDetected { sender: String, seq: u32 },
|
||||
|
||||
/// Wrong epoch.
|
||||
#[error("wrong epoch: expected {expected}, got {got}")]
|
||||
WrongEpoch { expected: u16, got: u16 },
|
||||
|
||||
/// MLS error (from openmls).
|
||||
#[error("MLS error: {0}")]
|
||||
Mls(String),
|
||||
}
|
||||
|
||||
/// Protocol errors.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum ProtocolError {
|
||||
/// Unknown message type.
|
||||
#[error("unknown message type: 0x{0:02x}")]
|
||||
UnknownMessageType(u8),
|
||||
|
||||
/// Invalid message format.
|
||||
#[error("invalid message format: {0}")]
|
||||
InvalidFormat(String),
|
||||
|
||||
/// Version mismatch.
|
||||
#[error("protocol version mismatch: expected {expected}, got {got}")]
|
||||
VersionMismatch { expected: u8, got: u8 },
|
||||
|
||||
/// Required field missing.
|
||||
#[error("required field missing: {0}")]
|
||||
MissingField(String),
|
||||
|
||||
/// CBOR decode error.
|
||||
#[error("CBOR decode error: {0}")]
|
||||
CborDecode(String),
|
||||
|
||||
/// CBOR encode error.
|
||||
#[error("CBOR encode error: {0}")]
|
||||
CborEncode(String),
|
||||
|
||||
/// Message too large.
|
||||
#[error("message too large: {size} bytes (max {max})")]
|
||||
MessageTooLarge { size: usize, max: usize },
|
||||
}
|
||||
|
||||
/// Store/cache errors.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum StoreError {
|
||||
/// Store is full.
|
||||
#[error("store full: {current}/{capacity} items")]
|
||||
Full { current: usize, capacity: usize },
|
||||
|
||||
/// Item not found.
|
||||
#[error("item not found: {0}")]
|
||||
NotFound(String),
|
||||
|
||||
/// Persistence error.
|
||||
#[error("persistence error: {0}")]
|
||||
Persistence(String),
|
||||
|
||||
/// Serialization error.
|
||||
#[error("serialization error: {0}")]
|
||||
Serialization(String),
|
||||
}
|
||||
|
||||
/// Configuration errors.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum ConfigError {
|
||||
/// Invalid configuration value.
|
||||
#[error("invalid config value for '{key}': {reason}")]
|
||||
InvalidValue { key: String, reason: String },
|
||||
|
||||
/// Missing required configuration.
|
||||
#[error("missing required config: {0}")]
|
||||
Missing(String),
|
||||
|
||||
/// Configuration parse error.
|
||||
#[error("config parse error: {0}")]
|
||||
Parse(String),
|
||||
}
|
||||
|
||||
/// Result type alias for mesh operations.
|
||||
pub type MeshResult<T> = Result<T, MeshError>;
|
||||
|
||||
/// Error context extension trait for adding context to errors.
|
||||
pub trait ErrorContext<T> {
|
||||
/// Add context to an error.
|
||||
fn context(self, context: impl Into<String>) -> MeshResult<T>;
|
||||
|
||||
/// Add context with a closure (lazy evaluation).
|
||||
fn with_context<F>(self, f: F) -> MeshResult<T>
|
||||
where
|
||||
F: FnOnce() -> String;
|
||||
}
|
||||
|
||||
impl<T, E: Into<MeshError>> ErrorContext<T> for Result<T, E> {
|
||||
fn context(self, context: impl Into<String>) -> MeshResult<T> {
|
||||
self.map_err(|e| {
|
||||
let err = e.into();
|
||||
MeshError::Internal(format!("{}: {}", context.into(), err))
|
||||
})
|
||||
}
|
||||
|
||||
fn with_context<F>(self, f: F) -> MeshResult<T>
|
||||
where
|
||||
F: FnOnce() -> String,
|
||||
{
|
||||
self.map_err(|e| {
|
||||
let err = e.into();
|
||||
MeshError::Internal(format!("{}: {}", f(), err))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert anyhow errors to MeshError.
|
||||
impl From<anyhow::Error> for MeshError {
|
||||
fn from(e: anyhow::Error) -> Self {
|
||||
MeshError::Internal(e.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to create transport send errors.
|
||||
impl TransportError {
|
||||
pub fn send_failed(dest: &TransportAddr, reason: impl Into<String>) -> Self {
|
||||
Self::SendFailed {
|
||||
dest: dest.to_string(),
|
||||
reason: reason.into(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn connection_failed(dest: &TransportAddr, reason: impl Into<String>) -> Self {
|
||||
Self::ConnectionFailed {
|
||||
dest: dest.to_string(),
|
||||
reason: reason.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to create routing errors.
|
||||
impl RoutingError {
|
||||
pub fn no_route(addr: &MeshAddress) -> Self {
|
||||
Self::NoRoute(format!("{}", addr))
|
||||
}
|
||||
|
||||
pub fn no_route_bytes(addr: &[u8]) -> Self {
|
||||
Self::NoRoute(hex::encode(&addr[..8.min(addr.len())]))
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to create crypto errors.
|
||||
impl CryptoError {
|
||||
pub fn signature_invalid(context: impl Into<String>) -> Self {
|
||||
Self::SignatureInvalid {
|
||||
context: context.into(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn replay(sender: &MeshAddress, seq: u32) -> Self {
|
||||
Self::ReplayDetected {
|
||||
sender: format!("{}", sender),
|
||||
seq,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to create protocol errors.
|
||||
impl ProtocolError {
|
||||
pub fn cbor_decode(e: impl fmt::Display) -> Self {
|
||||
Self::CborDecode(e.to_string())
|
||||
}
|
||||
|
||||
pub fn cbor_encode(e: impl fmt::Display) -> Self {
|
||||
Self::CborEncode(e.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn error_display() {
        let err = TransportError::SendFailed {
            dest: "tcp:127.0.0.1:8080".to_string(),
            reason: "connection refused".to_string(),
        };
        let rendered = err.to_string();
        assert!(rendered.contains("tcp:127.0.0.1:8080"));
        assert!(rendered.contains("connection refused"));
    }

    #[test]
    fn error_conversion() {
        // `#[from]` must lift a subsystem error into the matching top-level variant.
        let mesh_err = MeshError::from(TransportError::NoTransports);
        assert!(matches!(mesh_err, MeshError::Transport(_)));
    }

    #[test]
    fn routing_error_helpers() {
        let addr = MeshAddress::from_bytes([0xAB; 16]);
        let err = RoutingError::no_route(&addr);
        assert!(err.to_string().contains("no route"));
    }

    #[test]
    fn crypto_error_helpers() {
        let addr = MeshAddress::from_bytes([0xCD; 16]);
        let err = CryptoError::replay(&addr, 42);
        assert!(err.to_string().contains("42"));
    }

    #[test]
    fn context_extension() {
        fn fallible() -> Result<(), TransportError> {
            Err(TransportError::NoTransports)
        }

        let result: MeshResult<()> = fallible().context("during startup");
        assert!(result.is_err());
        let message = result.unwrap_err().to_string();
        assert!(message.contains("during startup"));
    }
}
|
||||
1127
crates/quicprochat-p2p/src/fapp.rs
Normal file
1127
crates/quicprochat-p2p/src/fapp.rs
Normal file
File diff suppressed because it is too large
Load Diff
335
crates/quicprochat-p2p/src/fapp_router.rs
Normal file
335
crates/quicprochat-p2p/src/fapp_router.rs
Normal file
@@ -0,0 +1,335 @@
|
||||
//! FAPP routing: decode wire frames, integrate with [`RoutingTable`](crate::routing_table::RoutingTable)
|
||||
//! and [`TransportManager`](crate::transport_manager::TransportManager).
|
||||
//!
|
||||
//! [`FappRouter::broadcast_announce`](FappRouter::broadcast_announce) and
|
||||
//! [`FappRouter::send_query`](FappRouter::send_query) enqueue outbound frames; call
|
||||
//! [`FappRouter::drain_pending_sends`](FappRouter::drain_pending_sends) and pass each
|
||||
//! payload to [`TransportManager::send`](crate::transport_manager::TransportManager::send)
|
||||
//! from an async context.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
|
||||
use crate::fapp::{
|
||||
FappStore, SlotAnnounce, SlotQuery, SlotResponse, CAP_FAPP_PATIENT, CAP_FAPP_RELAY,
|
||||
CAP_FAPP_THERAPIST,
|
||||
};
|
||||
use crate::routing_table::RoutingTable;
|
||||
use crate::transport::TransportAddr;
|
||||
use crate::transport_manager::TransportManager;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Wire message tags (CBOR body follows the tag byte)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Tag byte for a [`SlotAnnounce`] frame.
pub const FAPP_WIRE_ANNOUNCE: u8 = 0x01;
/// Tag byte for a [`SlotQuery`] frame.
pub const FAPP_WIRE_QUERY: u8 = 0x02;
/// Tag byte for a [`SlotResponse`] frame.
pub const FAPP_WIRE_RESPONSE: u8 = 0x03;
/// Tag byte for a [`SlotReserve`](crate::fapp::SlotReserve) frame (handled later).
pub const FAPP_WIRE_RESERVE: u8 = 0x04;
/// Tag byte for a [`SlotConfirm`](crate::fapp::SlotConfirm) frame (handled later).
pub const FAPP_WIRE_CONFIRM: u8 = 0x05;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// FappAction — what to do after handling an incoming FAPP frame
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Result of processing an incoming FAPP payload (mirrors [`IncomingAction`](crate::mesh_router::IncomingAction) style).
|
||||
#[derive(Debug)]
|
||||
pub enum FappAction {
|
||||
/// No application-visible effect.
|
||||
Ignore,
|
||||
/// Invalid frame, unknown tag, or rejected message.
|
||||
Dropped(String),
|
||||
/// Flood this wire payload to each listed next hop.
|
||||
Forward {
|
||||
wire: Vec<u8>,
|
||||
next_hops: Vec<TransportAddr>,
|
||||
},
|
||||
/// Relay answered from [`FappStore`] (matches may be empty).
|
||||
QueryResponse(SlotResponse),
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Wire helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Prepend a one-byte tag to a CBOR body, producing a complete wire frame.
fn encode_tagged(tag: u8, cbor_body: &[u8]) -> Vec<u8> {
    let mut frame = Vec::with_capacity(cbor_body.len() + 1);
    frame.push(tag);
    frame.extend_from_slice(cbor_body);
    frame
}
|
||||
|
||||
fn slot_query_to_wire(query: &SlotQuery) -> Vec<u8> {
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(query, &mut buf).expect("SlotQuery CBOR");
|
||||
buf
|
||||
}
|
||||
|
||||
fn slot_query_from_wire(bytes: &[u8]) -> Result<SlotQuery> {
|
||||
let q: SlotQuery = ciborium::from_reader(bytes)?;
|
||||
Ok(q)
|
||||
}
|
||||
|
||||
/// Unique next-hop addresses from the routing table (flood fan-out).
|
||||
fn flood_targets(table: &RoutingTable) -> Vec<TransportAddr> {
|
||||
let mut seen = HashSet::new();
|
||||
let mut out = Vec::new();
|
||||
for e in table.entries() {
|
||||
if seen.insert(e.next_hop_addr.clone()) {
|
||||
out.push(e.next_hop_addr.clone());
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
fn enqueue_flood(
|
||||
pending: &Mutex<Vec<(TransportAddr, Vec<u8>)>>,
|
||||
wire: Vec<u8>,
|
||||
table: &RoutingTable,
|
||||
) -> Result<()> {
|
||||
let hops = flood_targets(table);
|
||||
if hops.is_empty() {
|
||||
bail!("no mesh neighbors in routing table for flood");
|
||||
}
|
||||
let mut q = pending
|
||||
.lock()
|
||||
.map_err(|e| anyhow::anyhow!("pending_sends lock poisoned: {e}"))?;
|
||||
for addr in hops {
|
||||
q.push((addr, wire.clone()));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// FappRouter
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// FAPP message router integrated with the mesh [`RoutingTable`] and transports.
|
||||
pub struct FappRouter {
|
||||
/// Local announcement cache and query index (relay nodes).
|
||||
store: Mutex<FappStore>,
|
||||
/// Shared with [`MeshRouter`](crate::mesh_router::MeshRouter).
|
||||
routes: Arc<RwLock<RoutingTable>>,
|
||||
/// Shared transport manager (same as [`MeshRouter`](crate::mesh_router::MeshRouter); wire-up sends via [`Self::drain_pending_sends`] until sync send exists).
|
||||
#[allow(dead_code)]
|
||||
transports: Arc<TransportManager>,
|
||||
/// Bitfield: [`CAP_FAPP_THERAPIST`], [`CAP_FAPP_RELAY`], [`CAP_FAPP_PATIENT`].
|
||||
local_capabilities: u16,
|
||||
/// Frames produced by [`Self::broadcast_announce`] and [`Self::send_query`].
|
||||
pending_sends: Mutex<Vec<(TransportAddr, Vec<u8>)>>,
|
||||
}
|
||||
|
||||
impl FappRouter {
|
||||
/// Create a router with the given store, shared routing table, transports, and capability mask.
|
||||
pub fn new(
|
||||
store: FappStore,
|
||||
routes: Arc<RwLock<RoutingTable>>,
|
||||
transports: Arc<TransportManager>,
|
||||
local_capabilities: u16,
|
||||
) -> Self {
|
||||
Self {
|
||||
store: Mutex::new(store),
|
||||
routes,
|
||||
transports,
|
||||
local_capabilities,
|
||||
pending_sends: Mutex::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Decode a tagged FAPP wire frame and apply local policy.
|
||||
pub fn handle_incoming(&self, bytes: &[u8]) -> FappAction {
|
||||
if bytes.is_empty() {
|
||||
return FappAction::Dropped("empty FAPP frame".into());
|
||||
}
|
||||
let tag = bytes[0];
|
||||
let body = &bytes[1..];
|
||||
match tag {
|
||||
FAPP_WIRE_ANNOUNCE => match SlotAnnounce::from_wire(body) {
|
||||
Ok(a) => self.process_slot_announce(a),
|
||||
Err(e) => FappAction::Dropped(format!("announce CBOR: {e}")),
|
||||
},
|
||||
FAPP_WIRE_QUERY => match slot_query_from_wire(body) {
|
||||
Ok(q) => self.process_slot_query(q),
|
||||
Err(e) => FappAction::Dropped(format!("query CBOR: {e}")),
|
||||
},
|
||||
FAPP_WIRE_RESPONSE | FAPP_WIRE_RESERVE | FAPP_WIRE_CONFIRM => {
|
||||
FappAction::Dropped(format!("unhandled FAPP tag 0x{tag:02x}"))
|
||||
}
|
||||
_ => FappAction::Dropped(format!("unknown FAPP tag 0x{tag:02x}")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Enqueue a signed [`SlotAnnounce`] to all known next hops (therapist publish / relay re-flood).
|
||||
pub fn broadcast_announce(&self, announce: SlotAnnounce) -> Result<()> {
|
||||
if self.local_capabilities & CAP_FAPP_THERAPIST == 0 {
|
||||
bail!("missing CAP_FAPP_THERAPIST");
|
||||
}
|
||||
let wire = encode_tagged(FAPP_WIRE_ANNOUNCE, &announce.to_wire());
|
||||
let table = self
|
||||
.routes
|
||||
.read()
|
||||
.map_err(|e| anyhow::anyhow!("routing table lock poisoned: {e}"))?;
|
||||
enqueue_flood(&self.pending_sends, wire, &table)
|
||||
}
|
||||
|
||||
/// Enqueue an anonymous [`SlotQuery`] flood (patient discovery).
|
||||
pub fn send_query(&self, query: SlotQuery) -> Result<()> {
|
||||
if self.local_capabilities & CAP_FAPP_PATIENT == 0 {
|
||||
bail!("missing CAP_FAPP_PATIENT");
|
||||
}
|
||||
let body = slot_query_to_wire(&query);
|
||||
let wire = encode_tagged(FAPP_WIRE_QUERY, &body);
|
||||
let table = self
|
||||
.routes
|
||||
.read()
|
||||
.map_err(|e| anyhow::anyhow!("routing table lock poisoned: {e}"))?;
|
||||
enqueue_flood(&self.pending_sends, wire, &table)
|
||||
}
|
||||
|
||||
/// Apply relay / propagation rules to a decoded [`SlotAnnounce`].
|
||||
pub fn process_slot_announce(&self, announce: SlotAnnounce) -> FappAction {
|
||||
if !announce.can_propagate() {
|
||||
return FappAction::Dropped("announce expired or max hops".into());
|
||||
}
|
||||
|
||||
let has_relay = self.local_capabilities & CAP_FAPP_RELAY != 0;
|
||||
if !has_relay {
|
||||
return FappAction::Ignore;
|
||||
}
|
||||
|
||||
let mut store = match self.store.lock() {
|
||||
Ok(g) => g,
|
||||
Err(e) => return FappAction::Dropped(format!("fapp store lock poisoned: {e}")),
|
||||
};
|
||||
|
||||
if store.seen(&announce.id) {
|
||||
return FappAction::Ignore;
|
||||
}
|
||||
|
||||
let stored = store.store(announce.clone());
|
||||
if !stored {
|
||||
return FappAction::Ignore;
|
||||
}
|
||||
|
||||
let forwarded = announce.forwarded();
|
||||
if !forwarded.can_propagate() {
|
||||
return FappAction::Ignore;
|
||||
}
|
||||
|
||||
let wire = encode_tagged(FAPP_WIRE_ANNOUNCE, &forwarded.to_wire());
|
||||
let next_hops = {
|
||||
let table = match self.routes.read() {
|
||||
Ok(t) => t,
|
||||
Err(e) => {
|
||||
return FappAction::Dropped(format!("routing table lock poisoned: {e}"));
|
||||
}
|
||||
};
|
||||
flood_targets(&table)
|
||||
};
|
||||
|
||||
if next_hops.is_empty() {
|
||||
return FappAction::Ignore;
|
||||
}
|
||||
|
||||
FappAction::Forward {
|
||||
wire,
|
||||
next_hops,
|
||||
}
|
||||
}
|
||||
|
||||
/// Answer from cache and/or ignore (query flooding is a separate [`Self::send_query`] path).
|
||||
pub fn process_slot_query(&self, query: SlotQuery) -> FappAction {
|
||||
if self.local_capabilities & CAP_FAPP_RELAY == 0 {
|
||||
return FappAction::Ignore;
|
||||
}
|
||||
|
||||
let store = match self.store.lock() {
|
||||
Ok(g) => g,
|
||||
Err(e) => return FappAction::Dropped(format!("fapp store lock poisoned: {e}")),
|
||||
};
|
||||
|
||||
let response = store.query(&query);
|
||||
FappAction::QueryResponse(response)
|
||||
}
|
||||
|
||||
/// Take queued outbound frames (typically sent with `TransportManager::send` in async code).
|
||||
pub fn drain_pending_sends(&self) -> Result<Vec<(TransportAddr, Vec<u8>)>> {
|
||||
let mut q = self
|
||||
.pending_sends
|
||||
.lock()
|
||||
.map_err(|e| anyhow::anyhow!("pending_sends lock poisoned: {e}"))?;
|
||||
let out = std::mem::take(&mut *q);
|
||||
Ok(out)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    use crate::fapp::{Fachrichtung, Kostentraeger, Modalitaet, SlotType, TimeSlot};
    use crate::identity::MeshIdentity;

    /// Build a router over an empty routing table with the given capability mask.
    fn make_router(caps: u16) -> FappRouter {
        let routes = Arc::new(RwLock::new(RoutingTable::new(Duration::from_secs(300))));
        let transports = Arc::new(TransportManager::new());
        FappRouter::new(FappStore::new(), routes, transports, caps)
    }

    #[test]
    fn handle_incoming_unknown_tag_dropped() {
        let router = make_router(CAP_FAPP_RELAY);
        match router.handle_incoming(&[0xFF]) {
            FappAction::Dropped(msg) => assert!(msg.contains("unknown")),
            other => panic!("expected Dropped, got {other:?}"),
        }
    }

    #[test]
    fn process_slot_query_requires_relay_cap() {
        let router = make_router(0);
        let query = SlotQuery {
            query_id: [1u8; 16],
            fachrichtung: None,
            modalitaet: None,
            kostentraeger: None,
            plz_prefix: None,
            earliest: None,
            latest: None,
            slot_type: None,
            max_results: 5,
        };
        assert!(matches!(router.process_slot_query(query), FappAction::Ignore));
    }

    #[test]
    fn broadcast_announce_requires_therapist_cap() {
        let router = make_router(CAP_FAPP_RELAY);
        let id = MeshIdentity::generate();
        let announce = SlotAnnounce::new(
            &id,
            vec![Fachrichtung::Verhaltenstherapie],
            vec![Modalitaet::Praxis],
            vec![Kostentraeger::GKV],
            "80331".into(),
            vec![TimeSlot {
                start_unix: 1,
                duration_minutes: 50,
                slot_type: SlotType::Therapie,
            }],
            [0xAA; 32],
            1,
        );
        assert!(router.broadcast_announce(announce).is_err());
    }
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//! Self-sovereign mesh identity backed by quicproquo-core Ed25519 keypairs.
|
||||
//! Self-sovereign mesh identity backed by quicprochat-core Ed25519 keypairs.
|
||||
//!
|
||||
//! A [`MeshIdentity`] wraps an [`IdentityKeypair`] with a peer directory,
|
||||
//! enabling P2P nodes to persist identity and track known peers across
|
||||
@@ -7,7 +7,7 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use quicproquo_core::IdentityKeypair;
|
||||
use quicprochat_core::IdentityKeypair;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(unix)]
|
||||
@@ -130,7 +130,7 @@ mod tests {
|
||||
let msg = b"test message";
|
||||
let sig = id.sign(msg);
|
||||
|
||||
// Verify through quicproquo_core
|
||||
// Verify through quicprochat_core
|
||||
let pk = id.public_key();
|
||||
IdentityKeypair::verify_raw(&pk, msg, &sig).expect("valid signature");
|
||||
}
|
||||
360
crates/quicprochat-p2p/src/keypackage_cache.rs
Normal file
360
crates/quicprochat-p2p/src/keypackage_cache.rs
Normal file
@@ -0,0 +1,360 @@
|
||||
//! KeyPackage cache for mesh-based MLS group setup.
|
||||
//!
|
||||
//! The [`KeyPackageCache`] stores MLS KeyPackages received from other nodes,
|
||||
//! enabling group creation without a central server. KeyPackages are:
|
||||
//!
|
||||
//! - Indexed by the node's 16-byte mesh address
|
||||
//! - Hashed (8 bytes) for announce inclusion
|
||||
//! - TTL-managed for expiry (MLS KeyPackages are single-use but we cache N of them)
|
||||
//! - Bounded by capacity to prevent memory exhaustion
|
||||
//!
|
||||
//! # Protocol Flow
|
||||
//!
|
||||
//! 1. Bob generates KeyPackage, computes hash, includes hash in MeshAnnounce
|
||||
//! 2. Bob broadcasts full KeyPackage periodically (or on request)
|
||||
//! 3. Alice receives Bob's KeyPackage, stores in cache
|
||||
//! 4. Alice wants to create group with Bob: fetches from cache, creates Welcome
|
||||
//! 5. Alice sends Welcome to Bob via mesh routing
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
use crate::announce::compute_keypackage_hash;
|
||||
|
||||
/// Default TTL for cached KeyPackages (24 hours).
const DEFAULT_TTL: Duration = Duration::from_secs(24 * 60 * 60);

/// Default maximum number of KeyPackages retained per address (allows rotation).
const DEFAULT_MAX_PER_ADDRESS: usize = 3;
|
||||
|
||||
/// A cached KeyPackage entry.
#[derive(Clone, Debug)]
pub struct CachedKeyPackage {
    /// The serialized MLS KeyPackage bytes.
    pub bytes: Vec<u8>,
    /// 8-byte truncated hash, used to match against announce frames.
    pub hash: [u8; 8],
    /// Insertion timestamp (also used as the eviction age).
    pub stored_at: Instant,
    /// Instant after which the entry is considered stale.
    pub expires_at: Instant,
}
|
||||
|
||||
impl CachedKeyPackage {
|
||||
/// Create a new cached entry with default TTL.
|
||||
pub fn new(bytes: Vec<u8>) -> Self {
|
||||
Self::with_ttl(bytes, DEFAULT_TTL)
|
||||
}
|
||||
|
||||
/// Create with custom TTL.
|
||||
pub fn with_ttl(bytes: Vec<u8>, ttl: Duration) -> Self {
|
||||
let hash = compute_keypackage_hash(&bytes);
|
||||
let now = Instant::now();
|
||||
Self {
|
||||
bytes,
|
||||
hash,
|
||||
stored_at: now,
|
||||
expires_at: now + ttl,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this entry has expired.
|
||||
pub fn is_expired(&self) -> bool {
|
||||
Instant::now() > self.expires_at
|
||||
}
|
||||
}
|
||||
|
||||
/// Cache for KeyPackages received from mesh peers.
|
||||
pub struct KeyPackageCache {
|
||||
/// Address -> list of cached KeyPackages (multiple for rotation).
|
||||
entries: HashMap<MeshAddress, Vec<CachedKeyPackage>>,
|
||||
/// Maximum KeyPackages stored per address.
|
||||
max_per_address: usize,
|
||||
/// Total capacity (max addresses).
|
||||
max_addresses: usize,
|
||||
}
|
||||
|
||||
impl KeyPackageCache {
|
||||
/// Create a new cache with default settings.
|
||||
pub fn new() -> Self {
|
||||
Self::with_capacity(1000, DEFAULT_MAX_PER_ADDRESS)
|
||||
}
|
||||
|
||||
/// Create with custom capacity.
|
||||
pub fn with_capacity(max_addresses: usize, max_per_address: usize) -> Self {
|
||||
Self {
|
||||
entries: HashMap::new(),
|
||||
max_per_address,
|
||||
max_addresses,
|
||||
}
|
||||
}
|
||||
|
||||
/// Store a KeyPackage for a given address.
|
||||
///
|
||||
/// Returns `true` if stored, `false` if rejected (at capacity or duplicate hash).
|
||||
pub fn store(&mut self, address: MeshAddress, keypackage_bytes: Vec<u8>) -> bool {
|
||||
let entry = CachedKeyPackage::new(keypackage_bytes);
|
||||
self.store_entry(address, entry)
|
||||
}
|
||||
|
||||
/// Store a KeyPackage entry.
|
||||
fn store_entry(&mut self, address: MeshAddress, entry: CachedKeyPackage) -> bool {
|
||||
// Check if we already have this exact KeyPackage
|
||||
if let Some(existing) = self.entries.get(&address) {
|
||||
if existing.iter().any(|e| e.hash == entry.hash) {
|
||||
return false; // Duplicate
|
||||
}
|
||||
}
|
||||
|
||||
// Check total capacity
|
||||
if !self.entries.contains_key(&address) && self.entries.len() >= self.max_addresses {
|
||||
// Evict oldest entry
|
||||
self.evict_oldest();
|
||||
}
|
||||
|
||||
let list = self.entries.entry(address).or_default();
|
||||
|
||||
// Enforce per-address limit
|
||||
while list.len() >= self.max_per_address {
|
||||
list.remove(0); // Remove oldest
|
||||
}
|
||||
|
||||
list.push(entry);
|
||||
true
|
||||
}
|
||||
|
||||
/// Get the newest KeyPackage for an address.
|
||||
pub fn get(&self, address: &MeshAddress) -> Option<&CachedKeyPackage> {
|
||||
self.entries
|
||||
.get(address)
|
||||
.and_then(|list| list.iter().rev().find(|e| !e.is_expired()))
|
||||
}
|
||||
|
||||
/// Get a KeyPackage by its hash.
|
||||
pub fn get_by_hash(&self, address: &MeshAddress, hash: &[u8; 8]) -> Option<&CachedKeyPackage> {
|
||||
self.entries.get(address).and_then(|list| {
|
||||
list.iter()
|
||||
.rev()
|
||||
.find(|e| &e.hash == hash && !e.is_expired())
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the newest KeyPackage bytes for an address.
|
||||
pub fn get_bytes(&self, address: &MeshAddress) -> Option<Vec<u8>> {
|
||||
self.get(address).map(|e| e.bytes.clone())
|
||||
}
|
||||
|
||||
/// Check if we have a KeyPackage matching a given hash.
|
||||
pub fn has_hash(&self, address: &MeshAddress, hash: &[u8; 8]) -> bool {
|
||||
self.get_by_hash(address, hash).is_some()
|
||||
}
|
||||
|
||||
/// Remove all expired entries. Returns count removed.
|
||||
pub fn gc_expired(&mut self) -> usize {
|
||||
let mut removed = 0;
|
||||
self.entries.retain(|_, list| {
|
||||
let before = list.len();
|
||||
list.retain(|e| !e.is_expired());
|
||||
removed += before - list.len();
|
||||
!list.is_empty()
|
||||
});
|
||||
removed
|
||||
}
|
||||
|
||||
/// Evict the oldest entry across all addresses.
|
||||
fn evict_oldest(&mut self) {
|
||||
let oldest_addr = self
|
||||
.entries
|
||||
.iter()
|
||||
.filter_map(|(addr, list)| {
|
||||
list.first().map(|e| (addr.clone(), e.stored_at))
|
||||
})
|
||||
.min_by_key(|(_, stored)| *stored)
|
||||
.map(|(addr, _)| addr);
|
||||
|
||||
if let Some(addr) = oldest_addr {
|
||||
if let Some(list) = self.entries.get_mut(&addr) {
|
||||
list.remove(0);
|
||||
if list.is_empty() {
|
||||
self.entries.remove(&addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of addresses with cached KeyPackages.
|
||||
pub fn len(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
/// Whether the cache is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.entries.is_empty()
|
||||
}
|
||||
|
||||
/// Total number of cached KeyPackages.
|
||||
pub fn total_keypackages(&self) -> usize {
|
||||
self.entries.values().map(|v| v.len()).sum()
|
||||
}
|
||||
|
||||
/// Consume a KeyPackage (remove after use, as MLS KeyPackages are single-use).
|
||||
///
|
||||
/// Returns the KeyPackage bytes if found.
|
||||
pub fn consume(&mut self, address: &MeshAddress, hash: &[u8; 8]) -> Option<Vec<u8>> {
|
||||
let list = self.entries.get_mut(address)?;
|
||||
let idx = list.iter().position(|e| &e.hash == hash)?;
|
||||
let entry = list.remove(idx);
|
||||
if list.is_empty() {
|
||||
self.entries.remove(address);
|
||||
}
|
||||
Some(entry.bytes)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for KeyPackageCache {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn make_keypackage(seed: u8) -> Vec<u8> {
|
||||
vec![seed; 100 + seed as usize]
|
||||
}
|
||||
|
||||
fn make_address(seed: u8) -> MeshAddress {
|
||||
MeshAddress::from_bytes([seed; 16])
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn store_and_retrieve() {
|
||||
let mut cache = KeyPackageCache::new();
|
||||
let addr = make_address(1);
|
||||
let kp = make_keypackage(1);
|
||||
let hash = compute_keypackage_hash(&kp);
|
||||
|
||||
assert!(cache.store(addr, kp.clone()));
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
let retrieved = cache.get(&addr).expect("should exist");
|
||||
assert_eq!(retrieved.bytes, kp);
|
||||
assert_eq!(retrieved.hash, hash);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reject_duplicate() {
|
||||
let mut cache = KeyPackageCache::new();
|
||||
let addr = make_address(2);
|
||||
let kp = make_keypackage(2);
|
||||
|
||||
assert!(cache.store(addr, kp.clone()));
|
||||
assert!(!cache.store(addr, kp), "duplicate should be rejected");
|
||||
assert_eq!(cache.total_keypackages(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiple_per_address() {
|
||||
let mut cache = KeyPackageCache::with_capacity(100, 3);
|
||||
let addr = make_address(3);
|
||||
|
||||
assert!(cache.store(addr, make_keypackage(1)));
|
||||
assert!(cache.store(addr, make_keypackage(2)));
|
||||
assert!(cache.store(addr, make_keypackage(3)));
|
||||
assert_eq!(cache.total_keypackages(), 3);
|
||||
|
||||
// Fourth should evict first
|
||||
assert!(cache.store(addr, make_keypackage(4)));
|
||||
assert_eq!(cache.total_keypackages(), 3);
|
||||
|
||||
// First should be gone
|
||||
let hash1 = compute_keypackage_hash(&make_keypackage(1));
|
||||
assert!(!cache.has_hash(&addr, &hash1));
|
||||
|
||||
// Fourth should be present
|
||||
let hash4 = compute_keypackage_hash(&make_keypackage(4));
|
||||
assert!(cache.has_hash(&addr, &hash4));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn consume_removes_keypackage() {
|
||||
let mut cache = KeyPackageCache::new();
|
||||
let addr = make_address(4);
|
||||
let kp = make_keypackage(4);
|
||||
let hash = compute_keypackage_hash(&kp);
|
||||
|
||||
cache.store(addr, kp.clone());
|
||||
assert!(cache.has_hash(&addr, &hash));
|
||||
|
||||
let consumed = cache.consume(&addr, &hash).expect("should consume");
|
||||
assert_eq!(consumed, kp);
|
||||
assert!(!cache.has_hash(&addr, &hash));
|
||||
assert!(cache.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_by_hash() {
|
||||
let mut cache = KeyPackageCache::new();
|
||||
let addr = make_address(5);
|
||||
let kp1 = make_keypackage(51);
|
||||
let kp2 = make_keypackage(52);
|
||||
let hash1 = compute_keypackage_hash(&kp1);
|
||||
let hash2 = compute_keypackage_hash(&kp2);
|
||||
|
||||
cache.store(addr, kp1.clone());
|
||||
cache.store(addr, kp2.clone());
|
||||
|
||||
let found1 = cache.get_by_hash(&addr, &hash1).expect("hash1");
|
||||
assert_eq!(found1.bytes, kp1);
|
||||
|
||||
let found2 = cache.get_by_hash(&addr, &hash2).expect("hash2");
|
||||
assert_eq!(found2.bytes, kp2);
|
||||
|
||||
let wrong_hash = [0xFFu8; 8];
|
||||
assert!(cache.get_by_hash(&addr, &wrong_hash).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn capacity_eviction() {
|
||||
let mut cache = KeyPackageCache::with_capacity(2, 1);
|
||||
|
||||
let addr1 = make_address(1);
|
||||
let addr2 = make_address(2);
|
||||
let addr3 = make_address(3);
|
||||
|
||||
cache.store(addr1, make_keypackage(1));
|
||||
cache.store(addr2, make_keypackage(2));
|
||||
assert_eq!(cache.len(), 2);
|
||||
|
||||
// Third should evict oldest (addr1)
|
||||
cache.store(addr3, make_keypackage(3));
|
||||
assert_eq!(cache.len(), 2);
|
||||
assert!(cache.get(&addr1).is_none());
|
||||
assert!(cache.get(&addr2).is_some());
|
||||
assert!(cache.get(&addr3).is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn expiry() {
|
||||
let mut cache = KeyPackageCache::new();
|
||||
let addr = make_address(6);
|
||||
|
||||
// Create entry with very short TTL
|
||||
let kp = make_keypackage(6);
|
||||
let entry = CachedKeyPackage::with_ttl(kp, Duration::from_millis(1));
|
||||
cache.store_entry(addr, entry);
|
||||
|
||||
assert_eq!(cache.total_keypackages(), 1);
|
||||
|
||||
// Wait for expiry
|
||||
std::thread::sleep(Duration::from_millis(10));
|
||||
|
||||
// GC should remove it
|
||||
let removed = cache.gc_expired();
|
||||
assert_eq!(removed, 1);
|
||||
assert!(cache.is_empty());
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//! P2P transport layer for quicproquo using iroh.
|
||||
//! P2P transport layer for quicprochat using iroh.
|
||||
//!
|
||||
//! Provides direct peer-to-peer QUIC connections with NAT traversal via iroh
|
||||
//! relay servers. When both peers are online, messages bypass the central
|
||||
@@ -12,11 +12,35 @@
|
||||
//! └── QUIC/TLS ── Server ── QUIC/TLS ┘ (fallback: store-and-forward)
|
||||
//! ```
|
||||
|
||||
pub mod address;
|
||||
pub mod announce;
|
||||
pub mod announce_protocol;
|
||||
pub mod config;
|
||||
pub mod crypto_negotiation;
|
||||
pub mod error;
|
||||
pub mod fapp;
|
||||
pub mod fapp_router;
|
||||
pub mod broadcast;
|
||||
pub mod envelope;
|
||||
pub mod envelope_v2;
|
||||
pub mod keypackage_cache;
|
||||
pub mod mesh_protocol;
|
||||
pub mod metrics;
|
||||
pub mod mls_lite;
|
||||
pub mod persistence;
|
||||
pub mod rate_limit;
|
||||
pub mod shutdown;
|
||||
pub mod identity;
|
||||
pub mod link;
|
||||
pub mod mesh_router;
|
||||
pub mod routing;
|
||||
pub mod routing_table;
|
||||
pub mod store;
|
||||
pub mod transport;
|
||||
pub mod transport_iroh;
|
||||
pub mod transport_manager;
|
||||
pub mod transport_tcp;
|
||||
pub mod transport_lora;
|
||||
#[cfg(feature = "traffic-resistance")]
|
||||
pub mod traffic_resistance;
|
||||
|
||||
@@ -29,10 +53,10 @@ use crate::envelope::MeshEnvelope;
|
||||
use crate::identity::MeshIdentity;
|
||||
use crate::store::MeshStore;
|
||||
|
||||
/// ALPN protocol identifier for quicproquo P2P messaging.
|
||||
/// Updated from the original project name "quicnprotochat" to "quicproquo" (breaking wire change;
|
||||
/// ALPN protocol identifier for quicprochat P2P messaging.
|
||||
/// Updated from the original project name "quicnprotochat" to "quicprochat" (breaking wire change;
|
||||
/// all peers must be on the same version to connect).
|
||||
const P2P_ALPN: &[u8] = b"quicproquo/p2p/1";
|
||||
const P2P_ALPN: &[u8] = b"quicprochat/p2p/1";
|
||||
|
||||
/// A P2P node backed by an iroh endpoint.
|
||||
///
|
||||
@@ -204,7 +228,7 @@ impl P2pNode {
|
||||
.ok_or_else(|| anyhow::anyhow!("mesh identity not configured"))?;
|
||||
|
||||
let envelope = MeshEnvelope::new(identity, recipient_key, payload, ttl_secs, 0);
|
||||
let bytes = envelope.to_bytes();
|
||||
let bytes = envelope.to_wire();
|
||||
|
||||
if let Some(addr) = peer_addr {
|
||||
self.send(addr, &bytes).await?;
|
||||
@@ -257,7 +281,7 @@ impl P2pNode {
|
||||
for env in envelopes {
|
||||
if env.can_forward() {
|
||||
let fwd = env.forwarded();
|
||||
let bytes = fwd.to_bytes();
|
||||
let bytes = fwd.to_wire();
|
||||
self.send(peer_addr.clone(), &bytes).await?;
|
||||
forwarded += 1;
|
||||
}
|
||||
@@ -318,7 +342,7 @@ impl P2pNode {
|
||||
|
||||
// Create a broadcast envelope (empty recipient_key signals broadcast).
|
||||
let envelope = MeshEnvelope::new(identity, &[], encrypted, 300, 0);
|
||||
let bytes = envelope.to_bytes();
|
||||
let bytes = envelope.to_wire();
|
||||
|
||||
// Store in the mesh store for flood-forwarding.
|
||||
let mut store = self
|
||||
492
crates/quicprochat-p2p/src/link.rs
Normal file
492
crates/quicprochat-p2p/src/link.rs
Normal file
@@ -0,0 +1,492 @@
|
||||
//! Lightweight encrypted mesh link for constrained transports.
|
||||
//!
|
||||
//! On high-bandwidth transports (QUIC/TCP), we use TLS 1.3. On constrained
|
||||
//! transports (LoRa, Serial), the full TLS handshake is too expensive
|
||||
//! (~2-4 KB). This module provides a minimal 3-packet handshake that
|
||||
//! establishes a ChaCha20-Poly1305 encrypted session in ~240 bytes total.
|
||||
//!
|
||||
//! # Handshake Protocol
|
||||
//!
|
||||
//! ```text
|
||||
//! Packet 1: Initiator -> Responder (80 bytes)
|
||||
//! [initiator_addr: 16][eph_x25519_pub: 32][nonce: 24][flags: 8]
|
||||
//!
|
||||
//! Packet 2: Responder -> Initiator (96 bytes)
|
||||
//! [responder_addr: 16][eph_x25519_pub: 32][encrypted_proof: 32][tag: 16]
|
||||
//!
|
||||
//! Packet 3: Initiator -> Responder (48 bytes)
|
||||
//! [encrypted_proof: 32][tag: 16]
|
||||
//!
|
||||
//! Total: 224 bytes
|
||||
//!
|
||||
//! Shared secret: HKDF-SHA256(ikm = X25519(eph_a, eph_b), info = "qpc-mesh-link-v1")
|
||||
//! ```
|
||||
|
||||
use chacha20poly1305::aead::{Aead, KeyInit};
|
||||
use chacha20poly1305::{ChaCha20Poly1305, Nonce};
|
||||
use hkdf::Hkdf;
|
||||
use rand::rngs::OsRng;
|
||||
use rand::RngCore;
|
||||
use sha2::Sha256;
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
|
||||
/// Errors that can occur during link handshake or encryption.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum LinkError {
|
||||
/// Received packet has wrong length.
|
||||
#[error("invalid packet length: expected {expected}, got {got}")]
|
||||
InvalidLength { expected: usize, got: usize },
|
||||
|
||||
/// AEAD decryption failed (wrong key or tampered data).
|
||||
#[error("decryption failed: invalid ciphertext or authentication tag")]
|
||||
DecryptionFailed,
|
||||
|
||||
/// The proof inside a handshake packet did not match the expected address.
|
||||
#[error("handshake proof mismatch: peer address does not match encrypted proof")]
|
||||
ProofMismatch,
|
||||
}
|
||||
|
||||
/// Packet sizes for the 3-packet handshake.
|
||||
pub const PACKET1_LEN: usize = 80; // 16 + 32 + 24 + 8
|
||||
pub const PACKET2_LEN: usize = 96; // 16 + 32 + 16 + 16 + 16 (addr + pub + encrypted_addr + tag)
|
||||
pub const PACKET3_LEN: usize = 48; // 16 + 16 + 16 (encrypted_addr + tag)
|
||||
|
||||
/// Derive a 32-byte session key from a shared secret and nonce via HKDF-SHA256.
|
||||
fn derive_session_key(shared_secret: &[u8], salt: &[u8]) -> [u8; 32] {
|
||||
let hk = Hkdf::<Sha256>::new(Some(salt), shared_secret);
|
||||
let mut key = [0u8; 32];
|
||||
hk.expand(b"qpc-mesh-link-v1", &mut key)
|
||||
.expect("HKDF expand to 32 bytes should never fail");
|
||||
key
|
||||
}
|
||||
|
||||
/// Build a ChaCha20Poly1305 nonce from a u64 counter (zero-padded, little-endian).
|
||||
fn counter_nonce(counter: u64) -> Nonce {
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
nonce_bytes[..8].copy_from_slice(&counter.to_le_bytes());
|
||||
*Nonce::from_slice(&nonce_bytes)
|
||||
}
|
||||
|
||||
/// An established encrypted mesh link session.
|
||||
pub struct MeshLink {
|
||||
/// Derived symmetric key for ChaCha20-Poly1305.
|
||||
session_key: [u8; 32],
|
||||
/// Remote peer's mesh address.
|
||||
remote_address: MeshAddress,
|
||||
/// Message counter for nonce derivation (send direction).
|
||||
send_counter: u64,
|
||||
/// Message counter for nonce derivation (receive direction).
|
||||
recv_counter: u64,
|
||||
}
|
||||
|
||||
impl Drop for MeshLink {
|
||||
fn drop(&mut self) {
|
||||
self.session_key.zeroize();
|
||||
}
|
||||
}
|
||||
|
||||
impl MeshLink {
|
||||
/// Encrypt a message using the session key.
|
||||
///
|
||||
/// Returns the ciphertext (plaintext + 16-byte Poly1305 tag).
|
||||
pub fn encrypt(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, LinkError> {
|
||||
// Nonces for encrypt start at offset 256 to avoid collision with handshake nonces.
|
||||
let nonce = counter_nonce(256 + self.send_counter);
|
||||
let cipher = ChaCha20Poly1305::new((&self.session_key).into());
|
||||
let ciphertext = cipher
|
||||
.encrypt(&nonce, plaintext)
|
||||
.map_err(|_| LinkError::DecryptionFailed)?;
|
||||
self.send_counter += 1;
|
||||
Ok(ciphertext)
|
||||
}
|
||||
|
||||
/// Decrypt a message using the session key.
|
||||
pub fn decrypt(&mut self, ciphertext: &[u8]) -> Result<Vec<u8>, LinkError> {
|
||||
let nonce = counter_nonce(256 + self.recv_counter);
|
||||
let cipher = ChaCha20Poly1305::new((&self.session_key).into());
|
||||
let plaintext = cipher
|
||||
.decrypt(&nonce, ciphertext)
|
||||
.map_err(|_| LinkError::DecryptionFailed)?;
|
||||
self.recv_counter += 1;
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
/// Remote peer's address.
|
||||
pub fn remote_address(&self) -> MeshAddress {
|
||||
self.remote_address
|
||||
}
|
||||
|
||||
/// Number of messages sent on this link.
|
||||
pub fn messages_sent(&self) -> u64 {
|
||||
self.send_counter
|
||||
}
|
||||
|
||||
/// Number of messages received on this link.
|
||||
pub fn messages_received(&self) -> u64 {
|
||||
self.recv_counter
|
||||
}
|
||||
|
||||
/// Access the session key (for testing only).
|
||||
#[cfg(test)]
|
||||
fn session_key(&self) -> &[u8; 32] {
|
||||
&self.session_key
|
||||
}
|
||||
}
|
||||
|
||||
/// Handshake state for the initiator side of a mesh link.
|
||||
pub struct LinkInitiator {
|
||||
local_address: MeshAddress,
|
||||
eph_secret: EphemeralSecret,
|
||||
nonce: [u8; 24],
|
||||
}
|
||||
|
||||
/// Handshake state for the responder side of a mesh link.
|
||||
pub struct LinkResponder {
|
||||
remote_address: MeshAddress,
|
||||
session_key: [u8; 32],
|
||||
}
|
||||
|
||||
impl Drop for LinkResponder {
|
||||
fn drop(&mut self) {
|
||||
self.session_key.zeroize();
|
||||
}
|
||||
}
|
||||
|
||||
impl LinkInitiator {
|
||||
/// Create initiator state and generate Packet 1.
|
||||
///
|
||||
/// Packet 1 layout (80 bytes):
|
||||
/// `[initiator_addr: 16][eph_pub: 32][nonce: 24][flags: 8]`
|
||||
pub fn new(local_address: MeshAddress) -> (Self, Vec<u8>) {
|
||||
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||
let eph_public = X25519Public::from(&eph_secret);
|
||||
|
||||
let mut nonce = [0u8; 24];
|
||||
OsRng.fill_bytes(&mut nonce);
|
||||
|
||||
let mut packet = Vec::with_capacity(PACKET1_LEN);
|
||||
packet.extend_from_slice(local_address.as_bytes());
|
||||
packet.extend_from_slice(eph_public.as_bytes());
|
||||
packet.extend_from_slice(&nonce);
|
||||
packet.extend_from_slice(&[0u8; 8]); // flags: reserved
|
||||
|
||||
let initiator = Self {
|
||||
local_address,
|
||||
eph_secret,
|
||||
nonce,
|
||||
};
|
||||
|
||||
(initiator, packet)
|
||||
}
|
||||
|
||||
/// Process Packet 2 from responder, generate Packet 3, return completed link.
|
||||
///
|
||||
/// Packet 2 layout (96 bytes):
|
||||
/// `[responder_addr: 16][eph_pub: 32][encrypted_responder_addr: 16+16]`
|
||||
///
|
||||
/// Packet 3 layout (48 bytes):
|
||||
/// `[encrypted_initiator_addr: 16+16][padding: 16]`
|
||||
pub fn process_response(self, packet2: &[u8]) -> Result<(MeshLink, Vec<u8>), LinkError> {
|
||||
if packet2.len() != PACKET2_LEN {
|
||||
return Err(LinkError::InvalidLength {
|
||||
expected: PACKET2_LEN,
|
||||
got: packet2.len(),
|
||||
});
|
||||
}
|
||||
|
||||
// Parse Packet 2.
|
||||
let mut responder_addr_bytes = [0u8; 16];
|
||||
responder_addr_bytes.copy_from_slice(&packet2[..16]);
|
||||
let responder_address = MeshAddress::from_bytes(responder_addr_bytes);
|
||||
|
||||
let mut responder_eph_pub_bytes = [0u8; 32];
|
||||
responder_eph_pub_bytes.copy_from_slice(&packet2[16..48]);
|
||||
let responder_eph_pub = X25519Public::from(responder_eph_pub_bytes);
|
||||
|
||||
let encrypted_proof = &packet2[48..80]; // 16-byte ciphertext + 16-byte Poly1305 tag = 32 bytes
|
||||
|
||||
// Compute shared secret (consumes eph_secret).
|
||||
let shared_secret = self.eph_secret.diffie_hellman(&responder_eph_pub);
|
||||
|
||||
// Derive session key.
|
||||
let session_key = derive_session_key(shared_secret.as_bytes(), &self.nonce);
|
||||
|
||||
// Verify responder's proof: decrypt and check it matches responder_addr.
|
||||
let cipher = ChaCha20Poly1305::new((&session_key).into());
|
||||
let proof_nonce = counter_nonce(0);
|
||||
let decrypted_proof = cipher
|
||||
.decrypt(&proof_nonce, encrypted_proof)
|
||||
.map_err(|_| LinkError::DecryptionFailed)?;
|
||||
|
||||
if decrypted_proof.as_slice() != responder_addr_bytes.as_slice() {
|
||||
return Err(LinkError::ProofMismatch);
|
||||
}
|
||||
|
||||
// Build Packet 3: encrypt our address as proof.
|
||||
let proof_nonce_3 = counter_nonce(1);
|
||||
let encrypted_initiator_addr = cipher
|
||||
.encrypt(&proof_nonce_3, self.local_address.as_bytes().as_slice())
|
||||
.map_err(|_| LinkError::DecryptionFailed)?;
|
||||
|
||||
let mut packet3 = Vec::with_capacity(PACKET3_LEN);
|
||||
packet3.extend_from_slice(&encrypted_initiator_addr);
|
||||
// Pad to 48 bytes.
|
||||
packet3.resize(PACKET3_LEN, 0);
|
||||
|
||||
let link = MeshLink {
|
||||
session_key,
|
||||
remote_address: responder_address,
|
||||
send_counter: 0,
|
||||
recv_counter: 0,
|
||||
};
|
||||
|
||||
Ok((link, packet3))
|
||||
}
|
||||
}
|
||||
|
||||
impl LinkResponder {
|
||||
/// Process Packet 1 from initiator, generate Packet 2.
|
||||
///
|
||||
/// Packet 1 layout (80 bytes):
|
||||
/// `[initiator_addr: 16][eph_pub: 32][nonce: 24][flags: 8]`
|
||||
///
|
||||
/// Packet 2 layout (96 bytes):
|
||||
/// `[responder_addr: 16][eph_pub: 32][encrypted_responder_addr: 16+16]`
|
||||
pub fn new(
|
||||
local_address: MeshAddress,
|
||||
packet1: &[u8],
|
||||
) -> Result<(Self, Vec<u8>), LinkError> {
|
||||
if packet1.len() != PACKET1_LEN {
|
||||
return Err(LinkError::InvalidLength {
|
||||
expected: PACKET1_LEN,
|
||||
got: packet1.len(),
|
||||
});
|
||||
}
|
||||
|
||||
// Parse Packet 1.
|
||||
let mut initiator_addr_bytes = [0u8; 16];
|
||||
initiator_addr_bytes.copy_from_slice(&packet1[..16]);
|
||||
let remote_address = MeshAddress::from_bytes(initiator_addr_bytes);
|
||||
|
||||
let mut initiator_eph_pub_bytes = [0u8; 32];
|
||||
initiator_eph_pub_bytes.copy_from_slice(&packet1[16..48]);
|
||||
let initiator_eph_pub = X25519Public::from(initiator_eph_pub_bytes);
|
||||
|
||||
let mut nonce = [0u8; 24];
|
||||
nonce.copy_from_slice(&packet1[48..72]);
|
||||
// flags at [72..80] — reserved, ignored.
|
||||
|
||||
// Generate our ephemeral keypair.
|
||||
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||
let eph_public = X25519Public::from(&eph_secret);
|
||||
|
||||
// Compute shared secret (consumes eph_secret).
|
||||
let shared_secret = eph_secret.diffie_hellman(&initiator_eph_pub);
|
||||
|
||||
// Derive session key.
|
||||
let session_key = derive_session_key(shared_secret.as_bytes(), &nonce);
|
||||
|
||||
// Build Packet 2: our address + our eph_pub + encrypted proof of our address.
|
||||
let cipher = ChaCha20Poly1305::new((&session_key).into());
|
||||
let proof_nonce = counter_nonce(0);
|
||||
let encrypted_proof = cipher
|
||||
.encrypt(&proof_nonce, local_address.as_bytes().as_slice())
|
||||
.map_err(|_| LinkError::DecryptionFailed)?;
|
||||
|
||||
let mut packet2 = Vec::with_capacity(PACKET2_LEN);
|
||||
packet2.extend_from_slice(local_address.as_bytes());
|
||||
packet2.extend_from_slice(eph_public.as_bytes());
|
||||
packet2.extend_from_slice(&encrypted_proof);
|
||||
// Pad to PACKET2_LEN for fixed-size framing on constrained transports.
|
||||
packet2.resize(PACKET2_LEN, 0);
|
||||
|
||||
let responder = Self {
|
||||
remote_address,
|
||||
session_key,
|
||||
};
|
||||
|
||||
Ok((responder, packet2))
|
||||
}
|
||||
|
||||
/// Process Packet 3 from initiator, return completed link.
|
||||
///
|
||||
/// Packet 3 layout (48 bytes):
|
||||
/// `[encrypted_initiator_addr: 16+16][padding: 16]`
|
||||
pub fn complete(self, packet3: &[u8]) -> Result<MeshLink, LinkError> {
|
||||
if packet3.len() != PACKET3_LEN {
|
||||
return Err(LinkError::InvalidLength {
|
||||
expected: PACKET3_LEN,
|
||||
got: packet3.len(),
|
||||
});
|
||||
}
|
||||
|
||||
// The encrypted proof is the first 32 bytes (16 plaintext + 16 tag).
|
||||
let encrypted_proof = &packet3[..32];
|
||||
|
||||
let cipher = ChaCha20Poly1305::new((&self.session_key).into());
|
||||
let proof_nonce = counter_nonce(1);
|
||||
let decrypted_proof = cipher
|
||||
.decrypt(&proof_nonce, encrypted_proof)
|
||||
.map_err(|_| LinkError::DecryptionFailed)?;
|
||||
|
||||
let mut expected_addr = [0u8; 16];
|
||||
expected_addr.copy_from_slice(self.remote_address.as_bytes());
|
||||
|
||||
if decrypted_proof.as_slice() != expected_addr.as_slice() {
|
||||
return Err(LinkError::ProofMismatch);
|
||||
}
|
||||
|
||||
Ok(MeshLink {
|
||||
session_key: self.session_key,
|
||||
remote_address: self.remote_address,
|
||||
send_counter: 0,
|
||||
recv_counter: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn test_address(byte: u8) -> MeshAddress {
|
||||
MeshAddress::from_public_key(&[byte; 32])
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn full_handshake_roundtrip() {
|
||||
let addr_a = test_address(1);
|
||||
let addr_b = test_address(2);
|
||||
|
||||
// Initiator creates Packet 1.
|
||||
let (initiator, packet1) = LinkInitiator::new(addr_a);
|
||||
assert_eq!(packet1.len(), PACKET1_LEN);
|
||||
|
||||
// Responder processes Packet 1, creates Packet 2.
|
||||
let (responder, packet2) = LinkResponder::new(addr_b, &packet1).expect("responder::new");
|
||||
assert_eq!(packet2.len(), PACKET2_LEN);
|
||||
|
||||
// Initiator processes Packet 2, creates Packet 3, gets link.
|
||||
let (link_a, packet3) = initiator
|
||||
.process_response(&packet2)
|
||||
.expect("initiator::process_response");
|
||||
assert_eq!(packet3.len(), PACKET3_LEN);
|
||||
|
||||
// Responder processes Packet 3, gets link.
|
||||
let link_b = responder.complete(&packet3).expect("responder::complete");
|
||||
|
||||
// Both sides should have the same session key.
|
||||
assert_eq!(link_a.session_key(), link_b.session_key());
|
||||
|
||||
// Check remote addresses.
|
||||
assert_eq!(link_a.remote_address(), addr_b);
|
||||
assert_eq!(link_b.remote_address(), addr_a);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn encrypt_decrypt_roundtrip() {
|
||||
let addr_a = test_address(10);
|
||||
let addr_b = test_address(20);
|
||||
|
||||
let (initiator, packet1) = LinkInitiator::new(addr_a);
|
||||
let (responder, packet2) = LinkResponder::new(addr_b, &packet1).expect("responder");
|
||||
let (mut link_a, packet3) = initiator.process_response(&packet2).expect("initiator");
|
||||
let mut link_b = responder.complete(&packet3).expect("complete");
|
||||
|
||||
let plaintext = b"hello constrained mesh";
|
||||
let ciphertext = link_a.encrypt(plaintext).expect("encrypt");
|
||||
let decrypted = link_b.decrypt(&ciphertext).expect("decrypt");
|
||||
assert_eq!(decrypted, plaintext);
|
||||
|
||||
// Reverse direction.
|
||||
let plaintext2 = b"hello back";
|
||||
let ciphertext2 = link_b.encrypt(plaintext2).expect("encrypt");
|
||||
let decrypted2 = link_a.decrypt(&ciphertext2).expect("decrypt");
|
||||
assert_eq!(decrypted2, plaintext2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrong_key_fails_decrypt() {
|
||||
let addr_a = test_address(30);
|
||||
let addr_b = test_address(40);
|
||||
|
||||
let (initiator, packet1) = LinkInitiator::new(addr_a);
|
||||
let (responder, packet2) = LinkResponder::new(addr_b, &packet1).expect("responder");
|
||||
let (mut link_a, packet3) = initiator.process_response(&packet2).expect("initiator");
|
||||
let _link_b = responder.complete(&packet3).expect("complete");
|
||||
|
||||
let ciphertext = link_a.encrypt(b"secret").expect("encrypt");
|
||||
|
||||
// Create a link with a different session key.
|
||||
let mut fake_link = MeshLink {
|
||||
session_key: [0xFFu8; 32],
|
||||
remote_address: addr_a,
|
||||
send_counter: 0,
|
||||
recv_counter: 0,
|
||||
};
|
||||
|
||||
let result = fake_link.decrypt(&ciphertext);
|
||||
assert!(result.is_err(), "decryption with wrong key must fail");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn counter_increments() {
|
||||
let addr_a = test_address(50);
|
||||
let addr_b = test_address(60);
|
||||
|
||||
let (initiator, packet1) = LinkInitiator::new(addr_a);
|
||||
let (responder, packet2) = LinkResponder::new(addr_b, &packet1).expect("responder");
|
||||
let (mut link_a, packet3) = initiator.process_response(&packet2).expect("initiator");
|
||||
let mut link_b = responder.complete(&packet3).expect("complete");
|
||||
|
||||
assert_eq!(link_a.messages_sent(), 0);
|
||||
assert_eq!(link_b.messages_received(), 0);
|
||||
|
||||
link_a.encrypt(b"msg1").expect("encrypt");
|
||||
assert_eq!(link_a.messages_sent(), 1);
|
||||
|
||||
link_a.encrypt(b"msg2").expect("encrypt");
|
||||
assert_eq!(link_a.messages_sent(), 2);
|
||||
|
||||
// Decrypt two messages on the other side.
|
||||
// We need fresh ciphertexts — re-do with proper counter tracking.
|
||||
let addr_c = test_address(70);
|
||||
let addr_d = test_address(80);
|
||||
let (init2, p1) = LinkInitiator::new(addr_c);
|
||||
let (resp2, p2) = LinkResponder::new(addr_d, &p1).expect("responder");
|
||||
let (mut la, p3) = init2.process_response(&p2).expect("initiator");
|
||||
let mut lb = resp2.complete(&p3).expect("complete");
|
||||
|
||||
let ct1 = la.encrypt(b"msg1").expect("encrypt");
|
||||
let ct2 = la.encrypt(b"msg2").expect("encrypt");
|
||||
|
||||
lb.decrypt(&ct1).expect("decrypt");
|
||||
assert_eq!(lb.messages_received(), 1);
|
||||
|
||||
lb.decrypt(&ct2).expect("decrypt");
|
||||
assert_eq!(lb.messages_received(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn packet_sizes() {
|
||||
let addr = test_address(90);
|
||||
|
||||
let (_initiator, packet1) = LinkInitiator::new(addr);
|
||||
assert_eq!(packet1.len(), 80, "packet 1 must be 80 bytes");
|
||||
|
||||
// Complete a handshake to check packet 2 and 3 sizes.
|
||||
let addr_b = test_address(91);
|
||||
let (init, p1) = LinkInitiator::new(addr);
|
||||
let (resp, p2) = LinkResponder::new(addr_b, &p1).expect("responder");
|
||||
assert_eq!(p2.len(), 96, "packet 2 must be 96 bytes");
|
||||
|
||||
let (_link, p3) = init.process_response(&p2).expect("initiator");
|
||||
assert_eq!(p3.len(), 48, "packet 3 must be 48 bytes");
|
||||
|
||||
// Verify responder can complete.
|
||||
resp.complete(&p3).expect("complete");
|
||||
}
|
||||
}
|
||||
269
crates/quicprochat-p2p/src/mesh_protocol.rs
Normal file
269
crates/quicprochat-p2p/src/mesh_protocol.rs
Normal file
@@ -0,0 +1,269 @@
|
||||
//! Mesh protocol messages for peer-to-peer communication.
|
||||
//!
|
||||
//! This module defines the control messages used for mesh coordination:
|
||||
//! - KeyPackage request/response for MLS group setup
|
||||
//! - Future: route requests, capability queries, etc.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
|
||||
/// Protocol message type discriminator.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[repr(u8)]
|
||||
pub enum MessageType {
|
||||
/// Request a KeyPackage from a node.
|
||||
KeyPackageRequest = 0x10,
|
||||
/// Response with KeyPackage data.
|
||||
KeyPackageResponse = 0x11,
|
||||
/// Node has no KeyPackage available.
|
||||
KeyPackageUnavailable = 0x12,
|
||||
}
|
||||
|
||||
/// Request a KeyPackage from a peer.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct KeyPackageRequest {
|
||||
/// Who is requesting.
|
||||
pub requester_addr: MeshAddress,
|
||||
/// Whose KeyPackage is requested.
|
||||
pub target_addr: MeshAddress,
|
||||
/// Optional: specific hash to request (from announce).
|
||||
pub hash: Option<[u8; 8]>,
|
||||
/// Request ID for correlation.
|
||||
pub request_id: u32,
|
||||
}
|
||||
|
||||
impl KeyPackageRequest {
|
||||
/// Create a new request.
|
||||
pub fn new(requester: MeshAddress, target: MeshAddress) -> Self {
|
||||
Self {
|
||||
requester_addr: requester,
|
||||
target_addr: target,
|
||||
hash: None,
|
||||
request_id: rand::random(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create with specific hash.
|
||||
pub fn with_hash(requester: MeshAddress, target: MeshAddress, hash: [u8; 8]) -> Self {
|
||||
Self {
|
||||
requester_addr: requester,
|
||||
target_addr: target,
|
||||
hash: Some(hash),
|
||||
request_id: rand::random(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize to CBOR.
|
||||
pub fn to_wire(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::new();
|
||||
buf.push(MessageType::KeyPackageRequest as u8);
|
||||
ciborium::into_writer(self, &mut buf).expect("CBOR serialization");
|
||||
buf
|
||||
}
|
||||
|
||||
/// Deserialize from CBOR (after type byte).
|
||||
pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
|
||||
if bytes.is_empty() || bytes[0] != MessageType::KeyPackageRequest as u8 {
|
||||
anyhow::bail!("not a KeyPackageRequest");
|
||||
}
|
||||
let req: Self = ciborium::from_reader(&bytes[1..])?;
|
||||
Ok(req)
|
||||
}
|
||||
}
|
||||
|
||||
/// Response with KeyPackage data.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct KeyPackageResponse {
|
||||
/// Whose KeyPackage this is.
|
||||
pub owner_addr: MeshAddress,
|
||||
/// The serialized MLS KeyPackage.
|
||||
pub keypackage_bytes: Vec<u8>,
|
||||
/// Hash of the KeyPackage (for verification).
|
||||
pub hash: [u8; 8],
|
||||
/// Matching request ID.
|
||||
pub request_id: u32,
|
||||
}
|
||||
|
||||
impl KeyPackageResponse {
|
||||
/// Create a new response.
|
||||
pub fn new(
|
||||
owner: MeshAddress,
|
||||
keypackage_bytes: Vec<u8>,
|
||||
request_id: u32,
|
||||
) -> Self {
|
||||
let hash = crate::announce::compute_keypackage_hash(&keypackage_bytes);
|
||||
Self {
|
||||
owner_addr: owner,
|
||||
keypackage_bytes,
|
||||
hash,
|
||||
request_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize to CBOR.
|
||||
pub fn to_wire(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::new();
|
||||
buf.push(MessageType::KeyPackageResponse as u8);
|
||||
ciborium::into_writer(self, &mut buf).expect("CBOR serialization");
|
||||
buf
|
||||
}
|
||||
|
||||
/// Deserialize from CBOR (after type byte).
|
||||
pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
|
||||
if bytes.is_empty() || bytes[0] != MessageType::KeyPackageResponse as u8 {
|
||||
anyhow::bail!("not a KeyPackageResponse");
|
||||
}
|
||||
let resp: Self = ciborium::from_reader(&bytes[1..])?;
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
/// Verify the hash matches the KeyPackage.
|
||||
pub fn verify_hash(&self) -> bool {
|
||||
let computed = crate::announce::compute_keypackage_hash(&self.keypackage_bytes);
|
||||
computed == self.hash
|
||||
}
|
||||
}
|
||||
|
||||
/// Response indicating no KeyPackage available.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct KeyPackageUnavailable {
|
||||
/// Whose KeyPackage was requested.
|
||||
pub target_addr: MeshAddress,
|
||||
/// Matching request ID.
|
||||
pub request_id: u32,
|
||||
}
|
||||
|
||||
impl KeyPackageUnavailable {
|
||||
/// Create a new unavailable response.
|
||||
pub fn new(target: MeshAddress, request_id: u32) -> Self {
|
||||
Self {
|
||||
target_addr: target,
|
||||
request_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize to CBOR.
|
||||
pub fn to_wire(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::new();
|
||||
buf.push(MessageType::KeyPackageUnavailable as u8);
|
||||
ciborium::into_writer(self, &mut buf).expect("CBOR serialization");
|
||||
buf
|
||||
}
|
||||
|
||||
/// Deserialize from CBOR (after type byte).
|
||||
pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
|
||||
if bytes.is_empty() || bytes[0] != MessageType::KeyPackageUnavailable as u8 {
|
||||
anyhow::bail!("not a KeyPackageUnavailable");
|
||||
}
|
||||
let resp: Self = ciborium::from_reader(&bytes[1..])?;
|
||||
Ok(resp)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse the message type from wire bytes.
|
||||
pub fn parse_message_type(bytes: &[u8]) -> Option<MessageType> {
|
||||
if bytes.is_empty() {
|
||||
return None;
|
||||
}
|
||||
match bytes[0] {
|
||||
0x10 => Some(MessageType::KeyPackageRequest),
|
||||
0x11 => Some(MessageType::KeyPackageResponse),
|
||||
0x12 => Some(MessageType::KeyPackageUnavailable),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Deterministic 16-byte test address filled with `seed`.
    fn make_address(seed: u8) -> MeshAddress {
        MeshAddress::from_bytes([seed; 16])
    }

    /// Request survives a serialize/deserialize round-trip intact.
    #[test]
    fn request_roundtrip() {
        let req = KeyPackageRequest::new(make_address(1), make_address(2));
        let wire = req.to_wire();
        let restored = KeyPackageRequest::from_wire(&wire).expect("parse");

        assert_eq!(req.requester_addr, restored.requester_addr);
        assert_eq!(req.target_addr, restored.target_addr);
        assert_eq!(req.request_id, restored.request_id);
    }

    /// The optional hash field round-trips as `Some(hash)`.
    #[test]
    fn request_with_hash_roundtrip() {
        let hash = [0xAB; 8];
        let req = KeyPackageRequest::with_hash(make_address(1), make_address(2), hash);
        let wire = req.to_wire();
        let restored = KeyPackageRequest::from_wire(&wire).expect("parse");

        assert_eq!(req.hash, restored.hash);
        assert_eq!(Some(hash), restored.hash);
    }

    /// Response round-trips and the embedded hash still verifies.
    #[test]
    fn response_roundtrip() {
        let kp_bytes = vec![0x42; 100];
        let resp = KeyPackageResponse::new(make_address(3), kp_bytes.clone(), 12345);
        let wire = resp.to_wire();
        let restored = KeyPackageResponse::from_wire(&wire).expect("parse");

        assert_eq!(resp.owner_addr, restored.owner_addr);
        assert_eq!(resp.keypackage_bytes, restored.keypackage_bytes);
        assert_eq!(resp.hash, restored.hash);
        assert_eq!(resp.request_id, restored.request_id);
        assert!(restored.verify_hash());
    }

    /// Unavailable marker round-trips intact.
    #[test]
    fn unavailable_roundtrip() {
        let resp = KeyPackageUnavailable::new(make_address(4), 99999);
        let wire = resp.to_wire();
        let restored = KeyPackageUnavailable::from_wire(&wire).expect("parse");

        assert_eq!(resp.target_addr, restored.target_addr);
        assert_eq!(resp.request_id, restored.request_id);
    }

    /// The type byte emitted by each `to_wire` maps back to the right
    /// `MessageType`; empty/unknown bytes yield `None`.
    #[test]
    fn parse_message_type_works() {
        let req = KeyPackageRequest::new(make_address(1), make_address(2));
        let wire = req.to_wire();
        assert_eq!(parse_message_type(&wire), Some(MessageType::KeyPackageRequest));

        let resp = KeyPackageResponse::new(make_address(3), vec![0x42], 1);
        let wire = resp.to_wire();
        assert_eq!(parse_message_type(&wire), Some(MessageType::KeyPackageResponse));

        let unavail = KeyPackageUnavailable::new(make_address(4), 2);
        let wire = unavail.to_wire();
        assert_eq!(parse_message_type(&wire), Some(MessageType::KeyPackageUnavailable));

        assert_eq!(parse_message_type(&[]), None);
        assert_eq!(parse_message_type(&[0xFF]), None);
    }

    /// Prints wire sizes for manual inspection and asserts that the
    /// fixed framing overhead stays small.
    #[test]
    fn measure_protocol_overhead() {
        let req = KeyPackageRequest::new(make_address(1), make_address(2));
        let wire = req.to_wire();
        println!("KeyPackageRequest: {} bytes", wire.len());

        let kp_bytes = vec![0x42; 306]; // Typical MLS KeyPackage size
        let resp = KeyPackageResponse::new(make_address(3), kp_bytes.clone(), 12345);
        let wire = resp.to_wire();
        println!("KeyPackageResponse (306B payload): {} bytes", wire.len());
        println!("Response overhead: {} bytes", wire.len() - 306);

        let unavail = KeyPackageUnavailable::new(make_address(4), 99999);
        let wire = unavail.to_wire();
        println!("KeyPackageUnavailable: {} bytes", wire.len());

        // Assertions
        assert!(req.to_wire().len() < 100, "request should be compact");
        assert!(unavail.to_wire().len() < 50, "unavailable should be compact");
    }
}
|
||||
516
crates/quicprochat-p2p/src/mesh_router.rs
Normal file
516
crates/quicprochat-p2p/src/mesh_router.rs
Normal file
@@ -0,0 +1,516 @@
|
||||
//! Multi-hop mesh router using the distributed routing table.
|
||||
//!
|
||||
//! The [`MeshRouter`] delivers messages using the best available path:
|
||||
//! direct transport -> multi-hop via intermediate nodes -> store-and-forward.
|
||||
//!
|
||||
//! # Routing Algorithm
|
||||
//!
|
||||
//! ```text
|
||||
//! send(destination, payload):
|
||||
//! 1. Look up destination in routing table
|
||||
//! 2. If direct transport available -> send via transport
|
||||
//! 3. If next-hop known -> wrap in MeshEnvelope, send to next-hop
|
||||
//! 4. If no route -> queue in store-and-forward
|
||||
//! ```
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
|
||||
use crate::announce::compute_address;
|
||||
use crate::envelope::MeshEnvelope;
|
||||
use crate::identity::MeshIdentity;
|
||||
use crate::routing_table::RoutingTable;
|
||||
use crate::store::MeshStore;
|
||||
use crate::transport::TransportAddr;
|
||||
use crate::transport_manager::TransportManager;
|
||||
|
||||
/// How a message was delivered.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DeliveryResult {
    /// Sent directly to destination via a transport.
    Direct,
    /// Forwarded to next-hop node for relay.
    Forwarded,
    /// Queued in store-and-forward (destination unreachable).
    Stored,
    /// Delivered via server relay (legacy fallback).
    ServerRelay,
}

/// What to do with an incoming envelope.
///
/// Produced by [`MeshRouter::handle_incoming`]; the caller performs the
/// actual delivery/forward/store side effect.
#[derive(Debug)]
pub enum IncomingAction {
    /// Message is for us — deliver to application.
    Deliver(MeshEnvelope),
    /// Message is for someone else — forward it.
    Forward {
        /// Envelope with the hop count already incremented.
        envelope: MeshEnvelope,
        /// Transport address of the relay to send it to.
        next_hop: TransportAddr,
    },
    /// Message should be stored for later forwarding.
    Store(MeshEnvelope),
    /// Message was dropped (expired, max hops, invalid).
    Dropped(String),
}
|
||||
|
||||
/// Per-destination delivery statistics.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct DeliveryStats {
|
||||
pub direct_count: u64,
|
||||
pub forwarded_count: u64,
|
||||
pub stored_count: u64,
|
||||
pub relay_count: u64,
|
||||
pub last_delivery: Option<Instant>,
|
||||
pub avg_latency: Option<Duration>,
|
||||
}
|
||||
|
||||
impl DeliveryStats {
|
||||
fn record(&mut self, method: DeliveryResult, latency: Duration) {
|
||||
match method {
|
||||
DeliveryResult::Direct => self.direct_count += 1,
|
||||
DeliveryResult::Forwarded => self.forwarded_count += 1,
|
||||
DeliveryResult::Stored => self.stored_count += 1,
|
||||
DeliveryResult::ServerRelay => self.relay_count += 1,
|
||||
}
|
||||
self.last_delivery = Some(Instant::now());
|
||||
self.avg_latency = Some(match self.avg_latency {
|
||||
Some(prev) => (prev + latency) / 2,
|
||||
None => latency,
|
||||
});
|
||||
}
|
||||
|
||||
/// Total number of deliveries across all methods.
|
||||
pub fn total(&self) -> u64 {
|
||||
self.direct_count + self.forwarded_count + self.stored_count + self.relay_count
|
||||
}
|
||||
}
|
||||
|
||||
/// Multi-hop mesh message router.
///
/// Owns the node identity plus shared handles to the routing table,
/// transport manager, and store-and-forward queue; see the module docs
/// for the routing priority.
pub struct MeshRouter {
    /// This node's mesh identity.
    identity: MeshIdentity,
    /// This node's 16-byte truncated address (derived from the identity
    /// public key in `new`).
    local_address: [u8; 16],
    /// Distributed routing table (shared, read-mostly).
    routes: Arc<RwLock<RoutingTable>>,
    /// Transport manager for sending packets.
    transports: Arc<TransportManager>,
    /// Store-and-forward queue for unreachable destinations.
    store: Arc<Mutex<MeshStore>>,
    /// Per-destination delivery stats (best-effort; lock failures are
    /// ignored in `record_stats`).
    stats: Mutex<HashMap<[u8; 16], DeliveryStats>>,
}
|
||||
|
||||
impl MeshRouter {
    /// Create a new mesh router.
    ///
    /// The local 16-byte address is derived once from the identity's
    /// public key.
    pub fn new(
        identity: MeshIdentity,
        routes: Arc<RwLock<RoutingTable>>,
        transports: Arc<TransportManager>,
        store: Arc<Mutex<MeshStore>>,
    ) -> Self {
        let local_address = compute_address(&identity.public_key());
        Self {
            identity,
            local_address,
            routes,
            transports,
            store,
            stats: Mutex::new(HashMap::new()),
        }
    }

    /// Send a payload to a destination identified by its 16-byte mesh address.
    ///
    /// Routing priority:
    /// 1. Route found in routing table -> wrap in envelope and send via transport
    /// 2. No route -> store for later forwarding
    ///
    /// Errors if a lock is poisoned, the transport send fails, or the
    /// store rejects the envelope.
    pub async fn send(&self, dest_address: &[u8; 16], payload: &[u8]) -> Result<DeliveryResult> {
        let start = Instant::now();

        // Look up destination in routing table.
        // Copy the fields out so the read lock is released before any await.
        let route_info = {
            let table = self
                .routes
                .read()
                .map_err(|e| anyhow::anyhow!("routing table lock poisoned: {e}"))?;
            table.lookup(dest_address).map(|entry| {
                (
                    entry.identity_key,
                    entry.next_hop_addr.clone(),
                    entry.hops,
                )
            })
        };

        if let Some((dest_key, next_hop_addr, hops)) = route_info {
            // Build an envelope addressed to the destination.
            // NOTE(review): the trailing args (300, 0) appear to be TTL
            // seconds and an initial hop value — confirm against
            // MeshEnvelope::new's signature.
            let envelope =
                MeshEnvelope::new(&self.identity, &dest_key, payload.to_vec(), 300, 0);
            let wire = envelope.to_wire();

            self.transports.send(&next_hop_addr, &wire).await?;

            // Classify: if destination is directly reachable (hop count <= 1),
            // consider it Direct; otherwise it's Forwarded through intermediaries.
            let result = if hops <= 1 {
                DeliveryResult::Direct
            } else {
                DeliveryResult::Forwarded
            };

            let latency = start.elapsed();
            self.record_stats(dest_address, result, latency);
            Ok(result)
        } else {
            // No route — store for later forwarding.
            // We need a recipient key for the store. Since we only have the address
            // and no key, store with the address zero-padded to 32 bytes as a key
            // placeholder. The drain_store_for method matches on this convention.
            let mut recipient_key = [0u8; 32];
            recipient_key[..16].copy_from_slice(dest_address);

            let envelope = MeshEnvelope::new(
                &self.identity,
                &recipient_key,
                payload.to_vec(),
                300,
                0,
            );
            // Scope the store lock so the guard is dropped immediately.
            let stored = {
                let mut store = self
                    .store
                    .lock()
                    .map_err(|e| anyhow::anyhow!("store lock poisoned: {e}"))?;
                store.store(envelope)
            };
            if !stored {
                bail!("store rejected envelope (duplicate or at capacity)");
            }

            let latency = start.elapsed();
            let result = DeliveryResult::Stored;
            self.record_stats(dest_address, result, latency);
            Ok(result)
        }
    }

    /// Convenience: compute the 16-byte address from a 32-byte key, then send.
    pub async fn send_to_key(
        &self,
        dest_key: &[u8; 32],
        payload: &[u8],
    ) -> Result<DeliveryResult> {
        let addr = compute_address(dest_key);
        self.send(&addr, payload).await
    }

    /// Process a received envelope and decide what to do with it.
    ///
    /// Order of checks: signature -> addressed to us -> broadcast ->
    /// forwardability (TTL / hop limit) -> route lookup. Protocol-level
    /// rejections are returned as `IncomingAction::Dropped`, not `Err`;
    /// `Err` is reserved for internal failures (poisoned lock, malformed
    /// recipient key).
    pub fn handle_incoming(&self, envelope: MeshEnvelope) -> Result<IncomingAction> {
        // Verify envelope signature.
        if !envelope.verify() {
            return Ok(IncomingAction::Dropped(
                "invalid signature".to_string(),
            ));
        }

        // Check if it's for us (recipient_key matches our identity).
        let our_key = self.identity.public_key();
        if envelope.recipient_key.len() == 32 {
            let recipient: [u8; 32] = envelope
                .recipient_key
                .as_slice()
                .try_into()
                .map_err(|_| anyhow::anyhow!("invalid recipient key length"))?;
            if recipient == our_key {
                return Ok(IncomingAction::Deliver(envelope));
            }
        }

        // Broadcast (empty recipient) — always deliver locally.
        if envelope.recipient_key.is_empty() {
            return Ok(IncomingAction::Deliver(envelope));
        }

        // Not for us — check if we can forward.
        if !envelope.can_forward() {
            let reason = if envelope.is_expired() {
                "envelope expired"
            } else {
                "max hops reached"
            };
            return Ok(IncomingAction::Dropped(reason.to_string()));
        }

        // Look up the recipient in the routing table.
        let dest_address = compute_address(&envelope.recipient_key);
        let next_hop = {
            let table = self
                .routes
                .read()
                .map_err(|e| anyhow::anyhow!("routing table lock poisoned: {e}"))?;
            table
                .lookup(&dest_address)
                .map(|entry| entry.next_hop_addr.clone())
        };

        match next_hop {
            Some(addr) => {
                // Increment the hop count before handing it back for relay.
                let forwarded = envelope.forwarded();
                Ok(IncomingAction::Forward {
                    envelope: forwarded,
                    next_hop: addr,
                })
            }
            // No route from here either — hold it for later.
            None => Ok(IncomingAction::Store(envelope)),
        }
    }

    /// Forward an envelope to its next hop based on the routing table.
    ///
    /// The envelope is sent as-is (callers such as [`handle_incoming`](Self::handle_incoming)
    /// are expected to have already incremented the hop count via [`MeshEnvelope::forwarded`]).
    pub async fn forward(&self, envelope: MeshEnvelope) -> Result<DeliveryResult> {
        let start = Instant::now();
        let dest_address = compute_address(&envelope.recipient_key);

        // Resolve the next hop; release the read lock before awaiting.
        let next_hop_addr = {
            let table = self
                .routes
                .read()
                .map_err(|e| anyhow::anyhow!("routing table lock poisoned: {e}"))?;
            table
                .lookup(&dest_address)
                .map(|entry| entry.next_hop_addr.clone())
                .ok_or_else(|| anyhow::anyhow!("no route for forwarding target"))?
        };

        let wire = envelope.to_wire();
        self.transports.send(&next_hop_addr, &wire).await?;

        let latency = start.elapsed();
        let result = DeliveryResult::Forwarded;
        self.record_stats(&dest_address, result, latency);
        Ok(result)
    }

    /// Drain stored messages for a destination and attempt to forward them.
    ///
    /// Call this when a new route appears (e.g., from an announce) to flush
    /// queued messages. Returns the count of successfully forwarded messages.
    /// Individual send failures are skipped, not propagated.
    pub async fn drain_store_for(&self, dest_address: &[u8; 16]) -> Result<usize> {
        // Look up the route to get identity key and next-hop.
        let (identity_key, next_hop_addr) = {
            let table = self
                .routes
                .read()
                .map_err(|e| anyhow::anyhow!("routing table lock poisoned: {e}"))?;
            match table.lookup(dest_address) {
                Some(entry) => (entry.identity_key, entry.next_hop_addr.clone()),
                // No route yet — nothing to drain.
                None => return Ok(0),
            }
        };

        // Fetch stored envelopes keyed by the full identity key.
        let envelopes = {
            let mut store = self
                .store
                .lock()
                .map_err(|e| anyhow::anyhow!("store lock poisoned: {e}"))?;
            let mut result = store.fetch(&identity_key);
            // Also try the zero-padded address convention used by send().
            let mut padded_key = [0u8; 32];
            padded_key[..16].copy_from_slice(dest_address);
            result.extend(store.fetch(&padded_key));
            result
        };

        let mut forwarded_count = 0;
        for env in envelopes {
            // Skip envelopes that expired or hit max hops while queued.
            if env.can_forward() {
                let fwd = env.forwarded();
                let wire = fwd.to_wire();
                if self.transports.send(&next_hop_addr, &wire).await.is_ok() {
                    forwarded_count += 1;
                }
            }
        }

        Ok(forwarded_count)
    }

    /// Get delivery statistics for a specific destination.
    ///
    /// Returns `None` for unknown destinations or if the stats lock is
    /// poisoned (stats are best-effort).
    pub fn stats(&self, address: &[u8; 16]) -> Option<DeliveryStats> {
        self.stats
            .lock()
            .ok()
            .and_then(|s| s.get(address).cloned())
    }

    /// Get delivery statistics for all known destinations.
    ///
    /// Returns an empty map if the stats lock is poisoned.
    pub fn all_stats(&self) -> HashMap<[u8; 16], DeliveryStats> {
        self.stats
            .lock()
            .map(|s| s.clone())
            .unwrap_or_default()
    }

    /// This node's 16-byte truncated mesh address.
    pub fn local_address(&self) -> &[u8; 16] {
        &self.local_address
    }

    /// Record a delivery in the per-destination stats.
    ///
    /// A poisoned lock is silently ignored — metrics must never fail a send.
    fn record_stats(&self, address: &[u8; 16], method: DeliveryResult, latency: Duration) {
        if let Ok(mut stats) = self.stats.lock() {
            stats
                .entry(*address)
                .or_default()
                .record(method, latency);
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Counters accumulate per method and `total()` sums them; latency
    /// blending produces a non-zero average.
    #[test]
    fn delivery_stats_tracking() {
        let mut stats = DeliveryStats::default();
        assert_eq!(stats.total(), 0);

        stats.record(DeliveryResult::Direct, Duration::from_millis(10));
        assert_eq!(stats.direct_count, 1);
        assert_eq!(stats.total(), 1);
        assert!(stats.last_delivery.is_some());
        assert!(stats.avg_latency.is_some());

        stats.record(DeliveryResult::Forwarded, Duration::from_millis(20));
        assert_eq!(stats.forwarded_count, 1);
        assert_eq!(stats.total(), 2);

        stats.record(DeliveryResult::Stored, Duration::from_millis(5));
        assert_eq!(stats.stored_count, 1);
        assert_eq!(stats.total(), 3);

        stats.record(DeliveryResult::ServerRelay, Duration::from_millis(50));
        assert_eq!(stats.relay_count, 1);
        assert_eq!(stats.total(), 4);

        // avg_latency should be present and reasonable.
        let avg = stats.avg_latency.unwrap();
        assert!(avg.as_millis() > 0);
    }

    /// An envelope whose recipient key matches our identity is delivered.
    #[test]
    fn incoming_action_deliver_to_self() {
        let identity = MeshIdentity::generate();
        let our_key = identity.public_key();
        let routes = Arc::new(RwLock::new(RoutingTable::new(Duration::from_secs(300))));
        let transports = Arc::new(TransportManager::new());
        let store = Arc::new(Mutex::new(MeshStore::new(100)));

        let router = MeshRouter::new(identity, routes, transports, store);

        // Create an envelope addressed to our key.
        let sender = MeshIdentity::generate();
        let envelope =
            MeshEnvelope::new(&sender, &our_key, b"hello self".to_vec(), 3600, 5);

        let action = router.handle_incoming(envelope).expect("handle_incoming");
        match action {
            IncomingAction::Deliver(env) => {
                assert_eq!(env.payload, b"hello self");
            }
            other => panic!("expected Deliver, got {:?}", std::mem::discriminant(&other)),
        }
    }

    /// A broadcast envelope (empty recipient key) is always delivered locally.
    #[test]
    fn incoming_action_broadcast_delivers() {
        let identity = MeshIdentity::generate();
        let routes = Arc::new(RwLock::new(RoutingTable::new(Duration::from_secs(300))));
        let transports = Arc::new(TransportManager::new());
        let store = Arc::new(Mutex::new(MeshStore::new(100)));

        let router = MeshRouter::new(identity, routes, transports, store);

        // Create a broadcast envelope (empty recipient key).
        let sender = MeshIdentity::generate();
        let envelope =
            MeshEnvelope::new(&sender, &[], b"broadcast msg".to_vec(), 3600, 5);

        let action = router.handle_incoming(envelope).expect("handle_incoming");
        match action {
            IncomingAction::Deliver(env) => {
                assert_eq!(env.payload, b"broadcast msg");
                assert!(env.recipient_key.is_empty());
            }
            other => panic!("expected Deliver, got {:?}", std::mem::discriminant(&other)),
        }
    }

    /// An expired envelope addressed to someone else is dropped with an
    /// "expired" reason. (The 1.1s sleep is needed because TTL has
    /// one-second resolution.)
    #[test]
    fn incoming_action_dropped_expired() {
        let identity = MeshIdentity::generate();
        let routes = Arc::new(RwLock::new(RoutingTable::new(Duration::from_secs(300))));
        let transports = Arc::new(TransportManager::new());
        let store = Arc::new(Mutex::new(MeshStore::new(100)));

        let router = MeshRouter::new(identity, routes, transports, store);

        // Create an envelope addressed to someone else with TTL=0.
        // is_expired() checks: now - timestamp > ttl_secs.
        // With ttl=0 and timestamp=now, we need to wait >0 seconds for expiry.
        let sender = MeshIdentity::generate();
        let other_key = [0xBB; 32];
        let envelope =
            MeshEnvelope::new(&sender, &other_key, b"expired".to_vec(), 0, 5);

        // Sleep briefly so that now - timestamp > 0 (the TTL).
        std::thread::sleep(Duration::from_millis(1100));

        let action = router.handle_incoming(envelope).expect("handle_incoming");
        match action {
            IncomingAction::Dropped(reason) => {
                assert!(
                    reason.contains("expired"),
                    "expected expired reason, got: {reason}"
                );
            }
            other => panic!("expected Dropped, got {:?}", std::mem::discriminant(&other)),
        }
    }

    /// A tampered payload fails signature verification and is dropped.
    #[test]
    fn incoming_action_dropped_invalid_sig() {
        let identity = MeshIdentity::generate();
        let routes = Arc::new(RwLock::new(RoutingTable::new(Duration::from_secs(300))));
        let transports = Arc::new(TransportManager::new());
        let store = Arc::new(Mutex::new(MeshStore::new(100)));

        let router = MeshRouter::new(identity, routes, transports, store);

        // Create a valid envelope then tamper with the payload.
        let sender = MeshIdentity::generate();
        let other_key = [0xCC; 32];
        let mut envelope =
            MeshEnvelope::new(&sender, &other_key, b"original".to_vec(), 3600, 5);
        envelope.payload = b"tampered".to_vec();

        let action = router.handle_incoming(envelope).expect("handle_incoming");
        match action {
            IncomingAction::Dropped(reason) => {
                assert!(
                    reason.contains("invalid signature"),
                    "expected invalid signature reason, got: {reason}"
                );
            }
            other => panic!("expected Dropped, got {:?}", std::mem::discriminant(&other)),
        }
    }
}
|
||||
502
crates/quicprochat-p2p/src/metrics.rs
Normal file
502
crates/quicprochat-p2p/src/metrics.rs
Normal file
@@ -0,0 +1,502 @@
|
||||
//! Observability metrics for mesh networking.
|
||||
//!
|
||||
//! This module provides structured metrics collection for monitoring
|
||||
//! mesh node health, performance, and resource usage.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Atomic counter for thread-safe metric updates.
///
/// Monotonically increasing except for [`Counter::reset`]. All accesses
/// use relaxed ordering — values are advisory, not synchronization.
#[derive(Debug, Default)]
pub struct Counter(AtomicU64);

impl Counter {
    /// Create a counter starting at zero.
    pub fn new() -> Self {
        Counter(AtomicU64::new(0))
    }

    /// Increment by one.
    pub fn inc(&self) {
        self.inc_by(1);
    }

    /// Increment by `n`.
    pub fn inc_by(&self, n: u64) {
        self.0.fetch_add(n, Ordering::Relaxed);
    }

    /// Current value.
    pub fn get(&self) -> u64 {
        self.0.load(Ordering::Relaxed)
    }

    /// Atomically zero the counter, returning the previous value.
    pub fn reset(&self) -> u64 {
        self.0.swap(0, Ordering::Relaxed)
    }
}
|
||||
|
||||
/// Gauge for values that can go up and down.
///
/// All accesses use relaxed ordering — values are advisory, not
/// synchronization.
#[derive(Debug, Default)]
pub struct Gauge(AtomicU64);

impl Gauge {
    /// Create a gauge starting at zero.
    pub fn new() -> Self {
        Self(AtomicU64::new(0))
    }

    /// Set the gauge to an absolute value.
    pub fn set(&self, val: u64) {
        self.0.store(val, Ordering::Relaxed);
    }

    /// Increment by one.
    pub fn inc(&self) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }

    /// Decrement by one, saturating at zero.
    ///
    /// Fix: a plain `fetch_sub(1)` on a zero gauge wraps to `u64::MAX`,
    /// which would report an absurd value (e.g. connection count) after
    /// a single unbalanced `dec`. `fetch_update` with `saturating_sub`
    /// clamps at zero instead; the closure always returns `Some`, so
    /// the CAS loop cannot report failure.
    pub fn dec(&self) {
        let _ = self
            .0
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| {
                Some(v.saturating_sub(1))
            });
    }

    /// Current value.
    pub fn get(&self) -> u64 {
        self.0.load(Ordering::Relaxed)
    }
}
|
||||
|
||||
/// Histogram for tracking distributions (simple bucket-based).
///
/// Each observation lands in the first bucket whose upper bound it does
/// not exceed; values above every bound are folded into the last bucket.
#[derive(Debug)]
pub struct Histogram {
    /// Bucket boundaries (upper limits).
    buckets: Vec<u64>,
    /// Count in each bucket.
    counts: Vec<AtomicU64>,
    /// Sum of all values.
    sum: AtomicU64,
    /// Total count.
    count: AtomicU64,
}

impl Histogram {
    /// Create with default latency buckets (ms).
    pub fn latency_ms() -> Self {
        Self::new(vec![1, 5, 10, 25, 50, 100, 250, 500, 1000, 5000, 10000])
    }

    /// Create with default size buckets (bytes).
    pub fn size_bytes() -> Self {
        Self::new(vec![64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 65536])
    }

    /// Create a histogram with the given bucket upper bounds.
    pub fn new(buckets: Vec<u64>) -> Self {
        let counts = (0..buckets.len()).map(|_| AtomicU64::new(0)).collect();
        Self {
            buckets,
            counts,
            sum: AtomicU64::new(0),
            count: AtomicU64::new(0),
        }
    }

    /// Record one observation.
    pub fn observe(&self, value: u64) {
        self.sum.fetch_add(value, Ordering::Relaxed);
        self.count.fetch_add(1, Ordering::Relaxed);

        // First bucket whose upper bound covers the value; overflow
        // values fall back to the last bucket (none if no buckets).
        let slot = self
            .buckets
            .iter()
            .position(|&upper| value <= upper)
            .or_else(|| self.counts.len().checked_sub(1));
        if let Some(i) = slot {
            self.counts[i].fetch_add(1, Ordering::Relaxed);
        }
    }

    /// Record a duration as whole milliseconds.
    pub fn observe_duration(&self, d: Duration) {
        self.observe(d.as_millis() as u64);
    }

    /// Sum of all observed values.
    pub fn sum(&self) -> u64 {
        self.sum.load(Ordering::Relaxed)
    }

    /// Number of observations.
    pub fn count(&self) -> u64 {
        self.count.load(Ordering::Relaxed)
    }

    /// Mean of all observations (0.0 when empty).
    pub fn avg(&self) -> f64 {
        match self.count() {
            0 => 0.0,
            n => self.sum() as f64 / n as f64,
        }
    }
}
|
||||
|
||||
/// Per-transport metrics.
///
/// One instance per named transport; obtained via `MeshMetrics::transport`.
#[derive(Debug, Default)]
pub struct TransportMetrics {
    /// Messages sent successfully.
    pub sent: Counter,
    /// Messages received.
    pub received: Counter,
    /// Send failures.
    pub send_errors: Counter,
    /// Receive errors.
    pub recv_errors: Counter,
    /// Bytes sent.
    pub bytes_sent: Counter,
    /// Bytes received.
    pub bytes_received: Counter,
    /// Active connections (for connection-oriented transports).
    pub connections: Gauge,
}
|
||||
|
||||
/// Per-peer metrics.
|
||||
#[derive(Debug)]
|
||||
pub struct PeerMetrics {
|
||||
/// Messages sent to this peer.
|
||||
pub messages_sent: Counter,
|
||||
/// Messages received from this peer.
|
||||
pub messages_received: Counter,
|
||||
/// Last seen timestamp.
|
||||
pub last_seen: RwLock<Option<Instant>>,
|
||||
/// Round-trip time samples.
|
||||
pub rtt_ms: Histogram,
|
||||
}
|
||||
|
||||
impl Default for PeerMetrics {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
messages_sent: Counter::new(),
|
||||
messages_received: Counter::new(),
|
||||
last_seen: RwLock::new(None),
|
||||
rtt_ms: Histogram::latency_ms(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PeerMetrics {
|
||||
pub fn touch(&self) {
|
||||
if let Ok(mut last) = self.last_seen.write() {
|
||||
*last = Some(Instant::now());
|
||||
}
|
||||
}
|
||||
|
||||
pub fn age(&self) -> Option<Duration> {
|
||||
self.last_seen
|
||||
.read()
|
||||
.ok()
|
||||
.and_then(|t| t.map(|i| i.elapsed()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Global mesh metrics.
///
/// Aggregates all subsystem metrics for one node; designed to be shared
/// behind an `Arc` and updated via the interior-mutable counters.
#[derive(Debug)]
pub struct MeshMetrics {
    /// Transport metrics by name (lazily created in `transport`).
    pub transports: RwLock<HashMap<String, Arc<TransportMetrics>>>,
    /// Routing metrics.
    pub routing: RoutingMetrics,
    /// Store metrics.
    pub store: StoreMetrics,
    /// Crypto metrics.
    pub crypto: CryptoMetrics,
    /// Protocol metrics.
    pub protocol: ProtocolMetrics,
    /// Node start time (basis for `uptime`).
    pub started_at: Instant,
}

impl Default for MeshMetrics {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl MeshMetrics {
    /// Create a fresh metrics hub with all counters at zero and
    /// `started_at` set to now.
    pub fn new() -> Self {
        Self {
            transports: RwLock::new(HashMap::new()),
            routing: RoutingMetrics::default(),
            store: StoreMetrics::default(),
            crypto: CryptoMetrics::default(),
            protocol: ProtocolMetrics::default(),
            started_at: Instant::now(),
        }
    }

    /// Get or create transport metrics.
    ///
    /// Fast path takes only the read lock; on a miss, the write lock is
    /// taken and `entry` re-checks so a racing creator wins cleanly.
    /// NOTE(review): `.unwrap()` panics if the lock is poisoned — confirm
    /// that is acceptable here, given other metric paths swallow poisoning.
    pub fn transport(&self, name: &str) -> Arc<TransportMetrics> {
        {
            let map = self.transports.read().unwrap();
            if let Some(m) = map.get(name) {
                return Arc::clone(m);
            }
        }
        let mut map = self.transports.write().unwrap();
        map.entry(name.to_string())
            .or_insert_with(|| Arc::new(TransportMetrics::default()))
            .clone()
    }

    /// Node uptime.
    pub fn uptime(&self) -> Duration {
        self.started_at.elapsed()
    }

    /// Export metrics as a snapshot.
    ///
    /// Reads are relaxed/non-atomic across counters, so the snapshot is
    /// only approximately consistent under concurrent updates.
    /// NOTE(review): `recv_errors` and `decryption_failures` are not
    /// copied into the snapshot — confirm the snapshot types omit them
    /// intentionally.
    pub fn snapshot(&self) -> MetricsSnapshot {
        let transports = self.transports.read().unwrap();
        let transport_snapshots: HashMap<String, TransportSnapshot> = transports
            .iter()
            .map(|(name, m)| {
                (
                    name.clone(),
                    TransportSnapshot {
                        sent: m.sent.get(),
                        received: m.received.get(),
                        send_errors: m.send_errors.get(),
                        bytes_sent: m.bytes_sent.get(),
                        bytes_received: m.bytes_received.get(),
                        connections: m.connections.get(),
                    },
                )
            })
            .collect();

        MetricsSnapshot {
            uptime_secs: self.uptime().as_secs(),
            transports: transport_snapshots,
            routing: RoutingSnapshot {
                table_size: self.routing.table_size.get(),
                lookups: self.routing.lookups.get(),
                lookup_misses: self.routing.lookup_misses.get(),
                announcements_processed: self.routing.announcements_processed.get(),
            },
            store: StoreSnapshot {
                messages_stored: self.store.messages_stored.get(),
                messages_delivered: self.store.messages_delivered.get(),
                messages_expired: self.store.messages_expired.get(),
                current_size: self.store.current_size.get(),
            },
            crypto: CryptoSnapshot {
                encryptions: self.crypto.encryptions.get(),
                decryptions: self.crypto.decryptions.get(),
                signature_verifications: self.crypto.signature_verifications.get(),
                signature_failures: self.crypto.signature_failures.get(),
                replay_detections: self.crypto.replay_detections.get(),
            },
        }
    }
}
|
||||
|
||||
/// Routing subsystem metrics.
///
/// NOTE(review): only `table_size`, `lookups`, `lookup_misses`, and
/// `announcements_processed` are exported by `MeshMetrics::snapshot()`;
/// the remaining counters are recorded but not included in the snapshot —
/// confirm that is intentional.
#[derive(Debug, Default)]
pub struct RoutingMetrics {
    /// Current routing table size.
    pub table_size: Gauge,
    /// Route lookups.
    pub lookups: Counter,
    /// Route lookup misses.
    pub lookup_misses: Counter,
    /// Routes added.
    pub routes_added: Counter,
    /// Routes expired.
    pub routes_expired: Counter,
    /// Announcements processed.
    pub announcements_processed: Counter,
    /// Announcements forwarded.
    pub announcements_forwarded: Counter,
    /// Duplicate announcements dropped.
    pub duplicates_dropped: Counter,
}
|
||||
|
||||
/// Store (store-and-forward) subsystem metrics.
///
/// NOTE(review): `capacity_reached` is counted but not exported in
/// `StoreSnapshot` — confirm that is intentional.
#[derive(Debug, Default)]
pub struct StoreMetrics {
    /// Messages stored.
    pub messages_stored: Counter,
    /// Messages delivered.
    pub messages_delivered: Counter,
    /// Messages expired.
    pub messages_expired: Counter,
    /// Current store size.
    pub current_size: Gauge,
    /// Store capacity reached events.
    pub capacity_reached: Counter,
}
|
||||
|
||||
/// Crypto subsystem metrics.
///
/// Cannot `#[derive(Default)]` because `encrypt_latency` needs the
/// latency-bucket constructor; see the manual `Default` impl below.
/// NOTE(review): `decryption_failures` and `encrypt_latency` are not
/// exported in `CryptoSnapshot` — confirm that is intentional.
#[derive(Debug)]
pub struct CryptoMetrics {
    /// Successful encryptions.
    pub encryptions: Counter,
    /// Successful decryptions.
    pub decryptions: Counter,
    /// Decryption failures.
    pub decryption_failures: Counter,
    /// Signature verifications.
    pub signature_verifications: Counter,
    /// Signature failures.
    pub signature_failures: Counter,
    /// Replay attacks detected.
    pub replay_detections: Counter,
    /// Encryption latency.
    pub encrypt_latency: Histogram,
}
|
||||
|
||||
impl Default for CryptoMetrics {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
encryptions: Counter::new(),
|
||||
decryptions: Counter::new(),
|
||||
decryption_failures: Counter::new(),
|
||||
signature_verifications: Counter::new(),
|
||||
signature_failures: Counter::new(),
|
||||
replay_detections: Counter::new(),
|
||||
encrypt_latency: Histogram::latency_ms(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Protocol (wire parsing) metrics.
///
/// NOTE(review): `MetricsSnapshot` has no `protocol` field, so none of these
/// counters appear in `MeshMetrics::snapshot()` output — confirm that is
/// intentional.
#[derive(Debug, Default)]
pub struct ProtocolMetrics {
    /// Messages parsed.
    pub messages_parsed: Counter,
    /// Parse errors.
    pub parse_errors: Counter,
    /// Unknown message types.
    pub unknown_types: Counter,
    /// Messages too large.
    pub oversized: Counter,
}
|
||||
|
||||
/// Point-in-time snapshot of metrics.
///
/// Plain-data copy of the live counters, serializable with serde for
/// export (e.g. as JSON). Protocol metrics are not included.
#[derive(Debug, Clone, serde::Serialize)]
pub struct MetricsSnapshot {
    // Seconds since the metrics registry was created.
    pub uptime_secs: u64,
    // Per-transport counters, keyed by transport name (e.g. "tcp").
    pub transports: HashMap<String, TransportSnapshot>,
    pub routing: RoutingSnapshot,
    pub store: StoreSnapshot,
    pub crypto: CryptoSnapshot,
}
|
||||
|
||||
/// Snapshot of one transport's counters (see `TransportSnapshot` values
/// copied in `MeshMetrics::snapshot()`).
#[derive(Debug, Clone, serde::Serialize)]
pub struct TransportSnapshot {
    // Messages sent / received on this transport.
    pub sent: u64,
    pub received: u64,
    // Failed send attempts.
    pub send_errors: u64,
    // Byte totals for traffic accounting.
    pub bytes_sent: u64,
    pub bytes_received: u64,
    // Connection count (gauge value at snapshot time).
    pub connections: u64,
}
|
||||
|
||||
/// Snapshot of the routing counters exported by `MeshMetrics::snapshot()`.
/// Intentionally a subset of `RoutingMetrics`.
#[derive(Debug, Clone, serde::Serialize)]
pub struct RoutingSnapshot {
    pub table_size: u64,
    pub lookups: u64,
    pub lookup_misses: u64,
    pub announcements_processed: u64,
}
|
||||
|
||||
/// Snapshot of the store counters exported by `MeshMetrics::snapshot()`.
/// Intentionally a subset of `StoreMetrics` (omits `capacity_reached`).
#[derive(Debug, Clone, serde::Serialize)]
pub struct StoreSnapshot {
    pub messages_stored: u64,
    pub messages_delivered: u64,
    pub messages_expired: u64,
    pub current_size: u64,
}
|
||||
|
||||
/// Snapshot of the crypto counters exported by `MeshMetrics::snapshot()`.
/// Intentionally a subset of `CryptoMetrics` (omits `decryption_failures`
/// and the latency histogram).
#[derive(Debug, Clone, serde::Serialize)]
pub struct CryptoSnapshot {
    pub encryptions: u64,
    pub decryptions: u64,
    pub signature_verifications: u64,
    pub signature_failures: u64,
    pub replay_detections: u64,
}
|
||||
|
||||
/// Global metrics instance.
|
||||
static GLOBAL_METRICS: std::sync::OnceLock<Arc<MeshMetrics>> = std::sync::OnceLock::new();
|
||||
|
||||
/// Get the global metrics instance.
|
||||
pub fn metrics() -> &'static Arc<MeshMetrics> {
|
||||
GLOBAL_METRICS.get_or_init(|| Arc::new(MeshMetrics::new()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn counter_basics() {
        let c = Counter::new();
        assert_eq!(c.get(), 0);
        c.inc();
        assert_eq!(c.get(), 1);
        c.inc_by(5);
        assert_eq!(c.get(), 6);
        // `reset` returns the pre-reset value and zeroes the counter.
        let old = c.reset();
        assert_eq!(old, 6);
        assert_eq!(c.get(), 0);
    }

    #[test]
    fn gauge_basics() {
        // Gauges support absolute set plus relative inc/dec.
        let g = Gauge::new();
        assert_eq!(g.get(), 0);
        g.set(10);
        assert_eq!(g.get(), 10);
        g.inc();
        assert_eq!(g.get(), 11);
        g.dec();
        assert_eq!(g.get(), 10);
    }

    #[test]
    fn histogram_basics() {
        // Buckets at 10/50/100; observations land in each bucket and above
        // the top bound (200).
        let h = Histogram::new(vec![10, 50, 100]);
        h.observe(5);
        h.observe(25);
        h.observe(75);
        h.observe(200);

        assert_eq!(h.count(), 4);
        assert_eq!(h.sum(), 5 + 25 + 75 + 200);
    }

    #[test]
    fn transport_metrics() {
        let m = MeshMetrics::new();
        let tcp = m.transport("tcp");
        tcp.sent.inc();
        tcp.bytes_sent.inc_by(100);

        assert_eq!(tcp.sent.get(), 1);
        assert_eq!(tcp.bytes_sent.get(), 100);

        // Same name returns same instance
        let tcp2 = m.transport("tcp");
        assert_eq!(tcp2.sent.get(), 1);
    }

    #[test]
    fn snapshot_serializes() {
        let m = MeshMetrics::new();
        m.transport("tcp").sent.inc();
        m.routing.lookups.inc_by(10);

        // The snapshot must be JSON-serializable and reflect counter values.
        let snapshot = m.snapshot();
        let json = serde_json::to_string(&snapshot).expect("serialize");
        assert!(json.contains("\"uptime_secs\":"));
        assert!(json.contains("\"lookups\":10"));
    }

    #[test]
    fn global_metrics() {
        // NOTE(review): relies on process-global state and asserts the exact
        // value 1; assumes no other test in the binary increments
        // `protocol.messages_parsed` — confirm tests stay isolated.
        let m = metrics();
        m.protocol.messages_parsed.inc();
        assert_eq!(metrics().protocol.messages_parsed.get(), 1);
    }
}
|
||||
562
crates/quicprochat-p2p/src/mls_lite.rs
Normal file
562
crates/quicprochat-p2p/src/mls_lite.rs
Normal file
@@ -0,0 +1,562 @@
|
||||
//! MLS-Lite: Lightweight symmetric encryption for constrained mesh links.
|
||||
//!
|
||||
//! MLS-Lite provides group encryption without the overhead of full MLS:
|
||||
//! - Pre-shared group secret (exchanged out-of-band: QR code, NFC, voice)
|
||||
//! - ChaCha20-Poly1305 symmetric encryption (same as MLS application messages)
|
||||
//! - Per-message nonce derived from epoch + sequence
|
||||
//! - Replay protection via sequence numbers
|
||||
//! - Optional Ed25519 signatures for sender authentication
|
||||
//!
|
||||
//! # Security Properties
|
||||
//!
|
||||
//! - **Confidentiality**: ChaCha20-Poly1305 (256-bit key)
|
||||
//! - **Integrity**: Poly1305 MAC
|
||||
//! - **Replay protection**: Sequence numbers
|
||||
//! - **Sender authentication (optional)**: Ed25519 signatures
|
||||
//!
|
||||
//! # NOT Provided (vs full MLS)
|
||||
//!
|
||||
//! - Automatic post-compromise security (requires manual key rotation)
|
||||
//! - Automatic forward secrecy (only per-epoch, not per-message)
|
||||
//! - Key agreement (keys are pre-shared)
|
||||
//!
|
||||
//! # Wire Format
|
||||
//!
|
||||
//! See [`MlsLiteEnvelope`] for the compact envelope structure.
|
||||
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Nonce,
|
||||
};
|
||||
use hkdf::Hkdf;
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::Sha256;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
use crate::identity::MeshIdentity;
|
||||
|
||||
/// Maximum replay window size (track last N sequence numbers per sender).
/// Must not exceed 64: the window bitmap is a single `u64` (see
/// `ReplayWindow::seen`).
const REPLAY_WINDOW_SIZE: usize = 64;
|
||||
|
||||
/// MLS-Lite group state.
///
/// Holds the per-epoch symmetric key material plus send-side sequence and
/// receive-side replay bookkeeping.
/// NOTE(review): `replay_window` gains one entry per distinct sender and is
/// only cleared on epoch rotation — unbounded growth with many senders;
/// confirm an eviction policy isn't needed.
pub struct MlsLiteGroup {
    /// 8-byte group identifier.
    group_id: [u8; 8],
    /// Current epoch (incremented on key rotation).
    epoch: u16,
    /// 32-byte symmetric encryption key (derived from group_secret + epoch).
    encryption_key: [u8; 32],
    /// 7-byte nonce prefix (derived from group_secret).
    nonce_prefix: [u8; 7],
    /// Next sequence number for sending.
    next_seq: u32,
    /// Replay protection: track seen (sender_addr, seq) pairs.
    replay_window: HashMap<MeshAddress, ReplayWindow>,
}
|
||||
|
||||
/// Sliding window for replay detection.
///
/// Bit `i` of `seen` records whether sequence number `max_seq - i` has been
/// observed, so the window covers the 64 most recent sequence numbers.
struct ReplayWindow {
    /// Highest sequence number seen.
    max_seq: u32,
    /// Bitmap of seen sequence numbers in window.
    seen: u64,
}
|
||||
|
||||
impl ReplayWindow {
    /// Fresh window: nothing observed yet.
    fn new() -> Self {
        Self { max_seq: 0, seen: 0 }
    }

    /// Check if sequence number is valid (not replayed).
    /// Returns true if valid, false if replayed or too old.
    ///
    /// Invariant: bit `i` of `self.seen` means `max_seq - i` was seen.
    fn check_and_update(&mut self, seq: u32) -> bool {
        if seq == 0 {
            // Seq 0 is always allowed once (first message). A fresh window
            // cannot distinguish "seq 0 seen" from "nothing seen" via
            // max_seq alone (both are 0), hence the explicit `seen == 0`
            // check. A *replayed* seq 0 falls through and is rejected by
            // the bitmap below (bit `max_seq - 0` is set).
            if self.max_seq == 0 && self.seen == 0 {
                self.seen = 1;
                return true;
            }
        }

        if seq > self.max_seq {
            // New highest sequence — slide the window forward. A jump of
            // >= 64 would overflow the shift (checked_shl returns None),
            // which correctly clears the whole bitmap.
            let shift = (seq - self.max_seq).min(64);
            self.seen = self.seen.checked_shl(shift as u32).unwrap_or(0);
            self.seen |= 1; // Mark current as seen
            self.max_seq = seq;
            true
        } else if self.max_seq - seq >= REPLAY_WINDOW_SIZE as u32 {
            // Too old — fell off the back of the window; reject outright,
            // even if it was never actually received.
            false
        } else {
            // Within window — check bitmap
            let idx = (self.max_seq - seq) as u32;
            let bit = 1u64 << idx;
            if self.seen & bit != 0 {
                false // Already seen
            } else {
                self.seen |= bit;
                true
            }
        }
    }
}
|
||||
|
||||
/// Result of decryption.
///
/// A plain enum rather than `Result` so callers can count each failure
/// class separately (e.g. for the crypto metrics counters).
#[derive(Debug)]
pub enum DecryptResult {
    /// Successfully decrypted plaintext.
    Success(Vec<u8>),
    /// Decryption failed (wrong key, corrupted, etc).
    DecryptionFailed,
    /// Replay detected (sequence number already seen).
    ReplayDetected,
    /// Signature verification failed.
    SignatureFailed,
}
|
||||
|
||||
impl MlsLiteGroup {
|
||||
/// Create a new MLS-Lite group from a pre-shared secret.
|
||||
///
|
||||
/// The `group_secret` should be at least 32 bytes of high-entropy data.
|
||||
/// It can be:
|
||||
/// - Randomly generated and shared via QR code
|
||||
/// - Derived from a password via Argon2id
|
||||
/// - Exported from a full MLS group's epoch secret
|
||||
pub fn new(group_id: [u8; 8], group_secret: &[u8], epoch: u16) -> Self {
|
||||
let (encryption_key, nonce_prefix) = Self::derive_keys(group_secret, &group_id, epoch);
|
||||
|
||||
Self {
|
||||
group_id,
|
||||
epoch,
|
||||
encryption_key,
|
||||
nonce_prefix,
|
||||
next_seq: 0,
|
||||
replay_window: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive encryption key and nonce prefix from group secret and epoch.
|
||||
fn derive_keys(group_secret: &[u8], group_id: &[u8; 8], epoch: u16) -> ([u8; 32], [u8; 7]) {
|
||||
let salt = b"quicprochat-mls-lite-v1";
|
||||
let hk = Hkdf::<Sha256>::new(Some(salt), group_secret);
|
||||
|
||||
// Include epoch in the info to get different keys per epoch
|
||||
let mut info = Vec::with_capacity(10);
|
||||
info.extend_from_slice(group_id);
|
||||
info.extend_from_slice(&epoch.to_be_bytes());
|
||||
|
||||
let mut okm = [0u8; 39]; // 32 bytes key + 7 bytes nonce prefix
|
||||
hk.expand(&info, &mut okm)
|
||||
.expect("HKDF expand should not fail with valid length");
|
||||
|
||||
let mut key = [0u8; 32];
|
||||
let mut prefix = [0u8; 7];
|
||||
key.copy_from_slice(&okm[..32]);
|
||||
prefix.copy_from_slice(&okm[32..39]);
|
||||
|
||||
(key, prefix)
|
||||
}
|
||||
|
||||
/// Rotate to a new epoch with a new group secret.
|
||||
pub fn rotate(&mut self, new_secret: &[u8], new_epoch: u16) {
|
||||
let (key, prefix) = Self::derive_keys(new_secret, &self.group_id, new_epoch);
|
||||
self.encryption_key = key;
|
||||
self.nonce_prefix = prefix;
|
||||
self.epoch = new_epoch;
|
||||
self.next_seq = 0;
|
||||
self.replay_window.clear();
|
||||
}
|
||||
|
||||
/// Encrypt a plaintext payload.
|
||||
///
|
||||
/// Returns `(ciphertext, nonce_suffix, seq)`.
|
||||
/// The ciphertext includes the 16-byte Poly1305 tag.
|
||||
pub fn encrypt(&mut self, plaintext: &[u8]) -> anyhow::Result<(Vec<u8>, [u8; 5], u32)> {
|
||||
let seq = self.next_seq;
|
||||
self.next_seq = self.next_seq.wrapping_add(1);
|
||||
|
||||
// Build nonce: 7-byte prefix + 5-byte suffix (1 byte random + 4 byte seq)
|
||||
let mut nonce_suffix = [0u8; 5];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_suffix[..1]);
|
||||
nonce_suffix[1..].copy_from_slice(&seq.to_be_bytes());
|
||||
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
nonce_bytes[..7].copy_from_slice(&self.nonce_prefix);
|
||||
nonce_bytes[7..].copy_from_slice(&nonce_suffix);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let cipher = ChaCha20Poly1305::new_from_slice(&self.encryption_key)
|
||||
.expect("key length is 32 bytes");
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| anyhow::anyhow!("encryption failed: {e}"))?;
|
||||
|
||||
Ok((ciphertext, nonce_suffix, seq))
|
||||
}
|
||||
|
||||
/// Decrypt a ciphertext.
|
||||
///
|
||||
/// `sender_addr` is used for replay detection.
|
||||
pub fn decrypt(
|
||||
&mut self,
|
||||
ciphertext: &[u8],
|
||||
nonce_suffix: &[u8; 5],
|
||||
sender_addr: MeshAddress,
|
||||
) -> DecryptResult {
|
||||
// Extract sequence number from nonce suffix
|
||||
let seq = u32::from_be_bytes([
|
||||
nonce_suffix[1],
|
||||
nonce_suffix[2],
|
||||
nonce_suffix[3],
|
||||
nonce_suffix[4],
|
||||
]);
|
||||
|
||||
// Replay check
|
||||
let window = self.replay_window.entry(sender_addr).or_insert_with(ReplayWindow::new);
|
||||
if !window.check_and_update(seq) {
|
||||
return DecryptResult::ReplayDetected;
|
||||
}
|
||||
|
||||
// Build nonce
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
nonce_bytes[..7].copy_from_slice(&self.nonce_prefix);
|
||||
nonce_bytes[7..].copy_from_slice(nonce_suffix);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let cipher = ChaCha20Poly1305::new_from_slice(&self.encryption_key)
|
||||
.expect("key length is 32 bytes");
|
||||
|
||||
match cipher.decrypt(nonce, ciphertext) {
|
||||
Ok(plaintext) => DecryptResult::Success(plaintext),
|
||||
Err(_) => DecryptResult::DecryptionFailed,
|
||||
}
|
||||
}
|
||||
|
||||
/// Current epoch.
|
||||
pub fn epoch(&self) -> u16 {
|
||||
self.epoch
|
||||
}
|
||||
|
||||
/// Group ID.
|
||||
pub fn group_id(&self) -> &[u8; 8] {
|
||||
&self.group_id
|
||||
}
|
||||
}
|
||||
|
||||
/// Compact MLS-Lite envelope for constrained links.
///
/// # Wire overhead (approximate)
///
/// - Version: 1 byte
/// - Flags: 1 byte
/// - Group ID: 8 bytes
/// - Sender addr: 4 bytes (truncated further for constrained)
/// - Seq: 4 bytes
/// - Epoch: 2 bytes
/// - Nonce suffix: 5 bytes
/// - Ciphertext: variable (payload + 16 byte tag)
/// - Signature (optional): 64 bytes
///
/// **Minimum overhead without signature: ~41 bytes**
/// **Minimum overhead with signature: ~105 bytes**
///
/// NOTE(review): `flags` bit 0 and `signature` presence are set together by
/// `MlsLiteEnvelope::new`, but nothing in the struct itself forces them to
/// agree for envelopes built any other way.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MlsLiteEnvelope {
    /// Format version (0x03 for MLS-Lite).
    pub version: u8,
    /// Flags: bit 0 = has_signature, bits 1-2 = priority.
    pub flags: u8,
    /// 8-byte group identifier.
    pub group_id: [u8; 8],
    /// 4-byte truncated sender address (first 4 bytes of MeshAddress).
    pub sender_addr: [u8; 4],
    /// Sequence number.
    pub seq: u32,
    /// Key epoch.
    pub epoch: u16,
    /// 5-byte nonce suffix.
    pub nonce: [u8; 5],
    /// Encrypted payload (includes 16-byte Poly1305 tag).
    pub ciphertext: Vec<u8>,
    /// Optional Ed25519 signature (64 bytes, stored as Vec for serde).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub signature: Option<Vec<u8>>,
}
|
||||
|
||||
/// MLS-Lite envelope version byte.
/// NOTE(review): presumably 0x01/0x02 are used by other envelope formats in
/// this crate — confirm the value space is coordinated.
const MLS_LITE_VERSION: u8 = 0x03;
|
||||
|
||||
impl MlsLiteEnvelope {
|
||||
/// Create a new MLS-Lite envelope (without signature).
|
||||
pub fn new(
|
||||
identity: &MeshIdentity,
|
||||
group: &mut MlsLiteGroup,
|
||||
plaintext: &[u8],
|
||||
sign: bool,
|
||||
) -> anyhow::Result<Self> {
|
||||
let (ciphertext, nonce, seq) = group.encrypt(plaintext)?;
|
||||
|
||||
let sender_full = MeshAddress::from_public_key(&identity.public_key());
|
||||
let mut sender_addr = [0u8; 4];
|
||||
sender_addr.copy_from_slice(&sender_full.as_bytes()[..4]);
|
||||
|
||||
let flags = if sign { 0x01 } else { 0x00 };
|
||||
|
||||
let mut envelope = Self {
|
||||
version: MLS_LITE_VERSION,
|
||||
flags,
|
||||
group_id: *group.group_id(),
|
||||
sender_addr,
|
||||
seq,
|
||||
epoch: group.epoch(),
|
||||
nonce,
|
||||
ciphertext,
|
||||
signature: None,
|
||||
};
|
||||
|
||||
if sign {
|
||||
let signable = envelope.signable_bytes();
|
||||
let sig = identity.sign(&signable);
|
||||
envelope.signature = Some(sig.to_vec());
|
||||
}
|
||||
|
||||
Ok(envelope)
|
||||
}
|
||||
|
||||
/// Bytes to sign (everything except signature).
|
||||
fn signable_bytes(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::with_capacity(32 + self.ciphertext.len());
|
||||
buf.push(self.version);
|
||||
buf.push(self.flags);
|
||||
buf.extend_from_slice(&self.group_id);
|
||||
buf.extend_from_slice(&self.sender_addr);
|
||||
buf.extend_from_slice(&self.seq.to_le_bytes());
|
||||
buf.extend_from_slice(&self.epoch.to_le_bytes());
|
||||
buf.extend_from_slice(&self.nonce);
|
||||
buf.extend_from_slice(&self.ciphertext);
|
||||
buf
|
||||
}
|
||||
|
||||
/// Verify signature (if present) using sender's full public key.
|
||||
pub fn verify_signature(&self, sender_public_key: &[u8; 32]) -> bool {
|
||||
match &self.signature {
|
||||
None => true, // No signature to verify
|
||||
Some(sig_vec) => {
|
||||
// Signature must be exactly 64 bytes
|
||||
let sig: [u8; 64] = match sig_vec.as_slice().try_into() {
|
||||
Ok(s) => s,
|
||||
Err(_) => return false,
|
||||
};
|
||||
let signable = self.signable_bytes();
|
||||
quicprochat_core::IdentityKeypair::verify_raw(sender_public_key, &signable, &sig)
|
||||
.is_ok()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this envelope has a signature.
|
||||
pub fn has_signature(&self) -> bool {
|
||||
self.flags & 0x01 != 0
|
||||
}
|
||||
|
||||
/// Serialize to CBOR.
|
||||
pub fn to_wire(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(self, &mut buf).expect("CBOR serialization should not fail");
|
||||
buf
|
||||
}
|
||||
|
||||
/// Deserialize from CBOR.
|
||||
pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
|
||||
let env: Self = ciborium::from_reader(bytes)?;
|
||||
if env.version != MLS_LITE_VERSION {
|
||||
anyhow::bail!("unexpected MLS-Lite version: {}", env.version);
|
||||
}
|
||||
Ok(env)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Fresh random identity for signing tests.
    fn test_identity() -> MeshIdentity {
        MeshIdentity::generate()
    }

    #[test]
    fn encrypt_decrypt_roundtrip() {
        let secret = b"super secret group key material!";
        let group_id = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];

        // Both sides derive the same key from the same (id, secret, epoch).
        let mut alice_group = MlsLiteGroup::new(group_id, secret, 0);
        let mut bob_group = MlsLiteGroup::new(group_id, secret, 0);

        let plaintext = b"hello from alice";
        let (ciphertext, nonce, _seq) = alice_group.encrypt(plaintext).expect("encrypt");

        let alice_addr = MeshAddress::from_bytes([0xAA; 16]);
        match bob_group.decrypt(&ciphertext, &nonce, alice_addr) {
            DecryptResult::Success(pt) => assert_eq!(pt, plaintext),
            other => panic!("expected Success, got {other:?}"),
        }
    }

    #[test]
    fn replay_detection() {
        let secret = b"replay test key material here!!!";
        let group_id = [0x11; 8];

        let mut alice_group = MlsLiteGroup::new(group_id, secret, 0);
        let mut bob_group = MlsLiteGroup::new(group_id, secret, 0);

        let (ciphertext, nonce, _seq) = alice_group.encrypt(b"msg1").expect("encrypt");
        let alice_addr = MeshAddress::from_bytes([0xAA; 16]);

        // First decrypt succeeds
        match bob_group.decrypt(&ciphertext, &nonce, alice_addr) {
            DecryptResult::Success(_) => {}
            other => panic!("first decrypt should succeed, got {other:?}"),
        }

        // Replay attempt fails
        match bob_group.decrypt(&ciphertext, &nonce, alice_addr) {
            DecryptResult::ReplayDetected => {}
            other => panic!("replay should be detected, got {other:?}"),
        }
    }

    #[test]
    fn different_epochs_different_keys() {
        let secret = b"epoch rotation test material!!!";
        let group_id = [0x22; 8];

        // Same secret, different epoch => different derived keys.
        let mut group_e0 = MlsLiteGroup::new(group_id, secret, 0);
        let mut group_e1 = MlsLiteGroup::new(group_id, secret, 1);

        let (ciphertext_e0, nonce_e0, _) = group_e0.encrypt(b"epoch 0").expect("encrypt");

        // Decrypt with wrong epoch should fail
        let sender = MeshAddress::from_bytes([0xBB; 16]);
        match group_e1.decrypt(&ciphertext_e0, &nonce_e0, sender) {
            DecryptResult::DecryptionFailed => {}
            other => panic!("wrong epoch should fail decryption, got {other:?}"),
        }
    }

    #[test]
    fn envelope_with_signature() {
        let id = test_identity();
        let secret = b"envelope signature test material";
        let group_id = [0x33; 8];

        let mut group = MlsLiteGroup::new(group_id, secret, 0);

        let envelope = MlsLiteEnvelope::new(&id, &mut group, b"signed message", true)
            .expect("create envelope");

        assert!(envelope.has_signature());
        assert!(envelope.verify_signature(&id.public_key()));

        // Wrong key should fail
        let wrong_key = [0x42u8; 32];
        assert!(!envelope.verify_signature(&wrong_key));
    }

    #[test]
    fn envelope_without_signature() {
        let id = test_identity();
        let secret = b"unsigned envelope test material!";
        let group_id = [0x44; 8];

        let mut group = MlsLiteGroup::new(group_id, secret, 0);

        let envelope = MlsLiteEnvelope::new(&id, &mut group, b"no sig", false)
            .expect("create envelope");

        assert!(!envelope.has_signature());
        assert!(envelope.signature.is_none());
    }

    #[test]
    fn envelope_cbor_roundtrip() {
        let id = test_identity();
        let secret = b"cbor roundtrip test material!!!!";
        let group_id = [0x55; 8];

        let mut group = MlsLiteGroup::new(group_id, secret, 0);

        let envelope = MlsLiteEnvelope::new(&id, &mut group, b"roundtrip", true)
            .expect("create envelope");

        // Every field must survive serialization unchanged.
        let wire = envelope.to_wire();
        let restored = MlsLiteEnvelope::from_wire(&wire).expect("deserialize");

        assert_eq!(envelope.version, restored.version);
        assert_eq!(envelope.flags, restored.flags);
        assert_eq!(envelope.group_id, restored.group_id);
        assert_eq!(envelope.sender_addr, restored.sender_addr);
        assert_eq!(envelope.seq, restored.seq);
        assert_eq!(envelope.epoch, restored.epoch);
        assert_eq!(envelope.nonce, restored.nonce);
        assert_eq!(envelope.ciphertext, restored.ciphertext);
        assert_eq!(envelope.signature, restored.signature);
    }

    #[test]
    fn measure_mls_lite_overhead() {
        // Not a functional test: prints wire sizes (visible with
        // `cargo test -- --nocapture`) and asserts loose overhead bounds.
        let id = test_identity();
        let secret = b"overhead measurement test secret";
        let group_id = [0x66; 8];

        let mut group = MlsLiteGroup::new(group_id, secret, 0);

        println!("=== MLS-Lite Wire Overhead (CBOR) ===");

        // Without signature
        let env_no_sig = MlsLiteEnvelope::new(&id, &mut group, b"", false)
            .expect("create");
        let wire_no_sig = env_no_sig.to_wire();
        // Overhead = wire - payload - 16 byte tag
        let overhead_no_sig = wire_no_sig.len() - 16; // tag is in ciphertext
        println!("No signature, 0B payload: {} bytes (overhead: {})", wire_no_sig.len(), overhead_no_sig);

        // With signature
        let env_sig = MlsLiteEnvelope::new(&id, &mut group, b"", true)
            .expect("create");
        let wire_sig = env_sig.to_wire();
        let overhead_sig = wire_sig.len() - 16;
        println!("With signature, 0B payload: {} bytes (overhead: {})", wire_sig.len(), overhead_sig);

        // 10-byte payload without sig
        let env_10 = MlsLiteEnvelope::new(&id, &mut group, b"hello mesh", false)
            .expect("create");
        let wire_10 = env_10.to_wire();
        println!("No signature, 10B payload: {} bytes", wire_10.len());

        // Compare to MeshEnvelope V1
        let v1_env = crate::envelope::MeshEnvelope::new(
            &id,
            &[0x77; 32],
            b"hello mesh".to_vec(),
            3600,
            5,
        );
        let v1_wire = v1_env.to_wire();
        println!("MeshEnvelope V1, 10B payload: {} bytes", v1_wire.len());
        println!("MLS-Lite savings (no sig): {} bytes", v1_wire.len() as i32 - wire_10.len() as i32);

        // MLS-Lite overhead is higher than raw struct due to CBOR encoding
        // but still much less than full MLS or MeshEnvelope
        assert!(overhead_no_sig < 150, "MLS-Lite overhead without sig should be under 150 bytes");
        assert!(overhead_sig < 300, "MLS-Lite overhead with sig should be under 300 bytes");
        // Key assertion: MLS-Lite should be significantly smaller than V1
        assert!(
            wire_10.len() < v1_wire.len() / 2,
            "MLS-Lite should be at least 2x smaller than MeshEnvelope V1"
        );
    }
}
|
||||
693
crates/quicprochat-p2p/src/persistence.rs
Normal file
693
crates/quicprochat-p2p/src/persistence.rs
Normal file
@@ -0,0 +1,693 @@
|
||||
//! Persistence layer for mesh node state.
|
||||
//!
|
||||
//! This module provides durable storage for:
|
||||
//! - Routing table entries
|
||||
//! - KeyPackage cache
|
||||
//! - Stored messages (store-and-forward)
|
||||
//! - Node identity
|
||||
//!
|
||||
//! Uses a simple append-only log format with periodic compaction.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fs::{self, File, OpenOptions};
|
||||
use std::io::{self, BufRead, BufReader, BufWriter, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
use crate::error::{MeshResult, StoreError};
|
||||
|
||||
/// Storage entry types.
///
/// One entry per log line (internally-tagged JSON via `#[serde(tag)]`).
/// The `*Remove` variants are tombstones: a removal is recorded as a new
/// entry and physically reclaimed only during compaction.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum StorageEntry {
    /// Routing table entry.
    Route {
        address: [u8; 16],
        next_hop: String,
        hops: u8,
        sequence: u32,
        // Unix timestamp (seconds); expired routes are dropped at compaction.
        expires_at: u64,
    },
    /// Remove a route.
    RouteRemove { address: [u8; 16] },
    /// KeyPackage cache entry.
    KeyPackage {
        address: [u8; 16],
        data: Vec<u8>,
        // Short content hash; together with `address` it keys the cache.
        hash: [u8; 8],
        expires_at: u64,
    },
    /// Remove a KeyPackage.
    KeyPackageRemove { address: [u8; 16], hash: [u8; 8] },
    /// Stored message.
    Message {
        id: Vec<u8>,
        recipient: [u8; 16],
        data: Vec<u8>,
        expires_at: u64,
    },
    /// Remove a message.
    MessageRemove { id: Vec<u8> },
    /// Identity keypair (encrypted or raw for development).
    Identity {
        public_key: Vec<u8>,
        secret_key_encrypted: Vec<u8>,
    },
}
|
||||
|
||||
/// Append-only log for persistence.
///
/// One JSON-serialized [`StorageEntry`] per line; removals are tombstones
/// reclaimed by compaction.
pub struct AppendLog {
    // Log file location on disk.
    path: PathBuf,
    // Buffered writer; `None` while the file is closed (during compaction).
    writer: Option<BufWriter<File>>,
    // Entries appended since the last compaction (resets to 0 on open).
    entries_since_compact: usize,
    // Append count at which `needs_compaction()` turns true.
    compact_threshold: usize,
}
|
||||
|
||||
impl AppendLog {
|
||||
/// Open or create a log file.
|
||||
pub fn open(path: impl AsRef<Path>) -> MeshResult<Self> {
|
||||
let path = path.as_ref().to_path_buf();
|
||||
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| {
|
||||
StoreError::Persistence(format!("failed to create directory: {}", e))
|
||||
})?;
|
||||
}
|
||||
|
||||
let file = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&path)
|
||||
.map_err(|e| StoreError::Persistence(format!("failed to open log: {}", e)))?;
|
||||
|
||||
Ok(Self {
|
||||
path,
|
||||
writer: Some(BufWriter::new(file)),
|
||||
entries_since_compact: 0,
|
||||
compact_threshold: 10_000,
|
||||
})
|
||||
}
|
||||
|
||||
/// Append an entry to the log.
|
||||
pub fn append(&mut self, entry: &StorageEntry) -> MeshResult<()> {
|
||||
let writer = self.writer.as_mut().ok_or_else(|| {
|
||||
StoreError::Persistence("log not open".to_string())
|
||||
})?;
|
||||
|
||||
let json = serde_json::to_string(entry).map_err(|e| {
|
||||
StoreError::Serialization(format!("failed to serialize entry: {}", e))
|
||||
})?;
|
||||
|
||||
writeln!(writer, "{}", json).map_err(|e| {
|
||||
StoreError::Persistence(format!("failed to write entry: {}", e))
|
||||
})?;
|
||||
|
||||
writer.flush().map_err(|e| {
|
||||
StoreError::Persistence(format!("failed to flush: {}", e))
|
||||
})?;
|
||||
|
||||
self.entries_since_compact += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read all entries from the log.
|
||||
pub fn read_all(&self) -> MeshResult<Vec<StorageEntry>> {
|
||||
let file = File::open(&self.path).map_err(|e| {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
return StoreError::NotFound(self.path.display().to_string());
|
||||
}
|
||||
StoreError::Persistence(format!("failed to open log: {}", e))
|
||||
})?;
|
||||
|
||||
let reader = BufReader::new(file);
|
||||
let mut entries = Vec::new();
|
||||
|
||||
for line in reader.lines() {
|
||||
let line = line.map_err(|e| {
|
||||
StoreError::Persistence(format!("failed to read line: {}", e))
|
||||
})?;
|
||||
|
||||
if line.trim().is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let entry: StorageEntry = serde_json::from_str(&line).map_err(|e| {
|
||||
StoreError::Serialization(format!("failed to parse entry: {}", e))
|
||||
})?;
|
||||
|
||||
entries.push(entry);
|
||||
}
|
||||
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
    /// Check if compaction is needed.
    ///
    /// True once the number of entries appended since the last compaction
    /// (or since open) reaches `compact_threshold`.
    pub fn needs_compaction(&self) -> bool {
        self.entries_since_compact >= self.compact_threshold
    }
|
||||
|
||||
/// Compact the log by replaying and removing deleted entries.
|
||||
pub fn compact(&mut self) -> MeshResult<CompactStats> {
|
||||
let entries = self.read_all()?;
|
||||
|
||||
// Build current state by replaying log
|
||||
let mut routes: HashMap<[u8; 16], StorageEntry> = HashMap::new();
|
||||
let mut keypackages: HashMap<([u8; 16], [u8; 8]), StorageEntry> = HashMap::new();
|
||||
let mut messages: HashMap<Vec<u8>, StorageEntry> = HashMap::new();
|
||||
let mut identity: Option<StorageEntry> = None;
|
||||
|
||||
let now = now_secs();
|
||||
|
||||
for entry in entries {
|
||||
match &entry {
|
||||
StorageEntry::Route { address, expires_at, .. } => {
|
||||
if *expires_at > now {
|
||||
routes.insert(*address, entry);
|
||||
}
|
||||
}
|
||||
StorageEntry::RouteRemove { address } => {
|
||||
routes.remove(address);
|
||||
}
|
||||
StorageEntry::KeyPackage { address, hash, expires_at, .. } => {
|
||||
if *expires_at > now {
|
||||
keypackages.insert((*address, *hash), entry);
|
||||
}
|
||||
}
|
||||
StorageEntry::KeyPackageRemove { address, hash } => {
|
||||
keypackages.remove(&(*address, *hash));
|
||||
}
|
||||
StorageEntry::Message { id, expires_at, .. } => {
|
||||
if *expires_at > now {
|
||||
messages.insert(id.clone(), entry);
|
||||
}
|
||||
}
|
||||
StorageEntry::MessageRemove { id } => {
|
||||
messages.remove(id);
|
||||
}
|
||||
StorageEntry::Identity { .. } => {
|
||||
identity = Some(entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write compacted log
|
||||
let tmp_path = self.path.with_extension("tmp");
|
||||
let mut tmp_file = File::create(&tmp_path).map_err(|e| {
|
||||
StoreError::Persistence(format!("failed to create temp file: {}", e))
|
||||
})?;
|
||||
|
||||
let mut written = 0;
|
||||
|
||||
if let Some(id) = identity {
|
||||
let json = serde_json::to_string(&id).map_err(|e| {
|
||||
StoreError::Serialization(e.to_string())
|
||||
})?;
|
||||
writeln!(tmp_file, "{}", json).map_err(|e| {
|
||||
StoreError::Persistence(e.to_string())
|
||||
})?;
|
||||
written += 1;
|
||||
}
|
||||
|
||||
for entry in routes.into_values() {
|
||||
let json = serde_json::to_string(&entry).map_err(|e| {
|
||||
StoreError::Serialization(e.to_string())
|
||||
})?;
|
||||
writeln!(tmp_file, "{}", json).map_err(|e| {
|
||||
StoreError::Persistence(e.to_string())
|
||||
})?;
|
||||
written += 1;
|
||||
}
|
||||
|
||||
for entry in keypackages.into_values() {
|
||||
let json = serde_json::to_string(&entry).map_err(|e| {
|
||||
StoreError::Serialization(e.to_string())
|
||||
})?;
|
||||
writeln!(tmp_file, "{}", json).map_err(|e| {
|
||||
StoreError::Persistence(e.to_string())
|
||||
})?;
|
||||
written += 1;
|
||||
}
|
||||
|
||||
for entry in messages.into_values() {
|
||||
let json = serde_json::to_string(&entry).map_err(|e| {
|
||||
StoreError::Serialization(e.to_string())
|
||||
})?;
|
||||
writeln!(tmp_file, "{}", json).map_err(|e| {
|
||||
StoreError::Persistence(e.to_string())
|
||||
})?;
|
||||
written += 1;
|
||||
}
|
||||
|
||||
tmp_file.sync_all().map_err(|e| {
|
||||
StoreError::Persistence(format!("failed to sync: {}", e))
|
||||
})?;
|
||||
drop(tmp_file);
|
||||
|
||||
// Close current writer
|
||||
self.writer = None;
|
||||
|
||||
// Replace old log with compacted one
|
||||
fs::rename(&tmp_path, &self.path).map_err(|e| {
|
||||
StoreError::Persistence(format!("failed to rename: {}", e))
|
||||
})?;
|
||||
|
||||
// Reopen
|
||||
let file = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&self.path)
|
||||
.map_err(|e| StoreError::Persistence(format!("failed to reopen: {}", e)))?;
|
||||
|
||||
self.writer = Some(BufWriter::new(file));
|
||||
self.entries_since_compact = 0;
|
||||
|
||||
Ok(CompactStats {
|
||||
entries_before: self.entries_since_compact,
|
||||
entries_after: written,
|
||||
})
|
||||
}
|
||||
|
||||
/// Sync to disk.
|
||||
pub fn sync(&mut self) -> MeshResult<()> {
|
||||
if let Some(writer) = self.writer.as_mut() {
|
||||
writer.flush().map_err(|e| {
|
||||
StoreError::Persistence(format!("flush failed: {}", e))
|
||||
})?;
|
||||
writer.get_ref().sync_all().map_err(|e| {
|
||||
StoreError::Persistence(format!("sync failed: {}", e))
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Compaction statistics.
///
/// Returned by `compact` so callers can log or meter how effective a
/// compaction pass was.
#[derive(Debug, Clone)]
pub struct CompactStats {
    /// Number of log entries present before compaction.
    pub entries_before: usize,
    /// Number of live entries written to the compacted log.
    pub entries_after: usize,
}
|
||||
|
||||
/// Persistent routing table storage.
///
/// Pairs an on-disk [`AppendLog`] (the source of truth) with an in-memory
/// index that is rebuilt by replaying the log on open.
pub struct PersistentRoutingTable {
    /// Backing append-only log; every mutation is written here first.
    log: AppendLog,
    /// In-memory index of live routes, keyed by mesh address.
    routes: HashMap<MeshAddress, RouteEntry>,
}
|
||||
|
||||
/// In-memory route entry.
#[derive(Debug, Clone)]
pub struct RouteEntry {
    /// Next-hop transport address string (e.g. "tcp:127.0.0.1:8080" as
    /// used in the tests below).
    pub next_hop: String,
    /// Hop count to the destination.
    pub hops: u8,
    /// Sequence number stored with the route. NOTE(review): not compared
    /// anywhere in this module — freshness ordering is presumably enforced
    /// by the caller; confirm against the announce-handling code.
    pub sequence: u32,
    /// Unix-seconds expiry; entries at or past this time are treated as gone.
    pub expires_at: u64,
}
|
||||
|
||||
impl PersistentRoutingTable {
|
||||
/// Open or create a persistent routing table.
|
||||
pub fn open(path: impl AsRef<Path>) -> MeshResult<Self> {
|
||||
let mut log = AppendLog::open(path)?;
|
||||
let mut routes = HashMap::new();
|
||||
|
||||
let now = now_secs();
|
||||
|
||||
for entry in log.read_all().unwrap_or_default() {
|
||||
if let StorageEntry::Route { address, next_hop, hops, sequence, expires_at } = entry {
|
||||
if expires_at > now {
|
||||
routes.insert(
|
||||
MeshAddress::from_bytes(address),
|
||||
RouteEntry { next_hop, hops, sequence, expires_at },
|
||||
);
|
||||
}
|
||||
} else if let StorageEntry::RouteRemove { address } = entry {
|
||||
routes.remove(&MeshAddress::from_bytes(address));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Self { log, routes })
|
||||
}
|
||||
|
||||
/// Insert or update a route.
|
||||
pub fn insert(
|
||||
&mut self,
|
||||
address: MeshAddress,
|
||||
next_hop: String,
|
||||
hops: u8,
|
||||
sequence: u32,
|
||||
ttl: Duration,
|
||||
) -> MeshResult<()> {
|
||||
let expires_at = now_secs() + ttl.as_secs();
|
||||
|
||||
self.log.append(&StorageEntry::Route {
|
||||
address: *address.as_bytes(),
|
||||
next_hop: next_hop.clone(),
|
||||
hops,
|
||||
sequence,
|
||||
expires_at,
|
||||
})?;
|
||||
|
||||
self.routes.insert(address, RouteEntry {
|
||||
next_hop,
|
||||
hops,
|
||||
sequence,
|
||||
expires_at,
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Look up a route.
|
||||
pub fn get(&self, address: &MeshAddress) -> Option<&RouteEntry> {
|
||||
let entry = self.routes.get(address)?;
|
||||
if entry.expires_at > now_secs() {
|
||||
Some(entry)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove a route.
|
||||
pub fn remove(&mut self, address: &MeshAddress) -> MeshResult<bool> {
|
||||
if self.routes.remove(address).is_some() {
|
||||
self.log.append(&StorageEntry::RouteRemove {
|
||||
address: *address.as_bytes(),
|
||||
})?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of routes.
|
||||
pub fn len(&self) -> usize {
|
||||
self.routes.len()
|
||||
}
|
||||
|
||||
/// Check if empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.routes.is_empty()
|
||||
}
|
||||
|
||||
/// Garbage collect expired routes.
|
||||
pub fn gc(&mut self) -> MeshResult<usize> {
|
||||
let now = now_secs();
|
||||
let expired: Vec<_> = self.routes
|
||||
.iter()
|
||||
.filter(|(_, e)| e.expires_at <= now)
|
||||
.map(|(a, _)| *a)
|
||||
.collect();
|
||||
|
||||
let count = expired.len();
|
||||
for addr in expired {
|
||||
self.remove(&addr)?;
|
||||
}
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
/// Compact the underlying log.
|
||||
pub fn compact(&mut self) -> MeshResult<CompactStats> {
|
||||
self.log.compact()
|
||||
}
|
||||
|
||||
/// Sync to disk.
|
||||
pub fn sync(&mut self) -> MeshResult<()> {
|
||||
self.log.sync()
|
||||
}
|
||||
}
|
||||
|
||||
/// Persistent message store.
///
/// Pairs an on-disk [`AppendLog`] with two in-memory indexes rebuilt on
/// open: one by message id, one listing message ids per recipient.
pub struct PersistentMessageStore {
    /// Backing append-only log; every mutation is written here first.
    log: AppendLog,
    /// Messages keyed by id.
    messages: HashMap<Vec<u8>, MessageEntry>,
    /// Secondary index: message ids stored for each recipient.
    by_recipient: HashMap<MeshAddress, Vec<Vec<u8>>>,
}
|
||||
|
||||
/// In-memory message entry.
#[derive(Debug, Clone)]
pub struct MessageEntry {
    /// Destination address the message is held for.
    pub recipient: MeshAddress,
    /// Opaque message payload (presumably ciphertext; not interpreted here).
    pub data: Vec<u8>,
    /// Unix-seconds expiry; entries at or past this time are treated as gone.
    pub expires_at: u64,
}
|
||||
|
||||
impl PersistentMessageStore {
|
||||
/// Open or create a persistent message store.
|
||||
pub fn open(path: impl AsRef<Path>) -> MeshResult<Self> {
|
||||
let mut log = AppendLog::open(path)?;
|
||||
let mut messages = HashMap::new();
|
||||
let mut by_recipient: HashMap<MeshAddress, Vec<Vec<u8>>> = HashMap::new();
|
||||
|
||||
let now = now_secs();
|
||||
|
||||
for entry in log.read_all().unwrap_or_default() {
|
||||
if let StorageEntry::Message { id, recipient, data, expires_at } = entry {
|
||||
if expires_at > now {
|
||||
let addr = MeshAddress::from_bytes(recipient);
|
||||
messages.insert(id.clone(), MessageEntry {
|
||||
recipient: addr,
|
||||
data,
|
||||
expires_at,
|
||||
});
|
||||
by_recipient.entry(addr).or_default().push(id);
|
||||
}
|
||||
} else if let StorageEntry::MessageRemove { id } = entry {
|
||||
if let Some(entry) = messages.remove(&id) {
|
||||
if let Some(ids) = by_recipient.get_mut(&entry.recipient) {
|
||||
ids.retain(|i| i != &id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Self { log, messages, by_recipient })
|
||||
}
|
||||
|
||||
/// Store a message.
|
||||
pub fn store(
|
||||
&mut self,
|
||||
id: Vec<u8>,
|
||||
recipient: MeshAddress,
|
||||
data: Vec<u8>,
|
||||
ttl: Duration,
|
||||
) -> MeshResult<()> {
|
||||
let expires_at = now_secs() + ttl.as_secs();
|
||||
|
||||
self.log.append(&StorageEntry::Message {
|
||||
id: id.clone(),
|
||||
recipient: *recipient.as_bytes(),
|
||||
data: data.clone(),
|
||||
expires_at,
|
||||
})?;
|
||||
|
||||
self.messages.insert(id.clone(), MessageEntry {
|
||||
recipient,
|
||||
data,
|
||||
expires_at,
|
||||
});
|
||||
self.by_recipient.entry(recipient).or_default().push(id);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get messages for a recipient.
|
||||
pub fn get_for_recipient(&self, recipient: &MeshAddress) -> Vec<(Vec<u8>, Vec<u8>)> {
|
||||
let now = now_secs();
|
||||
self.by_recipient
|
||||
.get(recipient)
|
||||
.map(|ids| {
|
||||
ids.iter()
|
||||
.filter_map(|id| {
|
||||
let entry = self.messages.get(id)?;
|
||||
if entry.expires_at > now {
|
||||
Some((id.clone(), entry.data.clone()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Remove a message.
|
||||
pub fn remove(&mut self, id: &[u8]) -> MeshResult<bool> {
|
||||
if let Some(entry) = self.messages.remove(id) {
|
||||
if let Some(ids) = self.by_recipient.get_mut(&entry.recipient) {
|
||||
ids.retain(|i| i != id);
|
||||
}
|
||||
self.log.append(&StorageEntry::MessageRemove {
|
||||
id: id.to_vec(),
|
||||
})?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of stored messages.
|
||||
pub fn len(&self) -> usize {
|
||||
self.messages.len()
|
||||
}
|
||||
|
||||
/// Check if empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.messages.is_empty()
|
||||
}
|
||||
|
||||
/// Garbage collect expired messages.
|
||||
pub fn gc(&mut self) -> MeshResult<usize> {
|
||||
let now = now_secs();
|
||||
let expired: Vec<_> = self.messages
|
||||
.iter()
|
||||
.filter(|(_, e)| e.expires_at <= now)
|
||||
.map(|(id, _)| id.clone())
|
||||
.collect();
|
||||
|
||||
let count = expired.len();
|
||||
for id in expired {
|
||||
self.remove(&id)?;
|
||||
}
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
/// Compact the underlying log.
|
||||
pub fn compact(&mut self) -> MeshResult<CompactStats> {
|
||||
self.log.compact()
|
||||
}
|
||||
|
||||
/// Sync to disk.
|
||||
pub fn sync(&mut self) -> MeshResult<()> {
|
||||
self.log.sync()
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current wall-clock time as whole Unix seconds.
///
/// A clock set before the Unix epoch yields 0 rather than panicking.
fn now_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    /// An entry appended in one session must survive drop (BufWriter flush)
    /// and be readable after reopening the log.
    #[test]
    fn append_log_roundtrip() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("test.log");

        {
            let mut log = AppendLog::open(&path).unwrap();
            log.append(&StorageEntry::Route {
                address: [1u8; 16],
                next_hop: "tcp:127.0.0.1:8080".to_string(),
                hops: 2,
                sequence: 42,
                expires_at: now_secs() + 3600,
            }).unwrap();
        }

        let log = AppendLog::open(&path).unwrap();
        let entries = log.read_all().unwrap();
        assert_eq!(entries.len(), 1);

        if let StorageEntry::Route { sequence, .. } = &entries[0] {
            assert_eq!(*sequence, 42);
        } else {
            panic!("expected Route entry");
        }
    }

    /// A route inserted and synced must be rebuilt into the in-memory
    /// index when the table is reopened.
    #[test]
    fn routing_table_persistence() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("routes.log");

        let addr = MeshAddress::from_bytes([0xAB; 16]);

        {
            let mut rt = PersistentRoutingTable::open(&path).unwrap();
            rt.insert(
                addr,
                "tcp:192.168.1.1:8080".to_string(),
                3,
                100,
                Duration::from_secs(3600),
            ).unwrap();
            rt.sync().unwrap();
        }

        // Reopen and verify
        let rt = PersistentRoutingTable::open(&path).unwrap();
        let entry = rt.get(&addr).expect("route should exist");
        assert_eq!(entry.hops, 3);
        assert_eq!(entry.sequence, 100);
    }

    /// A stored message must be retrievable by recipient after reopen.
    #[test]
    fn message_store_persistence() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("messages.log");

        let recipient = MeshAddress::from_bytes([0xCD; 16]);
        let id = b"msg-001".to_vec();
        let data = b"Hello, mesh!".to_vec();

        {
            let mut store = PersistentMessageStore::open(&path).unwrap();
            store.store(id.clone(), recipient, data.clone(), Duration::from_secs(3600)).unwrap();
            store.sync().unwrap();
        }

        let store = PersistentMessageStore::open(&path).unwrap();
        let msgs = store.get_for_recipient(&recipient);
        assert_eq!(msgs.len(), 1);
        assert_eq!(msgs[0].0, id);
        assert_eq!(msgs[0].1, data);
    }

    /// Compaction must drop removed entries from the on-disk log, so a
    /// reopened table no longer sees them.
    #[test]
    fn compaction_removes_deleted() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("compact.log");

        let addr1 = MeshAddress::from_bytes([1; 16]);
        let addr2 = MeshAddress::from_bytes([2; 16]);

        {
            let mut rt = PersistentRoutingTable::open(&path).unwrap();
            rt.insert(addr1, "hop1".to_string(), 1, 1, Duration::from_secs(3600)).unwrap();
            rt.insert(addr2, "hop2".to_string(), 1, 1, Duration::from_secs(3600)).unwrap();
            rt.remove(&addr1).unwrap(); // Delete one
            rt.compact().unwrap();
        }

        let rt = PersistentRoutingTable::open(&path).unwrap();
        assert!(rt.get(&addr1).is_none());
        assert!(rt.get(&addr2).is_some());
        assert_eq!(rt.len(), 1);
    }

    /// A zero-TTL route expires immediately (expires_at == now) and must be
    /// collected by gc.
    #[test]
    fn gc_removes_expired() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("gc.log");

        let addr = MeshAddress::from_bytes([0xEE; 16]);

        let mut rt = PersistentRoutingTable::open(&path).unwrap();
        rt.insert(addr, "hop".to_string(), 1, 1, Duration::from_secs(0)).unwrap();

        // Should be expired immediately
        std::thread::sleep(Duration::from_millis(10));
        let gc_count = rt.gc().unwrap();
        assert_eq!(gc_count, 1);
        assert!(rt.get(&addr).is_none());
    }
}
|
||||
482
crates/quicprochat-p2p/src/rate_limit.rs
Normal file
482
crates/quicprochat-p2p/src/rate_limit.rs
Normal file
@@ -0,0 +1,482 @@
|
||||
//! Rate limiting for DoS protection.
|
||||
//!
|
||||
//! This module provides token bucket rate limiters for controlling
|
||||
//! message rates per peer and globally. Designed for low overhead
|
||||
//! even on constrained devices.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::RwLock;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::address::MeshAddress;
|
||||
use crate::config::RateLimitConfig;
|
||||
use crate::error::{MeshError, MeshResult};
|
||||
|
||||
/// Result of a rate limit check.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RateLimitResult {
    /// Request allowed.
    Allowed,
    /// Request denied, retry after this duration. `retry_after` is
    /// `Duration::MAX` when the bucket never refills (zero refill rate).
    Denied { retry_after: Duration },
    /// Soft warning: the request was allowed but the bucket is running low
    /// (fewer than a quarter of its capacity remains).
    Warning { remaining: u32 },
}

impl RateLimitResult {
    /// `true` for `Allowed` and `Warning` (the request may proceed);
    /// `false` only for `Denied`.
    pub fn is_allowed(&self) -> bool {
        matches!(self, Self::Allowed | Self::Warning { .. })
    }
}

/// Token bucket rate limiter.
#[derive(Debug)]
pub struct TokenBucket {
    /// Maximum tokens (bucket capacity).
    capacity: u32,
    /// Current tokens. Fractional so that slow refill rates accumulate
    /// across calls instead of rounding to zero.
    tokens: f64,
    /// Tokens added per second. May be zero (bucket never refills).
    refill_rate: f64,
    /// Last refill time.
    last_refill: Instant,
}

impl TokenBucket {
    /// Create a new token bucket, starting full.
    pub fn new(capacity: u32, per_second: f64) -> Self {
        Self {
            capacity,
            tokens: capacity as f64,
            refill_rate: per_second,
            last_refill: Instant::now(),
        }
    }

    /// Create from a per-minute rate. Capacity equals the per-minute count,
    /// clamped to at least 1 so a burst of one is always representable
    /// (note: `per_minute(0)` therefore admits exactly one request and
    /// never refills).
    pub fn per_minute(per_minute: u32) -> Self {
        let capacity = per_minute.max(1);
        let per_second = per_minute as f64 / 60.0;
        Self::new(capacity, per_second)
    }

    /// Refill tokens based on elapsed time, capped at capacity.
    fn refill(&mut self) {
        let now = Instant::now();
        let elapsed = now.duration_since(self.last_refill);
        let add = elapsed.as_secs_f64() * self.refill_rate;
        self.tokens = (self.tokens + add).min(self.capacity as f64);
        self.last_refill = now;
    }

    /// Try to consume one token.
    pub fn try_acquire(&mut self) -> RateLimitResult {
        self.try_acquire_n(1)
    }

    /// Try to consume n tokens.
    pub fn try_acquire_n(&mut self, n: u32) -> RateLimitResult {
        self.refill();

        let n_f = n as f64;
        if self.tokens >= n_f {
            self.tokens -= n_f;
            let remaining = self.tokens as u32;
            if remaining < self.capacity / 4 {
                RateLimitResult::Warning { remaining }
            } else {
                RateLimitResult::Allowed
            }
        } else {
            let deficit = n_f - self.tokens;
            // BUG FIX: the original fed `deficit / refill_rate` straight to
            // `Duration::from_secs_f64`, which panics on non-finite or
            // overflowing input — i.e. whenever `refill_rate` is 0 (such as
            // a bucket built with `per_minute(0)`) or the wait is enormous.
            let retry_after = if self.refill_rate > 0.0 {
                Duration::try_from_secs_f64(deficit / self.refill_rate)
                    .unwrap_or(Duration::MAX)
            } else {
                Duration::MAX
            };
            RateLimitResult::Denied { retry_after }
        }
    }

    /// Current available tokens (refills first).
    pub fn available(&mut self) -> u32 {
        self.refill();
        self.tokens as u32
    }
}
|
||||
|
||||
/// Per-peer rate limiter with multiple buckets.
///
/// One independent token bucket per traffic class, plus a last-activity
/// timestamp so idle peers can be evicted by `RateLimiter::cleanup`.
#[derive(Debug)]
pub struct PeerRateLimiter {
    /// Message bucket.
    messages: TokenBucket,
    /// Announce bucket.
    announces: TokenBucket,
    /// KeyPackage request bucket.
    keypackage_requests: TokenBucket,
    /// Last activity (for cleanup); refreshed by every check_* call.
    last_activity: Instant,
}
|
||||
|
||||
impl PeerRateLimiter {
|
||||
pub fn from_config(config: &RateLimitConfig) -> Self {
|
||||
Self {
|
||||
messages: TokenBucket::per_minute(config.message_per_peer_per_min),
|
||||
announces: TokenBucket::per_minute(config.announce_per_peer_per_min),
|
||||
keypackage_requests: TokenBucket::per_minute(config.keypackage_requests_per_min),
|
||||
last_activity: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_message(&mut self) -> RateLimitResult {
|
||||
self.last_activity = Instant::now();
|
||||
self.messages.try_acquire()
|
||||
}
|
||||
|
||||
pub fn check_announce(&mut self) -> RateLimitResult {
|
||||
self.last_activity = Instant::now();
|
||||
self.announces.try_acquire()
|
||||
}
|
||||
|
||||
pub fn check_keypackage_request(&mut self) -> RateLimitResult {
|
||||
self.last_activity = Instant::now();
|
||||
self.keypackage_requests.try_acquire()
|
||||
}
|
||||
|
||||
/// Time since last activity.
|
||||
pub fn idle_time(&self) -> Duration {
|
||||
self.last_activity.elapsed()
|
||||
}
|
||||
}
|
||||
|
||||
/// Global rate limiter managing per-peer limits.
pub struct RateLimiter {
    /// Configuration used to size each new peer's buckets.
    config: RateLimitConfig,
    /// Per-peer limiters, created lazily on first traffic from a peer.
    peers: RwLock<HashMap<MeshAddress, PeerRateLimiter>>,
    /// Upper bound on tracked peers (to prevent memory exhaustion).
    max_peers: usize,
}
|
||||
|
||||
impl RateLimiter {
|
||||
pub fn new(config: RateLimitConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
peers: RwLock::new(HashMap::new()),
|
||||
max_peers: 10_000,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a message from peer is allowed.
|
||||
pub fn check_message(&self, peer: &MeshAddress) -> MeshResult<RateLimitResult> {
|
||||
let mut peers = self.peers.write().map_err(|_| {
|
||||
MeshError::Internal("rate limiter lock poisoned".to_string())
|
||||
})?;
|
||||
|
||||
let limiter = peers
|
||||
.entry(*peer)
|
||||
.or_insert_with(|| PeerRateLimiter::from_config(&self.config));
|
||||
|
||||
Ok(limiter.check_message())
|
||||
}
|
||||
|
||||
/// Check if an announce from peer is allowed.
|
||||
pub fn check_announce(&self, peer: &MeshAddress) -> MeshResult<RateLimitResult> {
|
||||
let mut peers = self.peers.write().map_err(|_| {
|
||||
MeshError::Internal("rate limiter lock poisoned".to_string())
|
||||
})?;
|
||||
|
||||
let limiter = peers
|
||||
.entry(*peer)
|
||||
.or_insert_with(|| PeerRateLimiter::from_config(&self.config));
|
||||
|
||||
Ok(limiter.check_announce())
|
||||
}
|
||||
|
||||
/// Check if a KeyPackage request from peer is allowed.
|
||||
pub fn check_keypackage_request(&self, peer: &MeshAddress) -> MeshResult<RateLimitResult> {
|
||||
let mut peers = self.peers.write().map_err(|_| {
|
||||
MeshError::Internal("rate limiter lock poisoned".to_string())
|
||||
})?;
|
||||
|
||||
let limiter = peers
|
||||
.entry(*peer)
|
||||
.or_insert_with(|| PeerRateLimiter::from_config(&self.config));
|
||||
|
||||
Ok(limiter.check_keypackage_request())
|
||||
}
|
||||
|
||||
/// Remove limiters for peers idle longer than max_idle.
|
||||
pub fn cleanup(&self, max_idle: Duration) -> usize {
|
||||
let mut peers = match self.peers.write() {
|
||||
Ok(p) => p,
|
||||
Err(_) => return 0,
|
||||
};
|
||||
|
||||
let before = peers.len();
|
||||
peers.retain(|_, limiter| limiter.idle_time() < max_idle);
|
||||
before - peers.len()
|
||||
}
|
||||
|
||||
/// Number of tracked peers.
|
||||
pub fn tracked_peers(&self) -> usize {
|
||||
self.peers.read().map(|p| p.len()).unwrap_or(0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Duty cycle tracker for LoRa compliance.
///
/// Tracks transmitted airtime over a sliding one-hour window and answers
/// whether a further transmission would exceed the configured duty-cycle
/// fraction.
#[derive(Debug)]
pub struct DutyCycleTracker {
    /// Duty cycle limit (0.0 to 1.0).
    limit: f32,
    /// Window size for tracking.
    window: Duration,
    /// Transmission records: (timestamp, duration_ms). Grows until
    /// `cleanup` prunes aged-out records.
    transmissions: RwLock<Vec<(Instant, u64)>>,
}

impl DutyCycleTracker {
    /// Create with a duty cycle limit (e.g., 0.01 for 1%); out-of-range
    /// values are clamped into [0, 1].
    pub fn new(limit: f32) -> Self {
        Self {
            limit: limit.clamp(0.0, 1.0),
            window: Duration::from_secs(3600), // 1 hour window
            transmissions: RwLock::new(Vec::new()),
        }
    }

    /// Start of the sliding window, or `None` when the `Instant` clock has
    /// not yet advanced a full window.
    ///
    /// BUG FIX: the original computed `Instant::now() - self.window`, and
    /// `Instant - Duration` panics when the result would precede the
    /// platform's time origin — which can happen during roughly the first
    /// hour of uptime. `checked_sub` returns `None` instead, and `None`
    /// means "every record is in-window" (nothing can predate the origin).
    fn window_start(&self) -> Option<Instant> {
        Instant::now().checked_sub(self.window)
    }

    /// Check if we can transmit for the given airtime without exceeding
    /// the duty-cycle limit.
    pub fn can_transmit(&self, airtime_ms: u64) -> bool {
        let used = self.used_ms();
        let window_ms = self.window.as_millis() as u64;
        let limit_ms = (window_ms as f32 * self.limit) as u64;
        used + airtime_ms <= limit_ms
    }

    /// Record a transmission of `airtime_ms` milliseconds.
    pub fn record(&self, airtime_ms: u64) {
        if let Ok(mut tx) = self.transmissions.write() {
            tx.push((Instant::now(), airtime_ms));
        }
    }

    /// Get total airtime used in the current window, in milliseconds.
    /// A poisoned lock reads as zero usage (fail-open, as the original did).
    pub fn used_ms(&self) -> u64 {
        let cutoff = self.window_start();
        let tx = match self.transmissions.read() {
            Ok(t) => t,
            Err(_) => return 0,
        };

        tx.iter()
            .filter(|(t, _)| cutoff.map_or(true, |c| *t > c))
            .map(|(_, d)| *d)
            .sum()
    }

    /// Get remaining airtime budget in the current window, in milliseconds.
    pub fn remaining_ms(&self) -> u64 {
        let window_ms = self.window.as_millis() as u64;
        let limit_ms = (window_ms as f32 * self.limit) as u64;
        limit_ms.saturating_sub(self.used_ms())
    }

    /// Drop records that have aged out of the window. `record` never
    /// prunes, so callers should invoke this periodically to bound memory.
    pub fn cleanup(&self) {
        let cutoff = match self.window_start() {
            Some(c) => c,
            // Clock younger than one window: nothing can have aged out.
            None => return,
        };
        if let Ok(mut tx) = self.transmissions.write() {
            tx.retain(|(t, _)| *t > cutoff);
        }
    }

    /// Current duty cycle usage as a fraction of the window.
    pub fn current_usage(&self) -> f32 {
        let window_ms = self.window.as_millis() as f32;
        self.used_ms() as f32 / window_ms
    }
}
|
||||
|
||||
/// Backpressure signal for flow control.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BackpressureLevel {
    /// No backpressure, process normally.
    None,
    /// Light pressure, shed low-priority work.
    Light,
    /// Medium pressure, shed non-critical work.
    Medium,
    /// Heavy pressure, only process critical messages.
    Heavy,
    /// Overloaded, reject new work.
    Overloaded,
}

impl BackpressureLevel {
    /// Should we process a message at this priority (0 = highest)?
    ///
    /// Each level admits priorities up to a fixed ceiling; `Overloaded`
    /// admits nothing at all.
    pub fn should_process(&self, priority: u8) -> bool {
        let ceiling = match self {
            Self::None => u8::MAX,
            Self::Light => 2,
            Self::Medium => 1,
            Self::Heavy => 0,
            Self::Overloaded => return false,
        };
        priority <= ceiling
    }
}

/// Backpressure controller based on queue depth.
#[derive(Debug)]
pub struct BackpressureController {
    /// Thresholds for each level, ascending: [light, medium, heavy, overload].
    thresholds: [usize; 4],
    /// Current queue depth, published by `set_queue_depth`.
    current: std::sync::atomic::AtomicUsize,
}

impl BackpressureController {
    /// Build a controller with explicit thresholds for each pressure level.
    pub fn new(light: usize, medium: usize, heavy: usize, overload: usize) -> Self {
        BackpressureController {
            thresholds: [light, medium, heavy, overload],
            current: std::sync::atomic::AtomicUsize::new(0),
        }
    }

    /// Conservative thresholds suited to constrained devices.
    pub fn default_for_constrained() -> Self {
        Self::new(10, 25, 50, 100)
    }

    /// Thresholds suited to standard hardware.
    pub fn default_for_standard() -> Self {
        Self::new(100, 500, 1000, 5000)
    }

    /// Publish the current queue depth (relaxed store; readers only need
    /// an eventually-consistent snapshot).
    pub fn set_queue_depth(&self, depth: usize) {
        self.current.store(depth, std::sync::atomic::Ordering::Relaxed);
    }

    /// Translate the last published depth into a pressure level by finding
    /// the highest threshold it meets.
    pub fn level(&self) -> BackpressureLevel {
        let depth = self.current.load(std::sync::atomic::Ordering::Relaxed);
        let [light, medium, heavy, overload] = self.thresholds;
        if depth >= overload {
            BackpressureLevel::Overloaded
        } else if depth >= heavy {
            BackpressureLevel::Heavy
        } else if depth >= medium {
            BackpressureLevel::Medium
        } else if depth >= light {
            BackpressureLevel::Light
        } else {
            BackpressureLevel::None
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A full bucket allows exactly `capacity` requests in a burst, then
    /// denies.
    #[test]
    fn token_bucket_allows_burst() {
        let mut bucket = TokenBucket::new(10, 1.0);
        for _ in 0..10 {
            assert!(bucket.try_acquire().is_allowed());
        }
        assert!(!bucket.try_acquire().is_allowed());
    }

    /// Tokens come back over time at the configured refill rate.
    #[test]
    fn token_bucket_refills() {
        let mut bucket = TokenBucket::new(2, 100.0); // 100/sec refill
        bucket.try_acquire();
        bucket.try_acquire();
        assert!(!bucket.try_acquire().is_allowed());

        std::thread::sleep(Duration::from_millis(50));
        assert!(bucket.try_acquire().is_allowed());
    }

    /// Dropping below a quarter of capacity yields Warning, not Allowed.
    #[test]
    fn token_bucket_warning() {
        let mut bucket = TokenBucket::new(8, 1.0);
        // Use 7 tokens (leaves 1, which is < 8/4 = 2)
        for _ in 0..7 {
            bucket.try_acquire();
        }
        let result = bucket.try_acquire();
        assert!(matches!(result, RateLimitResult::Warning { remaining: 0 }));
    }

    /// The per-peer message bucket is sized from the config value.
    #[test]
    fn peer_rate_limiter() {
        let config = RateLimitConfig {
            message_per_peer_per_min: 5,
            ..Default::default()
        };
        let mut limiter = PeerRateLimiter::from_config(&config);

        for _ in 0..5 {
            assert!(limiter.check_message().is_allowed());
        }
        assert!(!limiter.check_message().is_allowed());
    }

    /// Exhausting one peer's bucket must not affect another peer.
    #[test]
    fn rate_limiter_per_peer() {
        let config = RateLimitConfig {
            message_per_peer_per_min: 2,
            ..Default::default()
        };
        let limiter = RateLimiter::new(config);

        let peer1 = MeshAddress::from_bytes([1; 16]);
        let peer2 = MeshAddress::from_bytes([2; 16]);

        assert!(limiter.check_message(&peer1).unwrap().is_allowed());
        assert!(limiter.check_message(&peer1).unwrap().is_allowed());
        assert!(!limiter.check_message(&peer1).unwrap().is_allowed());

        // peer2 has its own bucket
        assert!(limiter.check_message(&peer2).unwrap().is_allowed());
    }

    /// Airtime accumulates toward the 1% budget and blocks at the limit.
    #[test]
    fn duty_cycle_tracker() {
        let tracker = DutyCycleTracker::new(0.01); // 1%
        // 1 hour = 3600000 ms, 1% = 36000 ms

        assert!(tracker.can_transmit(1000));
        tracker.record(1000);
        assert_eq!(tracker.used_ms(), 1000);

        assert!(tracker.can_transmit(35000));
        tracker.record(35000);

        // Now at 36000ms, at limit
        assert!(!tracker.can_transmit(1000));
    }

    /// Each queue-depth band maps to its corresponding pressure level.
    #[test]
    fn backpressure_levels() {
        let bp = BackpressureController::new(10, 50, 100, 200);

        bp.set_queue_depth(5);
        assert_eq!(bp.level(), BackpressureLevel::None);

        bp.set_queue_depth(30);
        assert_eq!(bp.level(), BackpressureLevel::Light);

        bp.set_queue_depth(75);
        assert_eq!(bp.level(), BackpressureLevel::Medium);

        bp.set_queue_depth(150);
        assert_eq!(bp.level(), BackpressureLevel::Heavy);

        bp.set_queue_depth(250);
        assert_eq!(bp.level(), BackpressureLevel::Overloaded);
    }

    /// Each level admits only priorities up to its ceiling (0 = highest).
    #[test]
    fn backpressure_priority_filter() {
        assert!(BackpressureLevel::None.should_process(5));
        assert!(!BackpressureLevel::Light.should_process(5));
        assert!(BackpressureLevel::Light.should_process(2));
        assert!(!BackpressureLevel::Overloaded.should_process(0));
    }
}
|
||||
@@ -413,10 +413,10 @@ mod tests {
|
||||
};
|
||||
|
||||
router.add_peer(pk, route);
|
||||
assert!(router.peers.lock().unwrap().contains_key(&pk));
|
||||
assert!(router.peers.lock().expect("routing table lock poisoned").contains_key(&pk));
|
||||
|
||||
router.remove_peer(&pk);
|
||||
assert!(!router.peers.lock().unwrap().contains_key(&pk));
|
||||
assert!(!router.peers.lock().expect("routing table lock poisoned").contains_key(&pk));
|
||||
|
||||
drop(router);
|
||||
Arc::try_unwrap(node).ok().expect("sole owner").close().await;
|
||||
245
crates/quicprochat-p2p/src/routing_table.rs
Normal file
245
crates/quicprochat-p2p/src/routing_table.rs
Normal file
@@ -0,0 +1,245 @@
|
||||
//! Distributed routing table built from mesh announcements.
|
||||
//!
|
||||
//! The [`RoutingTable`] stores [`RoutingEntry`] records keyed by 16-byte
|
||||
//! truncated mesh addresses, enabling multi-hop packet forwarding through
|
||||
//! the mesh network.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::announce::MeshAnnounce;
|
||||
use crate::transport::TransportAddr;
|
||||
|
||||
/// A routing entry for a known mesh destination.
#[derive(Clone, Debug)]
pub struct RoutingEntry {
    /// Full 32-byte Ed25519 public key of the destination.
    pub identity_key: [u8; 32],
    /// 16-byte truncated mesh address (the routing-table key).
    pub address: [u8; 16],
    /// Next-hop transport name (e.g. "tcp", "iroh-quic", "lora").
    pub next_hop_transport: String,
    /// Next-hop address to send through (where the announce arrived from).
    pub next_hop_addr: TransportAddr,
    /// Number of hops to this destination.
    pub hops: u8,
    /// Estimated cost (lower is better). Currently computed as hops as f64.
    pub cost: f64,
    /// Capabilities bitfield of the destination node. NOTE(review): bit
    /// meanings are defined elsewhere — not interpreted in this module.
    pub capabilities: u16,
    /// Last announce sequence number seen from this node; used to discard
    /// stale announces.
    pub last_sequence: u64,
    /// When this entry was last updated.
    pub last_seen: Instant,
    /// When this entry expires (based on the table's default TTL).
    pub expires_at: Instant,
}
|
||||
|
||||
/// Distributed routing table built from received mesh announcements.
pub struct RoutingTable {
    /// Entries keyed by 16-byte truncated address.
    entries: HashMap<[u8; 16], RoutingEntry>,
    /// Default entry TTL applied to every inserted/refreshed entry.
    default_ttl: Duration,
}
|
||||
|
||||
impl RoutingTable {
|
||||
/// Create a new empty routing table with the given default TTL for entries.
|
||||
pub fn new(default_ttl: Duration) -> Self {
|
||||
Self {
|
||||
entries: HashMap::new(),
|
||||
default_ttl,
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the routing table from a received mesh announcement.
|
||||
///
|
||||
/// Returns `true` if this was a new or improved route.
|
||||
///
|
||||
/// Logic:
|
||||
/// - If `sequence <= last_sequence` for this address, the announce is stale — ignored.
|
||||
/// - If the entry is new or has lower cost, it replaces the existing entry.
|
||||
pub fn update(
|
||||
&mut self,
|
||||
announce: &MeshAnnounce,
|
||||
received_via_transport: &str,
|
||||
received_from: TransportAddr,
|
||||
) -> bool {
|
||||
let address = announce.address;
|
||||
let new_cost = announce.hop_count as f64;
|
||||
let now = Instant::now();
|
||||
|
||||
let identity_key: [u8; 32] = match announce.identity_key.as_slice().try_into() {
|
||||
Ok(k) => k,
|
||||
Err(_) => return false,
|
||||
};
|
||||
|
||||
if let Some(existing) = self.entries.get(&address) {
|
||||
// Stale announce — older or same sequence number.
|
||||
if announce.sequence <= existing.last_sequence {
|
||||
return false;
|
||||
}
|
||||
// Only replace if the new route is better or equal (newer sequence wins on tie).
|
||||
if new_cost > existing.cost && announce.sequence == existing.last_sequence + 1 {
|
||||
// Higher cost with only incremental sequence — still update since it's fresher.
|
||||
}
|
||||
}
|
||||
|
||||
let entry = RoutingEntry {
|
||||
identity_key,
|
||||
address,
|
||||
next_hop_transport: received_via_transport.to_string(),
|
||||
next_hop_addr: received_from,
|
||||
hops: announce.hop_count,
|
||||
cost: new_cost,
|
||||
capabilities: announce.capabilities,
|
||||
last_sequence: announce.sequence,
|
||||
last_seen: now,
|
||||
expires_at: now + self.default_ttl,
|
||||
};
|
||||
|
||||
self.entries.insert(address, entry);
|
||||
true
|
||||
}
|
||||
|
||||
/// Look up a routing entry by 16-byte truncated mesh address.
|
||||
pub fn lookup(&self, address: &[u8; 16]) -> Option<&RoutingEntry> {
|
||||
self.entries.get(address)
|
||||
}
|
||||
|
||||
/// Look up a routing entry by the full 32-byte Ed25519 public key.
|
||||
pub fn lookup_by_key(&self, identity_key: &[u8; 32]) -> Option<&RoutingEntry> {
|
||||
self.entries.values().find(|e| &e.identity_key == identity_key)
|
||||
}
|
||||
|
||||
/// Remove all expired entries. Returns the number of entries removed.
|
||||
pub fn remove_expired(&mut self) -> usize {
|
||||
let now = Instant::now();
|
||||
let before = self.entries.len();
|
||||
self.entries.retain(|_, entry| entry.expires_at > now);
|
||||
before - self.entries.len()
|
||||
}
|
||||
|
||||
/// Iterate over all routing entries.
|
||||
pub fn entries(&self) -> impl Iterator<Item = &RoutingEntry> {
|
||||
self.entries.values()
|
||||
}
|
||||
|
||||
/// Number of entries in the routing table.
|
||||
pub fn len(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
/// Whether the routing table is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.entries.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::announce::{compute_address, CAP_RELAY};
    use crate::identity::MeshIdentity;

    /// Build an announce for `identity` with the given sequence and hop count.
    fn make_announce(identity: &MeshIdentity, sequence: u64, hop_count: u8) -> MeshAnnounce {
        let mut announce =
            MeshAnnounce::with_sequence(identity, CAP_RELAY, vec![], 8, sequence);
        announce.hop_count = hop_count;
        announce
    }

    #[test]
    fn insert_and_lookup() {
        let mut table = RoutingTable::new(Duration::from_secs(300));
        let id = MeshIdentity::generate();
        let announce = make_announce(&id, 1, 1);
        let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());

        assert!(table.update(&announce, "tcp", addr.clone()));
        assert_eq!(table.len(), 1);

        // Entries are keyed by the truncated address derived from the key.
        let mesh_addr = compute_address(&id.public_key());
        let entry = table.lookup(&mesh_addr).expect("entry should exist");
        assert_eq!(entry.hops, 1);
        assert_eq!(entry.last_sequence, 1);
        assert_eq!(entry.next_hop_transport, "tcp");
        assert_eq!(entry.next_hop_addr, addr);
    }

    #[test]
    fn update_with_better_route() {
        let mut table = RoutingTable::new(Duration::from_secs(300));
        let id = MeshIdentity::generate();
        let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());

        // First announce: 3 hops, sequence 1.
        let announce1 = make_announce(&id, 1, 3);
        assert!(table.update(&announce1, "tcp", addr.clone()));

        let mesh_addr = compute_address(&id.public_key());
        assert_eq!(table.lookup(&mesh_addr).unwrap().hops, 3);

        // Second announce: 1 hop, sequence 2 — should replace.
        let announce2 = make_announce(&id, 2, 1);
        assert!(table.update(&announce2, "tcp", addr));

        let entry = table.lookup(&mesh_addr).unwrap();
        assert_eq!(entry.hops, 1);
        assert_eq!(entry.last_sequence, 2);
    }

    #[test]
    fn reject_stale_sequence() {
        let mut table = RoutingTable::new(Duration::from_secs(300));
        let id = MeshIdentity::generate();
        let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());

        // Insert with sequence 5.
        let announce1 = make_announce(&id, 5, 1);
        assert!(table.update(&announce1, "tcp", addr.clone()));

        // Try to update with sequence 3 — should be rejected.
        let announce2 = make_announce(&id, 3, 1);
        assert!(
            !table.update(&announce2, "tcp", addr),
            "stale sequence must be rejected"
        );

        // The stored entry keeps the newer sequence number.
        let mesh_addr = compute_address(&id.public_key());
        assert_eq!(table.lookup(&mesh_addr).unwrap().last_sequence, 5);
    }

    #[test]
    fn expire_old_entries() {
        // Deliberately tiny TTL so the entry expires during the test.
        let mut table = RoutingTable::new(Duration::from_millis(1));
        let id = MeshIdentity::generate();
        let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());

        let announce = make_announce(&id, 1, 1);
        table.update(&announce, "tcp", addr);
        assert_eq!(table.len(), 1);

        // Wait for TTL to expire.
        std::thread::sleep(Duration::from_millis(10));

        let removed = table.remove_expired();
        assert_eq!(removed, 1);
        assert!(table.is_empty());
    }

    #[test]
    fn lookup_by_key_works() {
        let mut table = RoutingTable::new(Duration::from_secs(300));
        let id = MeshIdentity::generate();
        let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());

        let announce = make_announce(&id, 1, 2);
        table.update(&announce, "tcp", addr);

        // Full-key lookup must find the same entry as the truncated address.
        let pk = id.public_key();
        let entry = table.lookup_by_key(&pk).expect("should find by key");
        assert_eq!(entry.identity_key, pk);
        assert_eq!(entry.hops, 2);
    }
}
|
||||
470
crates/quicprochat-p2p/src/shutdown.rs
Normal file
470
crates/quicprochat-p2p/src/shutdown.rs
Normal file
@@ -0,0 +1,470 @@
|
||||
//! Graceful shutdown coordination for mesh nodes.
|
||||
//!
|
||||
//! This module provides coordinated shutdown with:
|
||||
//! - Signal handling (SIGTERM, SIGINT, SIGHUP)
|
||||
//! - Connection draining
|
||||
//! - State persistence
|
||||
//! - Cleanup hooks
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU8, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::sync::{broadcast, mpsc, watch, Notify};
|
||||
use tokio::time::timeout;
|
||||
|
||||
/// Lifecycle phase of a node shutdown, listed in the order phases occur.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum ShutdownPhase {
    /// Normal operation.
    Running = 0,
    /// Shutdown initiated, draining connections.
    Draining = 1,
    /// Persisting state.
    Persisting = 2,
    /// Running cleanup hooks.
    Cleanup = 3,
    /// Shutdown complete.
    Complete = 4,
}

impl From<u8> for ShutdownPhase {
    /// Decode a phase from its stored `u8` representation; any value past
    /// the known range decodes to the terminal `Complete` phase.
    fn from(v: u8) -> Self {
        const ORDER: [ShutdownPhase; 5] = [
            ShutdownPhase::Running,
            ShutdownPhase::Draining,
            ShutdownPhase::Persisting,
            ShutdownPhase::Cleanup,
            ShutdownPhase::Complete,
        ];
        ORDER
            .get(usize::from(v))
            .copied()
            .unwrap_or(ShutdownPhase::Complete)
    }
}
|
||||
|
||||
/// Shutdown coordinator.
|
||||
pub struct ShutdownCoordinator {
|
||||
/// Current phase.
|
||||
phase: AtomicU8,
|
||||
/// Shutdown signal broadcast.
|
||||
shutdown_tx: broadcast::Sender<ShutdownPhase>,
|
||||
/// Notify when all tasks complete.
|
||||
all_done: Arc<Notify>,
|
||||
/// Active task count.
|
||||
active_tasks: std::sync::atomic::AtomicUsize,
|
||||
/// Drain timeout.
|
||||
drain_timeout: Duration,
|
||||
/// Persist timeout.
|
||||
persist_timeout: Duration,
|
||||
}
|
||||
|
||||
impl ShutdownCoordinator {
|
||||
pub fn new() -> Self {
|
||||
let (shutdown_tx, _) = broadcast::channel(16);
|
||||
Self {
|
||||
phase: AtomicU8::new(ShutdownPhase::Running as u8),
|
||||
shutdown_tx,
|
||||
all_done: Arc::new(Notify::new()),
|
||||
active_tasks: std::sync::atomic::AtomicUsize::new(0),
|
||||
drain_timeout: Duration::from_secs(30),
|
||||
persist_timeout: Duration::from_secs(10),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_timeouts(drain: Duration, persist: Duration) -> Self {
|
||||
let mut s = Self::new();
|
||||
s.drain_timeout = drain;
|
||||
s.persist_timeout = persist;
|
||||
s
|
||||
}
|
||||
|
||||
/// Get current phase.
|
||||
pub fn phase(&self) -> ShutdownPhase {
|
||||
self.phase.load(Ordering::SeqCst).into()
|
||||
}
|
||||
|
||||
/// Check if shutdown is in progress.
|
||||
pub fn is_shutting_down(&self) -> bool {
|
||||
self.phase() != ShutdownPhase::Running
|
||||
}
|
||||
|
||||
/// Subscribe to shutdown notifications.
|
||||
pub fn subscribe(&self) -> broadcast::Receiver<ShutdownPhase> {
|
||||
self.shutdown_tx.subscribe()
|
||||
}
|
||||
|
||||
/// Register a task.
|
||||
pub fn register_task(&self) -> TaskGuard {
|
||||
self.active_tasks.fetch_add(1, Ordering::SeqCst);
|
||||
TaskGuard {
|
||||
active_tasks: &self.active_tasks,
|
||||
all_done: Arc::clone(&self.all_done),
|
||||
}
|
||||
}
|
||||
|
||||
/// Initiate shutdown.
|
||||
pub async fn shutdown(&self) {
|
||||
// Phase 1: Draining
|
||||
self.set_phase(ShutdownPhase::Draining);
|
||||
|
||||
// Wait for tasks to complete or timeout
|
||||
let drain_result = timeout(
|
||||
self.drain_timeout,
|
||||
self.wait_for_tasks(),
|
||||
).await;
|
||||
|
||||
if drain_result.is_err() {
|
||||
tracing::warn!(
|
||||
"drain timeout reached with {} tasks remaining",
|
||||
self.active_tasks.load(Ordering::SeqCst)
|
||||
);
|
||||
}
|
||||
|
||||
// Phase 2: Persisting
|
||||
self.set_phase(ShutdownPhase::Persisting);
|
||||
|
||||
// Give persist hooks time to run
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Phase 3: Cleanup
|
||||
self.set_phase(ShutdownPhase::Cleanup);
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Complete
|
||||
self.set_phase(ShutdownPhase::Complete);
|
||||
}
|
||||
|
||||
fn set_phase(&self, phase: ShutdownPhase) {
|
||||
self.phase.store(phase as u8, Ordering::SeqCst);
|
||||
let _ = self.shutdown_tx.send(phase);
|
||||
}
|
||||
|
||||
async fn wait_for_tasks(&self) {
|
||||
while self.active_tasks.load(Ordering::SeqCst) > 0 {
|
||||
self.all_done.notified().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ShutdownCoordinator {
    // Equivalent to `new()`: default 30s drain / 10s persist timeouts.
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
/// RAII guard for tracking active tasks.
///
/// Created by `ShutdownCoordinator::register_task`; dropping the guard
/// decrements the shared counter and, when it was the last live task,
/// wakes waiters parked on `all_done`.
pub struct TaskGuard<'a> {
    // Borrowed from the owning coordinator — a guard cannot outlive it.
    active_tasks: &'a std::sync::atomic::AtomicUsize,
    all_done: Arc<Notify>,
}

impl<'a> Drop for TaskGuard<'a> {
    fn drop(&mut self) {
        // fetch_sub returns the *previous* value, so 1 means this guard
        // was the last active task.
        let prev = self.active_tasks.fetch_sub(1, Ordering::SeqCst);
        if prev == 1 {
            self.all_done.notify_waiters();
        }
    }
}
|
||||
|
||||
/// Shutdown handle for use in async tasks.
|
||||
#[derive(Clone)]
|
||||
pub struct ShutdownSignal {
|
||||
/// Watch receiver for shutdown.
|
||||
watch_rx: watch::Receiver<bool>,
|
||||
}
|
||||
|
||||
impl ShutdownSignal {
|
||||
/// Create a new signal pair.
|
||||
pub fn new() -> (ShutdownTrigger, Self) {
|
||||
let (tx, rx) = watch::channel(false);
|
||||
(ShutdownTrigger { watch_tx: tx }, Self { watch_rx: rx })
|
||||
}
|
||||
|
||||
/// Check if shutdown has been triggered.
|
||||
pub fn is_triggered(&self) -> bool {
|
||||
*self.watch_rx.borrow()
|
||||
}
|
||||
|
||||
/// Wait for shutdown signal.
|
||||
pub async fn wait(&mut self) {
|
||||
let _ = self.watch_rx.wait_for(|&triggered| triggered).await;
|
||||
}
|
||||
|
||||
/// Create a future that completes on shutdown.
|
||||
pub fn recv(&mut self) -> impl Future<Output = ()> + '_ {
|
||||
async move {
|
||||
self.wait().await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ShutdownSignal {
    // NOTE(review): the trigger returned by `new()` is dropped here, which
    // closes the watch channel — so `wait()` on a default signal returns
    // immediately even though `is_triggered()` stays false. Confirm this
    // inconsistency is intended.
    fn default() -> Self {
        Self::new().1
    }
}
|
||||
|
||||
/// Trigger for shutdown signal.
|
||||
#[derive(Clone)]
|
||||
pub struct ShutdownTrigger {
|
||||
watch_tx: watch::Sender<bool>,
|
||||
}
|
||||
|
||||
impl ShutdownTrigger {
|
||||
/// Trigger shutdown.
|
||||
pub fn trigger(&self) {
|
||||
let _ = self.watch_tx.send(true);
|
||||
}
|
||||
}
|
||||
|
||||
/// A one-shot async shutdown hook: called exactly once, returns a boxed future.
pub type ShutdownHook =
    Box<dyn FnOnce() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send>;

/// Ordered registries of persist-phase and cleanup-phase hooks.
pub struct ShutdownHooks {
    /// Run during the Persisting phase, in registration order.
    persist_hooks: Vec<ShutdownHook>,
    /// Run during the Cleanup phase, in registration order.
    cleanup_hooks: Vec<ShutdownHook>,
}

impl ShutdownHooks {
    /// Create an empty hook registry.
    pub fn new() -> Self {
        Self {
            persist_hooks: Vec::new(),
            cleanup_hooks: Vec::new(),
        }
    }

    /// Register a persist hook (runs during the Persisting phase).
    pub fn on_persist<F, Fut>(&mut self, f: F)
    where
        F: FnOnce() -> Fut + Send + 'static,
        Fut: Future<Output = ()> + Send + 'static,
    {
        // Adapt the concrete future type to the boxed ShutdownHook shape.
        let hook: ShutdownHook = Box::new(|| Box::pin(f()));
        self.persist_hooks.push(hook);
    }

    /// Register a cleanup hook (runs during the Cleanup phase).
    pub fn on_cleanup<F, Fut>(&mut self, f: F)
    where
        F: FnOnce() -> Fut + Send + 'static,
        Fut: Future<Output = ()> + Send + 'static,
    {
        let hook: ShutdownHook = Box::new(|| Box::pin(f()));
        self.cleanup_hooks.push(hook);
    }

    /// Run and consume all persist hooks, sequentially, in registration order.
    pub async fn run_persist(&mut self) {
        for hook in self.persist_hooks.drain(..) {
            hook().await;
        }
    }

    /// Run and consume all cleanup hooks, sequentially, in registration order.
    pub async fn run_cleanup(&mut self) {
        for hook in self.cleanup_hooks.drain(..) {
            hook().await;
        }
    }
}

impl Default for ShutdownHooks {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
/// Draining connection tracker.
|
||||
pub struct ConnectionDrainer {
|
||||
/// Maximum connections to track.
|
||||
max_connections: usize,
|
||||
/// Active connections.
|
||||
active: std::sync::atomic::AtomicUsize,
|
||||
/// Notify when connection count changes.
|
||||
notify: Notify,
|
||||
/// Stopped accepting new connections.
|
||||
draining: AtomicBool,
|
||||
}
|
||||
|
||||
impl ConnectionDrainer {
|
||||
pub fn new(max_connections: usize) -> Self {
|
||||
Self {
|
||||
max_connections,
|
||||
active: std::sync::atomic::AtomicUsize::new(0),
|
||||
notify: Notify::new(),
|
||||
draining: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to accept a new connection.
|
||||
pub fn try_accept(&self) -> Option<ConnectionGuard<'_>> {
|
||||
if self.draining.load(Ordering::SeqCst) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let current = self.active.fetch_add(1, Ordering::SeqCst);
|
||||
if current >= self.max_connections {
|
||||
self.active.fetch_sub(1, Ordering::SeqCst);
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(ConnectionGuard { drainer: self })
|
||||
}
|
||||
|
||||
/// Start draining (stop accepting new connections).
|
||||
pub fn start_drain(&self) {
|
||||
self.draining.store(true, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
/// Wait for all connections to close.
|
||||
pub async fn wait_drained(&self) {
|
||||
while self.active.load(Ordering::SeqCst) > 0 {
|
||||
self.notify.notified().await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Current connection count.
|
||||
pub fn active_count(&self) -> usize {
|
||||
self.active.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Is draining?
|
||||
pub fn is_draining(&self) -> bool {
|
||||
self.draining.load(Ordering::SeqCst)
|
||||
}
|
||||
}
|
||||
|
||||
/// RAII guard for active connections.
///
/// Returned by `ConnectionDrainer::try_accept`; dropping it releases the
/// slot and wakes any task parked in `wait_drained`.
pub struct ConnectionGuard<'a> {
    // Borrowed from the owning drainer — a guard cannot outlive it.
    drainer: &'a ConnectionDrainer,
}

impl<'a> Drop for ConnectionGuard<'a> {
    fn drop(&mut self) {
        // Release the slot first, then wake waiters so they observe the
        // decremented count.
        self.drainer.active.fetch_sub(1, Ordering::SeqCst);
        self.drainer.notify.notify_waiters();
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn shutdown_phases() {
        let coord = ShutdownCoordinator::with_timeouts(
            Duration::from_millis(100),
            Duration::from_millis(50),
        );

        assert_eq!(coord.phase(), ShutdownPhase::Running);
        assert!(!coord.is_shutting_down());

        // Subscribe before spawning so no phase broadcast can be missed.
        let mut rx = coord.subscribe();

        tokio::spawn(async move {
            coord.shutdown().await;
        });

        // Should receive phase transitions, in order.
        let phase = rx.recv().await.unwrap();
        assert_eq!(phase, ShutdownPhase::Draining);

        let phase = rx.recv().await.unwrap();
        assert_eq!(phase, ShutdownPhase::Persisting);

        let phase = rx.recv().await.unwrap();
        assert_eq!(phase, ShutdownPhase::Cleanup);

        let phase = rx.recv().await.unwrap();
        assert_eq!(phase, ShutdownPhase::Complete);
    }

    #[tokio::test]
    async fn task_tracking() {
        let coord = ShutdownCoordinator::with_timeouts(
            Duration::from_secs(1),
            Duration::from_millis(50),
        );

        let guard1 = coord.register_task();
        let guard2 = coord.register_task();

        assert_eq!(coord.active_tasks.load(Ordering::SeqCst), 2);

        // Dropping a guard decrements the active count.
        drop(guard1);
        assert_eq!(coord.active_tasks.load(Ordering::SeqCst), 1);

        drop(guard2);
        assert_eq!(coord.active_tasks.load(Ordering::SeqCst), 0);
    }

    #[tokio::test]
    async fn shutdown_signal() {
        let (trigger, mut signal) = ShutdownSignal::new();

        assert!(!signal.is_triggered());

        // Park a task on the signal, then fire the trigger to release it.
        let handle = tokio::spawn(async move {
            signal.wait().await;
            true
        });

        trigger.trigger();
        assert!(handle.await.unwrap());
    }

    #[tokio::test]
    async fn connection_drainer() {
        let drainer = ConnectionDrainer::new(2);

        let conn1 = drainer.try_accept().expect("should accept");
        let conn2 = drainer.try_accept().expect("should accept");
        assert!(drainer.try_accept().is_none()); // At capacity

        assert_eq!(drainer.active_count(), 2);

        drop(conn1);
        assert_eq!(drainer.active_count(), 1);

        drainer.start_drain();
        assert!(drainer.try_accept().is_none()); // Draining

        drop(conn2);

        // Should complete immediately
        tokio::time::timeout(
            Duration::from_millis(100),
            drainer.wait_drained(),
        ).await.expect("should drain quickly");
    }

    #[tokio::test]
    async fn shutdown_hooks() {
        use std::sync::atomic::AtomicBool;

        let persist_ran = Arc::new(AtomicBool::new(false));
        let cleanup_ran = Arc::new(AtomicBool::new(false));

        let persist_flag = Arc::clone(&persist_ran);
        let cleanup_flag = Arc::clone(&cleanup_ran);

        let mut hooks = ShutdownHooks::new();
        hooks.on_persist(move || async move {
            persist_flag.store(true, Ordering::SeqCst);
        });
        hooks.on_cleanup(move || async move {
            cleanup_flag.store(true, Ordering::SeqCst);
        });

        // Persist hooks run independently of cleanup hooks.
        hooks.run_persist().await;
        assert!(persist_ran.load(Ordering::SeqCst));
        assert!(!cleanup_ran.load(Ordering::SeqCst));

        hooks.run_cleanup().await;
        assert!(cleanup_ran.load(Ordering::SeqCst));
    }
}
|
||||
@@ -30,7 +30,7 @@ pub struct MeshTrafficConfig {
|
||||
impl Default for MeshTrafficConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
padding_boundary: quicproquo_core::padding::DEFAULT_PADDING_BOUNDARY,
|
||||
padding_boundary: quicprochat_core::padding::DEFAULT_PADDING_BOUNDARY,
|
||||
decoy_interval_ms: 5000,
|
||||
}
|
||||
}
|
||||
@@ -38,7 +38,7 @@ impl Default for MeshTrafficConfig {
|
||||
|
||||
/// Pad a mesh payload to the nearest boundary before wrapping in a [`MeshEnvelope`].
|
||||
pub fn pad_mesh_payload(payload: &[u8], boundary: usize) -> Vec<u8> {
|
||||
quicproquo_core::padding::pad_uniform(payload, boundary)
|
||||
quicprochat_core::padding::pad_uniform(payload, boundary)
|
||||
}
|
||||
|
||||
/// Create a [`MeshEnvelope`] with a uniformly padded payload.
|
||||
@@ -85,7 +85,7 @@ pub fn spawn_mesh_decoy_generator(
|
||||
}
|
||||
|
||||
// Generate a decoy: padded empty payload with a random recipient.
|
||||
let decoy_payload = quicproquo_core::padding::generate_decoy(config.padding_boundary);
|
||||
let decoy_payload = quicprochat_core::padding::generate_decoy(config.padding_boundary);
|
||||
let mut fake_recipient = [0u8; 32];
|
||||
rand::thread_rng().fill(&mut fake_recipient);
|
||||
|
||||
@@ -121,7 +121,7 @@ mod tests {
|
||||
let padded = pad_mesh_payload(payload, 256);
|
||||
assert_eq!(padded.len() % 256, 0);
|
||||
|
||||
let unpadded = quicproquo_core::padding::unpad_uniform(&padded).unwrap();
|
||||
let unpadded = quicprochat_core::padding::unpad_uniform(&padded).unwrap();
|
||||
assert_eq!(unpadded, payload);
|
||||
}
|
||||
|
||||
@@ -136,7 +136,7 @@ mod tests {
|
||||
assert!(env.verify());
|
||||
|
||||
// The inner payload should unpad correctly.
|
||||
let unpadded = quicproquo_core::padding::unpad_uniform(&env.payload).unwrap();
|
||||
let unpadded = quicprochat_core::padding::unpad_uniform(&env.payload).unwrap();
|
||||
assert_eq!(unpadded, b"short");
|
||||
}
|
||||
|
||||
@@ -149,7 +149,7 @@ mod tests {
|
||||
assert_eq!(env.payload.len() % 256, 0);
|
||||
assert_eq!(env.payload.len(), 512); // 500 + 4 = 504, rounds to 512
|
||||
|
||||
let unpadded = quicproquo_core::padding::unpad_uniform(&env.payload).unwrap();
|
||||
let unpadded = quicprochat_core::padding::unpad_uniform(&env.payload).unwrap();
|
||||
assert_eq!(unpadded, payload);
|
||||
}
|
||||
|
||||
289
crates/quicprochat-p2p/src/transport.rs
Normal file
289
crates/quicprochat-p2p/src/transport.rs
Normal file
@@ -0,0 +1,289 @@
|
||||
//! Transport abstraction for pluggable mesh backends.
|
||||
//!
|
||||
//! Every mesh transport (iroh QUIC, TCP, LoRa, Serial) implements the
|
||||
//! [`MeshTransport`] trait. The [`TransportAddr`] enum provides a
|
||||
//! transport-agnostic address type.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
/// Peer address that abstracts over the underlying transport medium.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum TransportAddr {
    /// 32-byte iroh node public key, with optional relay info carried elsewhere.
    Iroh(Vec<u8>),
    /// IP socket endpoint used by TCP/UDP transports.
    Socket(std::net::SocketAddr),
    /// 4-byte LoRa device address.
    LoRa([u8; 4]),
    /// Serial port identifier (e.g. a device path).
    Serial(String),
    /// Opaque address bytes for transports not yet modeled.
    Raw(Vec<u8>),
}
|
||||
|
||||
impl fmt::Display for TransportAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Iroh(id) => write!(f, "iroh:{}", hex::encode(&id[..4.min(id.len())])),
|
||||
Self::Socket(addr) => write!(f, "tcp:{addr}"),
|
||||
Self::LoRa(addr) => write!(f, "lora:{}", hex::encode(addr)),
|
||||
Self::Serial(port) => write!(f, "serial:{port}"),
|
||||
Self::Raw(data) => write!(f, "raw:{}", hex::encode(&data[..4.min(data.len())])),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Transport capability level for crypto mode selection.
|
||||
///
|
||||
/// Ordered from worst to best so max_by_key picks the best transport.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum TransportCapability {
|
||||
/// Very low bandwidth, severely duty-cycled (LoRa SF11-SF12, serial).
|
||||
/// MLS-Lite without signature preferred.
|
||||
SeverelyConstrained = 0,
|
||||
/// Low bandwidth, duty-cycled (LoRa SF7-SF10).
|
||||
/// Classical MLS marginal, prefer MLS-Lite with sig.
|
||||
Constrained = 1,
|
||||
/// Medium bandwidth (BLE, slower WiFi).
|
||||
/// Supports full MLS with classical crypto.
|
||||
Medium = 2,
|
||||
/// High-bandwidth, low-latency (QUIC, TCP, WiFi).
|
||||
/// Supports full MLS with PQ-KEM, large KeyPackages.
|
||||
Unconstrained = 3,
|
||||
}
|
||||
|
||||
impl TransportCapability {
|
||||
/// Determine capability from bitrate and MTU.
|
||||
pub fn from_metrics(bitrate_bps: u64, mtu: usize) -> Self {
|
||||
match (bitrate_bps, mtu) {
|
||||
(b, _) if b >= 1_000_000 => Self::Unconstrained, // ≥1 Mbps
|
||||
(b, m) if b >= 10_000 && m >= 200 => Self::Medium, // ≥10 kbps, decent MTU
|
||||
(b, m) if b >= 1_000 || m >= 100 => Self::Constrained, // ≥1 kbps
|
||||
_ => Self::SeverelyConstrained,
|
||||
}
|
||||
}
|
||||
|
||||
/// Recommended crypto mode for this capability level.
|
||||
pub fn recommended_crypto(&self) -> CryptoMode {
|
||||
match self {
|
||||
Self::Unconstrained => CryptoMode::MlsHybrid,
|
||||
Self::Medium => CryptoMode::MlsClassical,
|
||||
Self::Constrained => CryptoMode::MlsLiteSigned,
|
||||
Self::SeverelyConstrained => CryptoMode::MlsLiteUnsigned,
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether full MLS is viable on this transport.
|
||||
pub fn supports_mls(&self) -> bool {
|
||||
matches!(self, Self::Unconstrained | Self::Medium)
|
||||
}
|
||||
}
|
||||
|
||||
/// Crypto mode for mesh messaging, from heaviest to lightest.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CryptoMode {
    /// Full MLS with X25519 + ML-KEM-768 hybrid.
    MlsHybrid,
    /// Full MLS with classical X25519 only.
    MlsClassical,
    /// MLS-Lite with Ed25519 signature.
    MlsLiteSigned,
    /// MLS-Lite without signature (smallest overhead).
    MlsLiteUnsigned,
}

impl CryptoMode {
    /// Approximate overhead in bytes for this mode.
    pub fn overhead_bytes(&self) -> usize {
        // Approximate encoded sizes of each mode's key material + framing.
        const HYBRID: usize = 2700; // PQ KeyPackage alone
        const CLASSICAL: usize = 400; // Classical KeyPackage + message
        const LITE_SIGNED: usize = 262; // MLS-Lite with sig
        const LITE_UNSIGNED: usize = 129; // MLS-Lite minimal
        match self {
            Self::MlsHybrid => HYBRID,
            Self::MlsClassical => CLASSICAL,
            Self::MlsLiteSigned => LITE_SIGNED,
            Self::MlsLiteUnsigned => LITE_UNSIGNED,
        }
    }
}
|
||||
|
||||
/// Metadata about a transport's capabilities.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct TransportInfo {
|
||||
/// Human-readable transport name.
|
||||
pub name: String,
|
||||
/// Maximum transmission unit in bytes.
|
||||
pub mtu: usize,
|
||||
/// Estimated bitrate in bits/second.
|
||||
pub bitrate: u64,
|
||||
/// Whether this transport supports bidirectional communication.
|
||||
pub bidirectional: bool,
|
||||
}
|
||||
|
||||
impl TransportInfo {
|
||||
/// Compute capability level from this transport's metrics.
|
||||
pub fn capability(&self) -> TransportCapability {
|
||||
TransportCapability::from_metrics(self.bitrate, self.mtu)
|
||||
}
|
||||
|
||||
/// Recommended crypto mode for this transport.
|
||||
pub fn recommended_crypto(&self) -> CryptoMode {
|
||||
self.capability().recommended_crypto()
|
||||
}
|
||||
}
|
||||
|
||||
/// Received packet from a transport.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct TransportPacket {
|
||||
/// Source address of the sender.
|
||||
pub from: TransportAddr,
|
||||
/// Raw packet data.
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
||||
/// A pluggable mesh transport backend.
///
/// Implementations provide send/receive over a specific medium (QUIC, TCP, LoRa, etc).
#[async_trait::async_trait]
pub trait MeshTransport: Send + Sync {
    /// Transport metadata (name, MTU, bitrate).
    fn info(&self) -> TransportInfo;

    /// Send raw bytes to a destination.
    async fn send(&self, dest: &TransportAddr, data: &[u8]) -> Result<()>;

    /// Receive the next incoming packet. Blocks until data arrives.
    async fn recv(&self) -> Result<TransportPacket>;

    /// Discover reachable peers on this transport.
    /// Returns an empty vec if discovery is not supported.
    // Default impl: discovery is optional, so transports that lack it
    // report no peers rather than erroring.
    async fn discover(&self) -> Result<Vec<TransportAddr>> {
        Ok(Vec::new())
    }

    /// Gracefully shut down this transport.
    // Default impl: no-op for transports with nothing to tear down.
    async fn close(&self) -> Result<()> {
        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn transport_addr_display_iroh() {
        // Iroh payloads longer than 4 bytes are truncated in the display form.
        let addr = TransportAddr::Iroh(vec![0xDE, 0xAD, 0xBE, 0xEF, 0x01, 0x02]);
        assert_eq!(addr.to_string(), "iroh:deadbeef");
    }

    #[test]
    fn transport_addr_display_iroh_short() {
        // Payloads shorter than 4 bytes are shown in full.
        let addr = TransportAddr::Iroh(vec![0xAB, 0xCD]);
        assert_eq!(addr.to_string(), "iroh:abcd");
    }

    #[test]
    fn transport_addr_display_socket() {
        let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());
        assert_eq!(addr.to_string(), "tcp:127.0.0.1:9000");
    }

    #[test]
    fn transport_addr_display_lora() {
        let addr = TransportAddr::LoRa([0x01, 0x02, 0x03, 0x04]);
        assert_eq!(addr.to_string(), "lora:01020304");
    }

    #[test]
    fn transport_addr_display_serial() {
        let addr = TransportAddr::Serial("/dev/ttyUSB0".to_string());
        assert_eq!(addr.to_string(), "serial:/dev/ttyUSB0");
    }

    #[test]
    fn transport_addr_display_raw() {
        // Raw payloads are truncated to 4 bytes, like Iroh.
        let addr = TransportAddr::Raw(vec![0xFF, 0xEE, 0xDD, 0xCC, 0xBB]);
        assert_eq!(addr.to_string(), "raw:ffeeddcc");
    }

    #[test]
    fn transport_addr_display_raw_short() {
        let addr = TransportAddr::Raw(vec![0x01]);
        assert_eq!(addr.to_string(), "raw:01");
    }

    #[test]
    fn transport_addr_equality() {
        let a = TransportAddr::Socket("127.0.0.1:8080".parse().unwrap());
        let b = TransportAddr::Socket("127.0.0.1:8080".parse().unwrap());
        let c = TransportAddr::Socket("127.0.0.1:9090".parse().unwrap());
        assert_eq!(a, b);
        assert_ne!(a, c);
    }

    #[test]
    fn capability_ordering() {
        // Higher value = better capability
        assert!(TransportCapability::Unconstrained > TransportCapability::Medium);
        assert!(TransportCapability::Medium > TransportCapability::Constrained);
        assert!(TransportCapability::Constrained > TransportCapability::SeverelyConstrained);

        // max_by_key should pick the best
        let caps = vec![
            TransportCapability::Constrained,
            TransportCapability::Unconstrained,
            TransportCapability::Medium,
        ];
        let best = caps.into_iter().max().unwrap();
        assert_eq!(best, TransportCapability::Unconstrained);
    }

    #[test]
    fn capability_recommended_crypto() {
        // Each capability level maps to exactly one crypto mode.
        assert_eq!(
            TransportCapability::Unconstrained.recommended_crypto(),
            CryptoMode::MlsHybrid
        );
        assert_eq!(
            TransportCapability::Medium.recommended_crypto(),
            CryptoMode::MlsClassical
        );
        assert_eq!(
            TransportCapability::Constrained.recommended_crypto(),
            CryptoMode::MlsLiteSigned
        );
        assert_eq!(
            TransportCapability::SeverelyConstrained.recommended_crypto(),
            CryptoMode::MlsLiteUnsigned
        );
    }

    #[test]
    fn transport_info_capability() {
        // High-bandwidth link classifies as Unconstrained.
        let tcp_info = TransportInfo {
            name: "tcp".to_string(),
            mtu: 1500,
            bitrate: 100_000_000, // 100 Mbps
            bidirectional: true,
        };
        assert_eq!(tcp_info.capability(), TransportCapability::Unconstrained);
        assert_eq!(tcp_info.recommended_crypto(), CryptoMode::MlsHybrid);

        // Tiny-MTU, slow link classifies as SeverelyConstrained.
        let lora_info = TransportInfo {
            name: "lora".to_string(),
            mtu: 51,
            bitrate: 300,
            bidirectional: true,
        };
        assert_eq!(lora_info.capability(), TransportCapability::SeverelyConstrained);
        assert_eq!(lora_info.recommended_crypto(), CryptoMode::MlsLiteUnsigned);
    }

    #[test]
    fn crypto_mode_overhead() {
        // Overheads are strictly ordered from hybrid down to unsigned lite.
        assert!(CryptoMode::MlsHybrid.overhead_bytes() > 2000);
        assert!(CryptoMode::MlsClassical.overhead_bytes() < 500);
        assert!(CryptoMode::MlsLiteSigned.overhead_bytes() < 300);
        assert!(CryptoMode::MlsLiteUnsigned.overhead_bytes() < 150);
    }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user