Compare commits
111 Commits
feat/m1-no
...
9b09f09892
| Author | SHA1 | Date | |
|---|---|---|---|
| 9b09f09892 | |||
| 92fefda41d | |||
| 84ec822823 | |||
| 01bc2a4273 | |||
| f9ac921a0c | |||
| d469999c2a | |||
| f0901f6597 | |||
| 543bd442a3 | |||
| 266bcfed59 | |||
| c256c38ffb | |||
| 416618f4cf | |||
| 872695e5f1 | |||
| e4c5868b31 | |||
| 66eca065e0 | |||
| a05da9b751 | |||
| 077f48f19c | |||
| 3708b8df41 | |||
| b98dcc27ae | |||
| 2e081ead8e | |||
| a710037dde | |||
| d8c1392587 | |||
| a9d1f535aa | |||
| aa29d3bc34 | |||
| 2a9f0b43ef | |||
| d073f614b3 | |||
| f7a7f672b4 | |||
| 189534c511 | |||
| 501f5a577c | |||
| 5cc37cc88b | |||
| 1d59a052ad | |||
| 12717979ba | |||
| 3f5a3a5ac8 | |||
| 511fc7822e | |||
| f57dda3f36 | |||
| cbb76af6b1 | |||
| 2d56824834 | |||
| 496f83067a | |||
| 1768f85258 | |||
| f667281831 | |||
| 372dd67a3b | |||
| 49e8e066d7 | |||
| f4621b3425 | |||
| c401caec60 | |||
| 885cce0d7d | |||
| 913f6faaf3 | |||
| e93a38243f | |||
| 91c5495ab7 | |||
| b94248b3b6 | |||
| a90020fe89 | |||
| fd1accc6dd | |||
| 799aab68fe | |||
| eaca24397b | |||
| 12b19b6931 | |||
| 5b6d8209f0 | |||
| a1f0dbc514 | |||
| 5a66c2e954 | |||
| 4013b223ff | |||
| 3a42130518 | |||
| c8c5f96ecd | |||
| e5329ee8e5 | |||
| e3dfc43e2c | |||
| 7bcfbf175c | |||
| 75f11cb76b | |||
| a3f67aca45 | |||
| 950f477842 | |||
| 3393514911 | |||
| a8ed3c4356 | |||
| cab03bd3f7 | |||
| 99f9abe9ed | |||
| 029c701780 | |||
| 4d62a837a5 | |||
| 67983c7a40 | |||
| 011ff541bb | |||
| 918da0c23d | |||
| 6b757f8d65 | |||
| d118fdbddf | |||
| 6273ab668d | |||
| f09dbe10ce | |||
| ff93275dc1 | |||
| a5864127d1 | |||
| 394199b19b | |||
| 4694a3098b | |||
| 4454458e38 | |||
| 5a6d9ae7f4 | |||
| 9244e80ec7 | |||
| 1b61b7ee8f | |||
| 28ceaaf072 | |||
| 65ff26235e | |||
| fd21ea625c | |||
| 3350d765e5 | |||
| 81d5e2e590 | |||
| db46b72f58 | |||
| 9ab306d891 | |||
| 612b06aa8e | |||
| dc4e4e49a0 | |||
| b6483dedbc | |||
| d7e530435f | |||
| c8398d6cb7 | |||
| e24497bf90 | |||
| c2762f93f6 | |||
| 5b9df61194 | |||
| 9525a3c565 | |||
| 853ca4fec0 | |||
| 553de3a2b7 | |||
| 4c1e4683e3 | |||
| 750b794342 | |||
| 6b8b61c6ae | |||
| b5b361e2ff | |||
| 96f4128b32 | |||
| 230205a152 | |||
| 00b0aa92a1 |
20
.env.example
Normal file
20
.env.example
Normal file
@@ -0,0 +1,20 @@
|
||||
# quicprochat Production Environment Variables
|
||||
# Copy this file to .env and fill in the values.
|
||||
|
||||
# Server auth token (required, >= 16 characters)
|
||||
QPC_AUTH_TOKEN=
|
||||
|
||||
# SQLCipher database encryption key (required for store_backend=sql)
|
||||
QPC_DB_KEY=
|
||||
|
||||
# Ports (defaults shown)
|
||||
QPC_LISTEN_PORT=7000
|
||||
QPC_WS_PORT=9000
|
||||
|
||||
# Optional features
|
||||
QPC_SEALED_SENDER=false
|
||||
QPC_REDACT_LOGS=true
|
||||
QPC_WS_LISTEN=
|
||||
|
||||
# Grafana admin password (required — must be strong, no default)
|
||||
GRAFANA_ADMIN_PASSWORD=
|
||||
134
.gitea/workflows/claude.yml
Normal file
134
.gitea/workflows/claude.yml
Normal file
@@ -0,0 +1,134 @@
|
||||
name: Claude Code Assistant
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, labeled]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
concurrency:
|
||||
group: claude-${{ github.event.issue.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
claude-code:
|
||||
if: >-
|
||||
(github.event_name == 'issues' &&
|
||||
contains(toJSON(github.event.issue.labels), 'claude')) ||
|
||||
(github.event_name == 'issue_comment' &&
|
||||
contains(github.event.comment.body, '@claude') &&
|
||||
github.event.comment.user.login != 'admin')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run Claude on Issue
|
||||
env:
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
GIT_TOKEN: ${{ secrets.GIT_TOKEN }}
|
||||
run: |
|
||||
set +e
|
||||
|
||||
# Configure git
|
||||
git config user.name "Claude Bot"
|
||||
git config user.email "claude@localhost"
|
||||
git remote set-url origin "http://admin:${GIT_TOKEN}@localhost:3000/${{ github.repository }}.git"
|
||||
|
||||
ISSUE_NUMBER="${{ github.event.issue.number }}"
|
||||
ISSUE_TITLE="${{ github.event.issue.title }}"
|
||||
REPO="${{ github.repository }}"
|
||||
LABELS_JSON='${{ toJSON(github.event.issue.labels) }}'
|
||||
|
||||
# Determine model + cost limits from issue labels
|
||||
# Default: haiku (cheap). Add claude:sonnet or claude:opus for harder tasks.
|
||||
CLAUDE_MODEL="haiku"
|
||||
MAX_TURNS=15
|
||||
MAX_BUDGET="0.50"
|
||||
EFFORT="low"
|
||||
if echo "$LABELS_JSON" | grep -q '"claude:opus"'; then
|
||||
CLAUDE_MODEL="claude-opus-4-6"
|
||||
MAX_TURNS=40
|
||||
MAX_BUDGET="5.00"
|
||||
EFFORT="high"
|
||||
elif echo "$LABELS_JSON" | grep -q '"claude:sonnet"'; then
|
||||
CLAUDE_MODEL="claude-sonnet-4-6"
|
||||
MAX_TURNS=25
|
||||
MAX_BUDGET="2.00"
|
||||
EFFORT="medium"
|
||||
fi
|
||||
|
||||
ISSUE_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
|
||||
|
||||
COMMENT_BODY=""
|
||||
if [ "${{ github.event_name }}" = "issue_comment" ]; then
|
||||
COMMENT_ID="${{ github.event.comment.id }}"
|
||||
COMMENT_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/comments/${COMMENT_ID}" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
|
||||
fi
|
||||
|
||||
BRANCH="claude/issue-${ISSUE_NUMBER}"
|
||||
git checkout -b "${BRANCH}"
|
||||
|
||||
# Run Claude Code with cost controls
|
||||
claude -p "You are working on the repository ${REPO} (Gitea instance at http://localhost:3000).
|
||||
A Gitea issue needs your attention:
|
||||
|
||||
Issue #${ISSUE_NUMBER}: ${ISSUE_TITLE}
|
||||
Description: ${ISSUE_BODY}
|
||||
Additional context: ${COMMENT_BODY}
|
||||
|
||||
IMPORTANT RULES:
|
||||
- Do NOT retry failed commands more than once. If something fails twice, stop and report the error.
|
||||
- Do NOT loop on failing tests. Fix the obvious issue or report it. Never run the same failing command 3+ times.
|
||||
- If you cannot complete the task, push what you have, create the PR as draft, and explain what is blocked.
|
||||
- Be efficient: read only files you need, make targeted edits, avoid unnecessary exploration.
|
||||
|
||||
Steps:
|
||||
1. Read and understand the relevant parts of the codebase
|
||||
2. Implement the requested changes
|
||||
3. Commit your changes with a descriptive message
|
||||
4. Push branch ${BRANCH} to origin
|
||||
5. Create a pull request targeting main that references issue #${ISSUE_NUMBER}
|
||||
6. Post a comment on issue #${ISSUE_NUMBER} summarizing what you did
|
||||
|
||||
Git is configured. You are on branch ${BRANCH}. Work in the current directory.
|
||||
Use git commands to push, and curl to the Gitea API for PR creation and comments.
|
||||
Gitea API token is available as env var GIT_TOKEN." \
|
||||
--allowedTools "Bash,Read,Edit,Write,Glob,Grep" \
|
||||
--model "${CLAUDE_MODEL}" \
|
||||
--max-turns "${MAX_TURNS}" \
|
||||
--max-budget-usd "${MAX_BUDGET}" \
|
||||
--effort "${EFFORT}" \
|
||||
--permission-mode bypassPermissions \
|
||||
--output-format json 2>&1 > /tmp/claude-result.json
|
||||
|
||||
CLAUDE_EXIT=$?
|
||||
|
||||
# Extract cost from JSON output
|
||||
COST=$(python3 -c "
|
||||
import json
|
||||
with open('/tmp/claude-result.json') as f:
|
||||
data = json.load(f)
|
||||
cost = data.get('total_cost_usd', 0)
|
||||
print(f'\${cost:.4f}')
|
||||
" 2>/dev/null || echo "unknown")
|
||||
|
||||
# Amend the last commit to include cost and model
|
||||
if git log --oneline main..HEAD 2>/dev/null | head -1 | grep -q .; then
|
||||
LAST_MSG=$(git log -1 --format=%B)
|
||||
git commit --amend -m "${LAST_MSG}
|
||||
|
||||
Claude model: ${CLAUDE_MODEL} | API cost: ${COST}" --no-verify
|
||||
git push origin "${BRANCH}" --force
|
||||
fi
|
||||
|
||||
# Post cost as comment
|
||||
curl -s -X POST "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}/comments" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"body\": \"Done (model: **${CLAUDE_MODEL}**, effort: ${EFFORT}, budget cap: \$${MAX_BUDGET}). API cost: **${COST}**\"}" > /dev/null
|
||||
|
||||
exit ${CLAUDE_EXIT}
|
||||
37
.github/CODEOWNERS
vendored
Normal file
37
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
# Code owners for quicprochat. PRs require review from owners.
|
||||
# See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||
# Replace 'maintainers' with your GitHub user/team handle.
|
||||
|
||||
# Default owners for everything in the repo
|
||||
* @maintainers
|
||||
|
||||
# Security-critical: crypto primitives, MLS, hybrid KEM
|
||||
/crates/quicprochat-core/ @maintainers
|
||||
|
||||
# Wire format: protobuf definitions, Cap'n Proto schemas
|
||||
/crates/quicprochat-proto/ @maintainers
|
||||
/proto/ @maintainers
|
||||
|
||||
# Auth and server-side domain logic
|
||||
/crates/quicprochat-server/ @maintainers
|
||||
|
||||
# Client SDK: auth, conversation store, messaging pipeline
|
||||
/crates/quicprochat-sdk/ @maintainers
|
||||
|
||||
# CLI/TUI client
|
||||
/crates/quicprochat-client/ @maintainers
|
||||
|
||||
# RPC framework: framing, middleware, QUIC transport
|
||||
/crates/quicprochat-rpc/ @maintainers
|
||||
|
||||
# Key transparency
|
||||
/crates/quicprochat-kt/ @maintainers
|
||||
|
||||
# Plugin ABI (no_std C-ABI boundary)
|
||||
/crates/quicprochat-plugin-api/ @maintainers
|
||||
|
||||
# P2P transport
|
||||
/crates/quicprochat-p2p/ @maintainers
|
||||
|
||||
# CI and infrastructure
|
||||
/.github/ @maintainers
|
||||
9
.github/INSTRUCTIONS.md
vendored
Normal file
9
.github/INSTRUCTIONS.md
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
# Internal Engineering Guidelines
|
||||
|
||||
## Rust file sizing and layout
|
||||
- Soft cap: keep Rust source files at or below ~400 lines; if a change would exceed that, split into modules first.
|
||||
- Hard cap: avoid exceeding 650 lines in any Rust file; refactor before merging (main.rs should stay <350 lines).
|
||||
- Single-responsibility: group code by concern (config, TLS/setup, auth/session, storage adapters, RPC handlers, CLI parsing) instead of piling into one file.
|
||||
- Structure new features as small modules wired from the entrypoint rather than expanding existing large files.
|
||||
- Co-locate unit tests with their module; keep integration tests in `crates/*/tests` with focused scopes.
|
||||
- Prefer descriptive module names and re-exports over deep `mod` trees that hide logic in `main.rs`.
|
||||
54
.github/workflows/bench.yml
vendored
Normal file
54
.github/workflows/bench.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
name: Benchmarks
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, v2]
|
||||
pull_request:
|
||||
branches: [main, v2]
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
bench:
|
||||
name: Run Criterion benchmarks
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-bench-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-bench-
|
||||
|
||||
- name: Run benchmarks
|
||||
run: cargo bench --package quicprochat-core -- --output-format=bencher 2>&1 | tee bench-output.txt
|
||||
|
||||
- name: Upload HTML reports
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: criterion-reports
|
||||
path: target/criterion/
|
||||
retention-days: 30
|
||||
|
||||
- name: Upload raw output
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: bench-output
|
||||
path: bench-output.txt
|
||||
retention-days: 30
|
||||
174
.github/workflows/ci.yml
vendored
Normal file
174
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, master, v2]
|
||||
pull_request:
|
||||
branches: [main, master, v2]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
with:
|
||||
components: clippy, rustfmt
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Check format
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
- name: Build
|
||||
run: cargo build --workspace
|
||||
|
||||
- name: Test
|
||||
run: cargo test --workspace
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --workspace --all-targets -- -D warnings
|
||||
|
||||
deny:
|
||||
name: cargo-deny
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install cargo-deny
|
||||
run: cargo install cargo-deny --locked
|
||||
|
||||
- name: Check deny
|
||||
run: cargo deny check
|
||||
|
||||
audit:
|
||||
name: cargo-audit
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Run audit
|
||||
run: |
|
||||
cargo install cargo-audit --locked
|
||||
cargo audit
|
||||
|
||||
coverage:
|
||||
name: Coverage
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-coverage-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-coverage-
|
||||
|
||||
- name: Install cargo-tarpaulin
|
||||
run: cargo install cargo-tarpaulin
|
||||
|
||||
- name: Run coverage
|
||||
run: |
|
||||
cargo tarpaulin --workspace \
|
||||
--exclude quicprochat-p2p \
|
||||
--out xml \
|
||||
--output-dir coverage/ \
|
||||
-- --test-threads 1
|
||||
|
||||
- name: Upload coverage report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage/cobertura.xml
|
||||
|
||||
msrv:
|
||||
name: MSRV Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install MSRV Rust (1.75)
|
||||
uses: dtolnay/rust-action@1.75
|
||||
with:
|
||||
components: clippy
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-msrv-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-msrv-
|
||||
|
||||
- name: Check MSRV
|
||||
run: cargo check --workspace
|
||||
|
||||
macos:
|
||||
name: macOS Build Check
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Check build
|
||||
run: cargo check --workspace
|
||||
|
||||
docker:
|
||||
name: Docker Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Build Docker image
|
||||
run: docker build -f docker/Dockerfile .
|
||||
65
.github/workflows/openwrt.yml
vendored
Normal file
65
.github/workflows/openwrt.yml
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
name: OpenWrt Cross-Compile
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
MAX_SIZE_MB: 5
|
||||
|
||||
jobs:
|
||||
cross-compile:
|
||||
name: Cross-compile (${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target:
|
||||
- x86_64-unknown-linux-musl
|
||||
- armv7-unknown-linux-musleabihf
|
||||
- aarch64-unknown-linux-musl
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Install cargo-zigbuild and zig
|
||||
run: |
|
||||
pip3 install ziglang
|
||||
cargo install cargo-zigbuild
|
||||
|
||||
- name: Add target
|
||||
run: rustup target add ${{ matrix.target }}
|
||||
|
||||
- name: Build (size-optimised)
|
||||
env:
|
||||
CARGO_PROFILE_RELEASE_OPT_LEVEL: s
|
||||
CARGO_PROFILE_RELEASE_LTO: 'true'
|
||||
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: '1'
|
||||
CARGO_PROFILE_RELEASE_STRIP: symbols
|
||||
run: |
|
||||
cargo zigbuild --release --target ${{ matrix.target }} --bin qpc-server
|
||||
|
||||
- name: Check binary size
|
||||
run: |
|
||||
BINARY="target/${{ matrix.target }}/release/qpc-server"
|
||||
SIZE=$(stat -c%s "$BINARY")
|
||||
SIZE_MB=$(echo "scale=2; $SIZE / 1048576" | bc)
|
||||
echo "Binary size: ${SIZE_MB} MB"
|
||||
MAX_BYTES=$(( ${{ env.MAX_SIZE_MB }} * 1048576 ))
|
||||
if [ "$SIZE" -gt "$MAX_BYTES" ]; then
|
||||
echo "::error::Binary exceeds ${MAX_SIZE_MB} MB limit (${SIZE_MB} MB)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: qpc-server-${{ matrix.target }}
|
||||
path: target/${{ matrix.target }}/release/qpc-server
|
||||
retention-days: 30
|
||||
16
.github/workflows/size-lint.yml
vendored
Normal file
16
.github/workflows/size-lint.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
name: rust-file-size-lint
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [ main ]
|
||||
|
||||
jobs:
|
||||
check-rust-file-sizes:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run rust file-size guardrail
|
||||
run: bash scripts/check_rust_file_sizes.sh
|
||||
24
.gitignore
vendored
24
.gitignore
vendored
@@ -1,5 +1,29 @@
|
||||
/target
|
||||
**/target/
|
||||
node_modules/
|
||||
**/*.rs.bk
|
||||
.vscode/
|
||||
gitea-mcp.json
|
||||
docs/book/
|
||||
|
||||
# Server/client runtime data — do not commit certs, keys, or DBs
|
||||
data/
|
||||
*.der
|
||||
*.pem
|
||||
*.db
|
||||
*.bin
|
||||
*.ks
|
||||
*.session
|
||||
*.convdb
|
||||
*.convdb-shm
|
||||
*.convdb-wal
|
||||
*.pending.ks
|
||||
qpc-server.toml
|
||||
|
||||
# Internal planning docs (not for public distribution)
|
||||
docs/internal/
|
||||
|
||||
# AI development workflow files
|
||||
CLAUDE.md
|
||||
master-prompt.md
|
||||
scripts/ai_team.py
|
||||
|
||||
40
CONTRIBUTING.md
Normal file
40
CONTRIBUTING.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Contributing to quicprochat
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Rust toolchain** (stable) via [rustup](https://rustup.rs/)
|
||||
- **protoc** is vendored via the `protobuf-src` crate -- no system installation needed
|
||||
- Git with GPG signing configured
|
||||
|
||||
## Building and Testing
|
||||
|
||||
```sh
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
```
|
||||
|
||||
A `justfile` is also available for common tasks (`just build`, `just test`, `just proto`, etc.).
|
||||
|
||||
## Code Standards
|
||||
|
||||
### Commits
|
||||
|
||||
- **Conventional commits**: `feat:`, `fix:`, `docs:`, `chore:`, `test:`, `refactor:`
|
||||
- Commits must be **GPG-signed**
|
||||
- Commit messages describe *why*, not just *what*
|
||||
- No `Co-authored-by` trailers
|
||||
|
||||
### Rust
|
||||
|
||||
- No `.unwrap()` on crypto or I/O operations outside of tests
|
||||
- Secrets must be zeroized on drop and never logged
|
||||
- No stubs, `todo!()`, or `unimplemented!()` in production code
|
||||
- Prefer clarity over cleverness; avoid unnecessary abstractions
|
||||
|
||||
## Security Vulnerabilities
|
||||
|
||||
Do not open public issues for security bugs. See [SECURITY.md](SECURITY.md) for responsible disclosure instructions.
|
||||
|
||||
## Licensing
|
||||
|
||||
The server crate (`quicprochat-server`) is licensed under **AGPL-3.0**. All other crates are dual-licensed under **Apache-2.0 / MIT**. By submitting a contribution, you agree to license your work under the applicable license(s).
|
||||
2186
Cargo.lock
generated
2186
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
67
Cargo.toml
67
Cargo.toml
@@ -1,30 +1,46 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/quicnprotochat-core",
|
||||
"crates/quicnprotochat-proto",
|
||||
"crates/quicnprotochat-server",
|
||||
"crates/quicnprotochat-client",
|
||||
"crates/quicnprotochat-p2p",
|
||||
"crates/quicprochat-core",
|
||||
"crates/quicprochat-proto",
|
||||
"crates/quicprochat-plugin-api",
|
||||
"crates/quicprochat-kt",
|
||||
"crates/quicprochat-rpc",
|
||||
"crates/quicprochat-sdk",
|
||||
"crates/quicprochat-server",
|
||||
"crates/quicprochat-client",
|
||||
# P2P crate uses iroh (~90 extra deps). Only compiled when the `mesh`
|
||||
# feature is enabled on quicprochat-client.
|
||||
"crates/quicprochat-p2p",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
edition = "2021"
|
||||
rust-version = "1.75"
|
||||
repository = "https://github.com/quicprochat/quicprochat"
|
||||
description = "End-to-end encrypted group messaging over QUIC"
|
||||
keywords = ["encryption", "messaging", "quic", "mls", "post-quantum"]
|
||||
categories = ["cryptography", "network-programming"]
|
||||
|
||||
# Shared dependency versions — bump here to affect the whole workspace.
|
||||
[workspace.dependencies]
|
||||
|
||||
# ── Crypto ────────────────────────────────────────────────────────────────────
|
||||
openmls = { version = "0.5", default-features = false, features = ["crypto-subtle"] }
|
||||
openmls_rust_crypto = { version = "0.2" }
|
||||
openmls_traits = { version = "0.2" }
|
||||
# tls_codec must match the version used by openmls 0.5 (which uses 0.3) to avoid
|
||||
openmls = { version = "0.8" }
|
||||
openmls_rust_crypto = { version = "0.5" }
|
||||
openmls_traits = { version = "0.5" }
|
||||
openmls_memory_storage = { version = "0.5" }
|
||||
# tls_codec must match the version used by openmls 0.8 (which uses 0.4) to avoid
|
||||
# duplicate Serialize trait versions in the dependency graph.
|
||||
tls_codec = { version = "0.3", features = ["derive"] }
|
||||
tls_codec = { version = "0.4", features = ["derive"] }
|
||||
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
|
||||
# All three parameter sets (512/768/1024) are compiled in by default — no feature flag needed.
|
||||
ml-kem = { version = "0.2" }
|
||||
x25519-dalek = { version = "2", features = ["static_secrets"] }
|
||||
ed25519-dalek = { version = "2", features = ["rand_core"] }
|
||||
sha2 = { version = "0.10" }
|
||||
hmac = { version = "0.12" }
|
||||
hkdf = { version = "0.12" }
|
||||
ciborium = { version = "0.2" }
|
||||
chacha20poly1305 = { version = "0.10" }
|
||||
opaque-ke = { version = "4", features = ["ristretto255", "argon2"] }
|
||||
zeroize = { version = "1", features = ["derive", "serde"] }
|
||||
@@ -35,12 +51,17 @@ serde = { version = "1", features = ["derive"] }
|
||||
serde_json = { version = "1" }
|
||||
bincode = { version = "1" }
|
||||
|
||||
# ── Serialisation + RPC ───────────────────────────────────────────────────────
|
||||
# ── Serialisation (v2: protobuf) ─────────────────────────────────────────────
|
||||
prost = { version = "0.13" }
|
||||
prost-types = { version = "0.13" }
|
||||
prost-build = { version = "0.13" }
|
||||
|
||||
# ── Serialisation (v1 legacy — capnp, used by existing server/client) ────────
|
||||
capnp = { version = "0.19" }
|
||||
capnp-rpc = { version = "0.19" }
|
||||
|
||||
# ── Async / networking ────────────────────────────────────────────────────────
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "signal", "io-util", "io-std"] }
|
||||
tokio-util = { version = "0.7", features = ["codec", "compat"] }
|
||||
futures = { version = "0.3" }
|
||||
quinn = { version = "0.11" }
|
||||
@@ -48,9 +69,16 @@ quinn-proto = { version = "0.11" }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "ring"] }
|
||||
rcgen = { version = "0.13" }
|
||||
|
||||
# ── Middleware ────────────────────────────────────────────────────────────────
|
||||
tower = { version = "0.5", features = ["util", "limit", "timeout"] }
|
||||
|
||||
# ── Database ─────────────────────────────────────────────────────────────
|
||||
rusqlite = { version = "0.31", features = ["bundled-sqlcipher"] }
|
||||
|
||||
# ── Encoding ─────────────────────────────────────────────────────────────────
|
||||
hex = { version = "0.4" }
|
||||
bytes = { version = "1" }
|
||||
|
||||
# ── Server utilities ──────────────────────────────────────────────────────────
|
||||
dashmap = { version = "5" }
|
||||
tracing = { version = "0.1" }
|
||||
@@ -60,12 +88,23 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
anyhow = { version = "1" }
|
||||
thiserror = { version = "1" }
|
||||
|
||||
# ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
# ── Config / CLI ──────────────────────────────────────────────────────────────
|
||||
toml = { version = "0.8" }
|
||||
clap = { version = "4", features = ["derive", "env"] }
|
||||
rustyline = { version = "14" }
|
||||
|
||||
# ── Certificate parsing ──────────────────────────────────────────────────────
|
||||
x509-parser = { version = "0.16", default-features = false }
|
||||
|
||||
# ── Build-time ────────────────────────────────────────────────────────────────
|
||||
capnpc = { version = "0.19" }
|
||||
|
||||
[workspace.lints.rust]
|
||||
unsafe_code = "warn"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
unwrap_used = "deny"
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
lto = "thin"
|
||||
|
||||
30
LICENSE
Normal file
30
LICENSE
Normal file
@@ -0,0 +1,30 @@
|
||||
quicproquo — Split Licensing
|
||||
============================
|
||||
|
||||
This project uses a split license model similar to Signal:
|
||||
|
||||
Server (quicproquo-server)
|
||||
--------------------------
|
||||
Licensed under the GNU Affero General Public License v3.0 only.
|
||||
See LICENSE-AGPL-3.0 for the full text.
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
Libraries and SDKs (all other crates)
|
||||
--------------------------------------
|
||||
Licensed under either of
|
||||
|
||||
* Apache License, Version 2.0 (LICENSE-APACHE)
|
||||
* MIT License (LICENSE-MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||
|
||||
Contribution
|
||||
------------
|
||||
Unless you explicitly state otherwise, any contribution intentionally
|
||||
submitted for inclusion in this project by you, as defined in the
|
||||
Apache-2.0 license, shall be dual licensed as above (for library crates)
|
||||
or AGPL-3.0-only (for the server crate), without any additional terms or
|
||||
conditions.
|
||||
661
LICENSE-AGPL-3.0
Normal file
661
LICENSE-AGPL-3.0
Normal file
@@ -0,0 +1,661 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
199
LICENSE-APACHE
Normal file
199
LICENSE-APACHE
Normal file
@@ -0,0 +1,199 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to the Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by the Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding any notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
21
LICENSE-MIT
Normal file
21
LICENSE-MIT
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) quicproquo contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
291
README.md
291
README.md
@@ -1,116 +1,219 @@
|
||||
# quicnprotochat
|
||||
<p align="center">
|
||||
<img src="assets/logo.png" alt="quicprochat" width="160">
|
||||
</p>
|
||||
|
||||
> End-to-end encrypted group messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust.
|
||||
<h1 align="center">quicprochat</h1>
|
||||
|
||||
Every byte on the wire is protected by a QUIC transport secured with TLS 1.3
|
||||
(`quinn` + `rustls`). The inner **MLS** layer provides post-compromise security
|
||||
and ratcheted group key agreement across any number of participants. Messages
|
||||
are framed with **Cap'n Proto**, keeping serialisation zero-copy and
|
||||
schema-versioned.
|
||||
<p align="center">
|
||||
<strong>End-to-end encrypted group messaging over QUIC, powered by MLS and post-quantum cryptography.</strong>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="docs/src/design-rationale/messenger-comparison.md">Why quicprochat?</a> ·
|
||||
<a href="ROADMAP.md">Roadmap</a> ·
|
||||
<a href="docs/sdk/index.md">SDK Docs</a> ·
|
||||
<a href="docs/operations/monitoring.md">Operations</a> ·
|
||||
<a href="#quick-start">Quick Start</a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
quicprochat is a production-grade messenger where the server **never sees plaintext**. All traffic flows over QUIC/TLS 1.3, group keys are negotiated with the [MLS protocol](https://www.rfc-editor.org/rfc/rfc9420) (RFC 9420), and a hybrid X25519 + ML-KEM-768 KEM provides post-quantum confidentiality. Written in Rust. 45,000 lines of code. 301 tests.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Application / MLS ciphertext │ <- group key ratchet (RFC 9420)
|
||||
├─────────────────────────────────────────────┤
|
||||
│ Cap'n Proto RPC │ <- typed, schema-versioned framing
|
||||
├─────────────────────────────────────────────┤
|
||||
│ QUIC + TLS 1.3 (quinn/rustls) │ <- mutual auth + transport secrecy
|
||||
└─────────────────────────────────────────────┘
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ Application / MLS ciphertext │ ← group key ratchet (RFC 9420)
|
||||
├─────────────────────────────────────────────────┤
|
||||
│ Protobuf RPC / Cap'n Proto (legacy) │ ← typed, schema-versioned framing
|
||||
├─────────────────────────────────────────────────┤
|
||||
│ QUIC + TLS 1.3 (quinn/rustls) │ ← mutual auth + transport secrecy
|
||||
└─────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Highlights
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| **Zero-knowledge server** | Routes opaque MLS ciphertexts by recipient key — never decrypts |
|
||||
| **Post-quantum ready** | Hybrid X25519 + ML-KEM-768 KEM on both MLS and Noise layers |
|
||||
| **Password auth** | OPAQUE — password never leaves the client, not even as a hash |
|
||||
| **Forward secrecy** | MLS epoch ratchet: compromise today can't decrypt yesterday |
|
||||
| **Multi-device** | Per-device keys, delivery fan-out, up to 5 devices per account |
|
||||
| **Federation** | Server-to-server relay over QUIC with mTLS |
|
||||
| **Offline-first** | Client-side outbox with idempotent retry and gap detection |
|
||||
| **Sealed sender** | Optional anonymous enqueue — server can't see who sent a message |
|
||||
| **7 SDKs** | Rust, Go, Python, TypeScript, Swift, Kotlin/Java, Ruby |
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Build (no system dependencies — protoc is vendored)
|
||||
cargo build --workspace
|
||||
|
||||
# Run tests
|
||||
cargo test --workspace
|
||||
|
||||
# Start the server (auto-generates self-signed TLS cert)
|
||||
cargo run --bin qpc-server -- --allow-insecure-auth
|
||||
|
||||
# Interactive REPL (registers + logs in automatically)
|
||||
cargo run --bin qpc -- repl --username alice --password secret
|
||||
```
|
||||
|
||||
**Two-terminal demo:**
|
||||
|
||||
```bash
|
||||
# Terminal 1 # Terminal 2
|
||||
qpc repl -u alice -p secretA qpc repl -u bob -p secretB
|
||||
|
||||
# Alice: # Bob sees:
|
||||
/dm bob [alice] Hello, Bob!
|
||||
Hello, Bob!
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
quicprochat/
|
||||
├── crates/
|
||||
│ ├── quicprochat-core # MLS, hybrid KEM, PQ Noise, OPAQUE, recovery, padding
|
||||
│ ├── quicprochat-proto # Protobuf (prost) + Cap'n Proto generated types
|
||||
│ ├── quicprochat-rpc # QUIC RPC framework (framing, dispatch, middleware)
|
||||
│ ├── quicprochat-sdk # Client SDK (QpqClient, conversation store, outbox)
|
||||
│ ├── quicprochat-server # QUIC server, 33 RPC methods, domain services, plugins
|
||||
│ ├── quicprochat-client # CLI + REPL + TUI (Ratatui)
|
||||
│ ├── quicprochat-kt # Key transparency (Merkle-log, revocation)
|
||||
│ ├── quicprochat-p2p # iroh P2P, mesh identity, store-and-forward
|
||||
│ ├── quicprochat-ffi # C FFI (libquicprochat_ffi.so)
|
||||
│ └── quicprochat-plugin-api # Dynamic plugin hooks (C ABI)
|
||||
├── proto/qpc/v1/ # 15 .proto schema files
|
||||
├── sdks/ # Go, Python, TypeScript, Swift, Kotlin, Java, Ruby
|
||||
├── docs/ # mdBook docs, SDK guides, operational runbooks
|
||||
└── packaging/ # OpenWrt, Docker, cross-compilation
|
||||
```
|
||||
|
||||
### Security Properties
|
||||
|
||||
| Property | Mechanism |
|
||||
|---|---|
|
||||
| Transport confidentiality | TLS 1.3 over QUIC (rustls) |
|
||||
| Transport authentication | TLS 1.3 server cert (self-signed by default) |
|
||||
| Group key agreement | MLS `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` |
|
||||
| Post-compromise security | MLS epoch ratchet |
|
||||
| Identity | Ed25519 (MLS credential + leaf node signature) |
|
||||
| Message framing | Cap'n Proto (unpacked wire format) |
|
||||
| Post-quantum confidentiality | X25519 + ML-KEM-768 hybrid KEM (MLS + Noise layers) |
|
||||
| Forward secrecy | MLS epoch ratchet + per-epoch key schedule |
|
||||
| Post-compromise security | MLS Update proposals rotate leaf material |
|
||||
| Identity | Ed25519 long-term keys (MLS credential + leaf signature) |
|
||||
| Password authentication | OPAQUE-ke (augmented PAKE, no password on wire) |
|
||||
| Local storage | SQLCipher + Argon2id + ChaCha20-Poly1305 |
|
||||
| Key transparency | Append-only Merkle log with inclusion proofs + revocation |
|
||||
| Traffic analysis resistance | Uniform padding + configurable decoy traffic |
|
||||
|
||||
---
|
||||
## Features
|
||||
|
||||
### Messaging
|
||||
- **1:1 DMs** and **N-party groups** with full MLS lifecycle (add, remove, key rotation)
|
||||
- **Rich messaging** — reactions, read receipts, typing indicators, edit, delete
|
||||
- **File transfer** — chunked upload/download, SHA-256 content addressing, 50 MB limit
|
||||
- **Disappearing messages** — per-conversation TTL with server-side GC
|
||||
- **Offline queue** — messages queued locally when disconnected, flushed on reconnect
|
||||
- **Delivery proofs** — server-signed Ed25519 receipts for cryptographic send confirmation
|
||||
- **Transcript export** — encrypted, tamper-evident archives with Merkle chain verification
|
||||
|
||||
### Infrastructure
|
||||
- **Multi-device** — per-device keys and delivery fan-out (up to 5 devices)
|
||||
- **Account recovery** — 8 recovery codes, encrypted bundles, zero-knowledge server
|
||||
- **Federation** — server-to-server relay with mTLS and cross-server user resolution
|
||||
- **Abuse prevention** — user blocking, message reporting, ban enforcement, admin tools
|
||||
- **Graceful shutdown** — SIGTERM drain with configurable timeout, health endpoint awareness
|
||||
- **Rate limiting** — sliding window algorithm, trait-based for Redis swap
|
||||
- **Observability** — request correlation IDs, per-endpoint latency histograms, structured audit log
|
||||
- **Dynamic plugins** — load `.so`/`.dylib` at runtime via `--plugin-dir` (6 hook points)
|
||||
- **Mesh networking** — iroh P2P, mDNS discovery, store-and-forward, broadcast channels
|
||||
|
||||
### Client SDKs
|
||||
|
||||
| Language | Location | Transport | Notes |
|
||||
|---|---|---|---|
|
||||
| **Rust** | `crates/quicprochat-sdk` | QUIC (quinn) | Reference implementation |
|
||||
| **Go** | `sdks/go/` | QUIC (quic-go) | Cap'n Proto RPC, full API |
|
||||
| **Python** | `sdks/python/` | QUIC (aioquic) + FFI | Async client, PyPI-ready |
|
||||
| **TypeScript** | `sdks/typescript/` | WebSocket + WASM crypto | 175 KB WASM bundle, browser demo |
|
||||
| **Swift** | `sdks/swift/` | C FFI wrapper | iOS 15+ / macOS 13+ |
|
||||
| **Kotlin/Java** | `sdks/kotlin/`, `sdks/java/` | JNI + C FFI | Android + JVM |
|
||||
| **Ruby** | `sdks/ruby/` | C FFI gem | Block-form auto-disconnect |
|
||||
|
||||
### REPL Commands
|
||||
|
||||
40+ slash commands including:
|
||||
|
||||
```
|
||||
/dm <user> Start a 1:1 DM
|
||||
/create-group <name> Create a group
|
||||
/invite <user> Add member to group
|
||||
/remove <user> Remove member
|
||||
/send-file <path> Upload and send a file
|
||||
/verify <user> Compare safety numbers
|
||||
/rotate-keys Rotate MLS key material
|
||||
/disappear <duration> Set message TTL
|
||||
/export <path> Export encrypted transcript
|
||||
/devices list|add|rm Manage devices
|
||||
/block <user> Block a user
|
||||
/recovery setup Generate recovery codes
|
||||
/help Full command reference
|
||||
```
|
||||
|
||||
## Deployment
|
||||
|
||||
### Docker
|
||||
|
||||
```bash
|
||||
docker build -t quicprochat -f docker/Dockerfile .
|
||||
docker run -p 7000:7000 -v qpc-data:/data quicprochat
|
||||
```
|
||||
|
||||
### Production (Docker Compose)
|
||||
|
||||
```bash
|
||||
# Includes server + Prometheus + Grafana with pre-configured dashboards
|
||||
docker compose -f docker-compose.prod.yml up -d
|
||||
```
|
||||
|
||||
### OpenWrt
|
||||
|
||||
Cross-compiled static binaries for mesh/embedded deployments:
|
||||
|
||||
```bash
|
||||
./scripts/cross-compile.sh # builds for x86_64, armv7, aarch64 (musl)
|
||||
```
|
||||
|
||||
See [docs/openwrt.md](docs/openwrt.md) for `opkg` packaging and `procd` init scripts.
|
||||
|
||||
### Configuration
|
||||
|
||||
```bash
|
||||
# Environment variables (see .env.example for full list)
|
||||
QPC_LISTEN=0.0.0.0:7000
|
||||
QPC_AUTH_TOKEN=your-strong-token
|
||||
QPC_DB_KEY=your-db-encryption-key
|
||||
QPC_STORE_BACKEND=sql
|
||||
QPC_METRICS_LISTEN=0.0.0.0:9090
|
||||
QPC_DRAIN_TIMEOUT=30
|
||||
QPC_RPC_TIMEOUT=30
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
Full documentation is available as an **mdBook** wiki in [`docs/`](docs/):
|
||||
|
||||
```bash
|
||||
# Install mdBook (once)
|
||||
cargo install mdbook
|
||||
|
||||
# Build and serve locally
|
||||
mdbook serve docs
|
||||
# Open http://localhost:3000
|
||||
mdbook serve docs # http://localhost:3000
|
||||
```
|
||||
|
||||
### Highlights
|
||||
- [SDK Integration Guide](docs/sdk/index.md) — wire format, per-language guides, "build your own SDK"
|
||||
- [Operational Runbook](docs/operations/backup-restore.md) — backup/restore, key rotation, incident response
|
||||
- [Scaling Guide](docs/operations/scaling-guide.md) — resource sizing, horizontal scaling, capacity planning
|
||||
- [Monitoring](docs/operations/monitoring.md) — Prometheus metrics, Grafana dashboards, alert rules
|
||||
|
||||
- **[Architecture Overview](docs/src/architecture/overview.md)** — Two-service model, dual-key design, crate layout
|
||||
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS, Noise_XX, Cap'n Proto, MLS, Hybrid KEM
|
||||
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — Forward secrecy, post-compromise security, PQ readiness, threat model
|
||||
- **[Design Rationale](docs/src/design-rationale/overview.md)** — Why MLS over Signal/Matrix, ADRs for all key decisions
|
||||
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — Annotated Cap'n Proto schemas
|
||||
- **[Getting Started](docs/src/getting-started/prerequisites.md)** — Build, run, demo walkthrough
|
||||
- **[Roadmap](docs/src/roadmap/milestones.md)** — Milestones, production readiness, future research
|
||||
## Security Status
|
||||
|
||||
---
|
||||
|
||||
## Quick start
|
||||
|
||||
```bash
|
||||
# Prerequisites: Rust 1.77+, capnp CLI
|
||||
brew install capnp # macOS
|
||||
# apt-get install capnproto # Debian/Ubuntu
|
||||
|
||||
# Build and test
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
|
||||
# Start the server (port 7000 by default)
|
||||
cargo run -p quicnprotochat-server
|
||||
|
||||
# Or via a config file (TOML)
|
||||
cat > quicnprotochat-server.toml <<'EOF'
|
||||
listen = "0.0.0.0:7000"
|
||||
data_dir = "data"
|
||||
tls_cert = "data/server-cert.der"
|
||||
tls_key = "data/server-key.der"
|
||||
auth_token = "devtoken"
|
||||
store_backend = "file" # or "sql"
|
||||
db_path = "data/quicnprotochat.db"
|
||||
db_key = ""
|
||||
EOF
|
||||
cargo run -p quicnprotochat-server -- --config quicnprotochat-server.toml
|
||||
|
||||
# Run the Alice/Bob demo
|
||||
cargo run -p quicnprotochat-client -- demo-group \
|
||||
--server 127.0.0.1:7000 --ds-server 127.0.0.1:7000
|
||||
```
|
||||
|
||||
See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) for a step-by-step guide.
|
||||
|
||||
---
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | Name | Status | What it adds |
|
||||
|---|------|--------|--------------|
|
||||
| M1 | QUIC/TLS transport | Done | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
|
||||
| M2 | Authentication Service | Done | Ed25519 identity, KeyPackage generation, AS upload/fetch |
|
||||
| M3 | Delivery Service + MLS groups | Done | DS relay, `GroupMember` create/join/add/send/recv |
|
||||
| M4 | Group CLI subcommands | Next | Persistent CLI (`create-group`, `invite`, `join`, `send`, `recv`) |
|
||||
| M5 | Multi-party groups | Planned | N > 2 members, Commit fan-out, Proposal handling |
|
||||
| M6 | Persistence | Planned | SQLite key store, durable group state |
|
||||
| M7 | Post-quantum | Planned | PQ hybrid for MLS/HPKE (X25519 + ML-KEM-768) |
|
||||
|
||||
---
|
||||
|
||||
## Security notes
|
||||
|
||||
This is a **proof-of-concept research project**. It has not been audited.
|
||||
See the [threat model](docs/src/cryptography/threat-model.md) for a detailed
|
||||
analysis of what is and isn't protected.
|
||||
|
||||
---
|
||||
> **This software has not undergone an independent security audit.** While it implements cryptographic best practices (MLS RFC 9420, OPAQUE, zeroization, constant-time comparisons), no third-party firm has reviewed the implementation. Do not rely on it for high-risk communications until an audit is completed. See [SECURITY.md](SECURITY.md) for our vulnerability disclosure policy.
|
||||
|
||||
## License
|
||||
|
||||
|
||||
891
ROADMAP.html
Normal file
891
ROADMAP.html
Normal file
@@ -0,0 +1,891 @@
|
||||
<!DOCTYPE HTML>
|
||||
<html lang="en" class="navy sidebar-visible" dir="ltr">
|
||||
<head>
|
||||
<!-- Book generated using mdBook -->
|
||||
<meta charset="UTF-8">
|
||||
<title>Full Roadmap (Phases 1-8) - quicproquo</title>
|
||||
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
|
||||
<meta name="description" content="End-to-end encrypted group messaging over QUIC + TLS 1.3 + MLS (RFC 9420)">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta name="theme-color" content="#ffffff">
|
||||
|
||||
<link rel="icon" href="favicon-de23e50b.svg">
|
||||
<link rel="shortcut icon" href="favicon-8114d1fc.png">
|
||||
<link rel="stylesheet" href="css/variables-8adf115d.css">
|
||||
<link rel="stylesheet" href="css/general-2459343d.css">
|
||||
<link rel="stylesheet" href="css/chrome-ae938929.css">
|
||||
<link rel="stylesheet" href="css/print-9e4910d8.css" media="print">
|
||||
|
||||
<!-- Fonts -->
|
||||
<link rel="stylesheet" href="fonts/fonts-9644e21d.css">
|
||||
|
||||
<!-- Highlight.js Stylesheets -->
|
||||
<link rel="stylesheet" id="mdbook-highlight-css" href="highlight-493f70e1.css">
|
||||
<link rel="stylesheet" id="mdbook-tomorrow-night-css" href="tomorrow-night-4c0ae647.css">
|
||||
<link rel="stylesheet" id="mdbook-ayu-highlight-css" href="ayu-highlight-3fdfc3ac.css">
|
||||
|
||||
<!-- Custom theme stylesheets -->
|
||||
|
||||
|
||||
<!-- Provide site root and default themes to javascript -->
|
||||
<script>
|
||||
const path_to_root = "";
|
||||
const default_light_theme = "navy";
|
||||
const default_dark_theme = "navy";
|
||||
window.path_to_searchindex_js = "searchindex-1e4ee6e2.js";
|
||||
</script>
|
||||
<!-- Start loading toc.js asap -->
|
||||
<script src="toc-69b0eb95.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div id="mdbook-help-container">
|
||||
<div id="mdbook-help-popup">
|
||||
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
|
||||
<div>
|
||||
<p>Press <kbd>←</kbd> or <kbd>→</kbd> to navigate between chapters</p>
|
||||
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
|
||||
<p>Press <kbd>?</kbd> to show this help</p>
|
||||
<p>Press <kbd>Esc</kbd> to hide this help</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="mdbook-body-container">
|
||||
<!-- Work around some values being stored in localStorage wrapped in quotes -->
|
||||
<script>
|
||||
try {
|
||||
let theme = localStorage.getItem('mdbook-theme');
|
||||
let sidebar = localStorage.getItem('mdbook-sidebar');
|
||||
|
||||
if (theme.startsWith('"') && theme.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
|
||||
}
|
||||
|
||||
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
|
||||
}
|
||||
} catch (e) { }
|
||||
</script>
|
||||
|
||||
<!-- Set the theme before any content is loaded, prevents flash -->
|
||||
<script>
|
||||
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
|
||||
let theme;
|
||||
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
|
||||
if (theme === null || theme === undefined) { theme = default_theme; }
|
||||
const html = document.documentElement;
|
||||
html.classList.remove('navy')
|
||||
html.classList.add(theme);
|
||||
html.classList.add("js");
|
||||
</script>
|
||||
|
||||
<input type="checkbox" id="mdbook-sidebar-toggle-anchor" class="hidden">
|
||||
|
||||
<!-- Hide / unhide sidebar before it is displayed -->
|
||||
<script>
|
||||
let sidebar = null;
|
||||
const sidebar_toggle = document.getElementById("mdbook-sidebar-toggle-anchor");
|
||||
if (document.body.clientWidth >= 1080) {
|
||||
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
|
||||
sidebar = sidebar || 'visible';
|
||||
} else {
|
||||
sidebar = 'hidden';
|
||||
sidebar_toggle.checked = false;
|
||||
}
|
||||
if (sidebar === 'visible') {
|
||||
sidebar_toggle.checked = true;
|
||||
} else {
|
||||
html.classList.remove('sidebar-visible');
|
||||
}
|
||||
</script>
|
||||
|
||||
<nav id="mdbook-sidebar" class="sidebar" aria-label="Table of contents">
|
||||
<!-- populated by js -->
|
||||
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
|
||||
<noscript>
|
||||
<iframe class="sidebar-iframe-outer" src="toc.html"></iframe>
|
||||
</noscript>
|
||||
<div id="mdbook-sidebar-resize-handle" class="sidebar-resize-handle">
|
||||
<div class="sidebar-resize-indicator"></div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<div id="mdbook-page-wrapper" class="page-wrapper">
|
||||
|
||||
<div class="page">
|
||||
<div id="mdbook-menu-bar-hover-placeholder"></div>
|
||||
<div id="mdbook-menu-bar" class="menu-bar sticky">
|
||||
<div class="left-buttons">
|
||||
<label id="mdbook-sidebar-toggle" class="icon-button" for="mdbook-sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="mdbook-sidebar">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M0 96C0 78.3 14.3 64 32 64H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32C14.3 128 0 113.7 0 96zM0 256c0-17.7 14.3-32 32-32H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32c-17.7 0-32-14.3-32-32zM448 416c0 17.7-14.3 32-32 32H32c-17.7 0-32-14.3-32-32s14.3-32 32-32H416c17.7 0 32 14.3 32 32z"/></svg></span>
|
||||
</label>
|
||||
<button id="mdbook-theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="mdbook-theme-list">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M371.3 367.1c27.3-3.9 51.9-19.4 67.2-42.9L600.2 74.1c12.6-19.5 9.4-45.3-7.6-61.2S549.7-4.4 531.1 9.6L294.4 187.2c-24 18-38.2 46.1-38.4 76.1L371.3 367.1zm-19.6 25.4l-116-104.4C175.9 290.3 128 339.6 128 400c0 3.9 .2 7.8 .6 11.6c1.8 17.5-10.2 36.4-27.8 36.4H96c-17.7 0-32 14.3-32 32s14.3 32 32 32H240c61.9 0 112-50.1 112-112c0-2.5-.1-5-.2-7.5z"/></svg></span>
|
||||
</button>
|
||||
<ul id="mdbook-theme-list" class="theme-popup" aria-label="Themes" role="menu">
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-default_theme">Auto</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-light">Light</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-rust">Rust</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-coal">Coal</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-navy">Navy</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-ayu">Ayu</button></li>
|
||||
</ul>
|
||||
<button id="mdbook-search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="mdbook-searchbar">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M416 208c0 45.9-14.9 88.3-40 122.7L502.6 457.4c12.5 12.5 12.5 32.8 0 45.3s-32.8 12.5-45.3 0L330.7 376c-34.4 25.2-76.8 40-122.7 40C93.1 416 0 322.9 0 208S93.1 0 208 0S416 93.1 416 208zM208 352c79.5 0 144-64.5 144-144s-64.5-144-144-144S64 128.5 64 208s64.5 144 144 144z"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<h1 class="menu-title">quicproquo</h1>
|
||||
|
||||
<div class="right-buttons">
|
||||
<a href="print.html" title="Print this book" aria-label="Print this book">
|
||||
<span class=fa-svg id="print-button"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M128 0C92.7 0 64 28.7 64 64v96h64V64H354.7L384 93.3V160h64V93.3c0-17-6.7-33.3-18.7-45.3L400 18.7C388 6.7 371.7 0 354.7 0H128zM384 352v32 64H128V384 368 352H384zm64 32h32c17.7 0 32-14.3 32-32V256c0-35.3-28.7-64-64-64H64c-35.3 0-64 28.7-64 64v96c0 17.7 14.3 32 32 32H64v64c0 35.3 28.7 64 64 64H384c35.3 0 64-28.7 64-64V384zm-16-88c-13.3 0-24-10.7-24-24s10.7-24 24-24s24 10.7 24 24s-10.7 24-24 24z"/></svg></span>
|
||||
</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="mdbook-search-wrapper" class="hidden">
|
||||
<form id="mdbook-searchbar-outer" class="searchbar-outer">
|
||||
<div class="search-wrapper">
|
||||
<input type="search" id="mdbook-searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="mdbook-searchresults-outer" aria-describedby="searchresults-header">
|
||||
<div class="spinner-wrapper">
|
||||
<span class=fa-svg id="fa-spin"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M304 48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zm0 416c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM48 304c26.5 0 48-21.5 48-48s-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48zm464-48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM142.9 437c18.7-18.7 18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zm0-294.2c18.7-18.7 18.7-49.1 0-67.9S93.7 56.2 75 75s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zM369.1 437c18.7 18.7 49.1 18.7 67.9 0s18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9z"/></svg></span>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
<div id="mdbook-searchresults-outer" class="searchresults-outer hidden">
|
||||
<div id="mdbook-searchresults-header" class="searchresults-header"></div>
|
||||
<ul id="mdbook-searchresults">
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
|
||||
<script>
|
||||
document.getElementById('mdbook-sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
|
||||
document.getElementById('mdbook-sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
|
||||
Array.from(document.querySelectorAll('#mdbook-sidebar a')).forEach(function(link) {
|
||||
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
|
||||
});
|
||||
</script>
|
||||
|
||||
<div id="mdbook-content" class="content">
|
||||
<main>
|
||||
<h1 id="roadmap--quicproquo"><a class="header" href="#roadmap--quicproquo">Roadmap — quicproquo</a></h1>
|
||||
<blockquote>
|
||||
<p>From proof-of-concept to production-grade E2E encrypted messaging.</p>
|
||||
<p>Each phase is designed to be tackled sequentially. Items within a phase
|
||||
can be parallelised. Check the box when done.</p>
|
||||
</blockquote>
|
||||
<hr>
|
||||
<h2 id="phase-1--production-hardening-critical"><a class="header" href="#phase-1--production-hardening-critical">Phase 1 — Production Hardening (Critical)</a></h2>
|
||||
<p>Eliminate all crash paths, enforce secure defaults, fix deployment blockers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.1 Remove <code>.unwrap()</code> / <code>.expect()</code> from production paths</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>AUTH_CONTEXT.read().expect()</code> in client RPC with proper <code>Result</code></li>
|
||||
<li>Replace <code>"0.0.0.0:0".parse().unwrap()</code> in client with fallible parse</li>
|
||||
<li>Replace <code>Mutex::lock().unwrap()</code> in server storage with <code>.map_err()</code></li>
|
||||
<li>Audit: <code>grep -rn 'unwrap()\|expect(' crates/</code> outside <code>#[cfg(test)]</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.2 Enforce secure defaults in production mode</strong></p>
|
||||
<ul>
|
||||
<li>Reject startup if <code>QPQ_PRODUCTION=true</code> and <code>auth_token</code> is empty or <code>"devtoken"</code></li>
|
||||
<li>Require non-empty <code>db_key</code> when using SQL backend in production</li>
|
||||
<li>Refuse to auto-generate TLS certs in production mode (require existing cert+key)</li>
|
||||
<li>Already partially implemented — verify and harden the validation in <code>config.rs</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.3 Fix <code>.gitignore</code></strong></p>
|
||||
<ul>
|
||||
<li>Add <code>data/</code>, <code>*.der</code>, <code>*.pem</code>, <code>*.db</code>, <code>*.bin</code> (state files), <code>*.ks</code> (keystores)</li>
|
||||
<li>Verify no secrets are already tracked: <code>git ls-files data/ *.der *.db</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.4 Fix Dockerfile</strong></p>
|
||||
<ul>
|
||||
<li>Sync workspace members (handle excluded <code>p2p</code> crate)</li>
|
||||
<li>Create dedicated user/group instead of <code>nobody</code></li>
|
||||
<li>Set writable <code>QPQ_DATA_DIR</code> with correct permissions</li>
|
||||
<li>Test: <code>docker build . && docker run --rm -it qpq-server --help</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.5 TLS certificate lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Document CA-signed cert setup (Let’s Encrypt / custom CA)</li>
|
||||
<li>Add <code>--tls-required</code> flag that refuses to start without valid cert</li>
|
||||
<li>Log clear warning when using self-signed certs</li>
|
||||
<li>Document certificate rotation procedure</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-2--test--ci-maturity"><a class="header" href="#phase-2--test--ci-maturity">Phase 2 — Test & CI Maturity</a></h2>
|
||||
<p>Build confidence before adding features.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.1 Expand E2E test coverage</strong></p>
|
||||
<ul>
|
||||
<li>Auth failure scenarios (wrong password, expired token, invalid token)</li>
|
||||
<li>Message ordering verification (send N messages, verify seq numbers)</li>
|
||||
<li>Concurrent clients (3+ members in group, simultaneous send/recv)</li>
|
||||
<li>OPAQUE registration + login full flow</li>
|
||||
<li>Queue full behavior (>1000 messages)</li>
|
||||
<li>Rate limiting behavior (>100 enqueues/minute)</li>
|
||||
<li>Reconnection after server restart</li>
|
||||
<li>KeyPackage exhaustion (fetch when none available)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.2 Add unit tests for untested paths</strong></p>
|
||||
<ul>
|
||||
<li>Client retry logic (exponential backoff, jitter, retriable classification)</li>
|
||||
<li>REPL input parsing edge cases (empty input, special characters, <code>/</code> commands)</li>
|
||||
<li>State file encryption/decryption round-trip with bad password</li>
|
||||
<li>Token cache expiry</li>
|
||||
<li>Conversation store migrations</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.3 CI hardening</strong></p>
|
||||
<ul>
|
||||
<li>Add <code>.github/CODEOWNERS</code> (crypto, auth, wire-format require 2 reviewers)</li>
|
||||
<li>Ensure <code>cargo deny check</code> runs on every PR (already in CI — verify)</li>
|
||||
<li>Add <code>cargo audit</code> as blocking check (already in CI — verify)</li>
|
||||
<li>Add coverage reporting (tarpaulin or llvm-cov)</li>
|
||||
<li>Add CI job for Docker build validation</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.4 Clean up build warnings</strong></p>
|
||||
<ul>
|
||||
<li>Fix Cap’n Proto generated <code>unused_parens</code> warnings</li>
|
||||
<li>Remove dead code / unused imports</li>
|
||||
<li>Address <code>openmls</code> future-incompat warnings</li>
|
||||
<li>Target: <code>cargo clippy --workspace -- -D warnings</code> passes clean</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-3--client-sdks-native-quic--capn-proto-everywhere"><a class="header" href="#phase-3--client-sdks-native-quic--capn-proto-everywhere">Phase 3 — Client SDKs: Native QUIC + Cap’n Proto Everywhere</a></h2>
|
||||
<p><strong>No REST gateway. No protocol dilution.</strong> The <code>.capnp</code> schemas are the
|
||||
interface definition. Every SDK speaks native QUIC + Cap’n Proto. The
|
||||
project name stays honest.</p>
|
||||
<h3 id="why-this-matters"><a class="header" href="#why-this-matters">Why this matters</a></h3>
|
||||
<p>The name is <strong>quicproquo</strong> — the protocol IS the product. Instead
|
||||
of adding an HTTP translation layer that loses zero-copy performance and
|
||||
adds base64 overhead, we invest in making the native protocol accessible
|
||||
from every language that has QUIC + Cap’n Proto support, and provide
|
||||
WASM/FFI for the crypto layer.</p>
|
||||
<h3 id="architecture"><a class="header" href="#architecture">Architecture</a></h3>
|
||||
<pre><code> Server: QUIC + Cap'n Proto (single protocol, no gateway)
|
||||
|
||||
Client SDKs:
|
||||
┌─── Rust quinn + capnp-rpc (existing, reference impl)
|
||||
├─── Go quic-go + go-capnp (native, high confidence)
|
||||
├─── Python aioquic + pycapnp (native QUIC, manual framing)
|
||||
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
|
||||
└─── Browser WebTransport + capnp (WASM) (QUIC transport, no HTTP needed)
|
||||
|
||||
Crypto layer (client-side MLS, shared across all SDKs):
|
||||
┌─── Rust crate (native, existing)
|
||||
├─── WASM module (browsers, Node.js, Deno)
|
||||
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
|
||||
</code></pre>
|
||||
<h3 id="language-support-reality-check"><a class="header" href="#language-support-reality-check">Language support reality check</a></h3>
|
||||
<div class="table-wrapper">
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Language</th><th>QUIC</th><th>Cap’n Proto</th><th>RPC</th><th>Confidence</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><strong>Rust</strong></td><td>quinn ✅</td><td>capnp-rpc ✅</td><td>Full ✅</td><td>Existing</td></tr>
|
||||
<tr><td><strong>Go</strong></td><td>quic-go ✅</td><td>go-capnp ✅</td><td>Level 1 ✅</td><td>High</td></tr>
|
||||
<tr><td><strong>Python</strong></td><td>aioquic ✅</td><td>pycapnp ⚠️</td><td>Manual framing</td><td>Medium</td></tr>
|
||||
<tr><td><strong>C/C++</strong></td><td>msquic/ngtcp2 ✅</td><td>capnproto ✅</td><td>Full ✅</td><td>High</td></tr>
|
||||
<tr><td><strong>Browser</strong></td><td>WebTransport ✅</td><td>WASM ✅</td><td>Via WASM bridge</td><td>Medium</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<h3 id="implementation"><a class="header" href="#implementation">Implementation</a></h3>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.1 Go SDK (<code>quicproquo-go</code>)</strong></p>
|
||||
<ul>
|
||||
<li>Generated Go types from <code>node.capnp</code> (6487-line codegen, all 24 RPC methods)</li>
|
||||
<li>QUIC transport via <code>quic-go</code> with TLS 1.3 + ALPN <code>"capnp"</code></li>
|
||||
<li>High-level <code>qpq</code> package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth</li>
|
||||
<li>Example CLI in <code>sdks/go/cmd/example/</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.2 Python SDK (<code>quicproquo-py</code>)</strong></p>
|
||||
<ul>
|
||||
<li>QUIC transport: <code>aioquic</code> with custom Cap’n Proto stream handler</li>
|
||||
<li>Cap’n Proto serialization: <code>pycapnp</code> for message types</li>
|
||||
<li>Manual RPC framing: length-prefixed request/response over QUIC stream</li>
|
||||
<li>Async/await API matching the Rust client patterns</li>
|
||||
<li>Crypto: PyO3 bindings to <code>quicproquo-core</code> for MLS operations</li>
|
||||
<li>Publish: PyPI <code>quicproquo</code></li>
|
||||
<li>Example: async bot client</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.3 C FFI layer (<code>quicproquo-ffi</code>)</strong></p>
|
||||
<ul>
|
||||
<li><code>crates/quicproquo-ffi</code> with 7 extern “C” functions: connect, login, send, receive, disconnect, last_error, free_string</li>
|
||||
<li>Builds as <code>libquicproquo_ffi.so</code> / <code>.dylib</code> / <code>.dll</code></li>
|
||||
<li>Python ctypes wrapper in <code>examples/python/qpq_client.py</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.4 WASM compilation of <code>quicproquo-core</code></strong></p>
|
||||
<ul>
|
||||
<li><code>wasm-pack build</code> target producing 175 KB WASM bundle (LTO + opt-level=s)</li>
|
||||
<li>13 <code>wasm_bindgen</code> functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding</li>
|
||||
<li>Browser-ready with <code>crypto.getRandomValues()</code> RNG</li>
|
||||
<li>Published as <code>sdks/typescript/wasm-crypto/</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.5 WebTransport server endpoint</strong></p>
|
||||
<ul>
|
||||
<li>Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)</li>
|
||||
<li>Cap’n Proto RPC framed over WebTransport bidirectional streams</li>
|
||||
<li>Same auth, same storage, same RPC handlers — just a different stream source</li>
|
||||
<li>Browsers connect via <code>new WebTransport("https://server:7443")</code></li>
|
||||
<li>ALPN negotiation: <code>"h3"</code> for WebTransport, <code>"capnp"</code> for native QUIC</li>
|
||||
<li>Configurable port: <code>--webtransport-listen 0.0.0.0:7443</code></li>
|
||||
<li>Feature-flagged: <code>--features webtransport</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.6 TypeScript/JavaScript SDK (<code>@quicproquo/client</code>)</strong></p>
|
||||
<ul>
|
||||
<li><code>QpqClient</code> class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount</li>
|
||||
<li>WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad</li>
|
||||
<li>WebSocket transport with request/response correlation and reconnection</li>
|
||||
<li>Browser demo: interactive crypto playground + chat UI (<code>sdks/typescript/demo/index.html</code>)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.7 SDK documentation and schema publishing</strong></p>
|
||||
<ul>
|
||||
<li>Publish <code>.capnp</code> schemas as the canonical API contract</li>
|
||||
<li>Document the QUIC + Cap’n Proto connection pattern for each language</li>
|
||||
<li>Provide a “build your own SDK” guide (QUIC stream → Cap’n Proto RPC bootstrap)</li>
|
||||
<li>Reference implementation checklist: connect, auth, upload key, enqueue, fetch</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-4--trust--security-infrastructure"><a class="header" href="#phase-4--trust--security-infrastructure">Phase 4 — Trust & Security Infrastructure</a></h2>
|
||||
<p>Address the security gaps required for real-world deployment.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>4.1 Third-party cryptographic audit</strong></p>
|
||||
<ul>
|
||||
<li>Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization</li>
|
||||
<li>Firms: NCC Group, Trail of Bits, Cure53</li>
|
||||
<li>Budget and timeline: typically 4-6 weeks, $50K–$150K</li>
|
||||
<li>Publish report publicly (builds trust)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.2 Key Transparency / revocation</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>BasicCredential</code> with X.509-based MLS credentials</li>
|
||||
<li>Or: verifiable key directory (Merkle tree, auditable log)</li>
|
||||
<li>Users can verify peer keys haven’t been substituted (MITM detection)</li>
|
||||
<li>Revocation mechanism for compromised keys</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.3 Client authentication on Delivery Service</strong></p>
|
||||
<ul>
|
||||
<li>DS sender identity binding with explicit audit logging</li>
|
||||
<li><code>sender_prefix</code> tracking in enqueue/batch_enqueue RPCs</li>
|
||||
<li>Sender identity derived from authenticated session</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.4 M7 — Post-quantum MLS integration</strong></p>
|
||||
<ul>
|
||||
<li>Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider</li>
|
||||
<li>Group key material gets post-quantum confidentiality</li>
|
||||
<li>Full test suite with PQ ciphersuite</li>
|
||||
<li>Ref: existing <code>hybrid_kem.rs</code> and <code>hybrid_crypto.rs</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.5 Username enumeration mitigation</strong></p>
|
||||
<ul>
|
||||
<li>5 ms timing floor on <code>resolveUser</code> responses</li>
|
||||
<li>Rate limiting to prevent bulk enumeration attacks</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-5--features--ux"><a class="header" href="#phase-5--features--ux">Phase 5 — Features & UX</a></h2>
|
||||
<p>Make it a product people want to use.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.1 Multi-device support</strong></p>
|
||||
<ul>
|
||||
<li>Account → multiple devices, each with own Ed25519 key + MLS KeyPackages</li>
|
||||
<li>Device graph management (add device, remove device, list devices)</li>
|
||||
<li>Messages delivered to all devices of a user</li>
|
||||
<li><code>device_id</code> field already in Auth struct — wire it through</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.2 Account recovery</strong></p>
|
||||
<ul>
|
||||
<li>Recovery codes or backup key (encrypted, stored by user)</li>
|
||||
<li>Option: server-assisted recovery with security questions (lower security)</li>
|
||||
<li>MLS state re-establishment after device loss</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.3 Full MLS lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Member removal (Remove proposal → Commit → fan-out)</li>
|
||||
<li>Credential update (Update proposal for key rotation)</li>
|
||||
<li>Explicit proposal handling (queue proposals, batch commit)</li>
|
||||
<li>Group metadata (name, description, avatar hash)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.4 Message editing and deletion</strong></p>
|
||||
<ul>
|
||||
<li><code>Edit</code> (0x06) and <code>Delete</code> (0x07) message types in <code>AppMessage</code></li>
|
||||
<li><code>/edit &lt;index&gt; &lt;text&gt;</code> and <code>/delete &lt;index&gt;</code> REPL commands (own messages only)</li>
|
||||
<li>Database update/removal on incoming edit/delete</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.5 File and media transfer</strong></p>
|
||||
<ul>
|
||||
<li><code>uploadBlob</code> / <code>downloadBlob</code> RPCs with 256 KB chunked streaming</li>
|
||||
<li>SHA-256 content-addressable storage with hash verification</li>
|
||||
<li><code>FileRef</code> (0x08) message type with blob_id, filename, file_size, mime_type</li>
|
||||
<li><code>/send-file &lt;path&gt;</code> and <code>/download &lt;index&gt;</code> REPL commands with progress bars</li>
|
||||
<li>50 MB max file size, automatic MIME detection via <code>mime_guess</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.6 Abuse prevention and moderation</strong></p>
|
||||
<ul>
|
||||
<li>Block user (client-side, suppress display)</li>
|
||||
<li>Report message (encrypted report to admin key)</li>
|
||||
<li>Admin tools: ban user, delete account, audit log</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.7 Offline message queue (client-side)</strong></p>
|
||||
<ul>
|
||||
<li>Queue messages when disconnected, send on reconnect</li>
|
||||
<li>Idempotent message IDs to prevent duplicates</li>
|
||||
<li>Gap detection: compare local seq with server seq</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-6--scale--operations"><a class="header" href="#phase-6--scale--operations">Phase 6 — Scale & Operations</a></h2>
|
||||
<p>Prepare for real traffic.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.1 Distributed rate limiting</strong></p>
|
||||
<ul>
|
||||
<li>Current: in-memory per-process, lost on restart</li>
|
||||
<li>Move to Redis or shared state for multi-node deployments</li>
|
||||
<li>Sliding window with configurable thresholds</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.2 Multi-node / horizontal scaling</strong></p>
|
||||
<ul>
|
||||
<li>Stateless server design (already mostly there — state is in storage backend)</li>
|
||||
<li>Shared PostgreSQL or CockroachDB backend (replace SQLite)</li>
|
||||
<li>Message queue fan-out (Redis pub/sub or NATS for cross-node notification)</li>
|
||||
<li>Load balancer health check via QUIC RPC <code>health()</code> or Prometheus <code>/metrics</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.3 Operational runbook</strong></p>
|
||||
<ul>
|
||||
<li>Backup / restore procedures (SQLCipher, file backend)</li>
|
||||
<li>Key rotation (auth token, TLS cert, DB encryption key)</li>
|
||||
<li>Incident response playbook</li>
|
||||
<li>Scaling guide (when to add nodes, resource sizing)</li>
|
||||
<li>Monitoring dashboard templates (Grafana + Prometheus)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.4 Connection draining and graceful shutdown</strong></p>
|
||||
<ul>
|
||||
<li>Stop accepting new connections on SIGTERM</li>
|
||||
<li>Wait for in-flight RPCs (configurable timeout, default 30s)</li>
|
||||
<li>Drain WebTransport sessions with close frame</li>
|
||||
<li>Document expected behavior for load balancers (health → unhealthy first)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.5 Request-level timeouts</strong></p>
|
||||
<ul>
|
||||
<li>Per-RPC timeout (prevent slow clients from holding resources)</li>
|
||||
<li>Database query timeout</li>
|
||||
<li>Overall request deadline propagation</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.6 Observability enhancements</strong></p>
|
||||
<ul>
|
||||
<li>Request correlation IDs (trace across RPC → storage)</li>
|
||||
<li>Storage operation latency metrics</li>
|
||||
<li>Per-endpoint latency histograms</li>
|
||||
<li>Structured audit log to persistent storage (not just stdout)</li>
|
||||
<li>OpenTelemetry integration</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-7--platform-expansion--research"><a class="header" href="#phase-7--platform-expansion--research">Phase 7 — Platform Expansion & Research</a></h2>
|
||||
<p>Long-term vision for wide adoption.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.1 Mobile clients (iOS + Android)</strong></p>
|
||||
<ul>
|
||||
<li>Use C FFI (Phase 3.3) for crypto + transport (single library)</li>
|
||||
<li>Push notifications via APNs / FCM (server sends notification on enqueue)</li>
|
||||
<li>Background QUIC connection for message polling</li>
|
||||
<li>Biometric auth for local key storage (Keychain / Android Keystore)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.2 Web client (browser)</strong></p>
|
||||
<ul>
|
||||
<li>Use WASM (Phase 3.4) for crypto</li>
|
||||
<li>Use WebTransport (Phase 3.5) for native QUIC transport</li>
|
||||
<li>Cap’n Proto via WASM bridge (Phase 3.6)</li>
|
||||
<li>IndexedDB for local state persistence</li>
|
||||
<li>Service Worker for background notifications</li>
|
||||
<li>Progressive Web App (PWA) support</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.3 Federation</strong></p>
|
||||
<ul>
|
||||
<li>Server-to-server protocol via Cap’n Proto RPC over QUIC (see <code>federation.capnp</code>)</li>
|
||||
<li><code>relayEnqueue</code>, <code>proxyFetchKeyPackage</code>, <code>federationHealth</code> methods</li>
|
||||
<li>Identity resolution across federated servers</li>
|
||||
<li>MLS group spanning multiple servers</li>
|
||||
<li>Trust model for federated deployments</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.4 Sealed Sender</strong></p>
|
||||
<ul>
|
||||
<li>Sender identity inside MLS ciphertext only (server can’t see who sent)</li>
|
||||
<li><code>sealed_sender</code> module in quicproquo-core with seal/unseal API</li>
|
||||
<li>WASM-accessible via <code>wasm_bindgen</code> for browser use</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.5 Additional language SDKs</strong></p>
|
||||
<ul>
|
||||
<li>Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)</li>
|
||||
<li>Swift: Swift wrapper over C FFI + Network.framework QUIC</li>
|
||||
<li>Ruby: FFI bindings via <code>quicproquo-ffi</code></li>
|
||||
<li>Evaluate demand-driven — only build SDKs people request</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.6 P2P / NAT traversal</strong></p>
|
||||
<ul>
|
||||
<li>Direct peer-to-peer via iroh (foundation exists in <code>quicproquo-p2p</code>)</li>
|
||||
<li>Server as fallback relay only</li>
|
||||
<li>Reduces latency and removes the server as a single point of failure</li>
|
||||
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 6.1</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.7 Traffic analysis resistance</strong></p>
|
||||
<ul>
|
||||
<li>Padding messages to uniform size</li>
|
||||
<li>Decoy traffic to mask timing patterns</li>
|
||||
<li>Optional Tor/I2P routing for IP privacy</li>
|
||||
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 5.4, 6.3</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-8--freifunk--community-mesh-networking"><a class="header" href="#phase-8--freifunk--community-mesh-networking">Phase 8 — Freifunk / Community Mesh Networking</a></h2>
|
||||
<p>Make qpq a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpq nodes form
|
||||
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
|
||||
functions without any central infrastructure or internet uplink.</p>
|
||||
<h3 id="architecture-1"><a class="header" href="#architecture-1">Architecture</a></h3>
|
||||
<pre><code> Client A ─── mDNS discovery ──► nearby qpq node (LAN / mesh)
|
||||
│
|
||||
Cap'n Proto federation
|
||||
│
|
||||
remote qpq node (across mesh)
|
||||
</code></pre>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F0 — Re-include <code>quicproquo-p2p</code> in workspace; fix ALPN strings</strong></p>
|
||||
<ul>
|
||||
<li>Moved <code>crates/quicproquo-p2p</code> from <code>exclude</code> back into <code>[workspace] members</code></li>
|
||||
<li>Fixed ALPN <code>b"quicnprotochat/p2p/1"</code> → <code>b"quicproquo/p2p/1"</code> (breaking wire change)</li>
|
||||
<li>Fixed federation ALPN <code>b"qnpc-fed"</code> → <code>b"quicproquo/federation/1"</code></li>
|
||||
<li>Feature-gated behind <code>--features mesh</code> on client (keeps iroh out of default builds)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F1 — Federation routing in message delivery</strong></p>
|
||||
<ul>
|
||||
<li><code>handle_enqueue</code> and <code>handle_batch_enqueue</code> call <code>federation::routing::resolve_destination()</code></li>
|
||||
<li>Recipients with a remote home server are relayed via <code>FederationClient::relay_enqueue()</code></li>
|
||||
<li>mTLS mutual authentication between nodes (both present client certs, validated against shared CA)</li>
|
||||
<li>Config: <code>QPQ_FEDERATION_LISTEN</code>, <code>QPQ_LOCAL_DOMAIN</code>, <code>QPQ_FEDERATION_CERT/KEY/CA</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F2 — mDNS local peer discovery</strong></p>
|
||||
<ul>
|
||||
<li>Server announces <code>_quicproquo._udp.local.</code> on startup via <code>mdns-sd</code></li>
|
||||
<li>Client: <code>MeshDiscovery::start()</code> browses for nearby nodes (feature-gated)</li>
|
||||
<li>REPL commands: <code>/mesh peers</code> (scan + list), <code>/mesh server &lt;host:port&gt;</code> (note address)</li>
|
||||
<li>Nodes announce: <code>ver=1</code>, <code>server=&lt;host:port&gt;</code>, <code>domain=&lt;local_domain&gt;</code> TXT records</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F3 — Self-sovereign mesh identity</strong></p>
|
||||
<ul>
|
||||
<li>Ed25519 keypair-based identity independent of AS registration</li>
|
||||
<li>JSON-persisted seed + known peers directory</li>
|
||||
<li>Sign/verify operations for mesh authenticity (<code>crates/quicproquo-p2p/src/identity.rs</code>)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F4 — Store-and-forward with TTL</strong></p>
|
||||
<ul>
|
||||
<li><code>MeshEnvelope</code> with TTL-based expiry, hop_count tracking, max_hops routing limit</li>
|
||||
<li>SHA-256 deduplication ID prevents relay loops</li>
|
||||
<li>Ed25519 signature verification on envelopes</li>
|
||||
<li><code>MeshStore</code> in-memory queue with per-recipient capacity limits and TTL-based GC</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F5 — Lightweight broadcast channels</strong></p>
|
||||
<ul>
|
||||
<li>Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)</li>
|
||||
<li>Topic-based pub/sub via <code>BroadcastChannel</code> and <code>BroadcastManager</code></li>
|
||||
<li>Subscribe/unsubscribe, create, publish API on <code>P2pNode</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F6 — Extended <code>/mesh</code> REPL commands</strong></p>
|
||||
<ul>
|
||||
<li><code>/mesh send &lt;peer_id&gt; &lt;msg&gt;</code> — direct P2P message via iroh</li>
|
||||
<li><code>/mesh broadcast &lt;topic&gt; &lt;msg&gt;</code> — publish to broadcast channel</li>
|
||||
<li><code>/mesh subscribe &lt;topic&gt;</code> — join broadcast channel</li>
|
||||
<li><code>/mesh route</code> — show routing table</li>
|
||||
<li><code>/mesh identity</code> — show mesh identity info</li>
|
||||
<li><code>/mesh store</code> — show store-and-forward statistics</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F7 — OpenWrt cross-compilation guide</strong></p>
|
||||
<ul>
|
||||
<li>Musl static builds: <code>x86_64-unknown-linux-musl</code>, <code>armv7-unknown-linux-musleabihf</code>, <code>mips-unknown-linux-musl</code></li>
|
||||
<li>Strip binary: <code>--release</code> + <code>strip</code> → target size &lt; 5 MB for flash storage</li>
|
||||
<li><code>opkg</code> package manifest for OpenWrt feed</li>
|
||||
<li><code>procd</code> init script + <code>uci</code> config file for OpenWrt integration</li>
|
||||
<li>CI job: cross-compile and size-check on every release tag</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F8 — Traffic analysis resistance for mesh</strong></p>
|
||||
<ul>
|
||||
<li>Uniform message padding to nearest 256-byte boundary (hides message size)</li>
|
||||
<li>Configurable decoy traffic rate (fake messages to mask send timing)</li>
|
||||
<li>Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)</li>
|
||||
<li>Ref: Phase 7.7 for server-side traffic analysis resistance</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-9--developer-experience--community-growth"><a class="header" href="#phase-9--developer-experience--community-growth">Phase 9 — Developer Experience & Community Growth</a></h2>
|
||||
<p>Features designed to attract contributors, create demo/showcase potential,
|
||||
and lower the barrier to entry for non-crypto developers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.1 Criterion Benchmark Suite (<code>qpq-bench</code>)</strong></p>
|
||||
<ul>
|
||||
<li>Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake</li>
|
||||
<li>CI publishes HTML benchmark reports as GitHub Actions artifacts</li>
|
||||
<li>Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.2 Safety Numbers (key verification)</strong></p>
|
||||
<ul>
|
||||
<li>60-digit numeric code derived from two identity keys (Signal-style)</li>
|
||||
<li><code>/verify &lt;username&gt;</code> REPL command for out-of-band verification</li>
|
||||
<li>Available in WASM via <code>compute_safety_number</code> binding</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.3 Full-Screen TUI (Ratatui + Crossterm)</strong></p>
|
||||
<ul>
|
||||
<li><code>qpq tui</code> launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator</li>
|
||||
<li>Feature-gated <code>--features tui</code> to keep ratatui/crossterm out of default builds</li>
|
||||
<li>Existing REPL and CLI subcommands are unaffected</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.4 Delivery Proof Canary Tokens</strong></p>
|
||||
<ul>
|
||||
<li>Server signs <code>Ed25519(SHA-256(message_id || recipient || timestamp))</code> on enqueue</li>
|
||||
<li>Sender stores proof locally — cryptographic evidence the server queued the message</li>
|
||||
<li>Cap’n Proto schema gains optional <code>deliveryProof: Data</code> on enqueue response</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.5 Verifiable Transcript Archive</strong></p>
|
||||
<ul>
|
||||
<li><code>GroupMember::export_transcript(path, password)</code> writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
<li><code>qpq export verify</code> CLI command independently verifies chain integrity</li>
|
||||
<li>Useful for legal discovery, audit, or personal backup</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.6 Key Transparency (Merkle-Log Identity Binding)</strong></p>
|
||||
<ul>
|
||||
<li>Append-only Merkle log of (username, identity_key) bindings in the AS</li>
|
||||
<li>Clients receive inclusion proofs alongside key fetches</li>
|
||||
<li>Any client can independently audit the full identity history</li>
|
||||
<li>Lightweight subset of RFC 9162 adapted for identity keys</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.7 Dynamic Server Plugin System</strong></p>
|
||||
<ul>
|
||||
<li>Server loads <code>.so</code>/<code>.dylib</code> plugins at runtime via <code>--plugin-dir</code></li>
|
||||
<li>C-compatible <code>HookVTable</code> via <code>extern "C"</code> — plugins in any language</li>
|
||||
<li>6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered</li>
|
||||
<li>Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.8 PQ Noise Transport Layer</strong></p>
|
||||
<ul>
|
||||
<li>Hybrid <code>Noise_XX + ML-KEM-768</code> handshake for post-quantum transport security</li>
|
||||
<li>Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)</li>
|
||||
<li>Feature-gated <code>--features pq-noise</code>; classical Noise_XX default preserved</li>
|
||||
<li>May require extending or forking <code>snow</code> crate’s <code>CryptoResolver</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="summary-timeline"><a class="header" href="#summary-timeline">Summary Timeline</a></h2>
|
||||
<div class="table-wrapper">
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Phase</th><th>Focus</th><th>Estimated Effort</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><strong>1</strong></td><td>Production Hardening</td><td>1–2 days</td></tr>
|
||||
<tr><td><strong>2</strong></td><td>Test & CI Maturity</td><td>2–3 days</td></tr>
|
||||
<tr><td><strong>3</strong></td><td>Client SDKs (Go, Python, WASM, FFI, WebTransport)</td><td>5–8 days</td></tr>
|
||||
<tr><td><strong>4</strong></td><td>Trust & Security Infrastructure</td><td>2–4 days (excl. audit)</td></tr>
|
||||
<tr><td><strong>5</strong></td><td>Features & UX</td><td>5–7 days</td></tr>
|
||||
<tr><td><strong>6</strong></td><td>Scale & Operations</td><td>3–5 days</td></tr>
|
||||
<tr><td><strong>7</strong></td><td>Platform Expansion & Research</td><td>ongoing</td></tr>
|
||||
<tr><td><strong>8</strong></td><td>Freifunk / Community Mesh</td><td>ongoing</td></tr>
|
||||
<tr><td><strong>9</strong></td><td>Developer Experience & Community Growth</td><td>3–5 days</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<hr>
|
||||
<h2 id="related-documents"><a class="header" href="#related-documents">Related Documents</a></h2>
|
||||
<ul>
|
||||
<li><a href="docs/FUTURE-IMPROVEMENTS.html">Future Improvements</a> — consolidated improvement list</li>
|
||||
<li><a href="docs/PRODUCTION-READINESS-AUDIT.html">Production Readiness Audit</a> — specific blockers</li>
|
||||
<li><a href="docs/SECURITY-AUDIT.html">Security Audit</a> — findings and recommendations</li>
|
||||
<li><a href="docs/src/roadmap/milestones.html">Milestone Tracker</a> — M1–M7 status</li>
|
||||
<li><a href="docs/src/roadmap/authz-plan.html">Auth, Devices, and Tokens</a> — authorization design</li>
|
||||
<li><a href="docs/src/roadmap/dm-channels.html">DM Channel Design</a> — 1:1 channel spec</li>
|
||||
</ul>
|
||||
|
||||
</main>
|
||||
|
||||
<nav class="nav-wrapper" aria-label="Page navigation">
|
||||
<!-- Mobile navigation buttons -->
|
||||
<a rel="prev" href="roadmap/future-research.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="operations/monitoring.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<div style="clear: both"></div>
|
||||
</nav>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<nav class="nav-wide-wrapper" aria-label="Page navigation">
|
||||
<a rel="prev" href="roadmap/future-research.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="operations/monitoring.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
</nav>
|
||||
|
||||
</div>
|
||||
|
||||
<template id=fa-eye><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M288 32c-80.8 0-145.5 36.8-192.6 80.6C48.6 156 17.3 208 2.5 243.7c-3.3 7.9-3.3 16.7 0 24.6C17.3 304 48.6 356 95.4 399.4C142.5 443.2 207.2 480 288 480s145.5-36.8 192.6-80.6c46.8-43.5 78.1-95.4 93-131.1c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C433.5 68.8 368.8 32 288 32zM432 256c0 79.5-64.5 144-144 144s-144-64.5-144-144s64.5-144 144-144s144 64.5 144 144zM288 192c0 35.3-28.7 64-64 64c-11.5 0-22.3-3-31.6-8.4c-.2 2.8-.4 5.5-.4 8.4c0 53 43 96 96 96s96-43 96-96s-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6z"/></svg></span></template>
|
||||
<template id=fa-eye-slash><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M38.8 5.1C28.4-3.1 13.3-1.2 5.1 9.2S-1.2 34.7 9.2 42.9l592 464c10.4 8.2 25.5 6.3 33.7-4.1s6.3-25.5-4.1-33.7L525.6 386.7c39.6-40.6 66.4-86.1 79.9-118.4c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C465.5 68.8 400.8 32 320 32c-68.2 0-125 26.3-169.3 60.8L38.8 5.1zM223.1 149.5C248.6 126.2 282.7 112 320 112c79.5 0 144 64.5 144 144c0 24.9-6.3 48.3-17.4 68.7L408 294.5c5.2-11.8 8-24.8 8-38.5c0-53-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6c0 10.2-2.4 19.8-6.6 28.3l-90.3-70.8zm223.1 298L373 389.9c-16.4 6.5-34.3 10.1-53 10.1c-79.5 0-144-64.5-144-144c0-6.9 .5-13.6 1.4-20.2L83.1 161.5C60.3 191.2 44 220.8 34.5 243.7c-3.3 7.9-3.3 16.7 0 24.6c14.9 35.7 46.2 87.7 93 131.1C174.5 443.2 239.2 480 320 480c47.8 0 89.9-12.9 126.2-32.5z"/></svg></span></template>
|
||||
<template id=fa-copy><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M502.6 70.63l-61.25-61.25C435.4 3.371 427.2 0 418.7 0H255.1c-35.35 0-64 28.66-64 64l.0195 256C192 355.4 220.7 384 256 384h192c35.2 0 64-28.8 64-64V93.25C512 84.77 508.6 76.63 502.6 70.63zM464 320c0 8.836-7.164 16-16 16H255.1c-8.838 0-16-7.164-16-16L239.1 64.13c0-8.836 7.164-16 16-16h128L384 96c0 17.67 14.33 32 32 32h47.1V320zM272 448c0 8.836-7.164 16-16 16H63.1c-8.838 0-16-7.164-16-16L47.98 192.1c0-8.836 7.164-16 16-16H160V128H63.99c-35.35 0-64 28.65-64 64l.0098 256C.002 483.3 28.66 512 64 512h192c35.2 0 64-28.8 64-64v-32h-47.1L272 448z"/></svg></span></template>
|
||||
<template id=fa-play><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M73 39c-14.8-9.1-33.4-9.4-48.5-.9S0 62.6 0 80V432c0 17.4 9.4 33.4 24.5 41.9s33.7 8.1 48.5-.9L361 297c14.3-8.7 23-24.2 23-41s-8.7-32.2-23-41L73 39z"/></svg></span></template>
|
||||
<template id=fa-clock-rotate-left><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M75 75L41 41C25.9 25.9 0 36.6 0 57.9V168c0 13.3 10.7 24 24 24H134.1c21.4 0 32.1-25.9 17-41l-30.8-30.8C155 85.5 203 64 256 64c106 0 192 86 192 192s-86 192-192 192c-40.8 0-78.6-12.7-109.7-34.4c-14.5-10.1-34.4-6.6-44.6 7.9s-6.6 34.4 7.9 44.6C151.2 495 201.7 512 256 512c141.4 0 256-114.6 256-256S397.4 0 256 0C185.3 0 121.3 28.7 75 75zm181 53c-13.3 0-24 10.7-24 24V256c0 6.4 2.5 12.5 7 17l72 72c9.4 9.4 24.6 9.4 33.9 0s9.4-24.6 0-33.9l-65-65V152c0-13.3-10.7-24-24-24z"/></svg></span></template>
|
||||
|
||||
|
||||
|
||||
<script>
|
||||
window.playground_copyable = true;
|
||||
</script>
|
||||
|
||||
|
||||
<script src="elasticlunr-ef4e11c1.min.js"></script>
|
||||
<script src="mark-09e88c2c.min.js"></script>
|
||||
<script src="searcher-c2a407aa.js"></script>
|
||||
|
||||
<script src="clipboard-1626706a.min.js"></script>
|
||||
<script src="highlight-abc7f01d.js"></script>
|
||||
<script src="book-a0b12cfe.js"></script>
|
||||
|
||||
<!-- Custom JS scripts -->
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
493
ROADMAP.md
Normal file
493
ROADMAP.md
Normal file
@@ -0,0 +1,493 @@
|
||||
# Roadmap — quicprochat
|
||||
|
||||
> From proof-of-concept to production-grade E2E encrypted messaging.
|
||||
>
|
||||
> Each phase is designed to be tackled sequentially. Items within a phase
|
||||
> can be parallelised. Check the box when done.
|
||||
|
||||
---
|
||||
|
||||
## Phase 1 — Production Hardening (Critical)
|
||||
|
||||
Eliminate all crash paths, enforce secure defaults, fix deployment blockers.
|
||||
|
||||
- [x] **1.1 Remove `.unwrap()` / `.expect()` from production paths**
|
||||
- Replace `AUTH_CONTEXT.read().expect()` in client RPC with proper `Result`
|
||||
- Replace `"0.0.0.0:0".parse().unwrap()` in client with fallible parse
|
||||
- Replace `Mutex::lock().unwrap()` in server storage with `.map_err()`
|
||||
- Audit: `grep -rn 'unwrap()\|expect(' crates/` outside `#[cfg(test)]`
|
||||
|
||||
- [x] **1.2 Enforce secure defaults in production mode**
|
||||
- Reject startup if `QPC_PRODUCTION=true` and `auth_token` is empty or `"devtoken"`
|
||||
- Require non-empty `db_key` when using SQL backend in production
|
||||
- Refuse to auto-generate TLS certs in production mode (require existing cert+key)
|
||||
- Already partially implemented — verify and harden the validation in `config.rs`
|
||||
|
||||
- [x] **1.3 Fix `.gitignore`**
|
||||
- Add `data/`, `*.der`, `*.pem`, `*.db`, `*.bin` (state files), `*.ks` (keystores)
|
||||
- Verify no secrets are already tracked: `git ls-files data/ *.der *.db`
|
||||
|
||||
- [x] **1.4 Fix Dockerfile**
|
||||
- Sync workspace members (handle excluded `p2p` crate)
|
||||
- Create dedicated user/group instead of `nobody`
|
||||
- Set writable `QPC_DATA_DIR` with correct permissions
|
||||
- Test: `docker build . && docker run --rm -it qpc-server --help`
|
||||
|
||||
- [x] **1.5 TLS certificate lifecycle**
|
||||
- Document CA-signed cert setup (Let's Encrypt / custom CA)
|
||||
- Add `--tls-required` flag that refuses to start without valid cert
|
||||
- Log clear warning when using self-signed certs
|
||||
- Document certificate rotation procedure
|
||||
|
||||
---
|
||||
|
||||
## Phase 2 — Test & CI Maturity
|
||||
|
||||
Build confidence before adding features.
|
||||
|
||||
- [x] **2.1 Expand E2E test coverage**
|
||||
- Auth failure scenarios (wrong password, expired token, invalid token)
|
||||
- Message ordering verification (send N messages, verify seq numbers)
|
||||
- Concurrent clients (3+ members in group, simultaneous send/recv)
|
||||
- OPAQUE registration + login full flow
|
||||
- Queue full behavior (>1000 messages)
|
||||
- Rate limiting behavior (>100 enqueues/minute)
|
||||
- Reconnection after server restart
|
||||
- KeyPackage exhaustion (fetch when none available)
|
||||
|
||||
- [x] **2.2 Add unit tests for untested paths**
|
||||
- Client retry logic (exponential backoff, jitter, retriable classification)
|
||||
- REPL input parsing edge cases (empty input, special characters, `/` commands)
|
||||
- State file encryption/decryption round-trip with bad password
|
||||
- Token cache expiry
|
||||
- Conversation store migrations
|
||||
|
||||
- [x] **2.3 CI hardening**
|
||||
- Add `.github/CODEOWNERS` (crypto, auth, wire-format require 2 reviewers)
|
||||
- Ensure `cargo deny check` runs on every PR (already in CI — verify)
|
||||
- Add `cargo audit` as blocking check (already in CI — verify)
|
||||
- Add coverage reporting (tarpaulin or llvm-cov)
|
||||
- Add CI job for Docker build validation
|
||||
|
||||
- [x] **2.4 Clean up build warnings**
|
||||
- Fix Cap'n Proto generated `unused_parens` warnings
|
||||
- Remove dead code / unused imports
|
||||
- Address `openmls` future-incompat warnings
|
||||
- Target: `cargo clippy --workspace -- -D warnings` passes clean
|
||||
|
||||
---
|
||||
|
||||
## Phase 3 — Client SDKs: Native QUIC + Cap'n Proto Everywhere
|
||||
|
||||
**No REST gateway. No protocol dilution.** The `.capnp` schemas are the
|
||||
interface definition. Every SDK speaks native QUIC + Cap'n Proto. The
|
||||
project name stays honest.
|
||||
|
||||
### Why this matters
|
||||
|
||||
The name is **quic**n**proto**chat — the protocol IS the product. Instead
|
||||
of adding an HTTP translation layer that loses zero-copy performance and
|
||||
adds base64 overhead, we invest in making the native protocol accessible
|
||||
from every language that has QUIC + Cap'n Proto support, and provide
|
||||
WASM/FFI for the crypto layer.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Server: QUIC + Cap'n Proto (single protocol, no gateway)
|
||||
|
||||
Client SDKs:
|
||||
┌─── Rust quinn + capnp-rpc (existing, reference impl)
|
||||
├─── Go quic-go + go-capnp (native, high confidence)
|
||||
├─── Python aioquic + pycapnp (native QUIC, manual framing)
|
||||
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
|
||||
└─── Browser WebTransport + capnp (WASM) (QUIC transport, no HTTP needed)
|
||||
|
||||
Crypto layer (client-side MLS, shared across all SDKs):
|
||||
┌─── Rust crate (native, existing)
|
||||
├─── WASM module (browsers, Node.js, Deno)
|
||||
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
|
||||
```
|
||||
|
||||
### Language support reality check
|
||||
|
||||
| Language | QUIC | Cap'n Proto | RPC | Confidence |
|
||||
|----------|------|-------------|-----|------------|
|
||||
| **Rust** | quinn ✅ | capnp-rpc ✅ | Full ✅ | Existing |
|
||||
| **Go** | quic-go ✅ | go-capnp ✅ | Level 1 ✅ | High |
|
||||
| **Python** | aioquic ✅ | pycapnp ⚠️ | Manual framing | Medium |
|
||||
| **C/C++** | msquic/ngtcp2 ✅ | capnproto ✅ | Full ✅ | High |
|
||||
| **Browser** | WebTransport ✅ | WASM ✅ | Via WASM bridge | Medium |
|
||||
|
||||
### Implementation
|
||||
|
||||
- [x] **3.1 Go SDK (`quicprochat-go`)**
|
||||
- Generated Go types from `node.capnp` (6487-line codegen, all 24 RPC methods)
|
||||
- QUIC transport via `quic-go` with TLS 1.3 + ALPN `"capnp"`
|
||||
- High-level `qpc` package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth
|
||||
- Example CLI in `sdks/go/cmd/example/`
|
||||
|
||||
- [x] **3.2 Python SDK (`quicprochat-py`)**
|
||||
- QUIC transport: `aioquic` with custom Cap'n Proto stream handler
|
||||
- Cap'n Proto serialization: `pycapnp` for message types
|
||||
- Manual RPC framing: length-prefixed request/response over QUIC stream
|
||||
- Async/await API matching the Rust client patterns
|
||||
- Crypto: PyO3 bindings to `quicprochat-core` for MLS operations
|
||||
- Publish: PyPI `quicprochat`
|
||||
- Example: async bot client
|
||||
|
||||
- [x] **3.3 C FFI layer (`quicprochat-ffi`)**
|
||||
- `crates/quicprochat-ffi` with 7 extern "C" functions: connect, login, send, receive, disconnect, last_error, free_string
|
||||
- Builds as `libquicprochat_ffi.so` / `.dylib` / `.dll`
|
||||
- Python ctypes wrapper in `examples/python/qpc_client.py`
|
||||
|
||||
- [x] **3.4 WASM compilation of `quicprochat-core`**
|
||||
- `wasm-pack build` target producing 175 KB WASM bundle (LTO + opt-level=s)
|
||||
- 13 `wasm_bindgen` functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding
|
||||
- Browser-ready with `crypto.getRandomValues()` RNG
|
||||
- Published as `sdks/typescript/wasm-crypto/`
|
||||
|
||||
- [x] **3.5 WebTransport server endpoint**
|
||||
- Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)
|
||||
- Cap'n Proto RPC framed over WebTransport bidirectional streams
|
||||
- Same auth, same storage, same RPC handlers — just a different stream source
|
||||
- Browsers connect via `new WebTransport("https://server:7443")`
|
||||
- ALPN negotiation: `"h3"` for WebTransport, `"capnp"` for native QUIC
|
||||
- Configurable port: `--webtransport-listen 0.0.0.0:7443`
|
||||
- Feature-flagged: `--features webtransport`
|
||||
|
||||
- [x] **3.6 TypeScript/JavaScript SDK (`@quicprochat/client`)**
|
||||
- `QpcClient` class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount
|
||||
- WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad
|
||||
- WebSocket transport with request/response correlation and reconnection
|
||||
- Browser demo: interactive crypto playground + chat UI (`sdks/typescript/demo/index.html`)
|
||||
|
||||
- [x] **3.7 SDK documentation and schema publishing**
|
||||
- Publish `.capnp` schemas as the canonical API contract
|
||||
- Document the QUIC + Cap'n Proto connection pattern for each language
|
||||
- Provide a "build your own SDK" guide (QUIC stream → Cap'n Proto RPC bootstrap)
|
||||
- Reference implementation checklist: connect, auth, upload key, enqueue, fetch
|
||||
|
||||
---
|
||||
|
||||
## Phase 4 — Trust & Security Infrastructure
|
||||
|
||||
Address the security gaps required for real-world deployment.
|
||||
|
||||
- [ ] **4.1 Third-party cryptographic audit**
|
||||
- Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization
|
||||
- Firms: NCC Group, Trail of Bits, Cure53
|
||||
- Budget and timeline: typically 4–6 weeks, $50K–$150K
|
||||
- Publish report publicly (builds trust)
|
||||
|
||||
- [x] **4.2 Key Transparency / revocation**
|
||||
- Replace `BasicCredential` with X.509-based MLS credentials
|
||||
- Or: verifiable key directory (Merkle tree, auditable log)
|
||||
- Users can verify peer keys haven't been substituted (MITM detection)
|
||||
- Revocation mechanism for compromised keys
|
||||
|
||||
- [x] **4.3 Client authentication on Delivery Service**
|
||||
- DS sender identity binding with explicit audit logging
|
||||
- `sender_prefix` tracking in enqueue/batch_enqueue RPCs
|
||||
- Sender identity derived from authenticated session
|
||||
|
||||
- [x] **4.4 M7 — Post-quantum MLS integration**
|
||||
- Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider
|
||||
- Group key material gets post-quantum confidentiality
|
||||
- Full test suite with PQ ciphersuite
|
||||
- Ref: existing `hybrid_kem.rs` and `hybrid_crypto.rs`
|
||||
|
||||
- [x] **4.5 Username enumeration mitigation**
|
||||
- 5 ms timing floor on `resolveUser` responses
|
||||
- Rate limiting to prevent bulk enumeration attacks
|
||||
|
||||
---
|
||||
|
||||
## Phase 5 — Features & UX
|
||||
|
||||
Make it a product people want to use.
|
||||
|
||||
- [x] **5.1 Multi-device support**
|
||||
- Account → multiple devices, each with own Ed25519 key + MLS KeyPackages
|
||||
- Device graph management (add device, remove device, list devices)
|
||||
- Messages delivered to all devices of a user
|
||||
- `device_id` field already in Auth struct — wire it through
|
||||
|
||||
- [x] **5.2 Account recovery**
|
||||
- Recovery codes or backup key (encrypted, stored by user)
|
||||
- Option: server-assisted recovery with security questions (lower security)
|
||||
- MLS state re-establishment after device loss
|
||||
|
||||
- [x] **5.3 Full MLS lifecycle**
|
||||
- Member removal (Remove proposal → Commit → fan-out)
|
||||
- Credential update (Update proposal for key rotation)
|
||||
- Explicit proposal handling (queue proposals, batch commit)
|
||||
- Group metadata (name, description, avatar hash)
|
||||
|
||||
- [x] **5.4 Message editing and deletion**
|
||||
- `Edit` (0x06) and `Delete` (0x07) message types in `AppMessage`
|
||||
- `/edit <index> <text>` and `/delete <index>` REPL commands (own messages only)
|
||||
- Database update/removal on incoming edit/delete
|
||||
|
||||
- [x] **5.5 File and media transfer**
|
||||
- `uploadBlob` / `downloadBlob` RPCs with 256 KB chunked streaming
|
||||
- SHA-256 content-addressable storage with hash verification
|
||||
- `FileRef` (0x08) message type with blob_id, filename, file_size, mime_type
|
||||
- `/send-file <path>` and `/download <index>` REPL commands with progress bars
|
||||
- 50 MB max file size, automatic MIME detection via `mime_guess`
|
||||
|
||||
- [x] **5.6 Abuse prevention and moderation**
|
||||
- Block user (client-side, suppress display)
|
||||
- Report message (encrypted report to admin key)
|
||||
- Admin tools: ban user, delete account, audit log
|
||||
|
||||
- [x] **5.7 Offline message queue (client-side)**
|
||||
- Queue messages when disconnected, send on reconnect
|
||||
- Idempotent message IDs to prevent duplicates
|
||||
- Gap detection: compare local seq with server seq
|
||||
|
||||
---
|
||||
|
||||
## Phase 6 — Scale & Operations
|
||||
|
||||
Prepare for real traffic.
|
||||
|
||||
- [x] **6.1 Distributed rate limiting**
|
||||
- Current: in-memory per-process, lost on restart
|
||||
- Move to Redis or shared state for multi-node deployments
|
||||
- Sliding window with configurable thresholds
|
||||
|
||||
- [x] **6.2 Multi-node / horizontal scaling**
|
||||
- Stateless server design (already mostly there — state is in storage backend)
|
||||
- Shared PostgreSQL or CockroachDB backend (replace SQLite)
|
||||
- Message queue fan-out (Redis pub/sub or NATS for cross-node notification)
|
||||
- Load balancer health check via QUIC RPC `health()` or Prometheus `/metrics`
|
||||
|
||||
- [x] **6.3 Operational runbook**
|
||||
- Backup / restore procedures (SQLCipher, file backend)
|
||||
- Key rotation (auth token, TLS cert, DB encryption key)
|
||||
- Incident response playbook
|
||||
- Scaling guide (when to add nodes, resource sizing)
|
||||
- Monitoring dashboard templates (Grafana + Prometheus)
|
||||
|
||||
- [x] **6.4 Connection draining and graceful shutdown**
|
||||
- Stop accepting new connections on SIGTERM
|
||||
- Wait for in-flight RPCs (configurable timeout, default 30s)
|
||||
- Drain WebTransport sessions with close frame
|
||||
- Document expected behavior for load balancers (health → unhealthy first)
|
||||
|
||||
- [x] **6.5 Request-level timeouts**
|
||||
- Per-RPC timeout (prevent slow clients from holding resources)
|
||||
- Database query timeout
|
||||
- Overall request deadline propagation
|
||||
|
||||
- [x] **6.6 Observability enhancements**
|
||||
- Request correlation IDs (trace across RPC → storage)
|
||||
- Storage operation latency metrics
|
||||
- Per-endpoint latency histograms
|
||||
- Structured audit log to persistent storage (not just stdout)
|
||||
- OpenTelemetry integration
|
||||
|
||||
---
|
||||
|
||||
## Phase 7 — Platform Expansion & Research
|
||||
|
||||
Long-term vision for wide adoption.
|
||||
|
||||
- [x] **7.1 Mobile clients (iOS + Android)**
|
||||
- Use C FFI (Phase 3.3) for crypto + transport (single library)
|
||||
- Push notifications via APNs / FCM (server sends notification on enqueue)
|
||||
- Background QUIC connection for message polling
|
||||
- Biometric auth for local key storage (Keychain / Android Keystore)
|
||||
|
||||
- [x] **7.2 Web client (browser)**
|
||||
- Use WASM (Phase 3.4) for crypto
|
||||
- Use WebTransport (Phase 3.5) for native QUIC transport
|
||||
- Cap'n Proto via WASM bridge (Phase 3.6)
|
||||
- IndexedDB for local state persistence
|
||||
- Service Worker for background notifications
|
||||
- Progressive Web App (PWA) support
|
||||
|
||||
- [x] **7.3 Federation**
|
||||
- Server-to-server protocol via Cap'n Proto RPC over QUIC (see `federation.capnp`)
|
||||
- `relayEnqueue`, `proxyFetchKeyPackage`, `federationHealth` methods
|
||||
- Identity resolution across federated servers
|
||||
- MLS group spanning multiple servers
|
||||
- Trust model for federated deployments
|
||||
|
||||
- [x] **7.4 Sealed Sender**
|
||||
- Sender identity inside MLS ciphertext only (server can't see who sent)
|
||||
- `sealed_sender` module in quicprochat-core with seal/unseal API
|
||||
- WASM-accessible via `wasm_bindgen` for browser use
|
||||
|
||||
- [x] **7.5 Additional language SDKs**
|
||||
- Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)
|
||||
- Swift: Swift wrapper over C FFI + Network.framework QUIC
|
||||
- Ruby: FFI bindings via `quicprochat-ffi`
|
||||
- Evaluate demand-driven — only build SDKs people request
|
||||
|
||||
- [x] **7.6 P2P / NAT traversal**
|
||||
- Direct peer-to-peer via iroh (foundation exists in `quicprochat-p2p`)
|
||||
- Server as fallback relay only
|
||||
- Reduces latency and single-point-of-failure
|
||||
- Ref: `FUTURE-IMPROVEMENTS.md § 6.1`
|
||||
|
||||
- [x] **7.7 Traffic analysis resistance**
|
||||
- Padding messages to uniform size
|
||||
- Decoy traffic to mask timing patterns
|
||||
- Optional Tor/I2P routing for IP privacy
|
||||
- Ref: `FUTURE-IMPROVEMENTS.md § 5.4, 6.3`
|
||||
|
||||
---
|
||||
|
||||
## Phase 8 — Freifunk / Community Mesh Networking
|
||||
|
||||
Make qpc a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpc nodes form
|
||||
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
|
||||
functions without any central infrastructure or internet uplink.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Client A ─── mDNS discovery ──► nearby qpc node (LAN / mesh)
|
||||
│
|
||||
Cap'n Proto federation
|
||||
│
|
||||
remote qpc node (across mesh)
|
||||
```
|
||||
|
||||
- [x] **F0 — Re-include `quicprochat-p2p` in workspace; fix ALPN strings**
|
||||
- Moved `crates/quicprochat-p2p` from `exclude` back into `[workspace] members`
|
||||
- Fixed ALPN `b"quicnprotochat/p2p/1"` → `b"quicprochat/p2p/1"` (breaking wire change)
|
||||
- Fixed federation ALPN `b"qnpc-fed"` → `b"quicprochat/federation/1"`
|
||||
- Feature-gated behind `--features mesh` on client (keeps iroh out of default builds)
|
||||
|
||||
- [x] **F1 — Federation routing in message delivery**
|
||||
- `handle_enqueue` and `handle_batch_enqueue` call `federation::routing::resolve_destination()`
|
||||
- Recipients with a remote home server are relayed via `FederationClient::relay_enqueue()`
|
||||
- mTLS mutual authentication between nodes (both present client certs, validated against shared CA)
|
||||
- Config: `QPC_FEDERATION_LISTEN`, `QPC_LOCAL_DOMAIN`, `QPC_FEDERATION_CERT/KEY/CA`
|
||||
|
||||
- [x] **F2 — mDNS local peer discovery**
|
||||
- Server announces `_quicprochat._udp.local.` on startup via `mdns-sd`
|
||||
- Client: `MeshDiscovery::start()` browses for nearby nodes (feature-gated)
|
||||
- REPL commands: `/mesh peers` (scan + list), `/mesh server <host:port>` (note address)
|
||||
- Nodes announce: `ver=1`, `server=<host:port>`, `domain=<local_domain>` TXT records
|
||||
|
||||
- [x] **F3 — Self-sovereign mesh identity**
|
||||
- Ed25519 keypair-based identity independent of AS registration
|
||||
- JSON-persisted seed + known peers directory
|
||||
- Sign/verify operations for mesh authenticity (`crates/quicprochat-p2p/src/identity.rs`)
|
||||
|
||||
- [x] **F4 — Store-and-forward with TTL**
|
||||
- `MeshEnvelope` with TTL-based expiry, hop_count tracking, max_hops routing limit
|
||||
- SHA-256 deduplication ID prevents relay loops
|
||||
- Ed25519 signature verification on envelopes
|
||||
- `MeshStore` in-memory queue with per-recipient capacity limits and TTL-based GC
|
||||
|
||||
- [x] **F5 — Lightweight broadcast channels**
|
||||
- Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)
|
||||
- Topic-based pub/sub via `BroadcastChannel` and `BroadcastManager`
|
||||
- Subscribe/unsubscribe, create, publish API on `P2pNode`
|
||||
|
||||
- [x] **F6 — Extended `/mesh` REPL commands**
|
||||
- `/mesh send <peer_id> <msg>` — direct P2P message via iroh
|
||||
- `/mesh broadcast <topic> <msg>` — publish to broadcast channel
|
||||
- `/mesh subscribe <topic>` — join broadcast channel
|
||||
- `/mesh route` — show routing table
|
||||
- `/mesh identity` — show mesh identity info
|
||||
- `/mesh store` — show store-and-forward statistics
|
||||
|
||||
- [x] **F7 — OpenWrt cross-compilation guide**
|
||||
- Musl static builds: `x86_64-unknown-linux-musl`, `armv7-unknown-linux-musleabihf`, `mips-unknown-linux-musl`
|
||||
- Strip binary: `--release` + `strip` → target size < 5 MB for flash storage
|
||||
- `opkg` package manifest for OpenWrt feed
|
||||
- `procd` init script + `uci` config file for OpenWrt integration
|
||||
- CI job: cross-compile and size-check on every release tag
|
||||
|
||||
- [x] **F8 — Traffic analysis resistance for mesh**
|
||||
- Uniform message padding to nearest 256-byte boundary (hides message size)
|
||||
- Configurable decoy traffic rate (fake messages to mask send timing)
|
||||
- Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)
|
||||
- Ref: Phase 7.7 for server-side traffic analysis resistance
|
||||
|
||||
---
|
||||
|
||||
## Phase 9 — Developer Experience & Community Growth
|
||||
|
||||
Features designed to attract contributors, create demo/showcase potential,
|
||||
and lower the barrier to entry for non-crypto developers.
|
||||
|
||||
- [x] **9.1 Criterion Benchmark Suite (`qpc-bench`)**
|
||||
- Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake
|
||||
- CI publishes HTML benchmark reports as GitHub Actions artifacts
|
||||
- Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust
|
||||
|
||||
- [x] **9.2 Safety Numbers (key verification)**
|
||||
- 60-digit numeric code derived from two identity keys (Signal-style)
|
||||
- `/verify <username>` REPL command for out-of-band verification
|
||||
- Available in WASM via `compute_safety_number` binding
|
||||
|
||||
- [x] **9.3 Full-Screen TUI (Ratatui + Crossterm)**
|
||||
- `qpc tui` launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator
|
||||
- Feature-gated `--features tui` to keep ratatui/crossterm out of default builds
|
||||
- Existing REPL and CLI subcommands are unaffected
|
||||
|
||||
- [x] **9.4 Delivery Proof Canary Tokens**
|
||||
- Server signs `Ed25519(SHA-256(message_id || recipient || timestamp))` on enqueue
|
||||
- Sender stores proof locally — cryptographic evidence the server queued the message
|
||||
- Cap'n Proto schema gains optional `deliveryProof: Data` on enqueue response
|
||||
|
||||
- [x] **9.5 Verifiable Transcript Archive**
|
||||
- `GroupMember::export_transcript(path, password)` writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)
|
||||
- `qpc export verify` CLI command independently verifies chain integrity
|
||||
- Useful for legal discovery, audit, or personal backup
|
||||
|
||||
- [x] **9.6 Key Transparency (Merkle-Log Identity Binding)**
|
||||
- Append-only Merkle log of (username, identity_key) bindings in the AS
|
||||
- Clients receive inclusion proofs alongside key fetches
|
||||
- Any client can independently audit the full identity history
|
||||
- Lightweight subset of RFC 9162 adapted for identity keys
|
||||
|
||||
- [x] **9.7 Dynamic Server Plugin System**
|
||||
- Server loads `.so`/`.dylib` plugins at runtime via `--plugin-dir`
|
||||
- C-compatible `HookVTable` via `extern "C"` — plugins in any language
|
||||
- 6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered
|
||||
- Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)
|
||||
|
||||
- [x] **9.8 PQ Noise Transport Layer**
|
||||
- Hybrid `Noise_XX + ML-KEM-768` handshake for post-quantum transport security
|
||||
- Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)
|
||||
- Feature-gated `--features pq-noise`; classical Noise_XX default preserved
|
||||
- May require extending or forking `snow` crate's `CryptoResolver`
|
||||
|
||||
---
|
||||
|
||||
## Summary Timeline
|
||||
|
||||
| Phase | Focus | Estimated Effort |
|
||||
|-------|-------|-----------------|
|
||||
| **1** | Production Hardening | 1–2 days |
|
||||
| **2** | Test & CI Maturity | 2–3 days |
|
||||
| **3** | Client SDKs (Go, Python, WASM, FFI, WebTransport) | 5–8 days |
|
||||
| **4** | Trust & Security Infrastructure | 2–4 days (excl. audit) |
|
||||
| **5** | Features & UX | 5–7 days |
|
||||
| **6** | Scale & Operations | 3–5 days |
|
||||
| **7** | Platform Expansion & Research | ongoing |
|
||||
| **8** | Freifunk / Community Mesh | ongoing |
|
||||
| **9** | Developer Experience & Community Growth | 3–5 days |
|
||||
|
||||
---
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Future Improvements](docs/FUTURE-IMPROVEMENTS.md) — consolidated improvement list
|
||||
- [Production Readiness Audit](docs/PRODUCTION-READINESS-AUDIT.md) — specific blockers
|
||||
- [Security Audit](docs/SECURITY-AUDIT.md) — findings and recommendations
|
||||
- [Milestone Tracker](docs/src/roadmap/milestones.md) — M1–M7 status
|
||||
- [Auth, Devices, and Tokens](docs/src/roadmap/authz-plan.md) — authorization design
|
||||
- [DM Channel Design](docs/src/roadmap/dm-channels.md) — 1:1 channel spec
|
||||
29
SECURITY.md
Normal file
29
SECURITY.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Only the current `main` branch is supported with security updates.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
**Do not use public GitHub issues to report security vulnerabilities.**
|
||||
|
||||
Instead, email **security@quicprochat.org** with:
|
||||
|
||||
- A description of the vulnerability
|
||||
- Steps to reproduce or a proof of concept
|
||||
- The affected component(s) and potential impact
|
||||
|
||||
We will acknowledge your report within **48 hours** and work with you on a fix under a **90-day coordinated disclosure** timeline.
|
||||
|
||||
## What Qualifies
|
||||
|
||||
- Cryptographic implementation bugs (MLS, Noise, hybrid KEM, key derivation)
|
||||
- Authentication or authorization bypass
|
||||
- Key material leakage (memory, logs, network)
|
||||
- Protocol-level flaws (replay, downgrade, impersonation)
|
||||
- Any issue that compromises message confidentiality or integrity
|
||||
|
||||
## Credit
|
||||
|
||||
Reporters are credited in published security advisories unless they prefer to remain anonymous. Let us know your preference when you report.
|
||||
229
SPRINTS.md
Normal file
229
SPRINTS.md
Normal file
@@ -0,0 +1,229 @@
|
||||
# quicprochat — Sprint Plan
|
||||
|
||||
> 7 sprints synthesized from code audit, architecture analysis, and ecosystem research.
|
||||
> Each sprint is ~1 week. Sprints are ordered by priority and dependency.
|
||||
|
||||
---
|
||||
|
||||
## Sprint 1 — Bug Fixes & Code Quality (Quick Wins)
|
||||
|
||||
Fix all known bugs, clippy warnings, and dead code before building on top.
|
||||
|
||||
- [x] **1.1 Fix boolean logic bug in TUI**
|
||||
- `crates/quicprochat-client/src/client/v2_tui.rs:832` — remove `|| true`
|
||||
- Cursor positioning always executes regardless of input state
|
||||
|
||||
- [x] **1.2 Fix unwrap violations in P2P router**
|
||||
- `crates/quicprochat-p2p/src/routing.rs:416,419` — `.lock().unwrap()` on Mutex
|
||||
- Replace with `.expect("lock poisoned")` or proper error handling
|
||||
|
||||
- [x] **1.3 Remove placeholder assertion in WebTransport**
|
||||
- `crates/quicprochat-server/src/webtransport.rs:418` — `assert!(true);`
|
||||
|
||||
- [x] **1.4 Wire up unused metrics**
|
||||
- `record_storage_latency()` — instrument storage layer calls
|
||||
- `record_uptime_seconds()` — add periodic heartbeat task in server main loop
|
||||
|
||||
- [x] **1.5 Wire up or remove unused config fields**
|
||||
- `EffectiveConfig::webtransport_listen` — connect to WebTransport listener
|
||||
- `EffectiveConfig::rpc_timeout_secs` — apply as per-RPC deadline
|
||||
- `EffectiveConfig::storage_timeout_secs` — apply as DB query timeout
|
||||
|
||||
- [x] **1.6 Fix remaining clippy warnings**
|
||||
- Reduce function arity (2 functions with 8-9 args → use config/param structs)
|
||||
- Remove useless `format!()` call
|
||||
- Collapse nested conditionals
|
||||
- Rename `from_str` method to avoid `FromStr` trait confusion
|
||||
|
||||
---
|
||||
|
||||
## Sprint 2 — OpenMLS 0.5 → 0.8 Migration
|
||||
|
||||
**CRITICAL**: OpenMLS 0.7.2 and later include security patches. Staying on 0.5 is a risk; migrate to 0.8.
|
||||
|
||||
- [x] **2.1 Migrate StorageProvider trait**
|
||||
- Old `OpenMlsKeyStore` → new `StorageProvider` (most invasive change)
|
||||
- Rework `DiskKeyStore` integration (must keep bincode serialization)
|
||||
- Update all `group.rs` calls that interact with the key store
|
||||
|
||||
- [x] **2.2 Update MLS API calls**
|
||||
- `self_update()` / `propose_self_update()` — add `LeafNodeParameters` arg
|
||||
- `join_by_external_commit()` — add optional LeafNode params
|
||||
- `Sender::NewMember` → split into `NewMemberProposal` / `NewMemberCommit`
|
||||
|
||||
- [x] **2.3 Handle GREASE support**
|
||||
- New variants in `ProposalType`, `ExtensionType`, `CredentialType`
|
||||
- Update match arms to handle unknown/GREASE values
|
||||
|
||||
- [x] **2.4 Update AAD handling**
|
||||
- AAD no longer persisted — set before every API call generating `MlsMessageOut`
|
||||
|
||||
- [x] **2.5 Verify FIPS 203 alignment**
|
||||
- Confirm ML-KEM-768 parameters match final FIPS 203 (not draft)
|
||||
- Review hybrid KEM against RFC 9794 combination methods
|
||||
|
||||
- [x] **2.6 Full test suite pass**
|
||||
- All 301 tests must pass with OpenMLS 0.8
|
||||
- Run crypto benchmarks to check for performance regressions
|
||||
|
||||
---
|
||||
|
||||
## Sprint 3 — Client Resilience
|
||||
|
||||
Currently, network glitches cause the client to hang. This blocks v2 launch.
|
||||
|
||||
- [x] **3.1 Auto-reconnect with backoff**
|
||||
- Integrate existing `retry.rs` into `RpcClient::call()` path
|
||||
- Exponential backoff with jitter (already implemented, not wired)
|
||||
- Configurable max retries and backoff ceiling
|
||||
|
||||
- [x] **3.2 Push subscription recovery**
|
||||
- Detect broken push stream and re-subscribe automatically
|
||||
- Buffer missed events during reconnection window
|
||||
|
||||
- [x] **3.3 Heartbeat / keepalive**
|
||||
- Periodic QUIC ping in TUI and REPL modes
|
||||
- Detect dead connections before user notices
|
||||
|
||||
- [x] **3.4 SDK disconnect lifecycle**
|
||||
- Add `QpcClient::disconnect()` for clean shutdown
|
||||
- Proper state machine: Connected → Reconnecting → Disconnected
|
||||
|
||||
- [x] **3.5 Connection status UI**
|
||||
- TUI: show connection state in status bar (Connected / Reconnecting / Offline)
|
||||
- REPL: print status change notifications
|
||||
|
||||
---
|
||||
|
||||
## Sprint 4 — Server Hardening
|
||||
|
||||
Fix graceful shutdown and wire up timeouts for production readiness.
|
||||
|
||||
- [x] **4.1 In-flight RPC tracking**
|
||||
- Replace fixed 30s shutdown delay with actual in-flight RPC counter
|
||||
- Drain when counter reaches zero (with configurable max wait)
|
||||
|
||||
- [x] **4.2 Apply request-level timeouts**
|
||||
- Wire `rpc_timeout_secs` config into per-RPC deadline enforcement
|
||||
- Wire `storage_timeout_secs` into DB query timeouts
|
||||
- Cancel long-running operations cleanly
|
||||
|
||||
- [x] **4.3 Plugin shutdown hooks**
|
||||
- Add `on_shutdown` hook to `HookVTable`
|
||||
- Call plugin shutdown before server exits
|
||||
|
||||
- [x] **4.4 Federation drain during shutdown**
|
||||
- Stop accepting federation relay requests on SIGTERM
|
||||
- Wait for in-flight federation RPCs before exit
|
||||
|
||||
- [x] **4.5 Connection draining improvements**
|
||||
- Send QUIC CONNECTION_CLOSE with application reason
|
||||
- WebTransport: send close frame before dropping sessions
|
||||
|
||||
---
|
||||
|
||||
## Sprint 5 — Test Coverage & CI Hardening
|
||||
|
||||
Address the major test coverage gaps identified in the audit.
|
||||
|
||||
- [x] **5.1 RPC framing unit tests**
|
||||
- `crates/quicprochat-rpc/src/framing.rs` — encode/decode edge cases
|
||||
- Malformed frames, truncated input, max-size payloads
|
||||
- Fuzzing harness for frame parser
|
||||
|
||||
- [x] **5.2 SDK state machine tests**
|
||||
- `crates/quicprochat-sdk/src/conversation.rs` — conversation lifecycle
|
||||
- `crates/quicprochat-sdk/src/groups.rs` — group join/leave/update
|
||||
- `crates/quicprochat-sdk/src/messaging.rs` — send/receive/queue
|
||||
|
||||
- [x] **5.3 Server domain service tests**
|
||||
- `crates/quicprochat-server/src/domain/` — all service modules
|
||||
- Test business logic without DB (mock storage trait)
|
||||
|
||||
- [x] **5.4 Integration tests**
|
||||
- Reconnection scenario (kill server, restart, verify client recovers)
|
||||
- Graceful shutdown (send SIGTERM during active RPCs, verify drain)
|
||||
- Multi-node federation relay (if federation wired in Sprint 6)
|
||||
|
||||
- [x] **5.5 CI hardening**
|
||||
- Add MSRV check (Rust 1.75 or declared minimum)
|
||||
- Add cross-platform CI (macOS, Windows — at least build check)
|
||||
- Add cargo-fuzz for crypto and parsing code
|
||||
- Add MIRI for unsafe code in plugin-api/FFI
|
||||
|
||||
---
|
||||
|
||||
## Sprint 6 — Federation & P2P Integration
|
||||
|
||||
Wire up the scaffolded federation and P2P code into working features.
|
||||
|
||||
- [x] **6.1 Federation message routing**
|
||||
- Wire `federation::routing::resolve_destination()` into `handle_enqueue`
|
||||
- Route messages to remote home servers via `FederationClient::relay_enqueue()`
|
||||
- Resolve protocol mismatch (Cap'n Proto federation vs Protobuf main RPC)
|
||||
|
||||
- [x] **6.2 Federation identity resolution**
|
||||
- Cross-server user lookup (`user@remote-server`)
|
||||
- KeyPackage fetching across federated nodes
|
||||
|
||||
- [x] **6.3 P2P client integration**
|
||||
- Wire iroh P2P into client as transport option
|
||||
- Fallback logic: prefer P2P direct → fall back to server relay
|
||||
- mDNS discovery in client (already scaffolded, needs activation)
|
||||
|
||||
- [x] **6.4 Multipath QUIC evaluation**
|
||||
- Research draft-ietf-quic-multipath (likely RFC in 2026)
|
||||
- Prototype: use multiple paths for mesh relay resilience
|
||||
- Decision: adopt or defer based on quinn support
|
||||
|
||||
- [x] **6.5 Federation integration tests**
|
||||
- Two-server test: register on A, send to user on B, verify delivery
|
||||
- mTLS mutual auth verification
|
||||
- Partition tolerance (one node goes down, messages queue)
|
||||
|
||||
---
|
||||
|
||||
## Sprint 7 — Documentation, Polish & Future Prep
|
||||
|
||||
Final polish and forward-looking improvements.
|
||||
|
||||
- [x] **7.1 Crate-level documentation**
|
||||
- Add module-level docs to `quicprochat-plugin-api`, `quicprochat-rpc`, `quicprochat-sdk`
|
||||
- Doc comments for all public APIs in domain services
|
||||
|
||||
- [x] **7.2 Refactor high-arity functions** (none found — already clean)
|
||||
- Consolidate 8-9 parameter functions into config/param structs
|
||||
- Improve builder patterns where appropriate
|
||||
|
||||
- [ ] **7.3 Review RFC 9750 (MLS Architecture)** (deferred — requires manual review)
|
||||
- Verify quicprochat's AS/DS split aligns with RFC 9750 recommendations
|
||||
- Document any deviations and rationale
|
||||
|
||||
- [ ] **7.4 Desktop client evaluation** (deferred — requires Tauri prototype)
|
||||
- Prototype Tauri v2 desktop shell wrapping the TUI or a web UI
|
||||
- Evaluate effort to ship cross-platform desktop client
|
||||
|
||||
- [x] **7.5 Security pre-audit prep**
|
||||
- Document all crypto boundaries and trust assumptions
|
||||
- Create threat model document
|
||||
- Prepare scope document for external auditors (Roadmap item 4.1)
|
||||
- Budget: NCC Group / Trail of Bits / Cure53 ($50K–$150K, 4–6 weeks)
|
||||
|
||||
- [ ] **7.6 Repository rename** (requires GitHub admin action)
|
||||
- Rename GitHub repository from `quicproquo` → `quicprochat`
|
||||
- Update all GitHub URLs, CI badge links, go.mod import paths
|
||||
- Set up redirect from old repo name
|
||||
|
||||
---
|
||||
|
||||
## Sprint Summary
|
||||
|
||||
| Sprint | Focus | Risk | Key Deliverable |
|
||||
|--------|-------|------|----------------|
|
||||
| **1** | Bug fixes & code quality | Low | Zero clippy warnings, metrics wired |
|
||||
| **2** | OpenMLS 0.5 → 0.8 | High | Security patches applied, FIPS 203 verified |
|
||||
| **3** | Client resilience | Medium | Auto-reconnect, heartbeat, status UI |
|
||||
| **4** | Server hardening | Medium | Real graceful shutdown, timeouts enforced |
|
||||
| **5** | Test coverage & CI | Low | Unit tests for SDK/RPC/domain, fuzzing |
|
||||
| **6** | Federation & P2P | High | Working cross-server messaging, P2P fallback |
|
||||
| **7** | Docs, polish & audit prep | Low | Audit-ready, desktop prototype |
|
||||
26
assets/left.ansi
Normal file
26
assets/left.ansi
Normal file
@@ -0,0 +1,26 @@
|
||||
registering 'alice'...
|
||||
user 'alice' registered
|
||||
logging in as 'alice'...
|
||||
logged in, session cached
|
||||
[2midentity: c1e1f6df17eeb6..2816[0m
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[2m[[0m[1mno conversation[0m[2m][0m > /dm bob
|
||||
resolving bob...
|
||||
creating channel...
|
||||
fetching peer's key package...
|
||||
DM with @bob created. Start typing!
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mHey Bob, testing our E2E encrypted channel![0m
|
||||
[36m[1m[bob][0m Works great -- the server never sees plaintext?
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mRight. MLS forward secrecy + post-quantum KEM.[0m
|
||||
[36m[1m[bob][0m Impressive. How do I verify your identity?
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mRun /verify alice -- compare the safety number out-of-band.[0m
|
||||
[2m[[0m[1m@bob[0m[2m][0m > /group-info
|
||||
[2m Conversation: @bob[0m
|
||||
[2m Type: DM[0m
|
||||
[2m Members: 2[0m
|
||||
[2m alice (you), bob[0m
|
||||
[2m MLS epoch: 3[0m
|
||||
[2m[[0m[1m@bob[0m[2m][0m >
|
||||
BIN
assets/logo.png
Normal file
BIN
assets/logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.2 MiB |
24
assets/right.ansi
Normal file
24
assets/right.ansi
Normal file
@@ -0,0 +1,24 @@
|
||||
registering 'bob'...
|
||||
user 'bob' registered
|
||||
logging in as 'bob'...
|
||||
logged in, session cached
|
||||
[2midentity: a8c2f19f1b0806..c73f[0m
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[36m[1m[system][0m new conversation: @alice
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [36m[1m[alice][0m Hey Bob, testing our E2E encrypted channel!
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [32mWorks great -- the server never sees plaintext?[0m
|
||||
[36m[1m[alice][0m Right. MLS forward secrecy + post-quantum KEM.
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [32mImpressive. How do I verify your identity?[0m
|
||||
[36m[1m[alice][0m Run /verify alice -- compare the safety number out-of-band.
|
||||
[2m[[0m[1m@alice[0m[2m][0m > /verify alice
|
||||
[2m Safety number for @alice:[0m
|
||||
[2m 096482 731945 208376[0m
|
||||
[2m 571039 284617 950283[0m
|
||||
[2m[[0m[1m@alice[0m[2m][0m > /whoami
|
||||
[2m identity: a8c2f19f1b0806..c73f[0m
|
||||
[2m hybrid key: yes[0m
|
||||
[2m conversations: 1[0m
|
||||
[2m[[0m[1m@alice[0m[2m][0m >
|
||||
BIN
assets/screenshot.png
Normal file
BIN
assets/screenshot.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 67 KiB |
59
assets/screenshot.txt
Normal file
59
assets/screenshot.txt
Normal file
@@ -0,0 +1,59 @@
|
||||
=== Alice (left) ===
|
||||
❯ ./target/debug/qpq repl --username alice --password de
|
||||
opass1 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXG
|
||||
OrPY/server-cert.der --state /tmp/tmp.adbXGLOrPY/alice.b
|
||||
n
|
||||
registering 'alice'...
|
||||
user 'alice' registered
|
||||
logging in as 'alice'...
|
||||
logged in, session cached
|
||||
identity: c1e1f6df17eeb6f539d7fbea94129fa32fc02ca40e5c
|
||||
7a7c95cfc94161d5f628
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[no conversation] > /dm bob
|
||||
resolving bob...
|
||||
creating channel...
|
||||
fetching peer's key package...
|
||||
DM with @bob created. Start typing!
|
||||
[@bob] > ^LHey Bob, testing our E2E encrypted channel!
|
||||
[@bob] > Right. MLS forward secrecy + post-quantum KEM.
|
||||
[@bob] > /group-info
|
||||
Conversation: @bob
|
||||
Type: DM
|
||||
Members: 2
|
||||
alice (you), bob
|
||||
MLS epoch: 1
|
||||
[@bob] >
|
||||
|
||||
=== Bob (right) ===
|
||||
❯ ./target/debug/qpq repl --username bob --password demop
|
||||
ass2 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXGLOr
|
||||
PY/server-cert.der --state /tmp/tmp.adbXGLOrPY/bob.bin
|
||||
registering 'bob'...
|
||||
user 'bob' registered
|
||||
logging in as 'bob'...
|
||||
logged in, session cached
|
||||
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
|
||||
af5ff9c89c69750c73f
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[no conversation] > /list
|
||||
no conversations yet. Try /dm <username> or /create-gro
|
||||
up <name>
|
||||
[no conversation] > /switch @alice
|
||||
error: conversation not found: @alice
|
||||
[no conversation] > ^LWorks great -- the server never see
|
||||
s plaintext?
|
||||
error: no active conversation; use /dm or /create-group
|
||||
first
|
||||
[no conversation] > /whoami
|
||||
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
|
||||
af5ff9c89c69750c73f
|
||||
hybrid key: yes
|
||||
conversations: 0
|
||||
[no conversation] >
|
||||
@@ -1,56 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-client"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "CLI client for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
|
||||
# Serialisation + RPC
|
||||
capnp = { workspace = true }
|
||||
capnp-rpc = { workspace = true }
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE PAKE
|
||||
opaque-ke = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
anyhow = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Crypto — for fingerprint verification in fetch-key subcommand
|
||||
sha2 = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
quinn = { workspace = true }
|
||||
quinn-proto = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
|
||||
# Logging
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
# CLI
|
||||
clap = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
dashmap = { workspace = true }
|
||||
assert_cmd = "2"
|
||||
tempfile = "3"
|
||||
portpicker = "0.1"
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,372 +0,0 @@
|
||||
//! quicnprotochat CLI client.
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_invite, cmd_join, cmd_login, cmd_ping,
|
||||
cmd_recv, cmd_register, cmd_register_state, cmd_register_user, cmd_send, ClientAuth,
|
||||
init_auth,
|
||||
};
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(name = "quicnprotochat", about = "quicnprotochat CLI client", version)]
|
||||
struct Args {
|
||||
/// Path to the server's TLS certificate (self-signed by default).
|
||||
#[arg(
|
||||
long,
|
||||
global = true,
|
||||
default_value = "data/server-cert.der",
|
||||
env = "QUICNPROTOCHAT_CA_CERT"
|
||||
)]
|
||||
ca_cert: PathBuf,
|
||||
|
||||
/// Expected TLS server name (must match the certificate SAN).
|
||||
#[arg(
|
||||
long,
|
||||
global = true,
|
||||
default_value = "localhost",
|
||||
env = "QUICNPROTOCHAT_SERVER_NAME"
|
||||
)]
|
||||
server_name: String,
|
||||
|
||||
/// Bearer token or OPAQUE session token for authenticated requests.
|
||||
/// Not required for register-user and login commands.
|
||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_ACCESS_TOKEN", default_value = "")]
|
||||
access_token: String,
|
||||
|
||||
/// Optional device identifier (UUID bytes encoded as hex or raw string).
|
||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_DEVICE_ID")]
|
||||
device_id: Option<String>,
|
||||
|
||||
/// Password to encrypt/decrypt client state files (QPCE format).
|
||||
/// If set, state files are encrypted at rest with Argon2id + ChaCha20Poly1305.
|
||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_STATE_PASSWORD")]
|
||||
state_password: Option<String>,
|
||||
|
||||
#[command(subcommand)]
|
||||
command: Command,
|
||||
}
|
||||
|
||||
#[derive(Debug, Subcommand)]
|
||||
enum Command {
|
||||
/// Register a new user via OPAQUE (password never leaves the client).
|
||||
RegisterUser {
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Username for the new account.
|
||||
#[arg(long)]
|
||||
username: String,
|
||||
/// Password (will be used in OPAQUE PAKE; server never sees it).
|
||||
#[arg(long)]
|
||||
password: String,
|
||||
},
|
||||
|
||||
/// Log in via OPAQUE and receive a session token.
|
||||
Login {
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
#[arg(long)]
|
||||
username: String,
|
||||
#[arg(long)]
|
||||
password: String,
|
||||
},
|
||||
|
||||
/// Send a Ping to the server and print the round-trip time.
|
||||
Ping {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
|
||||
Register {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Fetch a peer's KeyPackage from the Authentication Service.
|
||||
FetchKey {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
|
||||
/// Target peer's Ed25519 identity public key (64 hex chars = 32 bytes).
|
||||
identity_key: String,
|
||||
},
|
||||
|
||||
/// Run a full Alice/Bob MLS round-trip against live AS and DS endpoints.
|
||||
DemoGroup {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Upload the persistent identity's KeyPackage to the AS (uses state file).
|
||||
RegisterState {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
|
||||
/// Authentication Service address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Create a persistent group and save state to disk.
|
||||
CreateGroup {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
|
||||
/// Group identifier (arbitrary bytes, typically a human-readable name).
|
||||
#[arg(long)]
|
||||
group_id: String,
|
||||
},
|
||||
|
||||
/// Invite a peer into the group and deliver a Welcome via DS.
|
||||
Invite {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Peer identity public key (64 hex chars = 32 bytes).
|
||||
#[arg(long)]
|
||||
peer_key: String,
|
||||
},
|
||||
|
||||
/// Join a group by fetching the Welcome from the DS.
|
||||
Join {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Send an application message via the DS.
|
||||
Send {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Recipient identity key (hex, 32 bytes -> 64 chars).
|
||||
#[arg(long)]
|
||||
peer_key: String,
|
||||
/// Plaintext message to send.
|
||||
#[arg(long)]
|
||||
msg: String,
|
||||
},
|
||||
|
||||
/// Receive and decrypt all pending messages from the DS.
|
||||
Recv {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
|
||||
/// Wait for up to this many milliseconds if no messages are queued.
|
||||
#[arg(long, default_value_t = 0)]
|
||||
wait_ms: u64,
|
||||
|
||||
/// Continuously long-poll for messages.
|
||||
#[arg(long)]
|
||||
stream: bool,
|
||||
},
|
||||
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialize auth context once for all RPCs (empty token OK for register-user/login).
|
||||
let auth_ctx = ClientAuth::from_parts(args.access_token.clone(), args.device_id.clone());
|
||||
init_auth(auth_ctx);
|
||||
|
||||
let state_pw = args.state_password.as_deref();
|
||||
|
||||
match args.command {
|
||||
Command::RegisterUser {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Login {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Ping { server } => cmd_ping(&server, &args.ca_cert, &args.server_name).await,
|
||||
Command::Register { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::FetchKey {
|
||||
server,
|
||||
identity_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_fetch_key(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&identity_key,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::DemoGroup { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_demo_group(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::RegisterState { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::CreateGroup {
|
||||
state,
|
||||
server,
|
||||
group_id,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_create_group(&state, &server, &group_id, state_pw))
|
||||
.await
|
||||
}
|
||||
Command::Invite {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&peer_key,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Join { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_join(&state, &server, &args.ca_cert, &args.server_name, state_pw))
|
||||
.await
|
||||
}
|
||||
Command::Send {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
msg,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&peer_key,
|
||||
&msg,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Recv {
|
||||
state,
|
||||
server,
|
||||
wait_ms,
|
||||
stream,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_recv(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
wait_ms,
|
||||
stream,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
use std::{path::PathBuf, process::Command, time::Duration};
|
||||
|
||||
use assert_cmd::cargo::cargo_bin;
|
||||
use portpicker::pick_unused_port;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::sleep;
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_ping, cmd_register_state, cmd_send, ClientAuth,
|
||||
connect_node, fetch_wait, init_auth,
|
||||
};
|
||||
use quicnprotochat_core::IdentityKeypair;
|
||||
|
||||
fn hex_encode(bytes: &[u8]) -> String {
|
||||
bytes.iter().map(|b| format!("{b:02x}")).collect()
|
||||
}
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct StoredStateCompat {
|
||||
identity_seed: [u8; 32],
|
||||
#[allow(dead_code)]
|
||||
group: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) -> anyhow::Result<()> {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
for _ in 0..30 {
|
||||
if local
|
||||
.run_until(cmd_ping(server, ca_cert, server_name))
|
||||
.await
|
||||
.is_ok()
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
}
|
||||
anyhow::bail!("server health never became ready")
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
// Spawn server binary.
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let mut child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
// Ensure we always terminate the child.
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let child_guard = ChildGuard(child);
|
||||
let _ = child_guard;
|
||||
|
||||
// Wait for server to be healthy and certs to be generated.
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
|
||||
// Set client auth context.
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
// LocalSet for capnp !Send operations.
|
||||
let local = tokio::task::LocalSet::new();
|
||||
|
||||
let alice_state = base.join("alice.bin");
|
||||
let bob_state = base.join("bob.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&alice_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&bob_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(
|
||||
&alice_state,
|
||||
&server,
|
||||
"test-group",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Load Bob identity key from persisted state to use as peer key.
|
||||
let bob_bytes = std::fs::read(&bob_state)?;
|
||||
let bob_state_compat: StoredStateCompat = bincode::deserialize(&bob_bytes)?;
|
||||
let bob_identity = IdentityKeypair::from_seed(bob_state_compat.identity_seed);
|
||||
let bob_pk_hex = hex_encode(&bob_identity.public_key_bytes());
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&alice_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&bob_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(
|
||||
&bob_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Send Alice -> Bob.
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&alice_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&bob_pk_hex,
|
||||
"hello bob",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Confirm Bob can fetch at least one payload.
|
||||
local
|
||||
.run_until(async {
|
||||
let client = connect_node(&server, &ca_cert, "localhost").await?;
|
||||
let payloads = fetch_wait(&client, &bob_identity.public_key_bytes(), 1000).await?;
|
||||
anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to Bob");
|
||||
Ok::<(), anyhow::Error>(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
# Crypto — classical
|
||||
x25519-dalek = { workspace = true }
|
||||
ed25519-dalek = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hkdf = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Crypto — post-quantum hybrid KEM (M7)
|
||||
ml-kem = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE password-authenticated key exchange
|
||||
opaque-ke = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
|
||||
# Crypto — MLS (M2)
|
||||
openmls = { workspace = true }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
openmls_traits = { workspace = true }
|
||||
tls_codec = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
# Serialisation
|
||||
capnp = { workspace = true }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
|
||||
# Async runtime
|
||||
tokio = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
thiserror = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true }
|
||||
@@ -1,21 +0,0 @@
|
||||
//! Error types for `quicnprotochat-core`.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors produced by core cryptographic and MLS operations.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum CoreError {
|
||||
/// Cap'n Proto serialisation or deserialisation failed.
|
||||
#[error("Cap'n Proto error: {0}")]
|
||||
Capnp(#[from] capnp::Error),
|
||||
|
||||
/// An MLS operation failed.
|
||||
///
|
||||
/// The inner string is the debug representation of the openmls error.
|
||||
#[error("MLS error: {0}")]
|
||||
Mls(String),
|
||||
|
||||
/// A hybrid KEM (X25519 + ML-KEM-768) operation failed.
|
||||
#[error("hybrid KEM error: {0}")]
|
||||
HybridKem(#[from] crate::hybrid_kem::HybridKemError),
|
||||
}
|
||||
@@ -1,456 +0,0 @@
|
||||
//! MLS group state machine.
|
||||
//!
|
||||
//! # Design
|
||||
//!
|
||||
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus the per-client
|
||||
//! [`StoreCrypto`] backend. The backend is **persistent** — it holds the
|
||||
//! in-memory key store that maps init-key references to HPKE private keys.
|
||||
//! openmls's `new_from_welcome` reads those private keys from the key store to
|
||||
//! decrypt the Welcome, so the same backend instance must be used from
|
||||
//! `generate_key_package` through `join_group`.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! All MLS messages are serialised/deserialised using TLS presentation language
|
||||
//! encoding (`tls_codec`). The resulting byte vectors are what the transport
|
||||
//! layer (and the Delivery Service) sees.
|
||||
//!
|
||||
//! # MLS ciphersuite
|
||||
//!
|
||||
//! `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` — same as M2.
|
||||
//!
|
||||
//! # Ratchet tree
|
||||
//!
|
||||
//! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded
|
||||
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
|
||||
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
||||
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
|
||||
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
|
||||
TlsSerializeTrait,
|
||||
};
|
||||
use openmls_traits::OpenMlsCryptoProvider;
|
||||
|
||||
use crate::{
|
||||
error::CoreError,
|
||||
identity::IdentityKeypair,
|
||||
keystore::{DiskKeyStore, StoreCrypto},
|
||||
};
|
||||
|
||||
// ── Constants ─────────────────────────────────────────────────────────────────
|
||||
|
||||
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
// ── GroupMember ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Per-client MLS state: identity keypair, crypto backend, and optional group.
|
||||
///
|
||||
/// # Lifecycle
|
||||
///
|
||||
/// ```text
|
||||
/// GroupMember::new(identity)
|
||||
/// ├─ generate_key_package() → upload to AS
|
||||
/// ├─ create_group(group_id) → become sole member
|
||||
/// │ └─ add_member(kp) → invite a peer; returns (commit, welcome)
|
||||
/// └─ join_group(welcome) → join after receiving a Welcome
|
||||
/// ├─ send_message(msg) → encrypt application data
|
||||
/// └─ receive_message(b) → decrypt; returns Some(plaintext) or None
|
||||
/// ```
|
||||
pub struct GroupMember {
|
||||
/// Persistent crypto backend. Holds the in-memory key store with HPKE
|
||||
/// private keys created during `generate_key_package`.
|
||||
backend: StoreCrypto,
|
||||
/// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
|
||||
identity: Arc<IdentityKeypair>,
|
||||
/// Active MLS group, if any.
|
||||
group: Option<MlsGroup>,
|
||||
/// Shared group configuration (wire format, ratchet tree extension, etc.).
|
||||
config: MlsGroupConfig,
|
||||
}
|
||||
|
||||
impl GroupMember {
|
||||
/// Create a new `GroupMember` with a fresh crypto backend.
|
||||
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
|
||||
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
|
||||
}
|
||||
|
||||
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
|
||||
pub fn new_with_state(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
key_store: DiskKeyStore,
|
||||
group: Option<MlsGroup>,
|
||||
) -> Self {
|
||||
let config = MlsGroupConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
Self {
|
||||
backend: StoreCrypto::new(key_store),
|
||||
identity,
|
||||
group,
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
// ── KeyPackage ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Generate a fresh single-use MLS KeyPackage.
|
||||
///
|
||||
/// The HPKE init private key is stored in `self.backend`'s key store.
|
||||
/// **The same `GroupMember` instance must later call `join_group`** so
|
||||
/// that `new_from_welcome` can retrieve the private key.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// TLS-encoded KeyPackage bytes, ready for upload to the Authentication
|
||||
/// Service.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage.
|
||||
pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
|
||||
let credential_with_key = self.make_credential_with_key()?;
|
||||
|
||||
let key_package = KeyPackage::builder()
|
||||
.build(
|
||||
CryptoConfig::with_default_version(CIPHERSUITE),
|
||||
&self.backend,
|
||||
self.identity.as_ref(),
|
||||
credential_with_key,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
key_package
|
||||
.tls_serialize_detached()
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))
|
||||
}
|
||||
|
||||
// ── Group creation ────────────────────────────────────────────────────────
|
||||
|
||||
/// Create a new MLS group with `group_id` as the group identifier.
|
||||
///
|
||||
/// The caller becomes the sole member (epoch 0). Use `add_member` to
|
||||
/// invite additional members.
|
||||
///
|
||||
/// `group_id` can be any non-empty byte string; SHA-256 of a human-readable
|
||||
/// name is a good choice.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the group already exists or openmls fails.
|
||||
pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
|
||||
let credential_with_key = self.make_credential_with_key()?;
|
||||
let mls_id = GroupId::from_slice(group_id);
|
||||
|
||||
let group = MlsGroup::new_with_group_id(
|
||||
&self.backend,
|
||||
self.identity.as_ref(),
|
||||
&self.config,
|
||||
mls_id,
|
||||
credential_with_key,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
self.group = Some(group);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Membership ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Add a new member by their TLS-encoded KeyPackage bytes.
|
||||
///
|
||||
/// Produces a Commit (to update existing members' state) and a Welcome
|
||||
/// (to bootstrap the new member). The caller is responsible for
|
||||
/// distributing these:
|
||||
///
|
||||
/// - Send `commit_bytes` to all **existing** group members via the DS.
|
||||
/// (In the 2-party case where the creator is the only member, this can
|
||||
/// be discarded — the creator applies it locally via this method.)
|
||||
/// - Send `welcome_bytes` to the **new** member via the DS.
|
||||
///
|
||||
/// This method also merges the pending Commit into the local group state
|
||||
/// (advancing the epoch), so the caller is immediately ready to encrypt.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `(commit_bytes, welcome_bytes)` — both TLS-encoded MLS messages.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the KeyPackage is malformed, no active
|
||||
/// group exists, or openmls fails.
|
||||
pub fn add_member(
    &mut self,
    key_package_bytes: &[u8],
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
    // Adding a member mutates the ratchet tree, so an active group is required.
    let group = self
        .group
        .as_mut()
        .ok_or_else(|| CoreError::Mls("no active group".into()))?;

    // Deserialise and validate the peer's KeyPackage. KeyPackage only derives
    // TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
    // which verifies the signature and returns a trusted KeyPackage.
    let key_package: KeyPackage =
        KeyPackageIn::tls_deserialize(&mut key_package_bytes.as_ref())
            .map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
            .validate(self.backend.crypto(), ProtocolVersion::Mls10)
            .map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;

    // Create the Commit + Welcome. The third return value (GroupInfo) is for
    // external commits and is not needed here.
    let (commit_out, welcome_out, _group_info) = group
        .add_members(&self.backend, self.identity.as_ref(), &[key_package])
        .map_err(|e| CoreError::Mls(format!("add_members: {e:?}")))?;

    // Merge the pending Commit into our own state, advancing the epoch.
    // This must happen before the caller encrypts any further messages.
    group
        .merge_pending_commit(&self.backend)
        .map_err(|e| CoreError::Mls(format!("merge_pending_commit: {e:?}")))?;

    // TLS-encode both outputs; the caller distributes them via the DS.
    let commit_bytes = commit_out
        .to_bytes()
        .map_err(|e| CoreError::Mls(format!("commit serialise: {e:?}")))?;
    let welcome_bytes = welcome_out
        .to_bytes()
        .map_err(|e| CoreError::Mls(format!("welcome serialise: {e:?}")))?;

    Ok((commit_bytes, welcome_bytes))
}
|
||||
|
||||
/// Join an existing MLS group from a TLS-encoded Welcome message.
|
||||
///
|
||||
/// The caller must have previously called [`generate_key_package`] on
|
||||
/// **this same instance** so that the HPKE init private key is in the
|
||||
/// backend's key store.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the Welcome does not match any known
|
||||
/// KeyPackage, or openmls validation fails.
|
||||
///
|
||||
/// [`generate_key_package`]: Self::generate_key_package
|
||||
pub fn join_group(&mut self, welcome_bytes: &[u8]) -> Result<(), CoreError> {
|
||||
// Deserialise MlsMessageIn, then extract the inner Welcome.
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
|
||||
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
|
||||
|
||||
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
|
||||
let welcome = match msg_in.extract() {
|
||||
MlsMessageInBody::Welcome(w) => w,
|
||||
_ => return Err(CoreError::Mls("expected a Welcome message".into())),
|
||||
};
|
||||
|
||||
// ratchet_tree = None because use_ratchet_tree_extension = true embeds
|
||||
// the tree inside the Welcome's GroupInfo extension.
|
||||
let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
|
||||
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
|
||||
|
||||
self.group = Some(group);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Application messages ──────────────────────────────────────────────────
|
||||
|
||||
/// Encrypt `plaintext` as an MLS Application message.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// TLS-encoded `MlsMessageOut` bytes (PrivateMessage variant).
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if there is no active group or encryption fails.
|
||||
pub fn send_message(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
let group = self
|
||||
.group
|
||||
.as_mut()
|
||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||
|
||||
let mls_msg: MlsMessageOut = group
|
||||
.create_message(&self.backend, self.identity.as_ref(), plaintext)
|
||||
.map_err(|e| CoreError::Mls(format!("create_message: {e:?}")))?;
|
||||
|
||||
mls_msg
|
||||
.to_bytes()
|
||||
.map_err(|e| CoreError::Mls(format!("message serialise: {e:?}")))
|
||||
}
|
||||
|
||||
/// Process an incoming TLS-encoded MLS message.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// - `Ok(Some(plaintext))` for Application messages.
|
||||
/// - `Ok(None)` for Commit messages (group state is updated internally).
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the message is malformed, fails
|
||||
/// authentication, or the group state is inconsistent.
|
||||
pub fn receive_message(&mut self, bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
    // Decryption and commit processing both require an active group.
    let group = self
        .group
        .as_mut()
        .ok_or_else(|| CoreError::Mls("no active group".into()))?;

    let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
        .map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;

    // into_protocol_message() is feature-gated; extract() + manual construction is not.
    let protocol_message = match msg_in.extract() {
        MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
        MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
        // Welcome / GroupInfo / KeyPackage bodies are not handled here.
        _ => return Err(CoreError::Mls("not a protocol message".into())),
    };

    // process_message authenticates the sender and decrypts the content.
    let processed = group
        .process_message(&self.backend, protocol_message)
        .map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;

    match processed.into_content() {
        ProcessedMessageContent::ApplicationMessage(app) => Ok(Some(app.into_bytes())),
        ProcessedMessageContent::StagedCommitMessage(staged) => {
            // Merge the Commit into the local state (epoch advances).
            group
                .merge_staged_commit(&self.backend, *staged)
                .map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
            Ok(None)
        }
        // Proposals are stored for a later Commit; nothing to return yet.
        ProcessedMessageContent::ProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
        ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
    }
}
|
||||
|
||||
// ── Accessors ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// Return the MLS group ID bytes, or `None` if no group is active.
|
||||
pub fn group_id(&self) -> Option<Vec<u8>> {
|
||||
self.group
|
||||
.as_ref()
|
||||
.map(|g| g.group_id().as_slice().to_vec())
|
||||
}
|
||||
|
||||
/// Return a reference to the identity keypair.
///
/// This is the long-term Ed25519 key used to sign MLS messages and
/// embedded in this member's `BasicCredential`.
pub fn identity(&self) -> &IdentityKeypair {
    &self.identity
}
|
||||
|
||||
/// Return the private seed of the identity (for persistence).
///
/// NOTE(review): this copies key material out of its zeroizing container;
/// callers are responsible for wiping the returned bytes after use.
pub fn identity_seed(&self) -> [u8; 32] {
    self.identity.seed_bytes()
}
|
||||
|
||||
/// Return a reference to the underlying crypto backend.
///
/// The backend also owns the key store that holds HPKE init keys, which is
/// why `join_group` must run on the same instance as `generate_key_package`.
pub fn backend(&self) -> &StoreCrypto {
    &self.backend
}
|
||||
|
||||
/// Return a reference to the MLS group, if active.
///
/// Intended for read-only inspection (epoch, members); mutation goes
/// through the `send_message`/`receive_message` methods.
pub fn group_ref(&self) -> Option<&MlsGroup> {
    self.group.as_ref()
}
|
||||
|
||||
/// Return the identity (credential) bytes of all current group members.
|
||||
///
|
||||
/// Each entry is the raw credential payload (Ed25519 public key bytes)
|
||||
/// extracted from the member's MLS leaf node.
|
||||
pub fn member_identities(&self) -> Vec<Vec<u8>> {
|
||||
let group = match self.group.as_ref() {
|
||||
Some(g) => g,
|
||||
None => return Vec::new(),
|
||||
};
|
||||
group
|
||||
.members()
|
||||
.map(|m| m.credential.identity().to_vec())
|
||||
.collect()
|
||||
}
|
||||
|
||||
// ── Private helpers ───────────────────────────────────────────────────────
|
||||
|
||||
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
|
||||
let credential = Credential::new(
|
||||
self.identity.public_key_bytes().to_vec(),
|
||||
CredentialType::Basic,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
Ok(CredentialWithKey {
|
||||
credential,
|
||||
signature_key: self.identity.public_key_bytes().to_vec().into(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ── Unit tests ────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Full two-party MLS round-trip: create group → add member → exchange messages.
    #[test]
    fn two_party_mls_round_trip() {
        // Two independent identities; each GroupMember has its own backend
        // and key store, so nothing is shared except the wire bytes.
        let alice_id = Arc::new(IdentityKeypair::generate());
        let bob_id = Arc::new(IdentityKeypair::generate());

        let mut alice = GroupMember::new(Arc::clone(&alice_id));
        let mut bob = GroupMember::new(Arc::clone(&bob_id));

        // Bob generates a KeyPackage (stored in bob's backend key store).
        let bob_kp = bob.generate_key_package().expect("Bob KeyPackage");

        // Alice creates the group.
        alice
            .create_group(b"test-group-m3")
            .expect("Alice create group");

        // Alice adds Bob → (commit, welcome).
        // Alice is the sole existing member, so she merges the commit herself
        // and the commit bytes can be discarded.
        let (_, welcome) = alice.add_member(&bob_kp).expect("Alice add Bob");

        // Bob joins via the Welcome. His backend holds the matching init key.
        bob.join_group(&welcome).expect("Bob join group");

        // Alice → Bob: application message.
        let ct_a = alice.send_message(b"hello bob").expect("Alice send");
        let pt_b = bob
            .receive_message(&ct_a)
            .expect("Bob recv")
            .expect("should be application message");
        assert_eq!(pt_b, b"hello bob");

        // Bob → Alice: reply. Succeeding both ways shows both parties ended
        // up in the same epoch with consistent group state.
        let ct_b = bob.send_message(b"hello alice").expect("Bob send");
        let pt_a = alice
            .receive_message(&ct_b)
            .expect("Alice recv")
            .expect("should be application message");
        assert_eq!(pt_a, b"hello alice");
    }

    /// `group_id()` returns None before create_group, Some afterwards.
    #[test]
    fn group_id_lifecycle() {
        let id = Arc::new(IdentityKeypair::generate());
        let mut member = GroupMember::new(id);

        assert!(member.group_id().is_none(), "no group before create");
        member.create_group(b"gid").unwrap();
        assert_eq!(
            member.group_id().unwrap(),
            b"gid".as_slice(),
            "group_id must match what was passed"
        );
    }
}
|
||||
@@ -1,135 +0,0 @@
|
||||
//! Ed25519 identity keypair for MLS credentials and AS registration.
|
||||
//!
|
||||
//! The [`IdentityKeypair`] is the long-term identity key embedded in MLS
|
||||
//! `BasicCredential`s. It is used for signing MLS messages and as the
|
||||
//! indexing key for the Authentication Service.
|
||||
//!
|
||||
//! # Zeroize
|
||||
//!
|
||||
//! The 32-byte private seed is stored as `Zeroizing<[u8; 32]>`, which zeroes
|
||||
//! the bytes on drop. `[u8; 32]` is `Copy + Default` and satisfies zeroize's
|
||||
//! `DefaultIsZeroes` constraint, avoiding a conflict with ed25519-dalek's
|
||||
//! `SigningKey` zeroize impl.
|
||||
//!
|
||||
//! # Fingerprint
|
||||
//!
|
||||
//! A 32-byte SHA-256 digest of the raw public key bytes is used as a compact,
|
||||
//! collision-resistant identifier for logging.
|
||||
|
||||
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
|
||||
use openmls_traits::signatures::Signer;
|
||||
use openmls_traits::types::{Error as MlsError, SignatureScheme};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
/// An Ed25519 identity keypair.
///
/// Created with [`IdentityKeypair::generate`]. The private signing key seed
/// is zeroed when this struct is dropped.
pub struct IdentityKeypair {
    /// Raw 32-byte private seed — zeroized on drop.
    ///
    /// Stored as bytes rather than `SigningKey` to satisfy zeroize's
    /// `DefaultIsZeroes` bound on `Zeroizing<T>`. The `SigningKey` is
    /// re-derived from the seed on every signing operation.
    seed: Zeroizing<[u8; 32]>,
    /// Corresponding 32-byte public verifying key (derived from the seed;
    /// cached here so accessors do not have to re-derive it).
    verifying: VerifyingKey,
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Recreate an identity keypair from a 32-byte seed.
|
||||
pub fn from_seed(seed: [u8; 32]) -> Self {
|
||||
let signing = SigningKey::from_bytes(&seed);
|
||||
let verifying = signing.verifying_key();
|
||||
Self {
|
||||
seed: Zeroizing::new(seed),
|
||||
verifying,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte private seed (for persistence).
|
||||
pub fn seed_bytes(&self) -> [u8; 32] {
|
||||
*self.seed
|
||||
}
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Generate a fresh random Ed25519 identity keypair.
|
||||
pub fn generate() -> Self {
|
||||
use rand::rngs::OsRng;
|
||||
let signing = SigningKey::generate(&mut OsRng);
|
||||
let verifying = signing.verifying_key();
|
||||
let seed = Zeroizing::new(signing.to_bytes());
|
||||
Self { seed, verifying }
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte Ed25519 public key.
|
||||
///
|
||||
/// This is the byte array used as `identityKey` in `auth.capnp` calls.
|
||||
pub fn public_key_bytes(&self) -> [u8; 32] {
|
||||
self.verifying.to_bytes()
|
||||
}
|
||||
|
||||
/// Return the SHA-256 fingerprint of the public key (32 bytes).
|
||||
pub fn fingerprint(&self) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(self.verifying.to_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Reconstruct the `SigningKey` from the stored seed bytes.
|
||||
fn signing_key(&self) -> SigningKey {
|
||||
SigningKey::from_bytes(&self.seed)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement the openmls `Signer` trait so `IdentityKeypair` can be passed
|
||||
/// directly to `KeyPackage::builder().build(...)` without needing the external
|
||||
/// `openmls_basic_credential` crate.
|
||||
impl Signer for IdentityKeypair {
|
||||
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, MlsError> {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
Ok(sig.to_bytes().to_vec())
|
||||
}
|
||||
|
||||
fn signature_scheme(&self) -> SignatureScheme {
|
||||
SignatureScheme::ED25519
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for IdentityKeypair {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
serializer.serialize_bytes(&self.seed[..])
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for IdentityKeypair {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let bytes: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
|
||||
let seed: [u8; 32] = bytes
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
|
||||
Ok(IdentityKeypair::from_seed(seed))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for IdentityKeypair {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let fp = self.fingerprint();
|
||||
f.debug_struct("IdentityKeypair")
|
||||
.field(
|
||||
"fingerprint",
|
||||
&format!("{:02x}{:02x}{:02x}{:02x}…", fp[0], fp[1], fp[2], fp[3]),
|
||||
)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
sync::RwLock,
|
||||
};
|
||||
|
||||
use openmls_rust_crypto::RustCrypto;
|
||||
use openmls_traits::{
|
||||
key_store::{MlsEntity, OpenMlsKeyStore},
|
||||
OpenMlsCryptoProvider,
|
||||
};
|
||||
|
||||
/// A disk-backed key store implementing `OpenMlsKeyStore`.
///
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on
/// every store/delete so HPKE init keys survive process restarts.
#[derive(Debug)]
pub struct DiskKeyStore {
    // Backing file; `None` means purely in-memory (ephemeral).
    path: Option<PathBuf>,
    // Raw key → serialised-entity bytes. RwLock: reads dominate, writes flush.
    values: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
}
|
||||
|
||||
/// Errors produced by [`DiskKeyStore`] operations.
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum DiskKeyStoreError {
    /// An entity or the on-disk map failed to (de)serialise.
    #[error("serialization error")]
    Serialization,
    /// Reading or writing the backing file failed; payload is the OS message.
    #[error("io error: {0}")]
    Io(String),
}
|
||||
|
||||
impl DiskKeyStore {
|
||||
/// In-memory keystore (no persistence).
|
||||
pub fn ephemeral() -> Self {
|
||||
Self {
|
||||
path: None,
|
||||
values: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Persistent keystore backed by `path`. Creates an empty store if missing.
|
||||
pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
|
||||
let path = path.as_ref().to_path_buf();
|
||||
let values = if path.exists() {
|
||||
let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
HashMap::new()
|
||||
} else {
|
||||
bincode::deserialize(&bytes).map_err(|_| DiskKeyStoreError::Serialization)?
|
||||
}
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
path: Some(path),
|
||||
values: RwLock::new(values),
|
||||
})
|
||||
}
|
||||
|
||||
fn flush(&self) -> Result<(), DiskKeyStoreError> {
|
||||
let Some(path) = &self.path else {
|
||||
return Ok(());
|
||||
};
|
||||
let values = self.values.read().unwrap();
|
||||
let bytes = bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DiskKeyStore {
    /// Defaults to an ephemeral (in-memory) store — no path, no persistence.
    fn default() -> Self {
        Self::ephemeral()
    }
}
|
||||
|
||||
impl OpenMlsKeyStore for DiskKeyStore {
|
||||
type Error = DiskKeyStoreError;
|
||||
|
||||
fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> {
|
||||
let value = serde_json::to_vec(v).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
let mut values = self.values.write().unwrap();
|
||||
values.insert(k.to_vec(), value);
|
||||
drop(values);
|
||||
self.flush()
|
||||
}
|
||||
|
||||
fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> {
|
||||
let values = self.values.read().unwrap();
|
||||
values
|
||||
.get(k)
|
||||
.and_then(|bytes| serde_json::from_slice(bytes).ok())
|
||||
}
|
||||
|
||||
fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut values = self.values.write().unwrap();
|
||||
values.remove(k);
|
||||
drop(values);
|
||||
self.flush()
|
||||
}
|
||||
}
|
||||
|
||||
/// Crypto provider that couples RustCrypto with a disk-backed key store.
///
/// `RustCrypto` serves both the crypto and randomness roles; only the key
/// store is swapped out relative to the stock openmls provider.
#[derive(Debug)]
pub struct StoreCrypto {
    // Crypto + RNG implementation (pure software, from openmls_rust_crypto).
    crypto: RustCrypto,
    // Where openmls persists HPKE init keys and other MLS entities.
    key_store: DiskKeyStore,
}
|
||||
|
||||
impl StoreCrypto {
|
||||
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: RustCrypto::default(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for StoreCrypto {
    /// Defaults to an ephemeral key store — suitable for tests and
    /// short-lived clients that do not need keys to survive a restart.
    fn default() -> Self {
        Self::new(DiskKeyStore::ephemeral())
    }
}
|
||||
|
||||
impl OpenMlsCryptoProvider for StoreCrypto {
    type CryptoProvider = RustCrypto;
    type RandProvider = RustCrypto;
    type KeyStoreProvider = DiskKeyStore;

    fn crypto(&self) -> &Self::CryptoProvider {
        &self.crypto
    }

    // The same RustCrypto instance doubles as the randomness source.
    fn rand(&self) -> &Self::RandProvider {
        &self.crypto
    }

    fn key_store(&self) -> &Self::KeyStoreProvider {
        &self.key_store
    }
}
|
||||
@@ -1,33 +0,0 @@
|
||||
//! Core cryptographic primitives, MLS group state machine, and hybrid
|
||||
//! post-quantum KEM for quicnprotochat.
|
||||
//!
|
||||
//! # Module layout
|
||||
//!
|
||||
//! | Module | Responsibility |
|
||||
//! |--------------|------------------------------------------------------------------|
|
||||
//! | `error` | [`CoreError`] type |
|
||||
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
|
||||
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
|
||||
//! | `group` | [`GroupMember`] — MLS group lifecycle (create/join/send/recv) |
|
||||
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
|
||||
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
|
||||
|
||||
mod error;
|
||||
mod group;
|
||||
pub mod hybrid_kem;
|
||||
mod identity;
|
||||
mod keypackage;
|
||||
mod keystore;
|
||||
pub mod opaque_auth;
|
||||
|
||||
// ── Public API ────────────────────────────────────────────────────────────────
|
||||
|
||||
pub use error::CoreError;
|
||||
pub use group::GroupMember;
|
||||
pub use hybrid_kem::{
|
||||
hybrid_decrypt, hybrid_encrypt, HybridKeypair, HybridKeypairBytes, HybridKemError,
|
||||
HybridPublicKey,
|
||||
};
|
||||
pub use identity::IdentityKeypair;
|
||||
pub use keypackage::generate_key_package;
|
||||
pub use keystore::DiskKeyStore;
|
||||
@@ -1,15 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-proto"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat. No crypto, no I/O."
|
||||
license = "MIT"
|
||||
|
||||
# build.rs invokes capnpc to generate Rust source from .capnp schemas.
|
||||
build = "build.rs"
|
||||
|
||||
[dependencies]
|
||||
capnp = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
capnpc = { workspace = true }
|
||||
@@ -1,54 +0,0 @@
|
||||
//! Build script for quicnprotochat-proto.
|
||||
//!
|
||||
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
|
||||
//! located in the workspace-root `schemas/` directory.
|
||||
//!
|
||||
//! # Prerequisites
|
||||
//!
|
||||
//! The `capnp` CLI must be installed and on `PATH`.
|
||||
//!
|
||||
//! Debian/Ubuntu: apt-get install capnproto
|
||||
//! macOS: brew install capnp
|
||||
//! Docker: see docker/Dockerfile
|
||||
|
||||
use std::{env, path::PathBuf};
|
||||
|
||||
fn main() {
|
||||
let manifest_dir =
|
||||
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
|
||||
|
||||
// Workspace root is two levels above this crate (quicnprotochat/crates/quicnprotochat-proto).
|
||||
let workspace_root = manifest_dir
|
||||
.join("../..")
|
||||
.canonicalize()
|
||||
.expect("could not canonicalize workspace root path");
|
||||
|
||||
let schemas_dir = workspace_root.join("schemas");
|
||||
|
||||
// Re-run this build script whenever any schema file changes.
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("auth.capnp").display()
|
||||
);
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("delivery.capnp").display()
|
||||
);
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("node.capnp").display()
|
||||
);
|
||||
|
||||
capnpc::CompilerCommand::new()
|
||||
// Treat `schemas/` as the include root so that inter-schema imports
|
||||
// resolve correctly.
|
||||
.src_prefix(&schemas_dir)
|
||||
.file(schemas_dir.join("auth.capnp"))
|
||||
.file(schemas_dir.join("delivery.capnp"))
|
||||
.file(schemas_dir.join("node.capnp"))
|
||||
.run()
|
||||
.expect(
|
||||
"Cap'n Proto schema compilation failed. \
|
||||
Is `capnp` installed? (apt-get install capnproto / brew install capnp)",
|
||||
);
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
|
||||
//!
|
||||
//! # Design constraints
|
||||
//!
|
||||
//! This crate is intentionally restricted:
|
||||
//! - **No crypto** — key material never enters this crate.
|
||||
//! - **No I/O** — callers own transport; this crate only converts bytes ↔ types.
|
||||
//! - **No async** — pure synchronous data-layer code.
|
||||
//!
|
||||
//! # Generated code
|
||||
//!
|
||||
//! `build.rs` invokes `capnpc` at compile time and writes generated Rust source
|
||||
//! into `$OUT_DIR`. The `include!` macros below splice that code in as a module.
|
||||
|
||||
// ── Generated types ───────────────────────────────────────────────────────────
|
||||
|
||||
/// Cap'n Proto generated types for `schemas/auth.capnp`.
|
||||
///
|
||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
||||
pub mod auth_capnp {
|
||||
include!(concat!(env!("OUT_DIR"), "/auth_capnp.rs"));
|
||||
}
|
||||
|
||||
/// Cap'n Proto generated types for `schemas/delivery.capnp`.
|
||||
///
|
||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
||||
pub mod delivery_capnp {
|
||||
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
|
||||
}
|
||||
|
||||
/// Cap'n Proto generated types for `schemas/node.capnp`.
|
||||
///
|
||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
||||
pub mod node_capnp {
|
||||
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
|
||||
}
|
||||
|
||||
// ── Low-level byte ↔ message conversions ──────────────────────────────────────
|
||||
|
||||
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
|
||||
///
|
||||
/// The output includes the segment table header. For transport, the
|
||||
/// `quicnprotochat-core` frame codec prepends a 4-byte little-endian length field.
|
||||
pub fn to_bytes<A: capnp::message::Allocator>(
|
||||
msg: &capnp::message::Builder<A>,
|
||||
) -> Result<Vec<u8>, capnp::Error> {
|
||||
let mut buf = Vec::new();
|
||||
capnp::serialize::write_message(&mut buf, msg)?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
/// Deserialise unpacked wire bytes into a message with owned segments.
|
||||
///
|
||||
/// Uses `ReaderOptions::new()` (default limits: 64 MiB, 512 nesting levels).
|
||||
/// Callers that receive data from untrusted peers should consider tightening
|
||||
/// the traversal limit via `ReaderOptions::traversal_limit_in_words`.
|
||||
pub fn from_bytes(
|
||||
bytes: &[u8],
|
||||
) -> Result<capnp::message::Reader<capnp::serialize::OwnedSegments>, capnp::Error> {
|
||||
let mut cursor = std::io::Cursor::new(bytes);
|
||||
capnp::serialize::read_message(&mut cursor, capnp::message::ReaderOptions::new())
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-server"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Delivery Service and Authentication Service for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat-server"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
|
||||
# Serialisation + RPC
|
||||
capnp = { workspace = true }
|
||||
capnp-rpc = { workspace = true }
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
|
||||
# Server utilities
|
||||
dashmap = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
quinn = { workspace = true }
|
||||
quinn-proto = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
rcgen = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE PAKE
|
||||
opaque-ke = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
subtle = { workspace = true }
|
||||
|
||||
# Database
|
||||
rusqlite = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
anyhow = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
|
||||
# CLI
|
||||
clap = { workspace = true }
|
||||
toml = { version = "0.8" }
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,521 +0,0 @@
|
||||
//! SQLCipher-backed persistent storage.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use rusqlite::{params, Connection};
|
||||
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
/// SQLCipher-encrypted storage backend.
///
/// NOTE(review): all access is serialised through a single mutex-guarded
/// connection; each `Store` method holds the lock for its full operation.
/// Revisit (connection pool) if contention becomes visible.
pub struct SqlStore {
    // The single shared SQLite connection.
    conn: Mutex<Connection>,
}
|
||||
|
||||
impl SqlStore {
    /// Open (or create) the database at `path`, unlock it with `key`, and
    /// run migrations.
    ///
    /// An empty `key` skips the `PRAGMA key` step (plain, unencrypted SQLite).
    /// The key pragma must be issued before any other statement touches the
    /// database, which is why it precedes the journal-mode pragmas.
    pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
        let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;

        if !key.is_empty() {
            conn.pragma_update(None, "key", key)
                .map_err(|e| StorageError::Db(format!("PRAGMA key failed: {e}")))?;
        }

        // WAL + NORMAL: better concurrent-read behaviour than the defaults;
        // foreign_keys must be enabled per-connection in SQLite.
        conn.execute_batch(
            "PRAGMA journal_mode = WAL;
             PRAGMA synchronous = NORMAL;
             PRAGMA foreign_keys = ON;",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;

        let store = Self {
            conn: Mutex::new(conn),
        };
        store.migrate()?;
        Ok(store)
    }

    /// Create all tables and indexes if they do not exist yet.
    ///
    /// Idempotent (`IF NOT EXISTS` throughout), so it is safe to run on every
    /// open.
    fn migrate(&self) -> Result<(), StorageError> {
        let conn = self.conn.lock().unwrap();
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS key_packages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                identity_key BLOB NOT NULL,
                package_data BLOB NOT NULL,
                created_at INTEGER DEFAULT (strftime('%s','now'))
            );

            CREATE TABLE IF NOT EXISTS deliveries (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                recipient_key BLOB NOT NULL,
                channel_id BLOB NOT NULL DEFAULT X'',
                payload BLOB NOT NULL,
                created_at INTEGER DEFAULT (strftime('%s','now'))
            );

            CREATE TABLE IF NOT EXISTS hybrid_keys (
                identity_key BLOB PRIMARY KEY,
                hybrid_public_key BLOB NOT NULL
            );

            CREATE INDEX IF NOT EXISTS idx_kp_identity
                ON key_packages(identity_key);

            CREATE INDEX IF NOT EXISTS idx_del_recipient_channel
                ON deliveries(recipient_key, channel_id);

            CREATE TABLE IF NOT EXISTS server_setup (
                id INTEGER PRIMARY KEY CHECK (id = 1),
                setup_data BLOB NOT NULL
            );

            CREATE TABLE IF NOT EXISTS users (
                username TEXT PRIMARY KEY,
                opaque_record BLOB NOT NULL,
                created_at INTEGER DEFAULT (strftime('%s','now'))
            );

            CREATE TABLE IF NOT EXISTS user_identity_keys (
                username TEXT PRIMARY KEY,
                identity_key BLOB NOT NULL
            );

            CREATE TABLE IF NOT EXISTS endpoints (
                identity_key BLOB PRIMARY KEY,
                node_addr BLOB NOT NULL,
                updated_at INTEGER DEFAULT (strftime('%s','now'))
            );",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
        Ok(())
    }
}
|
||||
|
||||
impl Store for SqlStore {
|
||||
fn upload_key_package(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
|
||||
params![identity_key, package],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, package_data FROM key_packages
|
||||
WHERE identity_key = ?1
|
||||
ORDER BY id ASC
|
||||
LIMIT 1",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let row = stmt
|
||||
.query_row(params![identity_key], |row| {
|
||||
Ok((row.get::<_, i64>(0)?, row.get::<_, Vec<u8>>(1)?))
|
||||
})
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
match row {
|
||||
Some((id, package)) => {
|
||||
conn.execute("DELETE FROM key_packages WHERE id = ?1", params![id])
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(Some(package))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn enqueue(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT INTO deliveries (recipient_key, channel_id, payload) VALUES (?1, ?2, ?3)",
|
||||
params![recipient_key, channel_id, payload],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY id ASC",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
||||
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY id ASC
|
||||
LIMIT ?3",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id, limit as i64], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
||||
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
||||
}
|
||||
|
||||
fn queue_depth(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<usize, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let count: i64 = conn
|
||||
.query_row(
|
||||
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
|
||||
params![recipient_key, channel_id],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(count as usize)
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let cutoff = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
.saturating_sub(max_age_secs);
|
||||
let deleted = conn
|
||||
.execute(
|
||||
"DELETE FROM deliveries WHERE created_at < ?1",
|
||||
params![cutoff as i64],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(deleted)
|
||||
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
|
||||
params![identity_key, hybrid_pk],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![identity_key], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
|
||||
params![setup],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT setup_data FROM server_setup WHERE id = 1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row([], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
|
||||
params![username, record],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT opaque_record FROM users WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![username], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let exists: bool = conn
|
||||
.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
|
||||
params![username],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(exists)
|
||||
}
|
||||
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
|
||||
params![username, identity_key],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![username], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
|
||||
params![identity_key, node_addr],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![identity_key], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience extension for `rusqlite::OptionalExtension`.
|
||||
trait OptionalExt<T> {
|
||||
fn optional(self) -> Result<Option<T>, rusqlite::Error>;
|
||||
}
|
||||
|
||||
impl<T> OptionalExt<T> for Result<T, rusqlite::Error> {
|
||||
fn optional(self) -> Result<Option<T>, rusqlite::Error> {
|
||||
match self {
|
||||
Ok(v) => Ok(Some(v)),
|
||||
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a throwaway in-memory store (empty passphrase).
    fn open_in_memory() -> SqlStore {
        SqlStore::open(":memory:", "").unwrap()
    }

    #[test]
    fn key_package_fifo() {
        let store = open_in_memory();
        let mut identity = [0u8; 32];
        identity[..31].copy_from_slice(b"alice_identity_key__32bytes_lon");

        store.upload_key_package(&identity, b"kp1".to_vec()).unwrap();
        store.upload_key_package(&identity, b"kp2".to_vec()).unwrap();

        // Oldest package first, and each package is handed out exactly once.
        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp1".to_vec())
        );
        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp2".to_vec())
        );
        assert_eq!(store.fetch_key_package(&identity).unwrap(), None);
    }

    #[test]
    fn delivery_round_trip() {
        let store = open_in_memory();
        let recipient = [1u8; 32];
        let channel = b"channel-1";

        store.enqueue(&recipient, channel, b"msg1".to_vec()).unwrap();
        store.enqueue(&recipient, channel, b"msg2".to_vec()).unwrap();

        // First fetch drains everything in insertion order…
        assert_eq!(
            store.fetch(&recipient, channel).unwrap(),
            vec![b"msg1".to_vec(), b"msg2".to_vec()]
        );
        // …and a second fetch finds nothing left.
        assert!(store.fetch(&recipient, channel).unwrap().is_empty());
    }

    #[test]
    fn fetch_limited_partial_drain() {
        let store = open_in_memory();
        let recipient = [5u8; 32];
        let channel = b"ch";

        for payload in [b"a".to_vec(), b"b".to_vec(), b"c".to_vec()] {
            store.enqueue(&recipient, channel, payload).unwrap();
        }

        // A limited fetch takes only the two oldest messages…
        assert_eq!(
            store.fetch_limited(&recipient, channel, 2).unwrap(),
            vec![b"a".to_vec(), b"b".to_vec()]
        );
        // …leaving the remainder queued.
        assert_eq!(
            store.fetch(&recipient, channel).unwrap(),
            vec![b"c".to_vec()]
        );
    }

    #[test]
    fn queue_depth_count() {
        let store = open_in_memory();
        let recipient = [6u8; 32];
        let channel = b"ch";

        assert_eq!(store.queue_depth(&recipient, channel).unwrap(), 0);
        store.enqueue(&recipient, channel, b"x".to_vec()).unwrap();
        store.enqueue(&recipient, channel, b"y".to_vec()).unwrap();
        assert_eq!(store.queue_depth(&recipient, channel).unwrap(), 2);
    }

    #[test]
    fn has_user_record_check() {
        let store = open_in_memory();

        assert!(!store.has_user_record("alice").unwrap());
        store.store_user_record("alice", b"record".to_vec()).unwrap();
        assert!(store.has_user_record("alice").unwrap());
        // Unrelated usernames stay absent.
        assert!(!store.has_user_record("bob").unwrap());
    }

    #[test]
    fn user_identity_key_round_trip() {
        let store = open_in_memory();

        assert!(store.get_user_identity_key("alice").unwrap().is_none());
        store.store_user_identity_key("alice", vec![1u8; 32]).unwrap();
        assert_eq!(
            store.get_user_identity_key("alice").unwrap(),
            Some(vec![1u8; 32])
        );
    }

    #[test]
    fn hybrid_key_round_trip() {
        let store = open_in_memory();
        let identity = [2u8; 32];
        let hybrid_pk = b"hybrid_public_key_data".to_vec();

        store.upload_hybrid_key(&identity, hybrid_pk.clone()).unwrap();
        assert_eq!(store.fetch_hybrid_key(&identity).unwrap(), Some(hybrid_pk));
    }

    #[test]
    fn separate_channels_isolated() {
        let store = open_in_memory();
        let recipient = [4u8; 32];

        store.enqueue(&recipient, b"ch-a", b"a1".to_vec()).unwrap();
        store.enqueue(&recipient, b"ch-b", b"b1".to_vec()).unwrap();

        // Each channel only sees its own traffic.
        assert_eq!(
            store.fetch(&recipient, b"ch-a").unwrap(),
            vec![b"a1".to_vec()]
        );
        assert_eq!(
            store.fetch(&recipient, b"ch-b").unwrap(),
            vec![b"b1".to_vec()]
        );
    }
}
|
||||
@@ -1,484 +0,0 @@
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
fs,
|
||||
hash::Hash,
|
||||
path::{Path, PathBuf},
|
||||
sync::Mutex,
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Errors surfaced by any [`Store`] backend.
#[derive(thiserror::Error, Debug)]
pub enum StorageError {
    /// Filesystem failure (read/write/create-dir); original message kept as text.
    #[error("io error: {0}")]
    Io(String),
    /// (De)serialization failure; the underlying cause is intentionally dropped
    /// (see the `map_err(|_| StorageError::Serde)` call sites).
    #[error("serialization error")]
    Serde,
    /// SQL backend failure; original message kept as text.
    #[error("database error: {0}")]
    Db(String),
}
|
||||
|
||||
// ── Store trait ──────────────────────────────────────────────────────────────

/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
pub trait Store: Send + Sync {
    /// Queue a KeyPackage for `identity_key`; consumed FIFO by
    /// [`Store::fetch_key_package`].
    fn upload_key_package(
        &self,
        identity_key: &[u8],
        package: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Pop the oldest queued KeyPackage for `identity_key`, if any.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Append `payload` to the delivery queue for (recipient, channel).
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Drain and return all queued messages for (recipient, channel), oldest first.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<Vec<u8>>, StorageError>;

    /// Fetch up to `limit` messages without draining the entire queue (Fix 8).
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<Vec<u8>>, StorageError>;

    /// Return the number of queued messages for (recipient, channel) (Fix 7).
    fn queue_depth(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<usize, StorageError>;

    /// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
    fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;

    /// Store (or replace) the hybrid public key published for `identity_key`.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Look up the hybrid public key for `identity_key`, if one was uploaded.
    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store the OPAQUE `ServerSetup` (generated once, loaded on restart).
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError>;

    /// Load the persisted `ServerSetup`, if any.
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store an OPAQUE user record (serialized `ServerRegistration`).
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError>;

    /// Retrieve an OPAQUE user record by username.
    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Check if a user record already exists (Fix 5).
    fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;

    /// Store identity key for a user (Fix 2).
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Retrieve identity key for a user (Fix 2).
    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Publish a P2P endpoint address for an identity key.
    fn publish_endpoint(
        &self,
        identity_key: &[u8],
        node_addr: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Resolve a peer's P2P endpoint address.
    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
}
|
||||
|
||||
// ── ChannelKey ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug)]
|
||||
pub struct ChannelKey {
|
||||
pub channel_id: Vec<u8>,
|
||||
pub recipient_key: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Hash for ChannelKey {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
self.channel_id.hash(state);
|
||||
self.recipient_key.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
// ── FileBackedStore ──────────────────────────────────────────────────────────

/// On-disk queue format v1: delivery queues keyed by recipient key only
/// (no channel dimension). Kept for upgrade of pre-existing files.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV1 {
    map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}

/// On-disk queue format v2: delivery queues keyed by (channel, recipient)
/// via [`ChannelKey`]. This is the current write format.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV2 {
    map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
}
|
||||
|
||||
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
pub struct FileBackedStore {
    // Paths to each persisted file inside the storage directory.
    kp_path: PathBuf,
    ds_path: PathBuf,
    hk_path: PathBuf,
    setup_path: PathBuf,
    users_path: PathBuf,
    identity_keys_path: PathBuf,
    // In-memory state; each map is guarded independently and rewritten to its
    // file wholesale on mutation.
    key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
    deliveries: Mutex<HashMap<ChannelKey, VecDeque<Vec<u8>>>>,
    hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    users: Mutex<HashMap<String, Vec<u8>>>,
    identity_keys: Mutex<HashMap<String, Vec<u8>>>,
    // Endpoints are memory-only: `open` starts them empty and no flush path
    // exists for them (see `publish_endpoint`).
    endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}
|
||||
|
||||
impl FileBackedStore {
    /// Open (or create) the storage directory and load every persisted map.
    ///
    /// `endpoints` is not loaded from disk — it starts empty and lives only
    /// in memory for the process lifetime.
    pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
        let dir = dir.as_ref();
        if !dir.exists() {
            fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        let kp_path = dir.join("keypackages.bin");
        let ds_path = dir.join("deliveries.bin");
        let hk_path = dir.join("hybridkeys.bin");
        let setup_path = dir.join("server_setup.bin");
        let users_path = dir.join("users.bin");
        let identity_keys_path = dir.join("identity_keys.bin");

        let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
        let deliveries = Mutex::new(Self::load_delivery_map(&ds_path)?);
        let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
        let users = Mutex::new(Self::load_users(&users_path)?);
        let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);

        Ok(Self {
            kp_path,
            ds_path,
            hk_path,
            setup_path,
            users_path,
            identity_keys_path,
            key_packages,
            deliveries,
            hybrid_keys,
            users,
            identity_keys,
            endpoints: Mutex::new(HashMap::new()),
        })
    }

    /// Load the KeyPackage map (v1 format). Missing or empty file → empty map.
    fn load_kp_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        let map: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
        Ok(map.map)
    }

    /// Rewrite the whole KeyPackage map to disk (clones the map to wrap it in
    /// the serializable `QueueMapV1` envelope).
    fn flush_kp_map(
        &self,
        path: &Path,
        map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
    ) -> Result<(), StorageError> {
        let payload = QueueMapV1 { map: map.clone() };
        let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
    }

    /// Load the delivery map, upgrading legacy v1 files to the channel-aware
    /// v2 layout on the fly.
    fn load_delivery_map(path: &Path) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        // Try v2 format (channel-aware). Fallback to legacy v1 for upgrade.
        if let Ok(map) = bincode::deserialize::<QueueMapV2>(&bytes) {
            return Ok(map.map);
        }
        let legacy: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
        let mut upgraded = HashMap::new();
        for (recipient_key, queue) in legacy.map.into_iter() {
            // Legacy queues had no channel; they land under the empty channel id.
            upgraded.insert(
                ChannelKey {
                    channel_id: Vec::new(),
                    recipient_key,
                },
                queue,
            );
        }
        Ok(upgraded)
    }

    /// Rewrite the whole delivery map to disk in v2 format.
    fn flush_delivery_map(
        &self,
        path: &Path,
        map: &HashMap<ChannelKey, VecDeque<Vec<u8>>>,
    ) -> Result<(), StorageError> {
        let payload = QueueMapV2 { map: map.clone() };
        let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
    }

    /// Load the hybrid-key map. Missing or empty file → empty map.
    fn load_hybrid_keys(path: &Path) -> Result<HashMap<Vec<u8>, Vec<u8>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
    }

    /// Rewrite the hybrid-key map to disk (serialized bare, no envelope struct).
    fn flush_hybrid_keys(
        &self,
        path: &Path,
        map: &HashMap<Vec<u8>, Vec<u8>>,
    ) -> Result<(), StorageError> {
        let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
    }

    /// Load a username → bytes map. Missing or empty file → empty map.
    fn load_users(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
    }

    /// Rewrite a username → bytes map to disk.
    fn flush_users(
        &self,
        path: &Path,
        map: &HashMap<String, Vec<u8>>,
    ) -> Result<(), StorageError> {
        let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
    }

    /// Alias of `load_users` — the identity-key file uses the same layout.
    fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
        Self::load_users(path)
    }

    /// Alias of `flush_users` — the identity-key file uses the same layout.
    fn flush_map_string_bytes(
        &self,
        path: &Path,
        map: &HashMap<String, Vec<u8>>,
    ) -> Result<(), StorageError> {
        self.flush_users(path, map)
    }
}
|
||||
|
||||
impl Store for FileBackedStore {
    /// Append a KeyPackage to the per-identity queue and persist the full map.
    fn upload_key_package(
        &self,
        identity_key: &[u8],
        package: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = self.key_packages.lock().unwrap();
        map.entry(identity_key.to_vec())
            .or_default()
            .push_back(package);
        self.flush_kp_map(&self.kp_path, &*map)
    }

    /// Pop the oldest KeyPackage for `identity_key` (FIFO).
    /// Note: flushes even when nothing was popped.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let mut map = self.key_packages.lock().unwrap();
        let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
        self.flush_kp_map(&self.kp_path, &*map)?;
        Ok(package)
    }

    /// Append a payload to the (channel, recipient) queue and persist.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = self.deliveries.lock().unwrap();
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        map.entry(key)
            .or_default()
            .push_back(payload);
        self.flush_delivery_map(&self.ds_path, &*map)
    }

    /// Drain the whole queue for (channel, recipient), oldest first.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<Vec<u8>>, StorageError> {
        let mut map = self.deliveries.lock().unwrap();
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        let messages = map
            .get_mut(&key)
            .map(|q| q.drain(..).collect())
            .unwrap_or_default();
        self.flush_delivery_map(&self.ds_path, &*map)?;
        Ok(messages)
    }

    /// Drain up to `limit` messages, leaving newer ones queued.
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<Vec<u8>>, StorageError> {
        let mut map = self.deliveries.lock().unwrap();
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        let messages = map
            .get_mut(&key)
            .map(|q| {
                // Clamp so drain never panics on a short queue.
                let count = limit.min(q.len());
                q.drain(..count).collect()
            })
            .unwrap_or_default();
        self.flush_delivery_map(&self.ds_path, &*map)?;
        Ok(messages)
    }

    /// Count queued messages without mutating anything.
    fn queue_depth(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<usize, StorageError> {
        let map = self.deliveries.lock().unwrap();
        let key = ChannelKey {
            channel_id: channel_id.to_vec(),
            recipient_key: recipient_key.to_vec(),
        };
        Ok(map.get(&key).map(|q| q.len()).unwrap_or(0))
    }

    fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
        // FileBackedStore does not track timestamps per message — no-op.
        Ok(0)
    }

    /// Insert or replace the hybrid public key and persist the map.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = self.hybrid_keys.lock().unwrap();
        map.insert(identity_key.to_vec(), hybrid_pk);
        self.flush_hybrid_keys(&self.hk_path, &*map)
    }

    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let map = self.hybrid_keys.lock().unwrap();
        Ok(map.get(identity_key).cloned())
    }

    /// The server setup is a single blob — written straight to its own file.
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
        if let Some(parent) = self.setup_path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(&self.setup_path, setup).map_err(|e| StorageError::Io(e.to_string()))
    }

    /// Missing or empty setup file is treated as "not yet generated".
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
        if !self.setup_path.exists() {
            return Ok(None);
        }
        let bytes = fs::read(&self.setup_path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(None);
        }
        Ok(Some(bytes))
    }

    /// Insert or replace the OPAQUE record and persist the user map.
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
        let mut map = self.users.lock().unwrap();
        map.insert(username.to_string(), record);
        self.flush_users(&self.users_path, &*map)
    }

    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
        let map = self.users.lock().unwrap();
        Ok(map.get(username).cloned())
    }

    fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
        let map = self.users.lock().unwrap();
        Ok(map.contains_key(username))
    }

    /// Insert or replace the identity key binding and persist the map.
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = self.identity_keys.lock().unwrap();
        map.insert(username.to_string(), identity_key);
        self.flush_map_string_bytes(&self.identity_keys_path, &*map)
    }

    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
        let map = self.identity_keys.lock().unwrap();
        Ok(map.get(username).cloned())
    }

    /// Memory-only: endpoints are never flushed to disk, so published
    /// addresses are lost on restart (intentional for this backend).
    fn publish_endpoint(
        &self,
        identity_key: &[u8],
        node_addr: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = self.endpoints.lock().unwrap();
        map.insert(identity_key.to_vec(), node_addr);
        Ok(())
    }

    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let map = self.endpoints.lock().unwrap();
        Ok(map.get(identity_key).cloned())
    }
}
|
||||
104
crates/quicprochat-client/Cargo.toml
Normal file
104
crates/quicprochat-client/Cargo.toml
Normal file
@@ -0,0 +1,104 @@
|
||||
# Manifest for the quicprochat CLI client; builds the `qpc` binary.
[package]
name = "quicprochat-client"
version = "0.1.0"
edition.workspace = true
description = "CLI client for quicprochat."
license = "Apache-2.0 OR MIT"
repository.workspace = true

[[bin]]
name = "qpc"
path = "src/main.rs"

[dependencies]
# Sibling workspace crates
quicprochat-core = { path = "../quicprochat-core" }
quicprochat-proto = { path = "../quicprochat-proto" }
quicprochat-kt = { path = "../quicprochat-kt" }
openmls_rust_crypto = { workspace = true }

# Serialisation + RPC
capnp = { workspace = true }
capnp-rpc = { workspace = true }

# Async
tokio = { workspace = true }
tokio-util = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
bincode = { workspace = true }

# Crypto — OPAQUE PAKE
opaque-ke = { workspace = true }
rand = { workspace = true }

# Error handling
anyhow = { workspace = true }
thiserror = { workspace = true }

# Crypto — for fingerprint verification in fetch-key subcommand
sha2 = { workspace = true }
argon2 = { workspace = true }
chacha20poly1305 = { workspace = true }
ciborium = { workspace = true }
zeroize = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
rustls = { workspace = true }

# Logging
tracing = { workspace = true }
tracing-subscriber = { workspace = true }

# CLI + config
clap = { workspace = true }
toml = { workspace = true }

# Local message/conversation storage
rusqlite = { workspace = true }

# Hex encoding/decoding
hex = { workspace = true }

# Secure password prompting (no echo)
rpassword = "5"

# mDNS discovery for mesh mode (Freifunk). Only compiled with --features mesh.
mdns-sd = { version = "0.12", optional = true }

# Optional P2P transport for direct node-to-node messaging.
quicprochat-p2p = { path = "../quicprochat-p2p", optional = true }

# Optional TUI dependencies (Ratatui full-screen interface).
ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] }
crossterm = { version = "0.28", optional = true }

# YAML playbook parsing (only compiled with --features playbook).
serde_yaml = { version = "0.9", optional = true }

# v2 SDK-based CLI (thin shell over quicprochat-sdk).
quicprochat-sdk = { path = "../quicprochat-sdk", optional = true }
quicprochat-rpc = { path = "../quicprochat-rpc", optional = true }
rustyline = { workspace = true, optional = true }

[lints]
workspace = true

[features]
# Enable mesh-mode features: mDNS local peer discovery + P2P transport.
# Build: cargo build -p quicprochat-client --features mesh
mesh = ["dep:mdns-sd", "dep:quicprochat-p2p"]
# Enable full-screen Ratatui TUI: cargo build -p quicprochat-client --features tui
tui = ["dep:ratatui", "dep:crossterm"]
# Enable playbook (scripted command execution): YAML parser + serde derives.
# Build: cargo build -p quicprochat-client --features playbook
playbook = ["dep:serde_yaml"]
# v2 CLI over SDK: cargo build -p quicprochat-client --features v2
v2 = ["dep:quicprochat-sdk", "dep:quicprochat-rpc", "dep:rustyline"]

[dev-dependencies]
dashmap = { workspace = true }
assert_cmd = "2"
tempfile = "3"
portpicker = "0.1"
rand = "0.8"
||||
516
crates/quicprochat-client/src/client/command_engine.rs
Normal file
516
crates/quicprochat-client/src/client/command_engine.rs
Normal file
@@ -0,0 +1,516 @@
|
||||
//! Command engine: typed command enum, registry, and execution bridge.
|
||||
//!
|
||||
//! Maps every REPL slash command and lifecycle operation into a single `Command`
|
||||
//! enum with typed parameters. `CommandRegistry` parses raw input and delegates
|
||||
//! execution to the existing `cmd_*` handlers in `repl.rs`.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
|
||||
use super::repl::{Input, SlashCommand, parse_input};
|
||||
use super::session::SessionState;
|
||||
|
||||
// ── Comparison operator for assert conditions ────────────────────────────────
|
||||
|
||||
/// Comparison operator used in playbook assertions.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum CmpOp {
    Eq,
    Ne,
    Gt,
    Lt,
    Gte,
    Lte,
}

impl CmpOp {
    /// Evaluate this comparison: `lhs <op> rhs`.
    ///
    /// Implemented over a single `Ordering` so each variant is a one-line
    /// check against the computed comparison result.
    pub fn eval(&self, lhs: usize, rhs: usize) -> bool {
        use std::cmp::Ordering::{Equal, Greater, Less};
        let ord = lhs.cmp(&rhs);
        match self {
            Self::Eq => ord == Equal,
            Self::Ne => ord != Equal,
            Self::Gt => ord == Greater,
            Self::Lt => ord == Less,
            Self::Gte => ord != Less,
            Self::Lte => ord != Greater,
        }
    }
}
|
||||
|
||||
// ── Assert conditions for playbook testing ───────────────────────────────────
|
||||
|
||||
/// Conditions that can be asserted in a playbook step.
///
/// Evaluated by `execute_assert` against the current `SessionState`.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum AssertCondition {
    /// The client is connected. Trivially true whenever `execute` runs,
    /// since a valid client reference is required to get there.
    Connected,
    /// A login has completed (checked against the global `AUTH_CONTEXT`).
    LoggedIn,
    /// The active conversation's display name contains `name` as a substring.
    InConversation { name: String },
    /// Stored message count of the active conversation, compared via `op`
    /// (missing conversation counts as 0).
    MessageCount { op: CmpOp, count: usize },
    /// Member count of the active conversation, compared via `op`
    /// (missing conversation counts as 0).
    MemberCount { op: CmpOp, count: usize },
    /// Free-form expression; not evaluated yet — currently always passes and
    /// merely echoes the expression into the result data.
    Custom { expression: String },
}
|
||||
|
||||
// ── Command enum ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// Every operation the client can perform, with typed parameters.
///
/// This is a superset of `SlashCommand` — it adds lifecycle operations
/// (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`, `Assert`, `SetVar`)
/// that are needed for non-interactive / playbook execution.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub enum Command {
    // ── Lifecycle (not in SlashCommand) ──────────────────────────────────
    /// Connect to `server`, optionally pinning `ca_cert` or skipping TLS
    /// verification (`insecure`). Handled by the playbook executor —
    /// `CommandRegistry::execute` rejects it.
    Connect {
        server: String,
        ca_cert: Option<String>,
        insecure: bool,
    },
    /// Authenticate an existing account. Executor-handled, like `Connect`.
    Login {
        username: String,
        password: String,
    },
    /// Create a new account. Executor-handled, like `Connect`.
    Register {
        username: String,
        password: String,
    },
    /// Send plain chat text (via `repl::do_send`) to the active conversation.
    SendMessage {
        text: String,
    },
    /// Async sleep for `duration_ms` milliseconds (playbook pacing).
    Wait {
        duration_ms: u64,
    },
    /// Evaluate a condition against session state; step fails if false.
    Assert {
        condition: AssertCondition,
    },
    /// Surface a key/value pair in the step's result `data` for variable
    /// capture by the playbook executor; does not mutate the session.
    SetVar {
        name: String,
        value: String,
    },

    // ── SlashCommand mirror ─────────────────────────────────────────────
    // Each variant below maps 1:1 onto a SlashCommand of the same shape
    // (see `to_slash` / `slash_to_command`).
    Help,
    Quit,
    Whoami,
    List,
    Switch { target: String },
    Dm { username: String },
    CreateGroup { name: String },
    Invite { target: String },
    Remove { target: String },
    Leave,
    Join,
    Members,
    GroupInfo,
    Rename { name: String },
    History { count: usize },

    // Mesh
    MeshStart,
    MeshStop,
    MeshPeers,
    MeshServer { addr: String },
    MeshSend { peer_id: String, message: String },
    MeshBroadcast { topic: String, message: String },
    MeshSubscribe { topic: String },
    MeshRoute,
    MeshIdentity,
    MeshStore,

    // Security / crypto
    Verify { username: String },
    UpdateKey,
    Typing,
    TypingNotify { enabled: bool },
    React { emoji: String, index: Option<usize> },
    Edit { index: usize, new_text: String },
    Delete { index: usize },
    SendFile { path: String },
    Download { index: usize },
    DeleteAccount,
    Disappear { arg: Option<String> },
    Privacy { arg: Option<String> },
    VerifyFs,
    RotateAllKeys,
    Devices,
    RegisterDevice { name: String },
    RevokeDevice { id_prefix: String },
}
|
||||
|
||||
impl Command {
|
||||
/// Convert a `Command` to a `SlashCommand` when possible.
|
||||
///
|
||||
/// Returns `None` for lifecycle commands that have no `SlashCommand`
|
||||
/// equivalent (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`,
|
||||
/// `Assert`, `SetVar`).
|
||||
pub(crate) fn to_slash(&self) -> Option<SlashCommand> {
|
||||
match self.clone() {
|
||||
// Lifecycle — no SlashCommand equivalent
|
||||
Command::Connect { .. }
|
||||
| Command::Login { .. }
|
||||
| Command::Register { .. }
|
||||
| Command::SendMessage { .. }
|
||||
| Command::Wait { .. }
|
||||
| Command::Assert { .. }
|
||||
| Command::SetVar { .. } => None,
|
||||
|
||||
// 1:1 mirror
|
||||
Command::Help => Some(SlashCommand::Help),
|
||||
Command::Quit => Some(SlashCommand::Quit),
|
||||
Command::Whoami => Some(SlashCommand::Whoami),
|
||||
Command::List => Some(SlashCommand::List),
|
||||
Command::Switch { target } => Some(SlashCommand::Switch { target }),
|
||||
Command::Dm { username } => Some(SlashCommand::Dm { username }),
|
||||
Command::CreateGroup { name } => Some(SlashCommand::CreateGroup { name }),
|
||||
Command::Invite { target } => Some(SlashCommand::Invite { target }),
|
||||
Command::Remove { target } => Some(SlashCommand::Remove { target }),
|
||||
Command::Leave => Some(SlashCommand::Leave),
|
||||
Command::Join => Some(SlashCommand::Join),
|
||||
Command::Members => Some(SlashCommand::Members),
|
||||
Command::GroupInfo => Some(SlashCommand::GroupInfo),
|
||||
Command::Rename { name } => Some(SlashCommand::Rename { name }),
|
||||
Command::History { count } => Some(SlashCommand::History { count }),
|
||||
Command::MeshStart => Some(SlashCommand::MeshStart),
|
||||
Command::MeshStop => Some(SlashCommand::MeshStop),
|
||||
Command::MeshPeers => Some(SlashCommand::MeshPeers),
|
||||
Command::MeshServer { addr } => Some(SlashCommand::MeshServer { addr }),
|
||||
Command::MeshSend { peer_id, message } => {
|
||||
Some(SlashCommand::MeshSend { peer_id, message })
|
||||
}
|
||||
Command::MeshBroadcast { topic, message } => {
|
||||
Some(SlashCommand::MeshBroadcast { topic, message })
|
||||
}
|
||||
Command::MeshSubscribe { topic } => Some(SlashCommand::MeshSubscribe { topic }),
|
||||
Command::MeshRoute => Some(SlashCommand::MeshRoute),
|
||||
Command::MeshIdentity => Some(SlashCommand::MeshIdentity),
|
||||
Command::MeshStore => Some(SlashCommand::MeshStore),
|
||||
Command::Verify { username } => Some(SlashCommand::Verify { username }),
|
||||
Command::UpdateKey => Some(SlashCommand::UpdateKey),
|
||||
Command::Typing => Some(SlashCommand::Typing),
|
||||
Command::TypingNotify { enabled } => Some(SlashCommand::TypingNotify { enabled }),
|
||||
Command::React { emoji, index } => Some(SlashCommand::React { emoji, index }),
|
||||
Command::Edit { index, new_text } => Some(SlashCommand::Edit { index, new_text }),
|
||||
Command::Delete { index } => Some(SlashCommand::Delete { index }),
|
||||
Command::SendFile { path } => Some(SlashCommand::SendFile { path }),
|
||||
Command::Download { index } => Some(SlashCommand::Download { index }),
|
||||
Command::DeleteAccount => Some(SlashCommand::DeleteAccount),
|
||||
Command::Disappear { arg } => Some(SlashCommand::Disappear { arg }),
|
||||
Command::Privacy { arg } => Some(SlashCommand::Privacy { arg }),
|
||||
Command::VerifyFs => Some(SlashCommand::VerifyFs),
|
||||
Command::RotateAllKeys => Some(SlashCommand::RotateAllKeys),
|
||||
Command::Devices => Some(SlashCommand::Devices),
|
||||
Command::RegisterDevice { name } => Some(SlashCommand::RegisterDevice { name }),
|
||||
Command::RevokeDevice { id_prefix } => {
|
||||
Some(SlashCommand::RevokeDevice { id_prefix })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── CommandResult ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Outcome of executing a single `Command`.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub struct CommandResult {
    /// Whether the command completed without error.
    pub success: bool,
    /// Optional human-readable output (currently unused by the constructors).
    pub output: Option<String>,
    /// Error description; populated only by `err`.
    pub error: Option<String>,
    /// Structured key-value outputs for variable capture in playbooks.
    pub data: HashMap<String, String>,
}

impl CommandResult {
    /// Successful result: no output, no error, empty data map.
    fn ok() -> Self {
        Self {
            success: true,
            output: None,
            error: None,
            data: HashMap::new(),
        }
    }

    /// Failed result carrying `msg`; everything else matches `ok()`.
    fn err(msg: String) -> Self {
        Self {
            success: false,
            error: Some(msg),
            ..Self::ok()
        }
    }
}
|
||||
|
||||
// ── CommandRegistry ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Parses raw input into `Command` and delegates execution to the existing
/// REPL handlers.
///
/// Stateless: both operations are associated functions; all state lives in
/// the `SessionState` passed to `execute`.
pub struct CommandRegistry;

impl CommandRegistry {
    /// Parse a raw input line into a `Command`.
    ///
    /// Returns `None` for empty input. Returns `Some(Command::SendMessage)`
    /// for plain chat text. Slash commands are parsed via the existing
    /// `parse_input` function.
    pub fn parse(line: &str) -> Option<Command> {
        match parse_input(line) {
            Input::Empty => None,
            Input::ChatMessage(text) => Some(Command::SendMessage { text }),
            Input::Slash(sc) => Some(slash_to_command(sc)),
        }
    }

    /// Execute a `Command`, delegating slash commands to the existing
    /// `handle_slash` dispatch and handling lifecycle commands directly.
    ///
    /// Currently, output from `cmd_*` handlers goes to stdout (unchanged).
    /// `CommandResult` captures success/failure status; stdout capture can
    /// be added later.
    pub async fn execute(
        cmd: &Command,
        session: &mut SessionState,
        client: &node_service::Client,
    ) -> CommandResult {
        match cmd {
            Command::Wait { duration_ms } => {
                // Async sleep so other tasks keep running while the playbook pauses.
                tokio::time::sleep(std::time::Duration::from_millis(*duration_ms)).await;
                CommandResult::ok()
            }
            Command::SetVar { name, value } => {
                // No session mutation: the pair is surfaced via `data` so the
                // playbook executor can capture it as a variable.
                let mut result = CommandResult::ok();
                result.data.insert(name.clone(), value.clone());
                result
            }
            Command::Assert { condition } => execute_assert(condition, session),
            Command::Connect { .. } | Command::Login { .. } | Command::Register { .. } => {
                // These lifecycle commands require external context (endpoint,
                // OPAQUE state) that lives outside SessionState. The playbook
                // executor will handle them directly; calling execute() for
                // them is an error.
                CommandResult::err(
                    "lifecycle commands (connect/login/register) must be handled by the playbook executor".into(),
                )
            }
            Command::SendMessage { text } => {
                // Errors are formatted with the alternate form ({e:#}) to
                // include the full anyhow context chain.
                match super::repl::do_send(session, client, text).await {
                    Ok(()) => CommandResult::ok(),
                    Err(e) => CommandResult::err(format!("{e:#}")),
                }
            }
            // Quit does no work here; the caller's loop decides when to stop.
            Command::Quit => CommandResult::ok(),
            other => {
                // All remaining variants have a SlashCommand equivalent.
                if let Some(sc) = other.to_slash() {
                    match execute_slash(session, client, sc).await {
                        Ok(()) => CommandResult::ok(),
                        Err(e) => CommandResult::err(format!("{e:#}")),
                    }
                } else {
                    // Unreachable in practice: all lifecycle variants are
                    // matched explicitly above.
                    CommandResult::err("command has no slash equivalent".into())
                }
            }
        }
    }
}
|
||||
|
||||
// ── Conversion helpers ──────────────────────────────────────────────────────
|
||||
|
||||
/// Convert a `SlashCommand` into the corresponding `Command`.
///
/// Total mapping: every `SlashCommand` variant has a `Command` counterpart of
/// identical shape, so this never fails. The partial inverse is
/// `Command::to_slash`.
fn slash_to_command(sc: SlashCommand) -> Command {
    match sc {
        SlashCommand::Help => Command::Help,
        SlashCommand::Quit => Command::Quit,
        SlashCommand::Whoami => Command::Whoami,
        SlashCommand::List => Command::List,
        SlashCommand::Switch { target } => Command::Switch { target },
        SlashCommand::Dm { username } => Command::Dm { username },
        SlashCommand::CreateGroup { name } => Command::CreateGroup { name },
        SlashCommand::Invite { target } => Command::Invite { target },
        SlashCommand::Remove { target } => Command::Remove { target },
        SlashCommand::Leave => Command::Leave,
        SlashCommand::Join => Command::Join,
        SlashCommand::Members => Command::Members,
        SlashCommand::GroupInfo => Command::GroupInfo,
        SlashCommand::Rename { name } => Command::Rename { name },
        SlashCommand::History { count } => Command::History { count },
        SlashCommand::MeshStart => Command::MeshStart,
        SlashCommand::MeshStop => Command::MeshStop,
        SlashCommand::MeshPeers => Command::MeshPeers,
        SlashCommand::MeshServer { addr } => Command::MeshServer { addr },
        SlashCommand::MeshSend { peer_id, message } => Command::MeshSend { peer_id, message },
        SlashCommand::MeshBroadcast { topic, message } => {
            Command::MeshBroadcast { topic, message }
        }
        SlashCommand::MeshSubscribe { topic } => Command::MeshSubscribe { topic },
        SlashCommand::MeshRoute => Command::MeshRoute,
        SlashCommand::MeshIdentity => Command::MeshIdentity,
        SlashCommand::MeshStore => Command::MeshStore,
        SlashCommand::Verify { username } => Command::Verify { username },
        SlashCommand::UpdateKey => Command::UpdateKey,
        SlashCommand::Typing => Command::Typing,
        SlashCommand::TypingNotify { enabled } => Command::TypingNotify { enabled },
        SlashCommand::React { emoji, index } => Command::React { emoji, index },
        SlashCommand::Edit { index, new_text } => Command::Edit { index, new_text },
        SlashCommand::Delete { index } => Command::Delete { index },
        SlashCommand::SendFile { path } => Command::SendFile { path },
        SlashCommand::Download { index } => Command::Download { index },
        SlashCommand::DeleteAccount => Command::DeleteAccount,
        SlashCommand::Disappear { arg } => Command::Disappear { arg },
        SlashCommand::Privacy { arg } => Command::Privacy { arg },
        SlashCommand::VerifyFs => Command::VerifyFs,
        SlashCommand::RotateAllKeys => Command::RotateAllKeys,
        SlashCommand::Devices => Command::Devices,
        SlashCommand::RegisterDevice { name } => Command::RegisterDevice { name },
        SlashCommand::RevokeDevice { id_prefix } => Command::RevokeDevice { id_prefix },
    }
}
|
||||
|
||||
// ── Execution helpers ───────────────────────────────────────────────────────
|
||||
|
||||
/// Execute a `SlashCommand` using the existing `cmd_*` handlers from `repl.rs`.
///
/// This duplicates the dispatch table from `handle_slash` but returns
/// `anyhow::Result<()>` instead of printing errors inline — the caller
/// decides how to surface errors.
async fn execute_slash(
    session: &mut SessionState,
    client: &node_service::Client,
    cmd: SlashCommand,
) -> anyhow::Result<()> {
    // Glob import brings all cmd_* handlers (and print_help) into scope.
    use super::repl::*;
    match cmd {
        SlashCommand::Help => {
            print_help();
            Ok(())
        }
        // Quit is a no-op here; loop termination is the caller's concern.
        SlashCommand::Quit => Ok(()),
        SlashCommand::Whoami => cmd_whoami(session),
        SlashCommand::List => cmd_list(session),
        SlashCommand::Switch { target } => cmd_switch(session, &target),
        SlashCommand::Dm { username } => cmd_dm(session, client, &username).await,
        SlashCommand::CreateGroup { name } => cmd_create_group(session, &name),
        SlashCommand::Invite { target } => cmd_invite(session, client, &target).await,
        SlashCommand::Remove { target } => cmd_remove(session, client, &target).await,
        SlashCommand::Leave => cmd_leave(session, client).await,
        SlashCommand::Join => cmd_join(session, client).await,
        SlashCommand::Members => cmd_members(session, client).await,
        SlashCommand::GroupInfo => cmd_group_info(session, client).await,
        SlashCommand::Rename { name } => cmd_rename(session, &name),
        SlashCommand::History { count } => cmd_history(session, count),
        SlashCommand::MeshStart => cmd_mesh_start(session).await,
        SlashCommand::MeshStop => cmd_mesh_stop(session).await,
        SlashCommand::MeshPeers => cmd_mesh_peers(),
        SlashCommand::MeshServer { addr } => {
            // No handler for this one: it only prints a reconnect hint.
            super::display::print_status(&format!(
                "mesh server hint: reconnect with --server {addr} to use this node"
            ));
            Ok(())
        }
        SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(session, &peer_id, &message).await,
        SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(session, &topic, &message).await,
        SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(session, &topic),
        SlashCommand::MeshRoute => cmd_mesh_route(session),
        SlashCommand::MeshIdentity => cmd_mesh_identity(session),
        SlashCommand::MeshStore => cmd_mesh_store(session),
        SlashCommand::Verify { username } => cmd_verify(session, client, &username).await,
        SlashCommand::UpdateKey => cmd_update_key(session, client).await,
        SlashCommand::Typing => cmd_typing(session, client).await,
        SlashCommand::TypingNotify { enabled } => {
            // Toggled inline on the session rather than via a cmd_* handler.
            session.typing_notify_enabled = enabled;
            super::display::print_status(&format!(
                "typing notifications {}",
                if enabled { "enabled" } else { "disabled" }
            ));
            Ok(())
        }
        SlashCommand::React { emoji, index } => cmd_react(session, client, &emoji, index).await,
        SlashCommand::Edit { index, new_text } => {
            cmd_edit(session, client, index, &new_text).await
        }
        SlashCommand::Delete { index } => cmd_delete(session, client, index).await,
        SlashCommand::SendFile { path } => cmd_send_file(session, client, &path).await,
        SlashCommand::Download { index } => cmd_download(session, client, index).await,
        SlashCommand::DeleteAccount => cmd_delete_account(session, client).await,
        SlashCommand::Disappear { arg } => cmd_disappear(session, arg.as_deref()),
        SlashCommand::Privacy { arg } => cmd_privacy(session, arg.as_deref()),
        SlashCommand::VerifyFs => cmd_verify_fs(session),
        SlashCommand::RotateAllKeys => cmd_rotate_all_keys(session, client).await,
        SlashCommand::Devices => cmd_devices(client).await,
        SlashCommand::RegisterDevice { name } => cmd_register_device(client, &name).await,
        SlashCommand::RevokeDevice { id_prefix } => cmd_revoke_device(client, &id_prefix).await,
    }
}
|
||||
|
||||
/// Assert a condition against the current session state.
///
/// Pure with respect to `session` (read-only); returns `CommandResult::ok()`
/// when the condition holds, `CommandResult::err(..)` with a diagnostic
/// otherwise.
fn execute_assert(condition: &AssertCondition, session: &SessionState) -> CommandResult {
    match condition {
        AssertCondition::Connected => {
            // We have a session => we got past connect. Always true when
            // execute() is called with a valid client reference.
            CommandResult::ok()
        }
        AssertCondition::LoggedIn => {
            // Panics only if another thread panicked while holding the lock —
            // treated as an unrecoverable invariant violation.
            let guard = crate::AUTH_CONTEXT
                .read()
                .expect("AUTH_CONTEXT poisoned");
            if guard.is_some() {
                CommandResult::ok()
            } else {
                CommandResult::err("not logged in".into())
            }
        }
        AssertCondition::InConversation { name } => {
            // Substring match (not equality) against the display name.
            if let Some(display) = session.active_display_name() {
                if display.contains(name.as_str()) {
                    CommandResult::ok()
                } else {
                    CommandResult::err(format!(
                        "active conversation is '{display}', expected '{name}'"
                    ))
                }
            } else {
                CommandResult::err("no active conversation".into())
            }
        }
        AssertCondition::MessageCount { op, count } => {
            // No active conversation, or a failed load, counts as 0 messages.
            let actual = session
                .active_conversation
                .as_ref()
                .and_then(|id| session.conv_store.load_all_messages(id).ok())
                .map(|msgs| msgs.len())
                .unwrap_or(0);
            if op.eval(actual, *count) {
                CommandResult::ok()
            } else {
                CommandResult::err(format!(
                    "message count assertion failed: {actual} {op:?} {count}"
                ))
            }
        }
        AssertCondition::MemberCount { op, count } => {
            // No active conversation, or no member entry, counts as 0 members.
            let actual = session
                .active_conversation
                .as_ref()
                .and_then(|id| session.members.get(id))
                .map(|m| m.member_identities().len())
                .unwrap_or(0);
            if op.eval(actual, *count) {
                CommandResult::ok()
            } else {
                CommandResult::err(format!(
                    "member count assertion failed: {actual} {op:?} {count}"
                ))
            }
        }
        AssertCondition::Custom { expression } => {
            // Custom expressions are not evaluated yet; always pass.
            // The expression is echoed into `data` so playbook logs show it.
            let mut result = CommandResult::ok();
            result.data.insert("expression".into(), expression.clone());
            result
        }
    }
}
|
||||
|
||||
1400
crates/quicprochat-client/src/client/commands.rs
Normal file
1400
crates/quicprochat-client/src/client/commands.rs
Normal file
File diff suppressed because it is too large
Load Diff
798
crates/quicprochat-client/src/client/conversation.rs
Normal file
798
crates/quicprochat-client/src/client/conversation.rs
Normal file
@@ -0,0 +1,798 @@
|
||||
//! Multi-conversation state backed by SQLite (SQLCipher-encrypted when a
|
||||
//! password is provided).
|
||||
//!
|
||||
//! Each conversation (DM or group) has its own MLS group blob, keystore blob,
|
||||
//! member list, and message history.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use rand::RngCore;
|
||||
use rusqlite::{params, Connection, OptionalExtension};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
// ── Types ────────────────────────────────────────────────────────────────────
|
||||
|
||||
/// 16-byte conversation identifier.
|
||||
/// - DMs: the channel_id returned by `createChannel` (server-assigned UUID).
|
||||
/// - Groups: SHA-256(group_name)[..16].
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct ConversationId(pub [u8; 16]);
|
||||
|
||||
impl ConversationId {
|
||||
pub fn from_slice(s: &[u8]) -> Option<Self> {
|
||||
if s.len() == 16 {
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(s);
|
||||
Some(Self(buf))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive a conversation ID from a group name via SHA-256 truncation.
|
||||
pub fn from_group_name(name: &str) -> Self {
|
||||
use sha2::{Sha256, Digest};
|
||||
let hash = Sha256::digest(name.as_bytes());
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(&hash[..16]);
|
||||
Self(buf)
|
||||
}
|
||||
|
||||
pub fn hex(&self) -> String {
|
||||
hex::encode(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConversationKind {
    /// 1:1 DM channel with a specific peer.
    Dm {
        // Peer's key bytes, persisted in the `peer_key` column. Presumably
        // the peer's public identity key — confirm against callers.
        peer_key: Vec<u8>,
        // Peer's username, when known.
        peer_username: Option<String>,
    },
    /// Named group with N members.
    Group { name: String },
}
|
||||
|
||||
/// One conversation row: identity, kind, MLS state blobs, and bookkeeping.
#[derive(Clone, Debug)]
pub struct Conversation {
    /// Stable identifier (see `ConversationId` for derivation rules).
    pub id: ConversationId,
    /// DM vs. named group, with kind-specific metadata.
    pub kind: ConversationKind,
    /// Human-readable name shown for this conversation.
    pub display_name: String,
    /// Serialized MLS group (bincode).
    pub mls_group_blob: Option<Vec<u8>>,
    /// Serialized keystore (bincode HashMap).
    pub keystore_blob: Option<Vec<u8>>,
    /// Member identity keys (bincode Vec<Vec<u8>>).
    pub member_keys: Vec<Vec<u8>>,
    /// Number of unread messages.
    pub unread_count: u32,
    // Millisecond timestamps; presumably Unix-epoch based, matching this
    // file's use of UNIX_EPOCH — confirm with the code that sets them.
    pub last_activity_ms: u64,
    pub created_at_ms: u64,
    /// Whether this conversation uses hybrid (X25519 + ML-KEM-768) MLS keys.
    pub is_hybrid: bool,
    /// Highest server-side delivery sequence number seen.
    pub last_seen_seq: u64,
}
|
||||
|
||||
/// One persisted message row in the `messages` table.
#[derive(Clone, Debug)]
pub struct StoredMessage {
    /// Owning conversation (foreign key to `conversations.id`).
    pub conversation_id: ConversationId,
    /// Optional 16-byte message identifier (nullable in the schema).
    pub message_id: Option<[u8; 16]>,
    /// Sender key bytes (stored as `sender_key BLOB NOT NULL`).
    pub sender_key: Vec<u8>,
    /// Sender display name, when known.
    pub sender_name: Option<String>,
    /// Message body text.
    pub body: String,
    // Free-form type tag; the set of values is defined by callers and is
    // not visible in this file.
    pub msg_type: String,
    // ID of a referenced message — presumably for reactions/edits/deletes;
    // confirm with callers.
    pub ref_msg_id: Option<[u8; 16]>,
    /// Millisecond timestamp of the message.
    pub timestamp_ms: u64,
    /// True when this client is the sender.
    pub is_outgoing: bool,
}
|
||||
|
||||
// ── Key derivation (Argon2id, matching state.rs parameters) ─────────────────
|
||||
|
||||
// Argon2id cost parameters — per the module comment these must match the
// parameters used in state.rs so both stores share one unlock cost profile.
// m_cost is in KiB units per the argon2 crate, so 19 * 1024 ≈ 19 MiB.
const ARGON2_M_COST: u32 = 19 * 1024;
const ARGON2_T_COST: u32 = 2;
const ARGON2_P_COST: u32 = 1;
// Length in bytes of the per-database random salt file.
const SALT_LEN: usize = 16;
|
||||
|
||||
/// Derive a 32-byte SQLCipher key from the user password and a random salt.
|
||||
fn derive_convdb_key(password: &str, salt: &[u8]) -> anyhow::Result<Zeroizing<[u8; 32]>> {
|
||||
let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(32))
|
||||
.map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = Zeroizing::new([0u8; 32]);
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut *key)
|
||||
.map_err(|e| anyhow::anyhow!("convdb key derivation: {e}"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Read or create a 16-byte random salt at `salt_path` (mode 0o600).
|
||||
fn get_or_create_salt(salt_path: &Path) -> anyhow::Result<Vec<u8>> {
|
||||
if salt_path.exists() {
|
||||
let bytes = std::fs::read(salt_path).context("read convdb salt")?;
|
||||
anyhow::ensure!(bytes.len() == SALT_LEN, "invalid convdb salt length");
|
||||
return Ok(bytes);
|
||||
}
|
||||
let mut salt = vec![0u8; SALT_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||
std::fs::write(salt_path, &salt).context("write convdb salt")?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
std::fs::set_permissions(salt_path, std::fs::Permissions::from_mode(0o600)).ok();
|
||||
}
|
||||
Ok(salt)
|
||||
}
|
||||
|
||||
// ── ConversationStore ────────────────────────────────────────────────────────
|
||||
|
||||
/// Persistent store for conversations, messages, and the pending-send
/// outbox, backed by one SQLite connection (SQLCipher-keyed when a
/// password is supplied to `open`).
pub struct ConversationStore {
    // Sole database handle; every query in this type goes through it.
    conn: Connection,
}
|
||||
|
||||
impl ConversationStore {
|
||||
/// Open or create the conversation database at `db_path`.
|
||||
/// If `password` is `Some`, the database is encrypted with SQLCipher using
|
||||
/// an Argon2id-derived key. Existing unencrypted databases are migrated
|
||||
/// transparently.
|
||||
pub fn open(db_path: &Path, password: Option<&str>) -> anyhow::Result<Self> {
|
||||
if let Some(parent) = db_path.parent() {
|
||||
std::fs::create_dir_all(parent).ok();
|
||||
}
|
||||
|
||||
match password {
|
||||
Some(pw) => Self::open_encrypted(db_path, pw),
|
||||
None => Self::open_plain(db_path),
|
||||
}
|
||||
}
|
||||
|
||||
/// Open the database without encryption (no password supplied).
fn open_plain(db_path: &Path) -> anyhow::Result<Self> {
    let conn = Connection::open(db_path).context("open conversation db")?;
    // WAL for better concurrent-read behavior; enable FK enforcement for the
    // messages → conversations reference.
    conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
        .context("set pragmas")?;
    Self::migrate(&conn)?;
    Ok(Self { conn })
}
|
||||
|
||||
/// Open the database with SQLCipher encryption, migrating a plaintext
/// database in place first if needed.
fn open_encrypted(db_path: &Path, password: &str) -> anyhow::Result<Self> {
    // The salt file doubles as the "already encrypted" marker: it only
    // exists once the database has been keyed (or migrated).
    let salt_path = PathBuf::from(format!("{}-salt", db_path.display()));
    let already_encrypted = salt_path.exists();

    // Migrate an existing unencrypted database before opening with encryption.
    if db_path.exists() && !already_encrypted {
        Self::migrate_plain_to_encrypted(db_path, &salt_path, password)?;
        // After migration, salt file exists and DB is encrypted — fall through.
    }

    let salt = get_or_create_salt(&salt_path)?;
    let key = derive_convdb_key(password, &salt)?;
    // Hex-encode for the PRAGMA; keep the encoded copy zeroized on drop too.
    #[allow(clippy::needless_borrows_for_generic_args)]
    let hex_key = Zeroizing::new(hex::encode(&*key));

    let conn = Connection::open(db_path).context("open conversation db")?;
    // SQLCipher requires the key to be supplied before any other statement
    // touches the database — key first, then the remaining pragmas.
    conn.pragma_update(None, "key", format!("x'{}'", &*hex_key))
        .context("set SQLCipher key")?;
    conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
        .context("set pragmas")?;
    Self::migrate(&conn)?;
    Ok(Self { conn })
}
|
||||
|
||||
/// Migrate an unencrypted `.convdb` to an encrypted one in-place.
///
/// Strategy: ATTACH a fresh SQLCipher-keyed database, copy everything into
/// it via `sqlcipher_export`, then atomically rename it over the original.
/// The salt is created first so the derived key matches later opens.
fn migrate_plain_to_encrypted(
    db_path: &Path,
    salt_path: &Path,
    password: &str,
) -> anyhow::Result<()> {
    let salt = get_or_create_salt(salt_path)?;
    let key = derive_convdb_key(password, &salt)?;
    #[allow(clippy::needless_borrows_for_generic_args)]
    let hex_key = Zeroizing::new(hex::encode(&*key));

    // Temporary sibling file that will hold the encrypted copy.
    let enc_path = db_path.with_extension("convdb-enc");

    // Open the existing plaintext database.
    let plain = Connection::open(db_path).context("open plain convdb for migration")?;
    plain.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;").ok();

    // Attach a new encrypted database and export into it.
    // Sanitize the path to prevent SQL injection (ATTACH does not support parameterized paths).
    let enc_path_str = enc_path.display().to_string();
    anyhow::ensure!(
        !enc_path_str.contains('\''),
        "database path must not contain single quotes: {enc_path_str}"
    );
    plain
        .execute_batch(&format!(
            "ATTACH DATABASE '{enc_path_str}' AS encrypted KEY \"x'{}'\";",
            &*hex_key
        ))
        .context("attach encrypted db for migration")?;
    plain
        .execute_batch("SELECT sqlcipher_export('encrypted');")
        .context("sqlcipher_export to encrypted db")?;
    plain
        .execute_batch("DETACH DATABASE encrypted;")
        .context("detach encrypted db")?;

    // Close the plaintext handle before swapping files underneath it.
    drop(plain);

    // Swap files: encrypted → original.
    std::fs::rename(&enc_path, db_path).context("replace convdb with encrypted version")?;
    // Clean up WAL/SHM left from the plaintext open.
    let wal = PathBuf::from(format!("{}-wal", db_path.display()));
    let shm = PathBuf::from(format!("{}-shm", db_path.display()));
    std::fs::remove_file(&wal).ok();
    std::fs::remove_file(&shm).ok();

    tracing::info!("migrated conversation database to encrypted storage");
    Ok(())
}
|
||||
|
||||
/// Create the schema if missing and apply additive column migrations.
///
/// Safe to run on every open: all DDL uses IF NOT EXISTS, and the
/// ALTER TABLE statements ignore "column already exists" errors.
fn migrate(conn: &Connection) -> anyhow::Result<()> {
    conn.execute_batch(
        // Three tables: conversation metadata + MLS blobs, per-conversation
        // message history, and a retry outbox for pending sends.
        "CREATE TABLE IF NOT EXISTS conversations (
            id BLOB PRIMARY KEY,
            kind TEXT NOT NULL,
            display_name TEXT NOT NULL,
            peer_key BLOB,
            peer_username TEXT,
            group_name TEXT,
            mls_group_blob BLOB,
            keystore_blob BLOB,
            member_keys BLOB,
            unread_count INTEGER NOT NULL DEFAULT 0,
            last_activity_ms INTEGER NOT NULL DEFAULT 0,
            created_at_ms INTEGER NOT NULL DEFAULT 0
        );

        CREATE TABLE IF NOT EXISTS messages (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            conversation_id BLOB NOT NULL REFERENCES conversations(id),
            message_id BLOB,
            sender_key BLOB NOT NULL,
            sender_name TEXT,
            body TEXT NOT NULL,
            msg_type TEXT NOT NULL,
            ref_msg_id BLOB,
            timestamp_ms INTEGER NOT NULL,
            is_outgoing INTEGER NOT NULL DEFAULT 0
        );

        CREATE INDEX IF NOT EXISTS idx_messages_conv
            ON messages(conversation_id, timestamp_ms);

        CREATE TABLE IF NOT EXISTS outbox (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            conversation_id BLOB NOT NULL,
            recipient_key BLOB NOT NULL,
            payload BLOB NOT NULL,
            created_at_ms INTEGER NOT NULL,
            retry_count INTEGER NOT NULL DEFAULT 0,
            status TEXT NOT NULL DEFAULT 'pending'
        );
        CREATE INDEX IF NOT EXISTS idx_outbox_status
            ON outbox(status, created_at_ms);",
    )
    .context("migrate conversation db")?;

    // Additive migrations for new columns (safe to re-run; errors ignored if column already exists).
    conn.execute_batch("ALTER TABLE conversations ADD COLUMN is_hybrid INTEGER NOT NULL DEFAULT 0;").ok();
    conn.execute_batch("ALTER TABLE conversations ADD COLUMN last_seen_seq INTEGER NOT NULL DEFAULT 0;").ok();

    Ok(())
}
|
||||
|
||||
// ── Conversation CRUD ────────────────────────────────────────────────
|
||||
|
||||
/// Insert or update a conversation row (upsert keyed on `id`).
///
/// Note: the ON CONFLICT clause deliberately does not update `kind`,
/// `peer_key`, `peer_username`, `group_name` or `created_at_ms` — those
/// columns are written only on first insert.
pub fn save_conversation(&self, conv: &Conversation) -> anyhow::Result<()> {
    // Flatten the enum kind into nullable columns: DMs fill peer_*,
    // groups fill group_name; the other side stays NULL.
    let (kind_str, peer_key, peer_username, group_name) = match &conv.kind {
        ConversationKind::Dm {
            peer_key,
            peer_username,
        } => ("dm", Some(peer_key.as_slice()), peer_username.as_deref(), None),
        ConversationKind::Group { name } => ("group", None, None, Some(name.as_str())),
    };
    // member_keys is stored as a single bincode blob.
    let member_keys_blob = bincode::serialize(&conv.member_keys)
        .context("serialize member_keys")?;

    self.conn.execute(
        "INSERT INTO conversations
            (id, kind, display_name, peer_key, peer_username, group_name,
             mls_group_blob, keystore_blob, member_keys, unread_count,
             last_activity_ms, created_at_ms, is_hybrid, last_seen_seq)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
         ON CONFLICT(id) DO UPDATE SET
            display_name = excluded.display_name,
            mls_group_blob = excluded.mls_group_blob,
            keystore_blob = excluded.keystore_blob,
            member_keys = excluded.member_keys,
            unread_count = excluded.unread_count,
            last_activity_ms = excluded.last_activity_ms,
            is_hybrid = excluded.is_hybrid,
            last_seen_seq = excluded.last_seen_seq",
        params![
            conv.id.0.as_slice(),
            kind_str,
            conv.display_name,
            peer_key,
            peer_username,
            group_name,
            conv.mls_group_blob,
            conv.keystore_blob,
            member_keys_blob,
            conv.unread_count,
            conv.last_activity_ms,
            conv.created_at_ms,
            // SQLite stores booleans/u64 as integers.
            conv.is_hybrid as i32,
            conv.last_seen_seq as i64,
        ],
    )?;
    Ok(())
}
|
||||
|
||||
/// Load a single conversation by id; `Ok(None)` when no row matches.
pub fn load_conversation(&self, id: &ConversationId) -> anyhow::Result<Option<Conversation>> {
    self.conn
        .query_row(
            "SELECT kind, display_name, peer_key, peer_username, group_name,
                    mls_group_blob, keystore_blob, member_keys, unread_count,
                    last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
             FROM conversations WHERE id = ?1",
            params![id.0.as_slice()],
            |row| {
                let kind_str: String = row.get(0)?;
                let display_name: String = row.get(1)?;
                let peer_key: Option<Vec<u8>> = row.get(2)?;
                let peer_username: Option<String> = row.get(3)?;
                let group_name: Option<String> = row.get(4)?;
                let mls_group_blob: Option<Vec<u8>> = row.get(5)?;
                let keystore_blob: Option<Vec<u8>> = row.get(6)?;
                let member_keys_blob: Option<Vec<u8>> = row.get(7)?;
                let unread_count: u32 = row.get(8)?;
                let last_activity_ms: u64 = row.get(9)?;
                let created_at_ms: u64 = row.get(10)?;
                let is_hybrid_int: i32 = row.get(11)?;
                let last_seen_seq: i64 = row.get(12)?;

                // Any kind other than "dm" is treated as a group.
                let kind = if kind_str == "dm" {
                    ConversationKind::Dm {
                        peer_key: peer_key.unwrap_or_default(),
                        peer_username,
                    }
                } else {
                    ConversationKind::Group {
                        name: group_name.unwrap_or_default(),
                    }
                };

                // A corrupt member_keys blob degrades to an empty list
                // (with a warning) instead of failing the whole load.
                let member_keys: Vec<Vec<u8>> = member_keys_blob
                    .and_then(|b| match bincode::deserialize(&b) {
                        Ok(v) => Some(v),
                        Err(e) => {
                            tracing::warn!(conv = %hex::encode(id.0), "bincode deserialize member_keys failed: {e}");
                            None
                        }
                    })
                    .unwrap_or_default();

                Ok(Conversation {
                    id: id.clone(),
                    kind,
                    display_name,
                    mls_group_blob,
                    keystore_blob,
                    member_keys,
                    unread_count,
                    last_activity_ms,
                    created_at_ms,
                    is_hybrid: is_hybrid_int != 0,
                    last_seen_seq: last_seen_seq as u64,
                })
            },
        )
        .optional()
        .context("load conversation")
}
|
||||
|
||||
/// List every conversation, most recently active first.
///
/// Row decoding mirrors `load_conversation` (kind dispatch, lenient
/// member_keys deserialization), with the id read from the row itself.
pub fn list_conversations(&self) -> anyhow::Result<Vec<Conversation>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, kind, display_name, peer_key, peer_username, group_name,
                mls_group_blob, keystore_blob, member_keys, unread_count,
                last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
         FROM conversations ORDER BY last_activity_ms DESC",
    )?;
    let rows = stmt.query_map([], |row| {
        let id_blob: Vec<u8> = row.get(0)?;
        let kind_str: String = row.get(1)?;
        let display_name: String = row.get(2)?;
        let peer_key: Option<Vec<u8>> = row.get(3)?;
        let peer_username: Option<String> = row.get(4)?;
        let group_name: Option<String> = row.get(5)?;
        let mls_group_blob: Option<Vec<u8>> = row.get(6)?;
        let keystore_blob: Option<Vec<u8>> = row.get(7)?;
        let member_keys_blob: Option<Vec<u8>> = row.get(8)?;
        let unread_count: u32 = row.get(9)?;
        let last_activity_ms: u64 = row.get(10)?;
        let created_at_ms: u64 = row.get(11)?;
        let is_hybrid_int: i32 = row.get(12)?;
        let last_seen_seq: i64 = row.get(13)?;

        // NOTE(review): a malformed id blob silently becomes the all-zero
        // id rather than an error — confirm this is intended.
        let id = ConversationId::from_slice(&id_blob).unwrap_or(ConversationId([0; 16]));
        let kind = if kind_str == "dm" {
            ConversationKind::Dm {
                peer_key: peer_key.unwrap_or_default(),
                peer_username,
            }
        } else {
            ConversationKind::Group {
                name: group_name.unwrap_or_default(),
            }
        };
        // Corrupt blobs degrade to an empty member list with a warning.
        let member_keys: Vec<Vec<u8>> = member_keys_blob
            .and_then(|b| match bincode::deserialize(&b) {
                Ok(v) => Some(v),
                Err(e) => {
                    tracing::warn!(conv = %hex::encode(&id_blob), "bincode deserialize member_keys failed: {e}");
                    None
                }
            })
            .unwrap_or_default();

        Ok(Conversation {
            id,
            kind,
            display_name,
            mls_group_blob,
            keystore_blob,
            member_keys,
            unread_count,
            last_activity_ms,
            created_at_ms,
            is_hybrid: is_hybrid_int != 0,
            last_seen_seq: last_seen_seq as u64,
        })
    })?;

    let mut convs = Vec::new();
    for row in rows {
        convs.push(row?);
    }
    Ok(convs)
}
|
||||
|
||||
/// Find a DM conversation by the peer's identity key.
|
||||
pub fn find_dm_by_peer(&self, peer_key: &[u8]) -> anyhow::Result<Option<Conversation>> {
|
||||
let id_blob: Option<Vec<u8>> = self
|
||||
.conn
|
||||
.query_row(
|
||||
"SELECT id FROM conversations WHERE kind = 'dm' AND peer_key = ?1",
|
||||
params![peer_key],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
match id_blob {
|
||||
Some(blob) => {
|
||||
let id = ConversationId::from_slice(&blob)
|
||||
.context("invalid conversation id in db")?;
|
||||
self.load_conversation(&id)
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Find a group conversation by name.
|
||||
pub fn find_group_by_name(&self, name: &str) -> anyhow::Result<Option<Conversation>> {
|
||||
let id_blob: Option<Vec<u8>> = self
|
||||
.conn
|
||||
.query_row(
|
||||
"SELECT id FROM conversations WHERE kind = 'group' AND group_name = ?1",
|
||||
params![name],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
match id_blob {
|
||||
Some(blob) => {
|
||||
let id = ConversationId::from_slice(&blob)
|
||||
.context("invalid conversation id in db")?;
|
||||
self.load_conversation(&id)
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn increment_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET unread_count = unread_count + 1 WHERE id = ?1",
|
||||
params![id.0.as_slice()],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn reset_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET unread_count = 0 WHERE id = ?1",
|
||||
params![id.0.as_slice()],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_activity(&self, id: &ConversationId, ts_ms: u64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET last_activity_ms = ?2 WHERE id = ?1 AND last_activity_ms < ?2",
|
||||
params![id.0.as_slice(), ts_ms],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Message CRUD ─────────────────────────────────────────────────────
|
||||
|
||||
pub fn save_message(&self, msg: &StoredMessage) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO messages
|
||||
(conversation_id, message_id, sender_key, sender_name, body,
|
||||
msg_type, ref_msg_id, timestamp_ms, is_outgoing)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
|
||||
params![
|
||||
msg.conversation_id.0.as_slice(),
|
||||
msg.message_id.as_ref().map(|id| id.as_slice()),
|
||||
msg.sender_key,
|
||||
msg.sender_name,
|
||||
msg.body,
|
||||
msg.msg_type,
|
||||
msg.ref_msg_id.as_ref().map(|id| id.as_slice()),
|
||||
msg.timestamp_ms,
|
||||
msg.is_outgoing as i32,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_recent_messages(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
limit: usize,
|
||||
) -> anyhow::Result<Vec<StoredMessage>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT message_id, sender_key, sender_name, body, msg_type,
|
||||
ref_msg_id, timestamp_ms, is_outgoing
|
||||
FROM messages
|
||||
WHERE conversation_id = ?1
|
||||
ORDER BY timestamp_ms DESC
|
||||
LIMIT ?2",
|
||||
)?;
|
||||
let rows = stmt.query_map(params![conv_id.0.as_slice(), limit.min(u32::MAX as usize) as u32], |row| {
|
||||
let message_id: Option<Vec<u8>> = row.get(0)?;
|
||||
let sender_key: Vec<u8> = row.get(1)?;
|
||||
let sender_name: Option<String> = row.get(2)?;
|
||||
let body: String = row.get(3)?;
|
||||
let msg_type: String = row.get(4)?;
|
||||
let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
|
||||
let timestamp_ms: u64 = row.get(6)?;
|
||||
let is_outgoing: i32 = row.get(7)?;
|
||||
|
||||
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
|
||||
if v.len() == 16 {
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(v);
|
||||
Some(buf)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
Ok(StoredMessage {
|
||||
conversation_id: conv_id.clone(),
|
||||
message_id: message_id.as_deref().and_then(to_16),
|
||||
sender_key,
|
||||
sender_name,
|
||||
body,
|
||||
msg_type,
|
||||
ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
|
||||
timestamp_ms,
|
||||
is_outgoing: is_outgoing != 0,
|
||||
})
|
||||
})?;
|
||||
|
||||
let mut msgs = Vec::new();
|
||||
for row in rows {
|
||||
msgs.push(row?);
|
||||
}
|
||||
// Reverse so oldest first
|
||||
msgs.reverse();
|
||||
Ok(msgs)
|
||||
}
|
||||
|
||||
/// Load all messages for a conversation, oldest first (no limit).
pub fn load_all_messages(&self, conv_id: &ConversationId) -> anyhow::Result<Vec<StoredMessage>> {
    // id ASC breaks ties between messages with identical timestamps,
    // keeping insertion order.
    let mut stmt = self.conn.prepare(
        "SELECT message_id, sender_key, sender_name, body, msg_type,
                ref_msg_id, timestamp_ms, is_outgoing
         FROM messages
         WHERE conversation_id = ?1
         ORDER BY timestamp_ms ASC, id ASC",
    )?;
    let rows = stmt.query_map(params![conv_id.0.as_slice()], |row| {
        let message_id: Option<Vec<u8>> = row.get(0)?;
        let sender_key: Vec<u8> = row.get(1)?;
        let sender_name: Option<String> = row.get(2)?;
        let body: String = row.get(3)?;
        let msg_type: String = row.get(4)?;
        let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
        let timestamp_ms: u64 = row.get(6)?;
        let is_outgoing: i32 = row.get(7)?;

        // Convert a stored blob back into a fixed 16-byte id;
        // wrong-length blobs are dropped (treated as no id).
        fn to_16(v: &[u8]) -> Option<[u8; 16]> {
            if v.len() == 16 {
                let mut buf = [0u8; 16];
                buf.copy_from_slice(v);
                Some(buf)
            } else {
                None
            }
        }

        Ok(StoredMessage {
            conversation_id: conv_id.clone(),
            message_id: message_id.as_deref().and_then(to_16),
            sender_key,
            sender_name,
            body,
            msg_type,
            ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
            timestamp_ms,
            is_outgoing: is_outgoing != 0,
        })
    })?;

    let mut msgs = Vec::new();
    for row in rows {
        msgs.push(row?);
    }
    Ok(msgs)
}
|
||||
|
||||
/// Update the body of an existing message (for edits).
|
||||
pub fn update_message_body(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
message_id: &[u8; 16],
|
||||
new_body: &str,
|
||||
) -> anyhow::Result<bool> {
|
||||
let rows = self.conn.execute(
|
||||
"UPDATE messages SET body = ?3 WHERE conversation_id = ?1 AND message_id = ?2",
|
||||
params![conv_id.0.as_slice(), message_id.as_slice(), new_body],
|
||||
)?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
/// Mark a message as deleted (sets body to "[deleted]" and msg_type to "deleted").
|
||||
pub fn delete_message(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
message_id: &[u8; 16],
|
||||
) -> anyhow::Result<bool> {
|
||||
let rows = self.conn.execute(
|
||||
"UPDATE messages SET body = '[deleted]', msg_type = 'deleted' WHERE conversation_id = ?1 AND message_id = ?2",
|
||||
params![conv_id.0.as_slice(), message_id.as_slice()],
|
||||
)?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
/// Save a message, deduplicating by message_id within the same conversation.
|
||||
/// Returns `true` if the message was saved (new), `false` if it was a duplicate.
|
||||
pub fn save_message_dedup(&self, msg: &StoredMessage) -> anyhow::Result<bool> {
|
||||
if let Some(ref mid) = msg.message_id {
|
||||
let exists: bool = self.conn.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM messages WHERE message_id = ?1 AND conversation_id = ?2)",
|
||||
params![mid.as_slice(), msg.conversation_id.0.as_slice()],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
if exists {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
self.save_message(msg)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
// ── Sequence tracking ──────────────────────────────────────────────
|
||||
|
||||
pub fn update_last_seen_seq(&self, id: &ConversationId, seq: u64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET last_seen_seq = ?2 WHERE id = ?1 AND last_seen_seq < ?2",
|
||||
params![id.0.as_slice(), seq as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Outbox (offline queue) ────────────────────────────────────────
|
||||
|
||||
pub fn enqueue_outbox(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
recipient_key: &[u8],
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO outbox (conversation_id, recipient_key, payload, created_at_ms)
|
||||
VALUES (?1, ?2, ?3, ?4)",
|
||||
params![conv_id.0.as_slice(), recipient_key, payload, now_ms() as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_pending_outbox(&self) -> anyhow::Result<Vec<OutboxEntry>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT id, conversation_id, recipient_key, payload, retry_count
|
||||
FROM outbox WHERE status = 'pending' ORDER BY created_at_ms",
|
||||
)?;
|
||||
let rows = stmt.query_map([], |row| {
|
||||
let id: i64 = row.get(0)?;
|
||||
let conv_blob: Vec<u8> = row.get(1)?;
|
||||
let recipient_key: Vec<u8> = row.get(2)?;
|
||||
let payload: Vec<u8> = row.get(3)?;
|
||||
let retry_count: u32 = row.get(4)?;
|
||||
Ok(OutboxEntry {
|
||||
id,
|
||||
conversation_id: ConversationId::from_slice(&conv_blob)
|
||||
.unwrap_or(ConversationId([0; 16])),
|
||||
recipient_key,
|
||||
payload,
|
||||
retry_count,
|
||||
})
|
||||
})?;
|
||||
let mut entries = Vec::new();
|
||||
for row in rows {
|
||||
entries.push(row?);
|
||||
}
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
pub fn mark_outbox_sent(&self, id: i64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE outbox SET status = 'sent' WHERE id = ?1",
|
||||
params![id],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn mark_outbox_failed(&self, id: i64, retry_count: u32) -> anyhow::Result<()> {
|
||||
let new_status = if retry_count > 5 { "failed" } else { "pending" };
|
||||
self.conn.execute(
|
||||
"UPDATE outbox SET retry_count = ?2, status = ?3 WHERE id = ?1",
|
||||
params![id, retry_count, new_status],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete messages older than `cutoff_ms` (epoch milliseconds) across all conversations.
|
||||
pub fn delete_messages_before(&self, cutoff_ms: u64) -> anyhow::Result<usize> {
|
||||
let rows = self.conn.execute(
|
||||
"DELETE FROM messages WHERE timestamp_ms < ?1",
|
||||
params![cutoff_ms as i64],
|
||||
)?;
|
||||
Ok(rows)
|
||||
}
|
||||
}
|
||||
|
||||
/// An entry in the offline outbox queue.
#[derive(Clone, Debug)]
pub struct OutboxEntry {
    /// Rowid of the outbox row (AUTOINCREMENT primary key).
    pub id: i64,
    /// Conversation this payload belongs to.
    pub conversation_id: ConversationId,
    /// Identity key of the intended recipient.
    pub recipient_key: Vec<u8>,
    /// Opaque payload bytes to deliver; contents are not interpreted here.
    pub payload: Vec<u8>,
    /// Number of delivery attempts recorded so far.
    pub retry_count: u32,
}
|
||||
|
||||
/// Current wall-clock time as milliseconds since the Unix epoch.
/// Returns 0 if the system clock reports a time before 1970.
pub fn now_ms() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}
|
||||
82
crates/quicprochat-client/src/client/display.rs
Normal file
82
crates/quicprochat-client/src/client/display.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
//! Terminal display helpers for the REPL.
|
||||
|
||||
use super::conversation::StoredMessage;
|
||||
use super::session::SessionState;
|
||||
|
||||
// ANSI color codes (SGR escape sequences) used by the printers below.
const RESET: &str = "\x1b[0m"; // restore default attributes
const BOLD: &str = "\x1b[1m";
const DIM: &str = "\x1b[2m";
const GREEN: &str = "\x1b[32m"; // outgoing message bodies
const CYAN: &str = "\x1b[36m"; // sender names
const YELLOW: &str = "\x1b[33m"; // unread counts and errors
|
||||
|
||||
/// Print the REPL prompt showing the active conversation and unread count.
|
||||
pub fn print_prompt(session: &SessionState) {
|
||||
use std::io::Write;
|
||||
let name = session
|
||||
.active_display_name()
|
||||
.unwrap_or_else(|| "no conversation".into());
|
||||
let unread = session.total_unread();
|
||||
if unread > 0 {
|
||||
print!("{DIM}[{RESET}{BOLD}{name}{RESET} {YELLOW}{unread} unread{RESET}{DIM}]{RESET} > ");
|
||||
} else {
|
||||
print!("{DIM}[{RESET}{BOLD}{name}{RESET}{DIM}]{RESET} > ");
|
||||
}
|
||||
let _ = std::io::stdout().flush();
|
||||
}
|
||||
|
||||
/// Print an incoming or outgoing message.
|
||||
pub fn print_message(msg: &StoredMessage) {
|
||||
let body = if msg.msg_type == "reaction" {
|
||||
format!("reacted {}", msg.body)
|
||||
} else {
|
||||
msg.body.clone()
|
||||
};
|
||||
if msg.is_outgoing {
|
||||
println!("\r{GREEN}> {body}{RESET}");
|
||||
} else {
|
||||
let fallback = hex::encode(&msg.sender_key[..4]);
|
||||
let sender = msg.sender_name.as_deref().unwrap_or(&fallback);
|
||||
println!("\r{CYAN}{BOLD}[{sender}]{RESET} {body}");
|
||||
}
|
||||
}
|
||||
|
||||
/// Print a message received in real-time (clears current line first).
|
||||
pub fn print_incoming(sender: &str, body: &str) {
|
||||
use std::io::Write;
|
||||
// Clear current line, print message, then re-show prompt context
|
||||
print!("\r\x1b[2K");
|
||||
println!("{CYAN}{BOLD}[{sender}]{RESET} {body}");
|
||||
let _ = std::io::stdout().flush();
|
||||
}
|
||||
|
||||
/// Print a system/status message.
|
||||
pub fn print_status(msg: &str) {
|
||||
println!("{DIM} {msg}{RESET}");
|
||||
}
|
||||
|
||||
/// Print a transient typing indicator (clears current line first).
|
||||
pub fn print_typing(sender: &str) {
|
||||
use std::io::Write;
|
||||
print!("\r\x1b[2K");
|
||||
println!("{DIM} {sender} is typing...{RESET}");
|
||||
let _ = std::io::stdout().flush();
|
||||
}
|
||||
|
||||
/// Print an error message.
|
||||
pub fn print_error(msg: &str) {
|
||||
println!("{YELLOW} error: {msg}{RESET}");
|
||||
}
|
||||
|
||||
/// Format a conversation list entry for `/list`.
|
||||
pub fn format_conv_line(display_name: &str, kind: &str, unread: u32, members: usize) -> String {
|
||||
let unread_str = if unread > 0 {
|
||||
format!(" {YELLOW}({unread} new){RESET}")
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
format!(
|
||||
" {BOLD}{display_name}{RESET} {DIM}[{kind}, {members} members]{RESET}{unread_str}"
|
||||
)
|
||||
}
|
||||
7
crates/quicprochat-client/src/client/hex.rs
Normal file
7
crates/quicprochat-client/src/client/hex.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
|
||||
hex::encode(bytes)
|
||||
}
|
||||
|
||||
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
|
||||
hex::decode(s).map_err(|_| "invalid hex string")
|
||||
}
|
||||
148
crates/quicprochat-client/src/client/mesh_discovery.rs
Normal file
148
crates/quicprochat-client/src/client/mesh_discovery.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
//! mDNS-based peer discovery for Freifunk / community mesh deployments.
|
||||
//!
|
||||
//! Browse for `_quicprochat._udp.local.` services on the local network and
|
||||
//! surface them as [`DiscoveredPeer`] structs. Servers announce themselves
|
||||
//! automatically on startup; this module lets clients find them without manual
|
||||
//! configuration.
|
||||
//!
|
||||
//! # Usage
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use quicprochat_client::client::mesh_discovery::MeshDiscovery;
|
||||
//!
|
||||
//! let disc = MeshDiscovery::start()?;
|
||||
//! // Give mDNS time to collect announcements before reading.
|
||||
//! std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
//! for peer in disc.peers() {
|
||||
//! println!("found: {} at {}", peer.domain, peer.server_addr);
|
||||
//! }
|
||||
//! # Ok::<(), quicprochat_client::client::mesh_discovery::MeshDiscoveryError>(())
|
||||
//! ```
|
||||
|
||||
#[cfg(feature = "mesh")]
|
||||
use mdns_sd::{ServiceDaemon, ServiceEvent};
|
||||
use std::net::SocketAddr;
|
||||
#[cfg(feature = "mesh")]
|
||||
use std::sync::{Arc, Mutex};
|
||||
#[cfg(feature = "mesh")]
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// A qpc server discovered on the local network via mDNS.
#[derive(Debug, Clone)]
pub struct DiscoveredPeer {
    /// Federation domain of the remote server (e.g. `"node1.freifunk.net"`);
    /// read from the `domain` TXT record, falling back to the mDNS fullname.
    pub domain: String,
    /// QUIC RPC address to connect to, parsed from the `server` TXT record.
    pub server_addr: SocketAddr,
}
|
||||
|
||||
/// A running mDNS browse session.
///
/// Starts immediately on construction; drop to stop browsing.
pub struct MeshDiscovery {
    // Held only to keep the daemon alive for the lifetime of the session.
    #[cfg(feature = "mesh")]
    _daemon: ServiceDaemon,
    // Peers keyed by federation domain, filled in by the background thread.
    #[cfg(feature = "mesh")]
    peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>>,
}
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
pub enum MeshDiscoveryError {
    /// The mDNS service daemon could not be created.
    #[error("mDNS daemon failed to start: {0}")]
    DaemonError(String),
    /// The browse request for the service type was rejected.
    #[error("mDNS browse failed: {0}")]
    BrowseError(String),
    /// The binary was built without the `mesh` cargo feature.
    #[error("mesh feature not compiled (rebuild with --features mesh)")]
    FeatureDisabled,
}
|
||||
|
||||
impl MeshDiscovery {
|
||||
/// Start browsing for `_quicprochat._udp.local.` services.
///
/// Returns immediately; peers are collected in the background.
/// Returns [`MeshDiscoveryError::FeatureDisabled`] when built without the
/// `mesh` feature.
pub fn start() -> Result<Self, MeshDiscoveryError> {
    // Compile-time dispatch: exactly one of these blocks exists per build.
    #[cfg(feature = "mesh")]
    {
        Self::start_inner()
    }
    #[cfg(not(feature = "mesh"))]
    {
        Err(MeshDiscoveryError::FeatureDisabled)
    }
}
|
||||
|
||||
#[cfg(feature = "mesh")]
// Spawn the mDNS daemon, start a browse, and collect resolved services
// into the shared peer map from a background thread.
fn start_inner() -> Result<Self, MeshDiscoveryError> {
    let daemon = ServiceDaemon::new()
        .map_err(|e| MeshDiscoveryError::DaemonError(e.to_string()))?;

    let receiver = daemon
        .browse("_quicprochat._udp.local.")
        .map_err(|e| MeshDiscoveryError::BrowseError(e.to_string()))?;

    let peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>> =
        Arc::new(Mutex::new(HashMap::new()));
    let peers_bg = Arc::clone(&peers);

    // Process mDNS events in a background thread (ServiceDaemon is sync).
    // The thread ends when the daemon (and thus the channel) is dropped.
    std::thread::spawn(move || {
        for event in receiver {
            match event {
                ServiceEvent::ServiceResolved(info) => {
                    // Extract the qpc server address from TXT records.
                    let server_addr_str = info
                        .get_property_val_str("server")
                        .map(|s| s.to_string());
                    // Prefer the explicit "domain" TXT record; fall back to
                    // the mDNS service fullname.
                    let domain = info
                        .get_property_val_str("domain")
                        .map(|s| s.to_string())
                        .unwrap_or_else(|| info.get_fullname().to_string());

                    // Announcements without a parseable "server" address
                    // are silently ignored.
                    if let Some(addr_str) = server_addr_str {
                        if let Ok(addr) = addr_str.parse::<SocketAddr>() {
                            let peer = DiscoveredPeer {
                                domain: domain.clone(),
                                server_addr: addr,
                            };
                            // Keyed by domain: a re-announcement replaces
                            // the previous entry.
                            if let Ok(mut map) = peers_bg.lock() {
                                map.insert(domain, peer);
                            }
                        }
                    }
                }
                ServiceEvent::ServiceRemoved(_ty, fullname) => {
                    // NOTE(review): substring match — a domain that happens
                    // to be contained in an unrelated fullname would also be
                    // evicted; confirm fullnames embed the domain uniquely.
                    if let Ok(mut map) = peers_bg.lock() {
                        map.retain(|_, p| {
                            !fullname.contains(&p.domain)
                        });
                    }
                }
                // Other events (SearchStarted, SearchStopped) are informational.
                _ => {}
            }
        }
    });

    Ok(Self {
        _daemon: daemon,
        peers,
    })
}
|
||||
|
||||
/// Return a snapshot of all peers discovered so far.
///
/// Always returns an empty list when built without the `mesh` feature.
pub fn peers(&self) -> Vec<DiscoveredPeer> {
    #[cfg(feature = "mesh")]
    {
        // A poisoned lock (panicked background thread) yields an empty
        // list instead of propagating the panic.
        self.peers
            .lock()
            .map(|m| m.values().cloned().collect())
            .unwrap_or_default()
    }
    #[cfg(not(feature = "mesh"))]
    {
        vec![]
    }
}
|
||||
}
|
||||
24
crates/quicprochat-client/src/client/mod.rs
Normal file
24
crates/quicprochat-client/src/client/mod.rs
Normal file
@@ -0,0 +1,24 @@
|
||||
pub mod command_engine;
|
||||
pub mod commands;
|
||||
pub mod conversation;
|
||||
pub mod display;
|
||||
pub mod hex;
|
||||
pub mod mesh_discovery;
|
||||
#[cfg(feature = "playbook")]
|
||||
pub mod playbook;
|
||||
pub mod repl;
|
||||
pub mod retry;
|
||||
pub mod rpc;
|
||||
pub mod session;
|
||||
pub mod state;
|
||||
pub mod token_cache;
|
||||
#[cfg(feature = "tui")]
|
||||
pub mod tui;
|
||||
#[cfg(feature = "v2")]
|
||||
pub mod v2_repl;
|
||||
#[cfg(all(feature = "v2", feature = "tui"))]
|
||||
pub mod v2_tui;
|
||||
|
||||
pub use commands::*;
|
||||
pub use rpc::{connect_node, enqueue, fetch_all, fetch_hybrid_key, fetch_key_package, fetch_wait, upload_hybrid_key, upload_key_package};
|
||||
pub use state::{decode_identity_key, load_existing_state, load_or_init_state, save_state};
|
||||
868
crates/quicprochat-client/src/client/playbook.rs
Normal file
868
crates/quicprochat-client/src/client/playbook.rs
Normal file
@@ -0,0 +1,868 @@
|
||||
//! YAML playbook parser and executor.
|
||||
//!
|
||||
//! Playbooks describe a sequence of client commands in YAML format.
|
||||
//! They support variable substitution, assertions, loops, and per-step
|
||||
//! error handling policies.
|
||||
//!
|
||||
//! ```yaml
|
||||
//! name: "smoke test"
|
||||
//! steps:
|
||||
//! - command: dm
|
||||
//! args: { username: "bob" }
|
||||
//! - command: send
|
||||
//! args: { text: "Hello from playbook" }
|
||||
//! - command: assert
|
||||
//! condition: message_count
|
||||
//! op: gte
|
||||
//! value: 1
|
||||
//! ```
|
||||
//!
|
||||
//! Requires the `playbook` cargo feature.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::{Context, bail};
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::command_engine::{AssertCondition, CmpOp, Command, CommandRegistry};
|
||||
use super::session::SessionState;
|
||||
|
||||
// ── Playbook structs ────────────────────────────────────────────────────────
|
||||
|
||||
/// A parsed YAML playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Playbook {
    /// Human-readable playbook name (required in the YAML).
    pub name: String,
    /// Optional free-form description.
    #[serde(default)]
    pub description: Option<String>,
    /// Initial variable values; they seed the runner's variable table.
    #[serde(default)]
    pub variables: HashMap<String, String>,
    /// Steps executed in order.
    pub steps: Vec<PlaybookStep>,
}
|
||||
|
||||
/// A single step in a playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookStep {
    /// Command name to dispatch (e.g. "dm", "send", "assert").
    pub command: String,
    /// Command arguments as raw YAML values.
    #[serde(default)]
    pub args: HashMap<String, serde_yaml::Value>,
    /// For assert steps: the condition name.
    #[serde(default)]
    pub condition: Option<String>,
    /// For assert steps: comparison operator.
    #[serde(default)]
    pub op: Option<String>,
    /// For assert steps: expected value.
    #[serde(default)]
    pub value: Option<serde_yaml::Value>,
    /// Capture the command output into this variable name.
    #[serde(default)]
    pub capture: Option<String>,
    /// Error handling policy for this step.
    #[serde(default)]
    pub on_error: OnError,
    /// Optional loop specification; YAML key is `loop` (a Rust keyword,
    /// hence the rename).
    #[serde(rename = "loop", default)]
    pub loop_spec: Option<LoopSpec>,
}
|
||||
|
||||
/// What to do when a step fails.
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum OnError {
    /// Abort the run; remaining steps are reported as skipped.
    #[default]
    Fail,
    /// Do not abort on failure.
    /// NOTE(review): the visible runner only special-cases `Fail`, so
    /// `Skip` and `Continue` currently behave identically — confirm the
    /// intended distinction.
    Skip,
    /// Record the failure and keep executing subsequent steps.
    Continue,
}
|
||||
|
||||
/// Loop specification for repeating a step.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoopSpec {
    /// Variable name bound to the current iteration value.
    pub var: String,
    /// First value (inclusive).
    pub from: usize,
    /// Last value (inclusive — the runner iterates `from..=to`).
    pub to: usize,
}
|
||||
|
||||
// ── Report structs ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Summary of a playbook execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookReport {
    /// Playbook name, copied from the definition.
    pub name: String,
    /// Total number of executed (or skipped) step instances, including
    /// loop expansions.
    pub total_steps: usize,
    /// Steps that completed successfully.
    pub passed: usize,
    /// Steps that ran and failed.
    pub failed: usize,
    /// Steps never run because of an earlier aborting failure.
    pub skipped: usize,
    /// Wall-clock time for the whole run.
    pub duration: Duration,
    /// Per-step details, in execution order.
    pub step_results: Vec<StepResult>,
}
|
||||
|
||||
impl PlaybookReport {
    /// True if all steps passed (no failures).
    ///
    /// Skipped steps are counted separately and do not make this false.
    pub fn all_passed(&self) -> bool {
        self.failed == 0
    }
}
|
||||
|
||||
impl std::fmt::Display for PlaybookReport {
    /// Render a human-readable summary: a header with totals and duration,
    /// then one line per step with its status, elapsed time, and any error.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "Playbook: {}", self.name)?;
        writeln!(
            f,
            "Result: {} passed, {} failed, {} skipped ({} total)",
            self.passed, self.failed, self.skipped, self.total_steps,
        )?;
        writeln!(f, "Duration: {:.2}s", self.duration.as_secs_f64())?;
        for sr in &self.step_results {
            let status = if sr.success { "OK" } else { "FAIL" };
            write!(
                f,
                "  [{}/{}] {} ... {} ({:.1}ms)",
                // step_index is 0-based; display 1-based for humans.
                sr.step_index + 1,
                self.total_steps,
                sr.command,
                status,
                sr.duration.as_secs_f64() * 1000.0,
            )?;
            if let Some(ref e) = sr.error {
                write!(f, " — {e}")?;
            }
            writeln!(f)?;
        }
        Ok(())
    }
}
|
||||
|
||||
/// Result of a single step execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StepResult {
    /// 0-based index of the step instance within the run.
    pub step_index: usize,
    /// Command name that was executed.
    pub command: String,
    /// Whether the step completed without error.
    pub success: bool,
    /// Wall-clock time spent on this step (zero for skipped steps).
    pub duration: Duration,
    /// Captured command output, when any.
    pub output: Option<String>,
    /// Error description when the step failed or was skipped.
    pub error: Option<String>,
}
|
||||
|
||||
// ── PlaybookRunner ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Executes a parsed `Playbook` step-by-step.
pub struct PlaybookRunner {
    /// The parsed playbook definition.
    playbook: Playbook,
    /// Live variable table: playbook-declared variables plus any overrides
    /// and loop counters set during execution.
    vars: HashMap<String, String>,
}
|
||||
|
||||
impl PlaybookRunner {
|
||||
/// Load a playbook from a YAML file.
|
||||
pub fn from_file(path: &Path) -> anyhow::Result<Self> {
|
||||
let content =
|
||||
std::fs::read_to_string(path).with_context(|| format!("read {}", path.display()))?;
|
||||
Self::from_str(&content)
|
||||
}
|
||||
|
||||
/// Parse a playbook from a YAML string.
|
||||
pub fn from_str(yaml: &str) -> anyhow::Result<Self> {
|
||||
let playbook: Playbook =
|
||||
serde_yaml::from_str(yaml).context("parse playbook YAML")?;
|
||||
let vars = playbook.variables.clone();
|
||||
Ok(Self { playbook, vars })
|
||||
}
|
||||
|
||||
/// Override or add variables before execution.
|
||||
pub fn set_var(&mut self, name: impl Into<String>, value: impl Into<String>) {
|
||||
self.vars.insert(name.into(), value.into());
|
||||
}
|
||||
|
||||
/// Execute all steps, returning a report.
|
||||
pub async fn run(
|
||||
&mut self,
|
||||
session: &mut SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> PlaybookReport {
|
||||
let start = Instant::now();
|
||||
let total = self.expanded_step_count();
|
||||
let mut results = Vec::new();
|
||||
let mut passed = 0usize;
|
||||
let mut failed = 0usize;
|
||||
let mut skipped = 0usize;
|
||||
let mut step_idx = 0usize;
|
||||
let mut abort = false;
|
||||
|
||||
for step in &self.playbook.steps.clone() {
|
||||
if abort {
|
||||
skipped += 1;
|
||||
results.push(StepResult {
|
||||
step_index: step_idx,
|
||||
command: step.command.clone(),
|
||||
success: false,
|
||||
duration: Duration::ZERO,
|
||||
output: None,
|
||||
error: Some("skipped (prior failure)".into()),
|
||||
});
|
||||
step_idx += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(ref ls) = step.loop_spec {
|
||||
for i in ls.from..=ls.to {
|
||||
self.vars.insert(ls.var.clone(), i.to_string());
|
||||
let sr = self.execute_step(step, step_idx, total, session, client).await;
|
||||
if sr.success {
|
||||
passed += 1;
|
||||
} else {
|
||||
failed += 1;
|
||||
if step.on_error == OnError::Fail {
|
||||
abort = true;
|
||||
}
|
||||
}
|
||||
results.push(sr);
|
||||
step_idx += 1;
|
||||
if abort {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let sr = self.execute_step(step, step_idx, total, session, client).await;
|
||||
if sr.success {
|
||||
passed += 1;
|
||||
} else {
|
||||
match step.on_error {
|
||||
OnError::Fail => {
|
||||
failed += 1;
|
||||
abort = true;
|
||||
}
|
||||
OnError::Skip => skipped += 1,
|
||||
OnError::Continue => failed += 1,
|
||||
}
|
||||
}
|
||||
results.push(sr);
|
||||
step_idx += 1;
|
||||
}
|
||||
}
|
||||
|
||||
PlaybookReport {
|
||||
name: self.playbook.name.clone(),
|
||||
total_steps: step_idx,
|
||||
passed,
|
||||
failed,
|
||||
skipped,
|
||||
duration: start.elapsed(),
|
||||
step_results: results,
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute a single step.
|
||||
async fn execute_step(
|
||||
&mut self,
|
||||
step: &PlaybookStep,
|
||||
index: usize,
|
||||
total: usize,
|
||||
session: &mut SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> StepResult {
|
||||
let t = Instant::now();
|
||||
let cmd = match self.step_to_command(step) {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
return StepResult {
|
||||
step_index: index,
|
||||
command: step.command.clone(),
|
||||
success: false,
|
||||
duration: t.elapsed(),
|
||||
output: None,
|
||||
error: Some(format!("{e:#}")),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
eprintln!(
|
||||
"[{}/{}] {} ...",
|
||||
index + 1,
|
||||
total,
|
||||
step.command,
|
||||
);
|
||||
|
||||
let cr = CommandRegistry::execute(&cmd, session, client).await;
|
||||
|
||||
// Capture output into variable if requested.
|
||||
if let Some(ref var_name) = step.capture {
|
||||
if let Some(ref out) = cr.output {
|
||||
self.vars.insert(var_name.clone(), out.clone());
|
||||
}
|
||||
for (k, v) in &cr.data {
|
||||
self.vars.insert(format!("{var_name}.{k}"), v.clone());
|
||||
}
|
||||
}
|
||||
|
||||
StepResult {
|
||||
step_index: index,
|
||||
command: step.command.clone(),
|
||||
success: cr.success,
|
||||
duration: t.elapsed(),
|
||||
output: cr.output,
|
||||
error: cr.error,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Convert a YAML step into a typed `Command`.
    ///
    /// Required arguments are resolved (with variable substitution) via
    /// `resolve_str`/`resolve_usize`/`resolve_u64`; optional arguments via
    /// the `opt_*` helpers. Unknown command names are an error.
    fn step_to_command(&self, step: &PlaybookStep) -> anyhow::Result<Command> {
        let cmd_name = step.command.as_str();
        match cmd_name {
            // ── Lifecycle commands ────────────────────────────────────────
            "connect" => Ok(Command::Connect {
                server: self.resolve_str(&step.args, "server")?,
                ca_cert: self.opt_str(&step.args, "ca_cert"),
                insecure: self.opt_bool(&step.args, "insecure"),
            }),
            "login" => Ok(Command::Login {
                username: self.resolve_str(&step.args, "username")?,
                password: self.resolve_str(&step.args, "password")?,
            }),
            "register" => Ok(Command::Register {
                username: self.resolve_str(&step.args, "username")?,
                password: self.resolve_str(&step.args, "password")?,
            }),
            "send" | "send-message" => Ok(Command::SendMessage {
                text: self.resolve_str(&step.args, "text")?,
            }),
            "wait" => Ok(Command::Wait {
                duration_ms: self.resolve_u64(&step.args, "duration_ms")?,
            }),
            "set-var" | "setvar" => Ok(Command::SetVar {
                name: self.resolve_str(&step.args, "name")?,
                value: self.resolve_str(&step.args, "value")?,
            }),
            "assert" => {
                let condition = self.build_assert_condition(step)?;
                Ok(Command::Assert { condition })
            }

            // ── Session / identity ───────────────────────────────────────
            "help" => Ok(Command::Help),
            "quit" | "exit" => Ok(Command::Quit),
            "whoami" => Ok(Command::Whoami),
            "list" | "ls" => Ok(Command::List),
            "switch" | "sw" => Ok(Command::Switch {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "dm" => Ok(Command::Dm {
                username: self.resolve_str(&step.args, "username")?,
            }),
            "create-group" | "cg" => Ok(Command::CreateGroup {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "invite" => Ok(Command::Invite {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "remove" | "kick" => Ok(Command::Remove {
                target: self.resolve_str(&step.args, "target")?,
            }),
            "leave" => Ok(Command::Leave),
            "join" => Ok(Command::Join),
            "members" => Ok(Command::Members),
            "group-info" | "gi" => Ok(Command::GroupInfo),
            "rename" => Ok(Command::Rename {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "history" | "hist" => Ok(Command::History {
                // Defaults to the last 20 messages when no count is given.
                count: self.opt_usize(&step.args, "count").unwrap_or(20),
            }),

            // ── Security / crypto ────────────────────────────────────────
            "verify" => Ok(Command::Verify {
                username: self.resolve_str(&step.args, "username")?,
            }),
            "update-key" | "rotate-key" => Ok(Command::UpdateKey),
            "typing" => Ok(Command::Typing),
            "typing-notify" => Ok(Command::TypingNotify {
                enabled: self.opt_bool(&step.args, "enabled"),
            }),
            "react" => Ok(Command::React {
                emoji: self.resolve_str(&step.args, "emoji")?,
                index: self.opt_usize(&step.args, "index"),
            }),
            "edit" => Ok(Command::Edit {
                index: self.resolve_usize(&step.args, "index")?,
                new_text: self.resolve_str(&step.args, "new_text")?,
            }),
            "delete" | "del" => Ok(Command::Delete {
                index: self.resolve_usize(&step.args, "index")?,
            }),
            "send-file" | "sf" => Ok(Command::SendFile {
                path: self.resolve_str(&step.args, "path")?,
            }),
            "download" | "dl" => Ok(Command::Download {
                index: self.resolve_usize(&step.args, "index")?,
            }),
            "delete-account" => Ok(Command::DeleteAccount),
            "disappear" => Ok(Command::Disappear {
                arg: self.opt_str(&step.args, "duration"),
            }),
            "privacy" => Ok(Command::Privacy {
                arg: self.opt_str(&step.args, "setting"),
            }),
            "verify-fs" => Ok(Command::VerifyFs),
            "rotate-all-keys" => Ok(Command::RotateAllKeys),
            "devices" => Ok(Command::Devices),
            "register-device" => Ok(Command::RegisterDevice {
                name: self.resolve_str(&step.args, "name")?,
            }),
            "revoke-device" => Ok(Command::RevokeDevice {
                id_prefix: self.resolve_str(&step.args, "id_prefix")?,
            }),

            // ── Mesh ─────────────────────────────────────────────────────
            "mesh-peers" => Ok(Command::MeshPeers),
            "mesh-server" => Ok(Command::MeshServer {
                addr: self.resolve_str(&step.args, "addr")?,
            }),
            "mesh-send" => Ok(Command::MeshSend {
                peer_id: self.resolve_str(&step.args, "peer_id")?,
                message: self.resolve_str(&step.args, "message")?,
            }),
            "mesh-broadcast" => Ok(Command::MeshBroadcast {
                topic: self.resolve_str(&step.args, "topic")?,
                message: self.resolve_str(&step.args, "message")?,
            }),
            "mesh-subscribe" => Ok(Command::MeshSubscribe {
                topic: self.resolve_str(&step.args, "topic")?,
            }),
            "mesh-route" => Ok(Command::MeshRoute),
            "mesh-identity" | "mesh-id" => Ok(Command::MeshIdentity),
            "mesh-store" => Ok(Command::MeshStore),

            other => bail!("unknown command: {other}"),
        }
    }
|
||||
|
||||
/// Build an `AssertCondition` from a playbook step.
|
||||
fn build_assert_condition(&self, step: &PlaybookStep) -> anyhow::Result<AssertCondition> {
|
||||
let cond = step
|
||||
.condition
|
||||
.as_deref()
|
||||
.context("assert step requires 'condition' field")?;
|
||||
match cond {
|
||||
"connected" => Ok(AssertCondition::Connected),
|
||||
"logged_in" => Ok(AssertCondition::LoggedIn),
|
||||
"in_conversation" => {
|
||||
let name = self.resolve_str(&step.args, "name")
|
||||
.or_else(|_| step.value.as_ref()
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| self.substitute(s))
|
||||
.context("assert in_conversation requires 'name' arg or 'value'"))?;
|
||||
Ok(AssertCondition::InConversation { name })
|
||||
}
|
||||
"message_count" => {
|
||||
let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
|
||||
let count = step
|
||||
.value
|
||||
.as_ref()
|
||||
.and_then(|v| v.as_u64())
|
||||
.context("message_count assert requires numeric 'value'")?
|
||||
as usize;
|
||||
Ok(AssertCondition::MessageCount { op, count })
|
||||
}
|
||||
"member_count" => {
|
||||
let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
|
||||
let count = step
|
||||
.value
|
||||
.as_ref()
|
||||
.and_then(|v| v.as_u64())
|
||||
.context("member_count assert requires numeric 'value'")?
|
||||
as usize;
|
||||
Ok(AssertCondition::MemberCount { op, count })
|
||||
}
|
||||
other => Ok(AssertCondition::Custom {
|
||||
expression: other.to_string(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_cmp_op(&self, s: &str) -> anyhow::Result<CmpOp> {
|
||||
match s {
|
||||
"eq" | "==" => Ok(CmpOp::Eq),
|
||||
"ne" | "!=" => Ok(CmpOp::Ne),
|
||||
"gt" | ">" => Ok(CmpOp::Gt),
|
||||
"lt" | "<" => Ok(CmpOp::Lt),
|
||||
"gte" | ">=" => Ok(CmpOp::Gte),
|
||||
"lte" | "<=" => Ok(CmpOp::Lte),
|
||||
other => bail!("unknown comparison operator: {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
// ── Variable substitution helpers ────────────────────────────────────
|
||||
|
||||
    /// Substitute `$varname` and `${VAR:-default}` in a string.
    ///
    /// Resolution order for the braced form: runner variables, then the
    /// process environment, then the default (if given); an unresolvable
    /// `${VAR}` expands to the empty string. A bare `$var` is looked up only
    /// in the runner variables and is emitted verbatim when unknown.
    fn substitute(&self, s: &str) -> String {
        let mut result = String::with_capacity(s.len());
        let mut chars = s.chars().peekable();
        while let Some(c) = chars.next() {
            if c == '$' {
                if chars.peek() == Some(&'{') {
                    // Braced form: ${KEY} or ${KEY:-default}.
                    chars.next(); // consume '{'
                    let mut key = String::new();
                    let mut default = None;
                    while let Some(&ch) = chars.peek() {
                        if ch == '}' {
                            chars.next();
                            break;
                        }
                        // Shell-style `:-` introduces a default value; the
                        // clone+nth peeks two characters ahead without consuming.
                        if ch == ':' && chars.clone().nth(1) == Some('-') {
                            chars.next(); // consume ':'
                            chars.next(); // consume '-'
                            let mut def = String::new();
                            while let Some(&dch) = chars.peek() {
                                if dch == '}' {
                                    chars.next();
                                    break;
                                }
                                def.push(dch);
                                chars.next();
                            }
                            default = Some(def);
                            break;
                        }
                        key.push(ch);
                        chars.next();
                    }
                    // Lookup: runner vars, then environment, then default.
                    if let Some(val) = self.vars.get(&key) {
                        result.push_str(val);
                    } else if let Ok(val) = std::env::var(&key) {
                        result.push_str(&val);
                    } else if let Some(def) = default {
                        result.push_str(&def);
                    }
                } else {
                    // Bare form: $key, where key is alphanumeric/underscore.
                    let mut key = String::new();
                    while let Some(&ch) = chars.peek() {
                        if ch.is_alphanumeric() || ch == '_' {
                            key.push(ch);
                            chars.next();
                        } else {
                            break;
                        }
                    }
                    if let Some(val) = self.vars.get(&key) {
                        result.push_str(val);
                    } else {
                        // Unknown bare variable: leave the original text intact.
                        result.push('$');
                        result.push_str(&key);
                    }
                }
            } else {
                result.push(c);
            }
        }
        result
    }
|
||||
|
||||
/// Resolve a required string argument with variable substitution.
|
||||
fn resolve_str(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
match val {
|
||||
serde_yaml::Value::String(s) => Ok(self.substitute(s)),
|
||||
serde_yaml::Value::Number(n) => Ok(n.to_string()),
|
||||
serde_yaml::Value::Bool(b) => Ok(b.to_string()),
|
||||
other => Ok(format!("{other:?}")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve an optional string argument.
|
||||
fn opt_str(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> Option<String> {
|
||||
args.get(key).map(|v| match v {
|
||||
serde_yaml::Value::String(s) => self.substitute(s),
|
||||
serde_yaml::Value::Number(n) => n.to_string(),
|
||||
serde_yaml::Value::Bool(b) => b.to_string(),
|
||||
other => format!("{other:?}"),
|
||||
})
|
||||
}
|
||||
|
||||
/// Resolve an optional bool argument (defaults to false).
|
||||
fn opt_bool(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> bool {
|
||||
args.get(key)
|
||||
.and_then(|v| v.as_bool())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Resolve a required usize argument.
|
||||
fn resolve_usize(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<usize> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
val.as_u64()
|
||||
.map(|n| n as usize)
|
||||
.with_context(|| format!("argument '{key}' must be a positive integer"))
|
||||
}
|
||||
|
||||
/// Resolve a required u64 argument.
|
||||
fn resolve_u64(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<u64> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
val.as_u64()
|
||||
.with_context(|| format!("argument '{key}' must be a positive integer"))
|
||||
}
|
||||
|
||||
/// Resolve an optional usize argument.
|
||||
fn opt_usize(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> Option<usize> {
|
||||
args.get(key).and_then(|v| v.as_u64()).map(|n| n as usize)
|
||||
}
|
||||
|
||||
/// Count total expanded steps (including loop iterations).
|
||||
fn expanded_step_count(&self) -> usize {
|
||||
self.playbook
|
||||
.steps
|
||||
.iter()
|
||||
.map(|s| {
|
||||
if let Some(ref ls) = s.loop_spec {
|
||||
if ls.to >= ls.from {
|
||||
ls.to - ls.from + 1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
} else {
|
||||
1
|
||||
}
|
||||
})
|
||||
.sum()
|
||||
}
|
||||
}
|
||||
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Parsing a minimal playbook populates name and steps.
    #[test]
    fn parse_minimal_playbook() {
        let yaml = r#"
name: "test"
steps:
  - command: whoami
  - command: list
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.name, "test");
        assert_eq!(runner.playbook.steps.len(), 2);
        assert_eq!(runner.playbook.steps[0].command, "whoami");
    }

    // Declared playbook variables seed the runner's variable table.
    #[test]
    fn parse_playbook_with_variables() {
        let yaml = r#"
name: "var test"
variables:
  user: alice
  server: "127.0.0.1:5001"
steps:
  - command: dm
    args:
      username: "$user"
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.vars["user"], "alice");
        assert_eq!(runner.vars["server"], "127.0.0.1:5001");
    }

    // Covers bare `$var`, braced `${var}`, and `${missing:-default}` forms.
    #[test]
    fn variable_substitution() {
        let mut vars = HashMap::new();
        vars.insert("name".to_string(), "alice".to_string());
        vars.insert("port".to_string(), "5001".to_string());
        let runner = PlaybookRunner {
            playbook: Playbook {
                name: "test".into(),
                description: None,
                variables: HashMap::new(),
                steps: vec![],
            },
            vars,
        };
        assert_eq!(runner.substitute("hello $name"), "hello alice");
        assert_eq!(runner.substitute("port=$port!"), "port=5001!");
        assert_eq!(runner.substitute("${name}@server"), "alice@server");
        assert_eq!(
            runner.substitute("${missing:-default}"),
            "default"
        );
        assert_eq!(runner.substitute("no vars here"), "no vars here");
    }

    // YAML steps map to typed commands with substituted arguments.
    #[test]
    fn step_to_command_mapping() {
        let yaml = r#"
name: "mapping test"
variables:
  user: bob
steps:
  - command: dm
    args:
      username: "$user"
  - command: send
    args:
      text: "hello"
  - command: history
    args:
      count: 10
  - command: wait
    args:
      duration_ms: 500
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd0 = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        assert!(matches!(cmd0, Command::Dm { username } if username == "bob"));

        let cmd1 = runner.step_to_command(&runner.playbook.steps[1]).unwrap();
        assert!(matches!(cmd1, Command::SendMessage { text } if text == "hello"));

        let cmd2 = runner.step_to_command(&runner.playbook.steps[2]).unwrap();
        assert!(matches!(cmd2, Command::History { count: 10 }));

        let cmd3 = runner.step_to_command(&runner.playbook.steps[3]).unwrap();
        assert!(matches!(cmd3, Command::Wait { duration_ms: 500 }));
    }

    // An assert step builds the expected condition with op and value.
    #[test]
    fn parse_assert_step() {
        let yaml = r#"
name: "assert test"
steps:
  - command: assert
    condition: message_count
    op: gte
    value: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        match cmd {
            Command::Assert {
                condition: AssertCondition::MessageCount { op, count },
            } => {
                assert_eq!(op, CmpOp::Gte);
                assert_eq!(count, 5);
            }
            other => panic!("expected Assert MessageCount, got {other:?}"),
        }
    }

    // Loop specs parse and expand to one step per iteration.
    #[test]
    fn parse_loop_spec() {
        let yaml = r#"
name: "loop test"
steps:
  - command: send
    args:
      text: "msg $i"
    loop:
      var: i
      from: 1
      to: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.expanded_step_count(), 5);
        let ls = runner.playbook.steps[0].loop_spec.as_ref().unwrap();
        assert_eq!(ls.var, "i");
        assert_eq!(ls.from, 1);
        assert_eq!(ls.to, 5);
    }

    // `on_error` defaults to `fail`; explicit values deserialize correctly.
    #[test]
    fn on_error_defaults_to_fail() {
        let yaml = r#"
name: "error test"
steps:
  - command: whoami
  - command: list
    on_error: continue
  - command: quit
    on_error: skip
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.steps[0].on_error, OnError::Fail);
        assert_eq!(runner.playbook.steps[1].on_error, OnError::Continue);
        assert_eq!(runner.playbook.steps[2].on_error, OnError::Skip);
    }

    // Both word and symbol spellings of comparison operators parse.
    #[test]
    fn cmp_op_parsing() {
        let runner = PlaybookRunner::from_str("name: t\nsteps: []").unwrap();
        assert!(matches!(runner.parse_cmp_op("eq"), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("=="), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("gte"), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op(">="), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op("<"), Ok(CmpOp::Lt)));
        assert!(runner.parse_cmp_op("invalid").is_err());
    }

    // Display output includes the summary line and per-step verdicts.
    #[test]
    fn report_display() {
        let report = PlaybookReport {
            name: "test".into(),
            total_steps: 3,
            passed: 2,
            failed: 1,
            skipped: 0,
            duration: Duration::from_millis(150),
            step_results: vec![
                StepResult {
                    step_index: 0,
                    command: "whoami".into(),
                    success: true,
                    duration: Duration::from_millis(10),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 1,
                    command: "dm".into(),
                    success: true,
                    duration: Duration::from_millis(50),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 2,
                    command: "assert".into(),
                    success: false,
                    duration: Duration::from_millis(1),
                    output: None,
                    error: Some("message count 0 < 1".into()),
                },
            ],
        };
        let s = format!("{report}");
        assert!(s.contains("2 passed, 1 failed"));
        assert!(s.contains("[3/3] assert ... FAIL"));
    }
}
|
||||
3460
crates/quicprochat-client/src/client/repl.rs
Normal file
3460
crates/quicprochat-client/src/client/repl.rs
Normal file
File diff suppressed because it is too large
Load Diff
207
crates/quicprochat-client/src/client/retry.rs
Normal file
207
crates/quicprochat-client/src/client/retry.rs
Normal file
@@ -0,0 +1,207 @@
|
||||
//! Retry with exponential backoff for transient RPC failures.
|
||||
|
||||
use std::future::Future;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::Rng;
|
||||
use tracing::warn;
|
||||
|
||||
/// Default maximum number of retry attempts (including the first try).
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// Default base delay in milliseconds for exponential backoff.
/// Actual delay doubles per attempt and gets up to 50% random jitter added.
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;
|
||||
|
||||
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
|
||||
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
|
||||
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
|
||||
pub async fn retry_async<F, Fut, T, E, P>(
|
||||
op: F,
|
||||
max_retries: u32,
|
||||
base_delay_ms: u64,
|
||||
is_retriable: P,
|
||||
) -> Result<T, E>
|
||||
where
|
||||
F: Fn() -> Fut,
|
||||
Fut: Future<Output = Result<T, E>>,
|
||||
P: Fn(&E) -> bool,
|
||||
{
|
||||
let mut last_err: Option<E> = None;
|
||||
for attempt in 0..max_retries {
|
||||
match op().await {
|
||||
Ok(t) => return Ok(t),
|
||||
Err(e) => {
|
||||
if !is_retriable(&e) || attempt + 1 >= max_retries {
|
||||
return Err(e);
|
||||
}
|
||||
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
|
||||
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
|
||||
let total_ms = delay_ms + jitter_ms;
|
||||
warn!(
|
||||
attempt = attempt + 1,
|
||||
max_retries,
|
||||
delay_ms = total_ms,
|
||||
"RPC failed, retrying after backoff"
|
||||
);
|
||||
last_err = Some(e);
|
||||
tokio::time::sleep(Duration::from_millis(total_ms)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
match last_err {
|
||||
Some(e) => Err(e),
|
||||
None => unreachable!(
|
||||
"retry_async: last_err is always Some when loop exits after an Err"
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
|
||||
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
|
||||
/// When in doubt, returns `true` (retry).
|
||||
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
|
||||
let s = format!("{:#}", err);
|
||||
let s_lower = s.to_lowercase();
|
||||
// Do not retry: auth / permission
|
||||
if s_lower.contains("unauthorized")
|
||||
|| s_lower.contains("auth failed")
|
||||
|| s_lower.contains("access denied")
|
||||
|| s_lower.contains("401")
|
||||
|| s_lower.contains("forbidden")
|
||||
|| s_lower.contains("403")
|
||||
|| s_lower.contains("token")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Do not retry: bad request / invalid params
|
||||
if s_lower.contains("bad request")
|
||||
|| s_lower.contains("400")
|
||||
|| s_lower.contains("invalid param")
|
||||
|| s_lower.contains("fingerprint mismatch")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Retry: network, timeout, connection, server error, or anything else
|
||||
true
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // An immediately-successful op returns without any retry.
    #[tokio::test]
    async fn retry_success_first_attempt() {
        let result = retry_async(|| async { Ok::<_, String>(42) }, 3, 10, |_| true).await;
        assert_eq!(result.unwrap(), 42);
    }

    // One transient failure followed by success: exactly two invocations.
    #[tokio::test]
    async fn retry_succeeds_after_one_failure() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    let n = c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    if n == 0 {
                        Err("transient failure".to_string())
                    } else {
                        Ok(99)
                    }
                }
            },
            3,
            1, // minimal delay for test speed
            |_| true,
        )
        .await;
        assert_eq!(result.unwrap(), 99);
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 2);
    }

    // A non-retriable error stops after the first attempt.
    #[tokio::test]
    async fn retry_non_retriable_fails_immediately() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("permanent error")
                }
            },
            5,
            1,
            |_: &&str| false, // nothing is retriable
        )
        .await;
        assert!(result.is_err());
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 1);
    }

    // Persistent failure exhausts exactly `max_retries` attempts.
    #[tokio::test]
    async fn retry_exhausts_all_attempts() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("still failing")
                }
            },
            3,
            1,
            |_| true,
        )
        .await;
        assert!(result.is_err());
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 3);
    }

    // Table-driven check of the substring-based error classifier.
    #[test]
    fn anyhow_is_retriable_classifications() {
        // Auth errors should NOT be retriable
        let auth_errors = [
            "unauthorized access",
            "HTTP 401 Unauthorized",
            "forbidden resource",
            "HTTP 403 Forbidden",
            "auth failed for user",
            "access denied",
            "invalid token",
        ];
        for msg in &auth_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }

        // Bad-request errors should NOT be retriable
        let bad_req_errors = [
            "bad request: missing field",
            "HTTP 400 Bad Request",
            "invalid param: username",
            "fingerprint mismatch",
        ];
        for msg in &bad_req_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }

        // Transient errors SHOULD be retriable
        let transient_errors = [
            "connection refused",
            "network timeout",
            "server error 500",
            "stream reset",
            "something unknown happened",
        ];
        for msg in &transient_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(anyhow_is_retriable(&err), "expected retriable: {msg}");
        }
    }
}
|
||||
978
crates/quicprochat-client/src/client/rpc.rs
Normal file
978
crates/quicprochat-client/src/client/rpc.rs
Normal file
@@ -0,0 +1,978 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use quinn::{ClientConfig, Endpoint};
|
||||
use quinn_proto::crypto::rustls::QuicClientConfig;
|
||||
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
||||
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
|
||||
use quicprochat_core::HybridPublicKey;
|
||||
use quicprochat_proto::node_capnp::{auth, node_service};
|
||||
|
||||
use crate::{AUTH_CONTEXT, INSECURE_SKIP_VERIFY};
|
||||
|
||||
use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES};
|
||||
|
||||
/// Cap'n Proto traversal limit (words). 4 Mi words = 32 MiB; bounds DoS from deeply nested or large messages.
// NOTE(review): defined here for reader options; the line applying it is not
// visible in this chunk — confirm it is wired into the RPC reader config.
const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024;
|
||||
|
||||
/// A [`rustls::client::danger::ServerCertVerifier`] that accepts any certificate.
///
/// **Development only.** Using this in production disables all TLS guarantees.
/// Selected by `connect_node_opt` when `insecure_skip_verify` is true.
#[derive(Debug)]
struct InsecureServerCertVerifier;
|
||||
|
||||
impl rustls::client::danger::ServerCertVerifier for InsecureServerCertVerifier {
    // Unconditionally accepts the server certificate chain — all inputs ignored.
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        _now: UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }

    // Unconditionally accepts any TLS 1.2 handshake signature.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    // Unconditionally accepts any TLS 1.3 handshake signature.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    // Advertise every scheme the ring provider supports, so negotiation never
    // fails on signature-scheme mismatch.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}
|
||||
|
||||
/// Establish a QUIC/TLS connection and return a `NodeService` client.
|
||||
///
|
||||
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
|
||||
///
|
||||
/// Reads [`INSECURE_SKIP_VERIFY`] to decide whether to bypass certificate
|
||||
/// verification (set once at startup via [`crate::set_insecure_skip_verify`]).
|
||||
pub async fn connect_node(
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
) -> anyhow::Result<node_service::Client> {
|
||||
let skip = INSECURE_SKIP_VERIFY.load(std::sync::atomic::Ordering::Relaxed);
|
||||
connect_node_opt(server, ca_cert, server_name, skip).await
|
||||
}
|
||||
|
||||
/// Like [`connect_node`] but with an explicit `insecure_skip_verify` toggle.
|
||||
///
|
||||
/// When `insecure_skip_verify` is `true`, certificate verification is disabled entirely.
|
||||
/// This is intended for development and testing only.
|
||||
pub async fn connect_node_opt(
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
insecure_skip_verify: bool,
|
||||
) -> anyhow::Result<node_service::Client> {
|
||||
let addr: SocketAddr = server
|
||||
.parse()
|
||||
.with_context(|| format!("server must be host:port, got {server}"))?;
|
||||
|
||||
let mut tls = if insecure_skip_verify {
|
||||
RustlsClientConfig::builder()
|
||||
.dangerous()
|
||||
.with_custom_certificate_verifier(Arc::new(InsecureServerCertVerifier))
|
||||
.with_no_client_auth()
|
||||
} else {
|
||||
let cert_bytes =
|
||||
std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
|
||||
let mut roots = RootCertStore::empty();
|
||||
roots
|
||||
.add(CertificateDer::from(cert_bytes))
|
||||
.context("add root cert")?;
|
||||
RustlsClientConfig::builder()
|
||||
.with_root_certificates(roots)
|
||||
.with_no_client_auth()
|
||||
};
|
||||
tls.alpn_protocols = vec![b"capnp".to_vec()];
|
||||
|
||||
let crypto = QuicClientConfig::try_from(tls)
|
||||
.map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;
|
||||
|
||||
let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
|
||||
let mut endpoint = Endpoint::client(bind_addr)?;
|
||||
endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));
|
||||
|
||||
let connection = endpoint
|
||||
.connect(addr, server_name)
|
||||
.context("quic connect init")?
|
||||
.await
|
||||
.context("quic connect failed")?;
|
||||
|
||||
let (send, recv) = connection.open_bi().await.context("open bi stream")?;
|
||||
|
||||
let mut reader_opts = capnp::message::ReaderOptions::new();
|
||||
reader_opts.traversal_limit_in_words(Some(CAPNP_TRAVERSAL_LIMIT_WORDS));
|
||||
let network = twoparty::VatNetwork::new(
|
||||
recv.compat(),
|
||||
send.compat_write(),
|
||||
Side::Client,
|
||||
reader_opts,
|
||||
);
|
||||
|
||||
let mut rpc_system = RpcSystem::new(Box::new(network), None);
|
||||
let client: node_service::Client = rpc_system.bootstrap(Side::Server);
|
||||
|
||||
tokio::task::spawn_local(rpc_system);
|
||||
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
|
||||
let guard = AUTH_CONTEXT
|
||||
.read()
|
||||
.map_err(|e| anyhow::anyhow!("AUTH_CONTEXT lock poisoned: {e}"))?;
|
||||
let ctx = guard.as_ref().ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"init_auth must be called before RPCs (use a bearer or session token for authenticated commands)"
|
||||
)
|
||||
})?;
|
||||
auth.set_version(ctx.version);
|
||||
auth.set_access_token(&ctx.access_token);
|
||||
auth.set_device_id(&ctx.device_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
|
||||
pub async fn upload_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
package: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_package(package);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_key_package RPC failed")?;
|
||||
|
||||
let server_fp = resp
|
||||
.get()
|
||||
.context("upload_key_package: bad response")?
|
||||
.get_fingerprint()
|
||||
.context("upload_key_package: missing fingerprint")?
|
||||
.to_vec();
|
||||
|
||||
let local_fp = super::state::sha256(package);
|
||||
anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a KeyPackage for `identity_key` from the AS.
|
||||
pub async fn fetch_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.fetch_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_key_package RPC failed")?;
|
||||
|
||||
let pkg = resp
|
||||
.get()
|
||||
.context("fetch_key_package: bad response")?
|
||||
.get_package()
|
||||
.context("fetch_key_package: missing package field")?
|
||||
.to_vec();
|
||||
|
||||
Ok(pkg)
|
||||
}
|
||||
|
||||
/// Enqueue an opaque payload to the DS for `recipient_key`.
|
||||
/// Returns the per-inbox sequence number assigned by the server.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn enqueue(
|
||||
client: &node_service::Client,
|
||||
recipient_key: &[u8],
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<u64> {
|
||||
enqueue_with_ttl(client, recipient_key, payload, None).await
|
||||
}
|
||||
|
||||
/// Enqueue with an optional TTL (seconds). 0 or None means no expiry.
///
/// # Errors
/// Fails when all retry attempts are exhausted, the response is malformed,
/// or the auth context is missing.
pub async fn enqueue_with_ttl(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
    ttl_secs: Option<u32>,
) -> anyhow::Result<u64> {
    // Own the inputs so the retry closure can be called repeatedly; the
    // async block below consumes its captures, so each attempt re-clones.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    // NOTE(review): empty channel id presumably selects the
                    // recipient's default inbox — confirm against the server.
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    if let Some(ttl) = ttl_secs {
                        p.set_ttl_secs(ttl);
                    }
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                // Server-assigned per-inbox sequence number.
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch and drain all payloads for `recipient_key`.
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_all(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so the retry closure can build a fresh future per attempt.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch: bad response")?
                    .get_payloads()
                    .context("fetch: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp arena so the
                // result outlives the response message.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Long-poll for payloads with optional timeout (ms).
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_wait(
    client: &node_service::Client,
    recipient_key: &[u8],
    timeout_ms: u64,
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so the retry closure can build a fresh future per attempt
    // (timeout_ms is Copy and is captured directly by the async block).
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_wait_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    // Server-side long-poll budget in milliseconds.
                    p.set_timeout_ms(timeout_ms);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch_wait RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch_wait: bad response")?
                    .get_payloads()
                    .context("fetch_wait: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp arena.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch_wait: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
|
||||
pub async fn upload_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: &HybridPublicKey,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_hybrid_key RPC failed")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a peer's hybrid public key from the server.
|
||||
///
|
||||
/// Returns `None` if the peer has not uploaded a hybrid key.
|
||||
pub async fn fetch_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Option<HybridPublicKey>> {
|
||||
let mut req = client.fetch_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_hybrid_key RPC failed")?;
|
||||
|
||||
let pk_bytes = resp
|
||||
.get()
|
||||
.context("fetch_hybrid_key: bad response")?
|
||||
.get_hybrid_public_key()
|
||||
.context("fetch_hybrid_key: missing field")?
|
||||
.to_vec();
|
||||
|
||||
if pk_bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
|
||||
Ok(Some(pk))
|
||||
}
|
||||
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
pub fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&quicprochat_core::HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
quicprochat_core::hybrid_decrypt(kp, payload, b"", b"").map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
/// Peek at queued payloads without removing them.
/// Returns `(seq, payload)` pairs sorted by seq.
/// Retries on transient failures with exponential backoff.
pub async fn peek(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so the retry closure can build a fresh future per attempt.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.peek_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // peek all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("peek RPC failed")?;

                let list = resp
                    .get()
                    .context("peek: bad response")?
                    .get_payloads()
                    .context("peek: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp arena.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("peek: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Acknowledge all messages up to and including `seq_up_to`.
/// Retries on transient failures with exponential backoff.
pub async fn ack(
    client: &node_service::Client,
    recipient_key: &[u8],
    seq_up_to: u64,
) -> anyhow::Result<()> {
    // Own the inputs so the retry closure can be invoked more than once
    // (seq_up_to is Copy and is captured directly by the async block).
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.ack_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_seq_up_to(seq_up_to);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                // Response carries no payload — a clean completion is the signal.
                req.send().promise.await.context("ack RPC failed")?;
                Ok(())
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch multiple peers' hybrid keys in a single round-trip.
/// Returns `None` for peers who have not uploaded a hybrid key.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_hybrid_keys(
    client: &node_service::Client,
    identity_keys: &[&[u8]],
) -> anyhow::Result<Vec<Option<HybridPublicKey>>> {
    // Own the inputs so the retry closure can build a fresh future per attempt.
    let client = client.clone();
    let identity_keys: Vec<Vec<u8>> = identity_keys.iter().map(|k| k.to_vec()).collect();
    retry_async(
        || {
            let client = client.clone();
            let identity_keys = identity_keys.clone();
            async move {
                let mut req = client.fetch_hybrid_keys_request();
                {
                    let mut p = req.get();
                    // capnp lists are sized up front, then filled by index.
                    let mut list = p.reborrow().init_identity_keys(identity_keys.len() as u32);
                    for (i, ik) in identity_keys.iter().enumerate() {
                        list.set(i as u32, ik);
                    }
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req
                    .send()
                    .promise
                    .await
                    .context("fetch_hybrid_keys RPC failed")?;

                let keys = resp
                    .get()
                    .context("fetch_hybrid_keys: bad response")?
                    .get_keys()
                    .context("fetch_hybrid_keys: missing keys")?;

                // Results are positional: keys[i] answers identity_keys[i];
                // an empty blob means that peer has no hybrid key.
                let mut result = Vec::with_capacity(keys.len() as usize);
                for i in 0..keys.len() {
                    let pk_bytes = keys
                        .get(i)
                        .context("fetch_hybrid_keys: key read failed")?
                        .to_vec();
                    if pk_bytes.is_empty() {
                        result.push(None);
                    } else {
                        let pk = HybridPublicKey::from_bytes(&pk_bytes)
                            .context("invalid hybrid public key")?;
                        result.push(Some(pk));
                    }
                }

                Ok(result)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Enqueue the same payload to multiple recipients in a single round-trip.
/// Returns per-recipient sequence numbers.
/// Retries on transient failures with exponential backoff.
pub async fn batch_enqueue(
    client: &node_service::Client,
    recipient_keys: &[&[u8]],
    payload: &[u8],
) -> anyhow::Result<Vec<u64>> {
    // Own the inputs so the retry closure can build a fresh future per attempt.
    let client = client.clone();
    let recipient_keys: Vec<Vec<u8>> = recipient_keys.iter().map(|k| k.to_vec()).collect();
    let payload = payload.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_keys = recipient_keys.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.batch_enqueue_request();
                {
                    let mut p = req.get();
                    // capnp lists are sized up front, then filled by index.
                    let mut list = p.reborrow().init_recipient_keys(recipient_keys.len() as u32);
                    for (i, rk) in recipient_keys.iter().enumerate() {
                        list.set(i as u32, rk);
                    }
                    p.set_payload(&payload);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req
                    .send()
                    .promise
                    .await
                    .context("batch_enqueue RPC failed")?;

                let seqs = resp
                    .get()
                    .context("batch_enqueue: bad response")?
                    .get_seqs()
                    .context("batch_enqueue: missing seqs")?;

                // Results are positional: seqs[i] is the sequence number
                // assigned for recipient_keys[i].
                let mut result = Vec::with_capacity(seqs.len() as usize);
                for i in 0..seqs.len() {
                    result.push(seqs.get(i));
                }

                Ok(result)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Resolve a username to its Ed25519 identity key (32 bytes).
///
/// When the server returns a non-empty `inclusionProof`, the client verifies it
/// against the identity key using the Key Transparency Merkle proof. Proof
/// verification failure is treated as a hard error (the server is misbehaving).
/// If the server sends no proof (empty field), the key is returned as-is —
/// callers can decide whether to require proofs for security-critical flows.
///
/// Returns `None` if the username is not registered.
pub async fn resolve_user(
    client: &node_service::Client,
    username: &str,
) -> anyhow::Result<Option<Vec<u8>>> {
    let mut req = client.resolve_user_request();
    {
        let mut p = req.get();
        p.set_username(username);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }

    let resp = req
        .send()
        .promise
        .await
        .context("resolve_user RPC failed")?;

    let reader = resp.get().context("resolve_user: bad response")?;

    let key = reader
        .get_identity_key()
        .context("resolve_user: missing identity_key field")?
        .to_vec();

    // An empty key is the server's "unknown username" marker.
    if key.is_empty() {
        return Ok(None);
    }

    // Verify the KT inclusion proof when the server sends one.
    let proof_bytes = reader
        .get_inclusion_proof()
        .context("resolve_user: missing inclusion_proof field")?
        .to_vec();

    if !proof_bytes.is_empty() {
        let proof = quicprochat_kt::InclusionProof::from_bytes(&proof_bytes)
            .context("resolve_user: inclusion proof deserialise failed")?;
        // A failed proof means the server lied about the username→key binding.
        quicprochat_kt::verify_inclusion(&proof, username, &key)
            .context("resolve_user: KT inclusion proof verification FAILED — possible key mislabelling")?;
    }

    Ok(Some(key))
}
|
||||
|
||||
/// Reverse lookup: resolve an identity key to the registered username.
|
||||
/// Returns `None` if no username is associated with the key.
|
||||
pub async fn resolve_identity(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Option<String>> {
|
||||
let mut req = client.resolve_identity_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("resolve_identity RPC failed")?;
|
||||
|
||||
let username = resp
|
||||
.get()
|
||||
.context("resolve_identity: bad response")?
|
||||
.get_username()
|
||||
.context("resolve_identity: missing field")?
|
||||
.to_str()
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
if username.is_empty() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(username))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a 1:1 DM channel with a peer.
|
||||
///
|
||||
/// Returns `(channel_id, was_new)` where `channel_id` is the stable 16-byte identifier and
|
||||
/// `was_new` is `true` iff this call created the channel for the first time. When `was_new` is
|
||||
/// `false`, the channel already existed (created by the peer), and the caller should wait for
|
||||
/// the peer's MLS Welcome to arrive via the background poller rather than creating a new MLS group.
|
||||
pub async fn create_channel(
|
||||
client: &node_service::Client,
|
||||
peer_key: &[u8],
|
||||
) -> anyhow::Result<(Vec<u8>, bool)> {
|
||||
let mut req = client.create_channel_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_peer_key(peer_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("create_channel RPC failed")?;
|
||||
|
||||
let reader = resp.get().context("create_channel: bad response")?;
|
||||
let channel_id = reader
|
||||
.get_channel_id()
|
||||
.context("create_channel: missing channel_id")?
|
||||
.to_vec();
|
||||
let was_new = reader.get_was_new();
|
||||
|
||||
Ok((channel_id, was_new))
|
||||
}
|
||||
|
||||
/// Upload a single chunk of a blob to the server.
|
||||
///
|
||||
/// `blob_hash` is the expected SHA-256 hash (32 bytes) of the complete blob.
|
||||
/// Returns the `blob_id` once the server has received and verified the final chunk.
|
||||
pub async fn upload_blob_chunk(
|
||||
client: &node_service::Client,
|
||||
blob_hash: &[u8],
|
||||
chunk: &[u8],
|
||||
offset: u64,
|
||||
total_size: u64,
|
||||
mime_type: &str,
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.upload_blob_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
p.set_blob_hash(blob_hash);
|
||||
p.set_chunk(chunk);
|
||||
p.set_offset(offset);
|
||||
p.set_total_size(total_size);
|
||||
p.set_mime_type(mime_type);
|
||||
}
|
||||
let resp = req.send().promise.await.context("upload_blob RPC failed")?;
|
||||
let blob_id = resp
|
||||
.get()
|
||||
.context("upload_blob: bad response")?
|
||||
.get_blob_id()
|
||||
.context("upload_blob: missing blob_id")?
|
||||
.to_vec();
|
||||
Ok(blob_id)
|
||||
}
|
||||
|
||||
/// Download a single chunk of a blob from the server.
|
||||
///
|
||||
/// Returns `(chunk_bytes, total_size, mime_type)`.
|
||||
pub async fn download_blob_chunk(
|
||||
client: &node_service::Client,
|
||||
blob_id: &[u8],
|
||||
offset: u64,
|
||||
length: u32,
|
||||
) -> anyhow::Result<(Vec<u8>, u64, String)> {
|
||||
let mut req = client.download_blob_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
p.set_blob_id(blob_id);
|
||||
p.set_offset(offset);
|
||||
p.set_length(length);
|
||||
}
|
||||
let resp = req.send().promise.await.context("download_blob RPC failed")?;
|
||||
let reader = resp.get().context("download_blob: bad response")?;
|
||||
let chunk = reader.get_chunk().context("download_blob: missing chunk")?.to_vec();
|
||||
let total_size = reader.get_total_size();
|
||||
let mime_type = reader
|
||||
.get_mime_type()
|
||||
.context("download_blob: missing mime_type")?
|
||||
.to_str()
|
||||
.unwrap_or("application/octet-stream")
|
||||
.to_string();
|
||||
Ok((chunk, total_size, mime_type))
|
||||
}
|
||||
|
||||
/// Delete the authenticated user's account on the server.
|
||||
/// Requires an identity-bound session (OPAQUE login).
|
||||
pub async fn delete_account(
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.delete_account_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("delete_account RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("delete_account: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// Register a device for the authenticated identity.
|
||||
pub async fn register_device(
|
||||
client: &node_service::Client,
|
||||
device_id: &[u8],
|
||||
device_name: &str,
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.register_device_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_device_id(device_id);
|
||||
p.set_device_name(device_name);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("register_device RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("register_device: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// List all registered devices for the authenticated identity.
|
||||
pub async fn list_devices(
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<Vec<(Vec<u8>, String, u64)>> {
|
||||
let mut req = client.list_devices_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("list_devices RPC failed")?;
|
||||
|
||||
let devices = resp
|
||||
.get()
|
||||
.context("list_devices: bad response")?
|
||||
.get_devices()
|
||||
.context("list_devices: missing devices field")?;
|
||||
|
||||
let mut result = Vec::with_capacity(devices.len() as usize);
|
||||
for i in 0..devices.len() {
|
||||
let entry = devices.get(i);
|
||||
let device_id = entry
|
||||
.get_device_id()
|
||||
.context("list_devices: missing device_id")?
|
||||
.to_vec();
|
||||
let device_name = entry
|
||||
.get_device_name()
|
||||
.context("list_devices: missing device_name")?
|
||||
.to_str()
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
let registered_at = entry.get_registered_at();
|
||||
result.push((device_id, device_name, registered_at));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Revoke (remove) a registered device.
|
||||
pub async fn revoke_device(
|
||||
client: &node_service::Client,
|
||||
device_id: &[u8],
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.revoke_device_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_device_id(device_id);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("revoke_device RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("revoke_device: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// Return the current Unix timestamp in milliseconds.
///
/// Clamps to 0 if the system clock reads before the Unix epoch.
pub fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    since_epoch.as_millis() as u64
}
|
||||
305
crates/quicprochat-client/src/client/session.rs
Normal file
305
crates/quicprochat-client/src/client/session.rs
Normal file
@@ -0,0 +1,305 @@
|
||||
//! Runtime session state for the interactive REPL.
|
||||
//!
|
||||
//! Wraps the legacy `StoredState` (identity + hybrid key) and adds
|
||||
//! multi-conversation management via `ConversationStore`.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, IdentityKeypair};
|
||||
|
||||
use super::conversation::{
|
||||
now_ms, Conversation, ConversationId, ConversationKind, ConversationStore,
|
||||
};
|
||||
use super::state::load_or_init_state;
|
||||
|
||||
/// Runtime state for an interactive REPL session.
///
/// Wraps the long-term identity plus the SQLite-backed conversation store,
/// and holds per-session preferences (typing indicators, redaction, TTLs).
pub struct SessionState {
    /// Long-term identity keypair.
    pub identity: Arc<IdentityKeypair>,
    /// Post-quantum hybrid keypair.
    pub hybrid_kp: Option<HybridKeypair>,
    /// Path to the legacy state file (for backward compat with one-shot commands).
    pub state_path: PathBuf,
    /// Optional password for the legacy state file. Zeroized on drop. (M9)
    pub password: Option<Zeroizing<String>>,
    /// SQLite-backed conversation + message store.
    pub conv_store: ConversationStore,
    /// Currently active conversation.
    pub active_conversation: Option<ConversationId>,
    /// In-memory GroupMember instances keyed by conversation ID.
    pub members: HashMap<ConversationId, GroupMember>,
    /// Holds the GroupMember whose KeyPackage was uploaded to the server.
    /// Its keystore contains the HPKE init private key needed to decrypt
    /// incoming Welcome messages. Consumed on auto-join, then replenished.
    pub pending_member: Option<GroupMember>,
    /// Whether to display typing indicators from others (session preference).
    pub typing_notify_enabled: bool,
    /// Tracks who is currently typing and when the indicator was last received.
    /// Entries older than 10 seconds are considered expired.
    pub typing_indicators: HashMap<String, Instant>,
    /// Per-conversation disappearing message TTL in seconds. None = messages persist.
    pub disappear_ttl: HashMap<ConversationId, u32>,
    /// When true, /members and /group-info redact identity keys as `[redacted-XXXX]`.
    pub redact_keys: bool,
    /// When Some(secs), auto-clear local messages older than this duration.
    pub auto_clear_secs: Option<u32>,
    /// When true, send periodic dummy messages for traffic analysis resistance.
    pub padding_enabled: bool,
    /// Last epoch at which we sent a message (for /verify-fs).
    pub last_send_epoch: Option<u64>,
    /// P2P node for direct mesh messaging (requires `--features mesh`).
    #[cfg(feature = "mesh")]
    pub p2p_node: Option<Arc<quicprochat_p2p::P2pNode>>,
}
|
||||
|
||||
impl SessionState {
|
||||
    /// Load identity from the legacy state file, open the conversation store,
    /// and migrate any existing single-group state into the conversation DB.
    ///
    /// # Errors
    /// Fails when the state file cannot be read/decrypted, the stored hybrid
    /// key fails to decode, the conversation DB cannot be opened, or
    /// migration/member loading fails.
    pub fn load(
        state_path: &Path,
        password: Option<&str>,
    ) -> anyhow::Result<Self> {
        let state = load_or_init_state(state_path, password)?;

        // Rebuild the long-term identity deterministically from its stored seed.
        let identity = Arc::new(IdentityKeypair::from_seed(state.identity_seed));
        let hybrid_kp = state
            .hybrid_key
            .as_ref()
            .map(HybridKeypair::from_bytes)
            .transpose()
            .context("decode hybrid key")?;

        // Open the conversation DB next to the state file.
        // When a state password is provided, encrypt the DB with SQLCipher.
        let db_path = state_path.with_extension("convdb");
        let conv_store = ConversationStore::open(&db_path, password)?;

        let mut session = Self {
            identity,
            hybrid_kp,
            state_path: state_path.to_path_buf(),
            password: password.map(|p| Zeroizing::new(String::from(p))),
            conv_store,
            active_conversation: None,
            members: HashMap::new(),
            pending_member: None,
            typing_notify_enabled: true,
            typing_indicators: HashMap::new(),
            disappear_ttl: HashMap::new(),
            redact_keys: false,
            auto_clear_secs: None,
            padding_enabled: false,
            last_send_epoch: None,
            #[cfg(feature = "mesh")]
            p2p_node: None,
        };

        // Migrate legacy single-group into conversations if present and not yet migrated.
        if state.group.is_some() {
            session.migrate_legacy_group(state_path, &state.group)?;
        }

        // Load all existing conversations' GroupMembers into memory.
        session.load_all_members()?;

        Ok(session)
    }
|
||||
|
||||
/// Migrate the legacy single-group from StoredState into the conversation DB.
///
/// No-op when there is no legacy blob, when the blob cannot be parsed with
/// the current openmls storage format, or when the derived conversation
/// already exists in the DB (migration already ran).
fn migrate_legacy_group(
    &mut self,
    _state_path: &Path,
    group_blob: &Option<Vec<u8>>,
) -> anyhow::Result<()> {
    let blob = match group_blob {
        Some(b) => b,
        None => return Ok(()),
    };

    // Legacy group blobs used openmls 0.5 serde format. After the 0.8
    // upgrade the blob format changed to storage-provider state. Attempt
    // to load from the new format; if that fails, skip the legacy group.
    let group_id_guess = &blob[..blob.len().min(16)];
    let member = match GroupMember::new_from_storage_bytes(
        Arc::clone(&self.identity),
        blob,
        group_id_guess,
        false, // legacy groups are classical
    ) {
        Ok(m) => m,
        Err(e) => {
            // Non-fatal: the user just loses the pre-upgrade group.
            tracing::warn!(error = %e, "skipping incompatible legacy group blob (openmls version mismatch)");
            return Ok(());
        }
    };

    let group_id_bytes = member.group_id().unwrap_or_default();

    // Use the first 16 bytes of the group_id as the ConversationId.
    let conv_id = if group_id_bytes.len() >= 16 {
        ConversationId::from_slice(&group_id_bytes[..16])
            .unwrap_or(ConversationId([0; 16]))
    } else {
        // Short group IDs: derive a deterministic ID from the hex name.
        ConversationId::from_group_name(&hex::encode(&group_id_bytes))
    };

    // Check if already migrated.
    if self.conv_store.load_conversation(&conv_id)?.is_some() {
        return Ok(());
    }

    let member_keys = member.member_identities();
    // Display label: first 8 hex chars of the group id (fewer if shorter).
    let short_id = &hex::encode(&group_id_bytes)[..8.min(group_id_bytes.len() * 2)];
    let conv = Conversation {
        id: conv_id.clone(),
        kind: ConversationKind::Group {
            name: format!("legacy-{short_id}"),
        },
        display_name: format!("legacy-{short_id}"),
        mls_group_blob: Some(blob.clone()),
        keystore_blob: None,
        member_keys,
        unread_count: 0,
        last_activity_ms: now_ms(),
        created_at_ms: now_ms(),
        is_hybrid: false,
        last_seen_seq: 0,
    };

    self.conv_store.save_conversation(&conv)?;
    self.members.insert(conv_id, member);

    Ok(())
}
|
||||
|
||||
/// Load all conversations from the DB and create in-memory GroupMember instances.
|
||||
fn load_all_members(&mut self) -> anyhow::Result<()> {
|
||||
let convs = self.conv_store.list_conversations()?;
|
||||
for conv in convs {
|
||||
if self.members.contains_key(&conv.id) {
|
||||
continue;
|
||||
}
|
||||
let member = self.create_member_from_conv(&conv)?;
|
||||
self.members.insert(conv.id.clone(), member);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a GroupMember from a stored conversation.
///
/// If the conversation carries serialized MLS state, restore from it;
/// otherwise build an empty member backed by the per-conversation keystore
/// (falling back to an ephemeral keystore if the file cannot be opened).
fn create_member_from_conv(&self, conv: &Conversation) -> anyhow::Result<GroupMember> {
    if let Some(blob) = conv.mls_group_blob.as_ref() {
        // The ConversationId bytes double as the MLS group id here.
        let group_id = conv.id.0.as_slice();
        let member = GroupMember::new_from_storage_bytes(
            Arc::clone(&self.identity),
            blob,
            group_id,
            conv.is_hybrid,
        )
        .context("restore MLS state from conversation db")?;
        Ok(member)
    } else {
        // No MLS state — create an empty member.
        let ks_path = self.keystore_path_for(&conv.id);
        let ks = DiskKeyStore::persistent(&ks_path)
            .unwrap_or_else(|e| {
                // Best-effort: an ephemeral keystore keeps the session usable
                // at the cost of not persisting keys across restarts.
                tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral");
                DiskKeyStore::ephemeral()
            });
        Ok(GroupMember::new_with_state(
            Arc::clone(&self.identity),
            ks,
            None,
            conv.is_hybrid,
        ))
    }
}
|
||||
|
||||
/// Path for a per-conversation keystore file.
|
||||
fn keystore_path_for(&self, conv_id: &ConversationId) -> PathBuf {
|
||||
let dir = self.state_path.with_extension("keystores");
|
||||
dir.join(format!("{}.ks", conv_id.hex()))
|
||||
}
|
||||
|
||||
/// Persist a conversation's MLS group state back to the DB.
///
/// # Errors
/// Fails if the conversation has no in-memory member, serialization fails,
/// or the DB write fails. If the conversation row is missing from the DB,
/// nothing is written (silent no-op).
pub fn save_member(&self, conv_id: &ConversationId) -> anyhow::Result<()> {
    let member = self.members.get(conv_id).context("no such conversation")?;
    let blob = member
        .serialize_mls_state()
        .context("serialize MLS state")?;

    let member_keys = member.member_identities();

    // Update the mls_group_blob and member_keys in the DB.
    if let Some(mut conv) = self.conv_store.load_conversation(conv_id)? {
        conv.mls_group_blob = blob;
        conv.member_keys = member_keys;
        self.conv_store.save_conversation(&conv)?;
    }

    Ok(())
}
|
||||
|
||||
/// Persist all in-memory group states back to the DB.
|
||||
pub fn save_all(&self) -> anyhow::Result<()> {
|
||||
for conv_id in self.members.keys() {
|
||||
if let Err(e) = self.save_member(conv_id) {
|
||||
tracing::warn!(conv = %conv_id.hex(), error = %e, "failed to save conversation");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Add a new conversation and its GroupMember to the session.
|
||||
pub fn add_conversation(
|
||||
&mut self,
|
||||
conv: Conversation,
|
||||
member: GroupMember,
|
||||
) -> anyhow::Result<()> {
|
||||
// Ensure keystore directory exists
|
||||
let ks_path = self.keystore_path_for(&conv.id);
|
||||
if let Some(parent) = ks_path.parent() {
|
||||
std::fs::create_dir_all(parent).ok();
|
||||
}
|
||||
|
||||
self.conv_store.save_conversation(&conv)?;
|
||||
self.members.insert(conv.id.clone(), member);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a mutable reference to a conversation's GroupMember.
|
||||
pub fn get_member_mut(&mut self, conv_id: &ConversationId) -> Option<&mut GroupMember> {
|
||||
self.members.get_mut(conv_id)
|
||||
}
|
||||
|
||||
/// Public key bytes for this identity.
|
||||
pub fn identity_bytes(&self) -> Vec<u8> {
|
||||
self.identity.public_key_bytes().to_vec()
|
||||
}
|
||||
|
||||
/// Short hex prefix of the identity key for display.
|
||||
pub fn identity_short(&self) -> String {
|
||||
hex::encode(&self.identity.public_key_bytes()[..4])
|
||||
}
|
||||
|
||||
/// Get display name of a conversation.
|
||||
pub fn active_display_name(&self) -> Option<String> {
|
||||
let id = self.active_conversation.as_ref()?;
|
||||
self.conv_store.load_conversation(id).ok().flatten().map(|c| c.display_name)
|
||||
}
|
||||
|
||||
/// Count total unread across all conversations.
|
||||
pub fn total_unread(&self) -> u32 {
|
||||
self.conv_store
|
||||
.list_conversations()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.map(|c| c.unread_count)
|
||||
.sum()
|
||||
}
|
||||
}
|
||||
309
crates/quicprochat-client/src/client/state.rs
Normal file
309
crates/quicprochat-client/src/client/state.rs
Normal file
@@ -0,0 +1,309 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
|
||||
|
||||
/// Magic bytes for encrypted client state files.
const STATE_MAGIC: &[u8; 4] = b"QPCE";
// Length of the random salt stored in the file header (fed to Argon2id).
const STATE_SALT_LEN: usize = 16;
// Length of the ChaCha20-Poly1305 nonce stored in the file header.
const STATE_NONCE_LEN: usize = 12;
|
||||
|
||||
/// On-disk client state: identity seed, optional MLS group snapshot, and
/// optional post-quantum hybrid keypair. Serialized with bincode and
/// optionally QPCE-encrypted.
#[derive(Serialize, Deserialize)]
pub struct StoredState {
    // 32-byte seed the identity keypair is derived from
    // (see IdentityKeypair::from_seed).
    pub identity_seed: [u8; 32],
    // Serialized MLS group state (openmls storage-provider bytes), if any.
    pub group: Option<Vec<u8>>,
    /// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added.
    #[serde(default)]
    pub hybrid_key: Option<HybridKeypairBytes>,
    /// Cached member public keys for group participants.
    #[serde(default)]
    pub member_keys: Vec<Vec<u8>>,
    /// MLS group ID bytes, needed to reload the group from StorageProvider state.
    #[serde(default)]
    pub group_id: Option<Vec<u8>>,
}
|
||||
|
||||
impl StoredState {
    /// Split the stored state into a live `GroupMember` plus optional hybrid
    /// keypair. Restores MLS state when both a group blob and a group id are
    /// present; otherwise starts a fresh member on the disk keystore.
    pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> {
        let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
        // Hybrid (PQ) mode is inferred from the presence of a stored hybrid key.
        let hybrid = self.hybrid_key.is_some();

        let member = match (self.group.as_ref(), self.group_id.as_ref()) {
            (Some(storage_bytes), Some(gid)) => {
                GroupMember::new_from_storage_bytes(
                    identity,
                    storage_bytes,
                    gid,
                    hybrid,
                )
                .context("restore MLS state from stored state")?
            }
            _ => {
                // Missing blob or group id: nothing to restore.
                let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
                GroupMember::new_with_state(identity, key_store, None, hybrid)
            }
        };

        let hybrid_kp = self
            .hybrid_key
            .map(|bytes| HybridKeypair::from_bytes(&bytes).context("decode hybrid key"))
            .transpose()?;

        Ok((member, hybrid_kp))
    }

    /// Build a serializable snapshot from a live member + optional hybrid key.
    // NOTE(review): member_keys is written out empty here — presumably
    // re-populated by callers before/after persisting; confirm.
    pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
        let group = member
            .serialize_mls_state()
            .context("serialize MLS state")?;

        Ok(Self {
            identity_seed: *member.identity_seed(),
            group,
            hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
            member_keys: Vec::new(),
            group_id: member.group_id(),
        })
    }
}
|
||||
|
||||
/// Argon2id parameters for client state key derivation (auditable; matches argon2 crate defaults).
/// - Memory: 19 MiB (m_cost = 19*1024 KiB)
/// - Time: 2 iterations
/// - Parallelism: 1 lane
const ARGON2_STATE_M_COST: u32 = 19 * 1024;
// Iteration count (t_cost).
const ARGON2_STATE_T_COST: u32 = 2;
// Lane count (p_cost).
const ARGON2_STATE_P_COST: u32 = 1;
|
||||
|
||||
/// Derive a 32-byte key from a password and salt using Argon2id with explicit parameters.
|
||||
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
|
||||
let params = Params::new(ARGON2_STATE_M_COST, ARGON2_STATE_T_COST, ARGON2_STATE_P_COST, Some(32))
|
||||
.map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = [0u8; 32];
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut key)
|
||||
.map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Encrypt `plaintext` with the QPCE format: magic(4) | salt(16) | nonce(12) | ciphertext.
|
||||
pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let mut salt = [0u8; STATE_SALT_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||
|
||||
let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
|
||||
|
||||
let key = zeroize::Zeroizing::new(derive_state_key(password, &salt)?);
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;
|
||||
|
||||
let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
|
||||
out.extend_from_slice(STATE_MAGIC);
|
||||
out.extend_from_slice(&salt);
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
out.extend_from_slice(&ciphertext);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Decrypt a QPCE-formatted state file.
|
||||
pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
|
||||
anyhow::ensure!(
|
||||
data.len() > header_len,
|
||||
"encrypted state file too short ({} bytes)",
|
||||
data.len()
|
||||
);
|
||||
|
||||
let salt = &data[4..4 + STATE_SALT_LEN];
|
||||
let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
|
||||
let ciphertext = &data[header_len..];
|
||||
|
||||
let key = zeroize::Zeroizing::new(derive_state_key(password, salt)?);
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
/// Returns true if raw bytes begin with the QPCE magic header.
|
||||
pub fn is_encrypted_state(bytes: &[u8]) -> bool {
|
||||
bytes.len() >= 4 && &bytes[..4] == STATE_MAGIC
|
||||
}
|
||||
|
||||
/// Load the state file at `path`, creating and persisting a fresh identity
/// on first run. Upgrades older state files by generating a missing hybrid
/// keypair and re-writing the file.
pub fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
    if path.exists() {
        let mut state = load_existing_state(path, password)?;
        // Generate hybrid keypair if missing (upgrade from older state).
        if state.hybrid_key.is_none() {
            state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
            write_state(path, &state, password)?;
        }
        return Ok(state);
    }

    // First run: generate identity + hybrid keys, build an empty member,
    // and persist the initial state so future runs take the branch above.
    let identity = IdentityKeypair::generate();
    let hybrid_kp = HybridKeypair::generate();
    let key_store = DiskKeyStore::persistent(keystore_path(path))?;
    let member = GroupMember::new_with_state(Arc::new(identity), key_store, None, false);
    let state = StoredState::from_parts(&member, Some(&hybrid_kp))?;
    write_state(path, &state, password)?;
    Ok(state)
}
|
||||
|
||||
pub fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
|
||||
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
|
||||
|
||||
if is_encrypted_state(&bytes) {
|
||||
let pw = password
|
||||
.context("state file is encrypted (QPCE); a password is required to decrypt it")?;
|
||||
let plaintext = decrypt_state(pw, &bytes)?;
|
||||
bincode::deserialize(&plaintext).context("decode encrypted state")
|
||||
} else {
|
||||
bincode::deserialize(&bytes).context("decode state")
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_state(
|
||||
path: &Path,
|
||||
member: &GroupMember,
|
||||
hybrid_kp: Option<&HybridKeypair>,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
let state = StoredState::from_parts(member, hybrid_kp)?;
|
||||
write_state(path, &state, password)
|
||||
}
|
||||
|
||||
/// Serialize `state` (optionally QPCE-encrypting it) and replace the file
/// at `path` via write-to-temp-then-rename.
///
/// NOTE(review): rename-over-existing is atomic on Unix; on Windows
/// `std::fs::rename` can fail if `path` already exists — confirm target
/// platforms if that matters.
pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
    }
    let plaintext = bincode::serialize(state).context("encode state")?;

    // Only encrypt when a password was supplied; otherwise write plaintext.
    let bytes = if let Some(pw) = password {
        encrypt_state(pw, &plaintext)?
    } else {
        plaintext
    };

    // Write a sibling temp file first so a crash never leaves a
    // half-written state file at `path`.
    let tmp = path.with_extension("tmp");
    std::fs::write(&tmp, bytes).with_context(|| format!("write state temp {tmp:?}"))?;
    std::fs::rename(&tmp, path).with_context(|| format!("rename state {tmp:?} -> {path:?}"))?;
    Ok(())
}
|
||||
|
||||
pub fn decode_identity_key(hex_str: &str) -> anyhow::Result<Vec<u8>> {
|
||||
let bytes = super::hex::decode(hex_str)
|
||||
.map_err(|e| anyhow::anyhow!(e))
|
||||
.context("identity key must be hex")?;
|
||||
anyhow::ensure!(bytes.len() == 32, "identity key must be 32 bytes");
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
/// Path of the legacy single keystore: the state path with its extension
/// replaced by `.ks`.
pub fn keystore_path(state_path: &Path) -> PathBuf {
    state_path.with_extension("ks")
}
|
||||
|
||||
pub fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||
use sha2::{Digest, Sha256};
|
||||
Sha256::digest(bytes).to_vec()
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // encrypt → decrypt recovers the original bytes, and the output
    // carries the QPCE magic.
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let plaintext = b"test state data";
        let password = "test-password";
        let encrypted = encrypt_state(password, plaintext).unwrap();
        assert!(is_encrypted_state(&encrypted));
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        assert_eq!(decrypted, plaintext);
    }

    // A wrong password must fail the AEAD check, not return garbage.
    #[test]
    fn wrong_password_fails() {
        let plaintext = b"test state data";
        let encrypted = encrypt_state("correct", plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }

    // Full StoredState round-trip through bincode + QPCE encryption.
    #[test]
    fn state_encrypt_decrypt_round_trip() {
        let state = StoredState {
            identity_seed: [42u8; 32],
            hybrid_key: None,
            group: None,
            member_keys: Vec::new(),
            group_id: None,
        };
        let password = "test-password";
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state(password, &plaintext).unwrap();
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
        assert_eq!(recovered.identity_seed, state.identity_seed);
        assert!(recovered.hybrid_key.is_none());
        assert!(recovered.group.is_none());
    }

    // Same round-trip, but with a populated hybrid keypair blob.
    #[test]
    fn state_encrypt_decrypt_with_hybrid_key() {
        use zeroize::Zeroizing;
        let state = StoredState {
            identity_seed: [7u8; 32],
            hybrid_key: Some(HybridKeypairBytes {
                x25519_sk: Zeroizing::new([1u8; 32]),
                mlkem_dk: Zeroizing::new(vec![3u8; 2400]),
                mlkem_ek: vec![4u8; 1184],
            }),
            group: None,
            member_keys: Vec::new(),
            group_id: None,
        };
        let password = "another-password";
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state(password, &plaintext).unwrap();
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
        assert_eq!(recovered.identity_seed, state.identity_seed);
        assert!(recovered.hybrid_key.is_some());
    }

    // Wrong password on a serialized StoredState also fails cleanly.
    #[test]
    fn state_wrong_password_fails() {
        let state = StoredState {
            identity_seed: [99u8; 32],
            hybrid_key: None,
            group: None,
            member_keys: Vec::new(),
            group_id: None,
        };
        let plaintext = bincode::serialize(&state).unwrap();
        let encrypted = encrypt_state("correct", &plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }
}
|
||||
179
crates/quicprochat-client/src/client/token_cache.rs
Normal file
179
crates/quicprochat-client/src/client/token_cache.rs
Normal file
@@ -0,0 +1,179 @@
|
||||
//! Cached session token stored next to the state file.
|
||||
//!
|
||||
//! File format (no password): two lines — username and hex-encoded session token.
|
||||
//! File format (with password): QPCE-encrypted version of the above.
|
||||
//! The token has a server-side 24h TTL; no client-side expiry tracking.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
|
||||
use super::state::{decrypt_state, encrypt_state, is_encrypted_state};
|
||||
|
||||
/// Username + session token pair loaded from the on-disk session cache.
pub struct CachedSession {
    // Account name the token belongs to (first line of the cache file).
    pub username: String,
    // Hex-encoded session token (second line of the cache file).
    pub token_hex: String,
}
|
||||
|
||||
/// Derive the session cache path: the state path with its extension
/// replaced by `.session`.
fn session_cache_path(state_path: &Path) -> PathBuf {
    let mut p = state_path.to_path_buf();
    p.set_extension("session");
    p
}
|
||||
|
||||
/// Parse the two-line format (username + token_hex) from plaintext bytes.
|
||||
fn parse_session_lines(text: &str) -> Option<CachedSession> {
|
||||
let mut lines = text.lines();
|
||||
let username = lines.next()?.trim().to_string();
|
||||
let token_hex = lines.next()?.trim().to_string();
|
||||
if username.is_empty() || token_hex.is_empty() {
|
||||
return None;
|
||||
}
|
||||
if hex::decode(&token_hex).is_err() {
|
||||
return None;
|
||||
}
|
||||
Some(CachedSession { username, token_hex })
|
||||
}
|
||||
|
||||
/// Load a cached session token. Returns None if file is missing or malformed.
|
||||
/// Decrypts if the file is QPCE-encrypted (requires `password`).
|
||||
pub fn load_cached_session(state_path: &Path, password: Option<&str>) -> Option<CachedSession> {
|
||||
let path = session_cache_path(state_path);
|
||||
let raw = std::fs::read(&path).ok()?;
|
||||
|
||||
if is_encrypted_state(&raw) {
|
||||
let pw = password?;
|
||||
let plaintext = decrypt_state(pw, &raw).ok()?;
|
||||
let text = String::from_utf8(plaintext).ok()?;
|
||||
parse_session_lines(&text)
|
||||
} else {
|
||||
let text = String::from_utf8(raw).ok()?;
|
||||
parse_session_lines(&text)
|
||||
}
|
||||
}
|
||||
|
||||
/// Save a session token to the cache file (mode 0o600 on Unix).
/// Encrypts with QPCE if `password` is provided.
///
/// # Errors
/// Fails if encryption or the file write fails. Tightening permissions on
/// Unix is best-effort and ignored on error.
pub fn save_cached_session(
    state_path: &Path,
    username: &str,
    token_hex: &str,
    password: Option<&str>,
) -> anyhow::Result<()> {
    let path = session_cache_path(state_path);
    // Two-line plaintext layout: username, then hex token.
    let contents = format!("{username}\n{token_hex}\n");

    let bytes = match password {
        Some(pw) => encrypt_state(pw, contents.as_bytes())?,
        None => {
            // Without Unix file permissions there is no way to protect a
            // plaintext token, so warn on non-Unix platforms.
            #[cfg(not(unix))]
            tracing::warn!(
                "storing session token as plaintext (no password set); \
                file permissions cannot be restricted on this platform"
            );
            contents.into_bytes()
        }
    };

    std::fs::write(&path, bytes).with_context(|| format!("write session cache {path:?}"))?;

    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        // Restrict to owner read/write; failure is non-fatal.
        let perms = std::fs::Permissions::from_mode(0o600);
        std::fs::set_permissions(&path, perms).ok();
    }

    Ok(())
}
|
||||
|
||||
/// Remove the cached session file.
|
||||
pub fn clear_cached_session(state_path: &Path) {
|
||||
let path = session_cache_path(state_path);
|
||||
std::fs::remove_file(&path).ok();
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // Save + load without a password round-trips the two-line form.
    #[test]
    fn plaintext_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"session-token-bytes");
        save_cached_session(&state_path, "alice", &token, None).unwrap();
        let loaded = load_cached_session(&state_path, None).unwrap();
        assert_eq!(loaded.username, "alice");
        assert_eq!(loaded.token_hex, token);
    }

    // With a password the on-disk bytes are QPCE-encrypted and still load.
    #[test]
    fn encrypted_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let password = "strong-password";

        let token = hex::encode(b"encrypted-token");
        save_cached_session(&state_path, "bob", &token, Some(password)).unwrap();

        // Encrypted file should start with QPCE magic
        let raw = std::fs::read(session_cache_path(&state_path)).unwrap();
        assert_eq!(&raw[..4], b"QPCE");

        let loaded = load_cached_session(&state_path, Some(password)).unwrap();
        assert_eq!(loaded.username, "bob");
        assert_eq!(loaded.token_hex, token);
    }

    // Decryption failure surfaces as "no session", not an error.
    #[test]
    fn wrong_password_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"secret-token");
        save_cached_session(&state_path, "carol", &token, Some("correct")).unwrap();
        let result = load_cached_session(&state_path, Some("wrong"));
        assert!(result.is_none());
    }

    // A missing cache file is simply "no session".
    #[test]
    fn missing_file_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("nonexistent.bin");
        assert!(load_cached_session(&state_path, None).is_none());
    }

    // clear_cached_session actually deletes the file.
    #[test]
    fn clear_removes_file() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"to-be-deleted");
        save_cached_session(&state_path, "dave", &token, None).unwrap();
        assert!(session_cache_path(&state_path).exists());

        clear_cached_session(&state_path);
        assert!(!session_cache_path(&state_path).exists());
    }

    // Malformed cache contents (bad hex, too few lines, empty) yield None.
    #[test]
    fn malformed_content_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let cache_path = session_cache_path(&state_path);

        // Not valid hex on second line
        std::fs::write(&cache_path, "alice\nnot-hex-data\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());

        // Only one line
        std::fs::write(&cache_path, "alice\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());

        // Empty file
        std::fs::write(&cache_path, "").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());
    }
}
|
||||
807
crates/quicprochat-client/src/client/tui/mod.rs
Normal file
807
crates/quicprochat-client/src/client/tui/mod.rs
Normal file
@@ -0,0 +1,807 @@
|
||||
//! Full-screen Ratatui TUI for quicprochat.
|
||||
//!
|
||||
//! Layout:
|
||||
//! ┌──────────────┬──────────────────────────────────────────┐
|
||||
//! │ Channels │ Messages │
|
||||
//! │ (20%) │ (80%) │
|
||||
//! │ │ │
|
||||
//! │ ├──────────────────────────────────────────┤
|
||||
//! │ │ Input bar │
|
||||
//! └──────────────┴──────────────────────────────────────────┘
|
||||
//!
|
||||
//! Keyboard:
|
||||
//! Enter — send message
|
||||
//! Up / Down — scroll message history
|
||||
//! Tab — next channel
|
||||
//! Shift+Tab — prev channel
|
||||
//! Ctrl+C / q — quit
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use crossterm::{
|
||||
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyModifiers},
|
||||
execute,
|
||||
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
|
||||
};
|
||||
use ratatui::{
|
||||
backend::CrosstermBackend,
|
||||
layout::{Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::{Line, Span},
|
||||
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
|
||||
Frame, Terminal,
|
||||
};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::interval;
|
||||
|
||||
use crate::{ClientAuth, init_auth};
|
||||
use super::commands::{opaque_login, opaque_register};
|
||||
use super::conversation::{now_ms, ConversationId, StoredMessage};
|
||||
use super::rpc::{
|
||||
connect_node, enqueue, fetch_hybrid_key, fetch_wait, try_hybrid_decrypt, upload_hybrid_key,
|
||||
upload_key_package,
|
||||
};
|
||||
use super::session::SessionState;
|
||||
use super::state::load_or_init_state;
|
||||
use super::token_cache::{load_cached_session, save_cached_session};
|
||||
|
||||
use quicprochat_core::{
|
||||
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
|
||||
hybrid_encrypt, parse as parse_app_msg, serialize_chat,
|
||||
};
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
|
||||
// ── App events ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Events sent from background tasks into the main TUI loop.
///
/// All wakeups of the render loop are funneled through this type.
enum TuiEvent {
    /// A key event from the terminal.
    Key(event::KeyEvent),
    /// New messages received from the server (conv_id, sender_short, body).
    NewMessages(Vec<(ConversationId, String, String)>),
    /// Tick — redraw periodically even if nothing happened.
    Tick,
}
|
||||
|
||||
// ── Display message ───────────────────────────────────────────────────────────
|
||||
|
||||
/// A single message as rendered in the message pane.
#[derive(Clone)]
struct DisplayMessage {
    // Short display name (or hex key prefix) of the sender.
    sender: String,
    // Message text.
    body: String,
    // Timestamp in milliseconds (as produced by `now_ms`).
    timestamp_ms: u64,
    // True when this client sent the message.
    is_outgoing: bool,
}
|
||||
|
||||
// ── App state ─────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Mutable UI state driven by the TUI main loop.
struct App {
    /// Channel (conversation) names shown in the sidebar.
    channel_names: Vec<String>,
    /// Conversation IDs, parallel to `channel_names`.
    channel_ids: Vec<ConversationId>,
    /// Index of the selected channel in the sidebar.
    selected_channel: usize,
    /// Messages for the currently active channel.
    messages: Vec<DisplayMessage>,
    /// Current input buffer.
    input: String,
    /// Scroll offset (0 = bottom).
    scroll_offset: usize,
    /// Whether the user has requested quit.
    should_quit: bool,
    /// Short identity string for display.
    identity_short: String,
}
|
||||
|
||||
impl App {
|
||||
fn new(session: &SessionState) -> anyhow::Result<Self> {
|
||||
let convs = session.conv_store.list_conversations()?;
|
||||
let channel_names: Vec<String> = convs.iter().map(|c| c.display_name.clone()).collect();
|
||||
let channel_ids: Vec<ConversationId> = convs.iter().map(|c| c.id.clone()).collect();
|
||||
|
||||
Ok(Self {
|
||||
channel_names,
|
||||
channel_ids,
|
||||
selected_channel: 0,
|
||||
messages: Vec::new(),
|
||||
input: String::new(),
|
||||
scroll_offset: 0,
|
||||
should_quit: false,
|
||||
identity_short: session.identity_short(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Conversation ID of the currently selected channel, if any exists.
fn active_conv_id(&self) -> Option<&ConversationId> {
    self.channel_ids.get(self.selected_channel)
}
|
||||
|
||||
/// Reload messages for the currently selected channel from the session store.
|
||||
fn reload_messages(&mut self, session: &SessionState) -> anyhow::Result<()> {
|
||||
let conv_id = match self.active_conv_id() {
|
||||
Some(id) => id.clone(),
|
||||
None => {
|
||||
self.messages.clear();
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
let stored = session.conv_store.load_recent_messages(&conv_id, 200)?;
|
||||
self.messages = stored
|
||||
.into_iter()
|
||||
.map(|m| {
|
||||
let sender = if m.is_outgoing {
|
||||
format!("me({})", &self.identity_short)
|
||||
} else if let Some(name) = &m.sender_name {
|
||||
name.clone()
|
||||
} else {
|
||||
// Shorten sender key to 8 hex chars.
|
||||
let hex_short = hex::encode(&m.sender_key[..m.sender_key.len().min(4)]);
|
||||
format!("{hex_short}")
|
||||
};
|
||||
DisplayMessage {
|
||||
sender,
|
||||
body: m.body,
|
||||
timestamp_ms: m.timestamp_ms,
|
||||
is_outgoing: m.is_outgoing,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
// Reset scroll to bottom on channel switch.
|
||||
self.scroll_offset = 0;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn select_next_channel(&mut self, session: &SessionState) {
|
||||
if self.channel_names.is_empty() {
|
||||
return;
|
||||
}
|
||||
self.selected_channel = (self.selected_channel + 1) % self.channel_names.len();
|
||||
let _ = self.reload_messages(session);
|
||||
}
|
||||
|
||||
fn select_prev_channel(&mut self, session: &SessionState) {
|
||||
if self.channel_names.is_empty() {
|
||||
return;
|
||||
}
|
||||
if self.selected_channel == 0 {
|
||||
self.selected_channel = self.channel_names.len() - 1;
|
||||
} else {
|
||||
self.selected_channel -= 1;
|
||||
}
|
||||
let _ = self.reload_messages(session);
|
||||
}
|
||||
|
||||
fn scroll_up(&mut self) {
|
||||
self.scroll_offset = self.scroll_offset.saturating_add(1);
|
||||
}
|
||||
|
||||
fn scroll_down(&mut self) {
|
||||
self.scroll_offset = self.scroll_offset.saturating_sub(1);
|
||||
}
|
||||
|
||||
/// Append newly received messages to the in-memory list (no DB reload needed
|
||||
/// since we already have them from the poll task, but we also save them via
|
||||
/// the session so they appear on reload).
|
||||
fn append_messages(&mut self, msgs: Vec<(ConversationId, String, String)>) {
|
||||
let active = self.active_conv_id().cloned();
|
||||
for (conv_id, sender, body) in msgs {
|
||||
if active.as_ref() == Some(&conv_id) {
|
||||
self.messages.push(DisplayMessage {
|
||||
sender,
|
||||
body,
|
||||
timestamp_ms: now_ms(),
|
||||
is_outgoing: false,
|
||||
});
|
||||
// Snap to bottom if user wasn't scrolled.
|
||||
if self.scroll_offset == 0 {
|
||||
// Already at bottom — nothing to do.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Drawing ───────────────────────────────────────────────────────────────────
|
||||
|
||||
fn ui(frame: &mut Frame, app: &App) {
|
||||
let size = frame.area();
|
||||
|
||||
// Top-level split: sidebar | main area.
|
||||
let h_chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([Constraint::Percentage(20), Constraint::Percentage(80)])
|
||||
.split(size);
|
||||
|
||||
// Main area split: messages | input bar.
|
||||
let v_chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Min(3), Constraint::Length(3)])
|
||||
.split(h_chunks[1]);
|
||||
|
||||
draw_sidebar(frame, app, h_chunks[0]);
|
||||
draw_messages(frame, app, v_chunks[0]);
|
||||
draw_input(frame, app, v_chunks[1]);
|
||||
}
|
||||
|
||||
fn draw_sidebar(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let items: Vec<ListItem> = app
|
||||
.channel_names
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, name)| {
|
||||
let style = if i == app.selected_channel {
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD | Modifier::REVERSED)
|
||||
} else {
|
||||
Style::default().fg(Color::Cyan)
|
||||
};
|
||||
ListItem::new(Line::from(Span::styled(name.clone(), style)))
|
||||
})
|
||||
.collect();
|
||||
|
||||
let block = Block::default()
|
||||
.title(" Channels ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let mut list_state = ListState::default();
|
||||
if !app.channel_names.is_empty() {
|
||||
list_state.select(Some(app.selected_channel));
|
||||
}
|
||||
|
||||
frame.render_stateful_widget(
|
||||
List::new(items).block(block),
|
||||
area,
|
||||
&mut list_state,
|
||||
);
|
||||
}
|
||||
|
||||
fn draw_messages(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let channel_title = app
|
||||
.channel_names
|
||||
.get(app.selected_channel)
|
||||
.map(|n| format!(" {n} "))
|
||||
.unwrap_or_else(|| " Messages ".to_string());
|
||||
|
||||
let block = Block::default()
|
||||
.title(channel_title)
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let inner_height = area.height.saturating_sub(2) as usize;
|
||||
|
||||
// Build lines from messages (newest at bottom).
|
||||
let mut lines: Vec<Line> = app
|
||||
.messages
|
||||
.iter()
|
||||
.map(|m| {
|
||||
let ts = format_timestamp(m.timestamp_ms);
|
||||
let ts_span = Span::styled(ts, Style::default().fg(Color::DarkGray));
|
||||
|
||||
let sender_style = if m.is_outgoing {
|
||||
Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)
|
||||
};
|
||||
let sender_span = Span::styled(format!(" {} ", m.sender), sender_style);
|
||||
let body_span = Span::raw(m.body.clone());
|
||||
|
||||
Line::from(vec![ts_span, sender_span, body_span])
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Apply scroll: scroll_offset=0 means newest at bottom.
|
||||
let total = lines.len();
|
||||
let visible_start = if total > inner_height {
|
||||
let bottom = total - app.scroll_offset.min(total);
|
||||
bottom.saturating_sub(inner_height)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let visible_end = if total > inner_height {
|
||||
total - app.scroll_offset.min(total)
|
||||
} else {
|
||||
total
|
||||
};
|
||||
let visible_lines: Vec<Line> = lines
|
||||
.drain(visible_start..visible_end.min(lines.len()))
|
||||
.collect();
|
||||
|
||||
let paragraph = Paragraph::new(visible_lines)
|
||||
.block(block)
|
||||
.wrap(Wrap { trim: false });
|
||||
|
||||
frame.render_widget(paragraph, area);
|
||||
}
|
||||
|
||||
fn draw_input(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let block = Block::default()
|
||||
.title(" Input (Enter=send, Tab=switch channel, q/Ctrl+C=quit) ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let input_text = Paragraph::new(app.input.as_str())
|
||||
.block(block)
|
||||
.style(Style::default().fg(Color::White));
|
||||
|
||||
frame.render_widget(input_text, area);
|
||||
|
||||
// Position cursor at end of input.
|
||||
let cursor_x = area.x + 1 + app.input.len() as u16;
|
||||
let cursor_y = area.y + 1;
|
||||
if cursor_x < area.x + area.width - 1 {
|
||||
frame.set_cursor_position((cursor_x, cursor_y));
|
||||
}
|
||||
}
|
||||
|
||||
/// Format an epoch-milliseconds timestamp as a `"[HH:MM] "` prefix
/// (time-of-day only, UTC, no date component).
fn format_timestamp(ms: u64) -> String {
    let total_minutes = ms / 60_000;
    let minutes = total_minutes % 60;
    let hours = (total_minutes / 60) % 24;
    format!("[{hours:02}:{minutes:02}] ")
}
|
||||
|
||||
// ── Message polling task ──────────────────────────────────────────────────────
|
||||
|
||||
/// Background task that polls the server for new messages and sends them via `tx`.
///
/// Runs until the receiving end of `tx` is dropped. Every second it fetches
/// pending payloads, attempts hybrid (PQ) decryption, then tries each known
/// conversation's MLS state until one accepts the message. Decoded application
/// messages are persisted and forwarded to the UI as `TuiEvent::NewMessages`.
async fn poll_task(
    mut session: SessionState,
    client: node_service::Client,
    tx: mpsc::Sender<TuiEvent>,
) {
    let mut poll_interval = interval(Duration::from_millis(1000));
    // Skip (rather than burst) missed ticks if a poll iteration runs long.
    poll_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);

    loop {
        poll_interval.tick().await;

        let identity_bytes = session.identity_bytes();
        // Fetch errors are transient (network); just try again next tick.
        let payloads = match fetch_wait(&client, &identity_bytes, 0).await {
            Ok(p) => p,
            Err(_) => continue,
        };

        if payloads.is_empty() {
            continue;
        }

        let mut new_msgs: Vec<(ConversationId, String, String)> = Vec::new();
        let my_key = session.identity_bytes();

        // Process in sequence order so MLS epochs advance correctly.
        let mut sorted = payloads;
        sorted.sort_by_key(|(seq, _)| *seq);

        for (_seq, payload) in &sorted {
            // Payloads may or may not be hybrid-wrapped; on decrypt failure,
            // fall back to treating the payload as a bare MLS ciphertext.
            let mls_payload = match try_hybrid_decrypt(session.hybrid_kp.as_ref(), payload) {
                Ok(b) => b,
                Err(_) => payload.clone(),
            };

            // Collect ids up front so we can mutably borrow members below.
            let conv_ids: Vec<ConversationId> = session.members.keys().cloned().collect();

            // Try each conversation's MLS state; the first one that accepts
            // the ciphertext handles it (then `break` out of the inner loop).
            for conv_id in &conv_ids {
                let member = match session.members.get_mut(conv_id) {
                    Some(m) => m,
                    None => continue,
                };

                match member.receive_message(&mls_payload) {
                    Ok(ReceivedMessage::Application(plaintext)) => {
                        // Unwrap the metadata-protection layers: unpad first,
                        // then unseal the sender if the sealed-sender framing
                        // is present.
                        let (sender_key, app_bytes) = {
                            let after_unpad = quicprochat_core::padding::unpad(&plaintext)
                                .unwrap_or_else(|_| plaintext.clone());

                            if quicprochat_core::sealed_sender::is_sealed(&after_unpad) {
                                match quicprochat_core::sealed_sender::unseal(&after_unpad) {
                                    Ok((sk, inner)) => (sk.to_vec(), inner),
                                    // NOTE(review): on unseal failure the
                                    // message is attributed to *our own* key,
                                    // which mislabels the sender in the UI —
                                    // confirm this fallback is intentional.
                                    Err(_) => (my_key.clone(), after_unpad),
                                }
                            } else {
                                (my_key.clone(), after_unpad)
                            }
                        };

                        // Decode the structured AppMessage; unparseable bytes
                        // degrade to a plain "chat" with the raw body.
                        let (body, msg_id, msg_type, ref_msg_id) =
                            match parse_app_msg(&app_bytes) {
                                Ok((_, AppMessage::Chat { message_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    Some(message_id),
                                    "chat",
                                    None,
                                ),
                                Ok((_, AppMessage::Reply { ref_msg_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    None,
                                    "reply",
                                    Some(ref_msg_id),
                                ),
                                Ok((_, AppMessage::Reaction { ref_msg_id, emoji })) => (
                                    String::from_utf8_lossy(&emoji).to_string(),
                                    None,
                                    "reaction",
                                    Some(ref_msg_id),
                                ),
                                _ => (
                                    String::from_utf8_lossy(&app_bytes).to_string(),
                                    None,
                                    "chat",
                                    None,
                                ),
                            };

                        let stored = StoredMessage {
                            conversation_id: conv_id.clone(),
                            message_id: msg_id,
                            sender_key: sender_key.clone(),
                            sender_name: None,
                            body: body.clone(),
                            msg_type: msg_type.into(),
                            ref_msg_id,
                            timestamp_ms: now_ms(),
                            is_outgoing: false,
                        };

                        // Only forward to the UI if persistence succeeded, so
                        // the on-screen list never diverges from the store.
                        if session.conv_store.save_message(&stored).is_ok() {
                            let sender_short = hex::encode(&sender_key[..sender_key.len().min(4)]);
                            new_msgs.push((conv_id.clone(), sender_short, body));
                        }

                        // Best-effort bookkeeping: bump activity and persist
                        // the advanced MLS state.
                        let _ = session.conv_store.update_activity(conv_id, now_ms());
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
                        // Group state advanced (commit/removal): persist it.
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    // Decrypt failure for this conversation — try the next one.
                    _ => {}
                }
            }
        }

        if !new_msgs.is_empty() {
            // Receiver dropped means the UI is gone; stop polling.
            if tx.send(TuiEvent::NewMessages(new_msgs)).await.is_err() {
                break;
            }
        }
    }
}
|
||||
|
||||
// ── Send message ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Encrypt `text` for `conv_id` and fan it out to every other group member,
/// then persist the outgoing message locally.
///
/// Pipeline: AppMessage framing → sealed-sender → padding → MLS encrypt →
/// optional per-recipient hybrid (PQ) wrap → server enqueue.
///
/// # Errors
/// Fails on serialization, MLS encryption, any per-recipient fetch/enqueue,
/// or local persistence.
async fn send_message(
    session: &mut SessionState,
    client: &node_service::Client,
    conv_id: &ConversationId,
    text: &str,
) -> anyhow::Result<()> {
    let my_key = session.identity_bytes();
    // Clone the Arc before mutably borrowing `session.members` below.
    let identity = Arc::clone(&session.identity);

    let member = session
        .members
        .get_mut(conv_id)
        .context("no GroupMember for this conversation")?;

    // Wrap in structured AppMessage format.
    let app_payload = serialize_chat(text.as_bytes(), None)
        .context("serialize app message")?;

    // Metadata protection: seal + pad.
    let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
    let padded = quicprochat_core::padding::pad(&sealed);

    let ct = member.send_message(&padded).context("MLS encrypt")?;

    // Everyone in the group except ourselves.
    let recipients: Vec<Vec<u8>> = member
        .member_identities()
        .into_iter()
        .filter(|id| id.as_slice() != my_key.as_slice())
        .collect();

    // NOTE(review): fan-out is sequential and aborts on the first failure, so
    // a mid-loop error leaves earlier recipients already enqueued while the
    // local copy below is never saved — confirm this is acceptable.
    for recipient_key in &recipients {
        let peer_hybrid_pk = fetch_hybrid_key(client, recipient_key).await?;
        // Hybrid-wrap only when the peer has published a PQ key; otherwise
        // the bare MLS ciphertext is sent.
        let payload = if let Some(ref pk) = peer_hybrid_pk {
            hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt")?
        } else {
            ct.clone()
        };
        enqueue(client, recipient_key, &payload).await?;
    }

    // Extract message_id from what we just serialized.
    let msg_id = parse_app_msg(&app_payload)
        .ok()
        .and_then(|(_, m)| match m {
            AppMessage::Chat { message_id, .. } => Some(message_id),
            _ => None,
        });

    // Save outgoing message.
    let stored = StoredMessage {
        conversation_id: conv_id.clone(),
        message_id: msg_id,
        sender_key: my_key,
        sender_name: Some("you".into()),
        body: text.to_string(),
        msg_type: "chat".into(),
        ref_msg_id: None,
        timestamp_ms: now_ms(),
        is_outgoing: true,
    };
    session.conv_store.save_message(&stored)?;
    session.conv_store.update_activity(conv_id, now_ms())?;
    session.save_member(conv_id)?;

    Ok(())
}
|
||||
|
||||
// ── TUI entry point ───────────────────────────────────────────────────────────
|
||||
|
||||
/// Entry point for `qpc tui`. Sets up the terminal, runs the event loop, and
|
||||
/// restores the terminal on exit.
|
||||
pub async fn run_tui(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
password: Option<&str>,
|
||||
username: Option<&str>,
|
||||
opaque_password: Option<&str>,
|
||||
access_token: &str,
|
||||
device_id: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
// ── Auth ──────────────────────────────────────────────────────────────────
|
||||
let resolved_token = resolve_tui_access_token(
|
||||
state_path,
|
||||
server,
|
||||
ca_cert,
|
||||
server_name,
|
||||
password,
|
||||
username,
|
||||
opaque_password,
|
||||
access_token,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let token_bytes = hex::decode(&resolved_token)
|
||||
.unwrap_or_else(|_| resolved_token.into_bytes());
|
||||
let auth_ctx = ClientAuth::from_raw(token_bytes, device_id.map(String::from));
|
||||
init_auth(auth_ctx);
|
||||
|
||||
// ── Session + RPC ─────────────────────────────────────────────────────────
|
||||
let mut session = SessionState::load(state_path, password)?;
|
||||
let client = connect_node(server, ca_cert, server_name).await?;
|
||||
|
||||
// Auto-upload KeyPackage.
|
||||
let _ = auto_upload_keys_tui(&session, &client).await;
|
||||
|
||||
// ── Terminal setup ────────────────────────────────────────────────────────
|
||||
enable_raw_mode().context("enable raw mode")?;
|
||||
let mut stdout = std::io::stdout();
|
||||
execute!(stdout, EnterAlternateScreen, EnableMouseCapture)
|
||||
.context("enter alternate screen")?;
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend).context("create terminal")?;
|
||||
|
||||
let result = tui_loop(&mut terminal, &mut session, client).await;
|
||||
|
||||
// ── Terminal cleanup (always restore, even on error) ───────────────────
|
||||
disable_raw_mode().ok();
|
||||
execute!(
|
||||
terminal.backend_mut(),
|
||||
LeaveAlternateScreen,
|
||||
DisableMouseCapture
|
||||
)
|
||||
.ok();
|
||||
terminal.show_cursor().ok();
|
||||
|
||||
session.save_all()?;
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Main TUI event loop: spawns the keyboard and poll tasks, then draws and
/// dispatches events until the user quits or both event producers are gone.
///
/// # Errors
/// Propagates draw failures, session-load failures for the poll task's
/// session, and message-reload failures from channel switching.
async fn tui_loop(
    terminal: &mut Terminal<CrosstermBackend<std::io::Stdout>>,
    session: &mut SessionState,
    client: node_service::Client,
) -> anyhow::Result<()> {
    let mut app = App::new(session)?;
    app.reload_messages(session)?;

    let (event_tx, mut event_rx) = mpsc::channel::<TuiEvent>(256);

    // ── Keyboard event task ───────────────────────────────────────────────────
    let key_tx = event_tx.clone();
    tokio::task::spawn_local(async move {
        loop {
            // crossterm event polling — 50ms timeout so we can tick.
            // NOTE(review): `event::poll`/`event::read` are blocking calls
            // inside an async task; acceptable on a LocalSet dedicated to the
            // UI, but confirm nothing latency-sensitive shares this thread.
            match event::poll(Duration::from_millis(50)) {
                Ok(true) => {
                    if let Ok(Event::Key(key)) = event::read() {
                        if key_tx.send(TuiEvent::Key(key)).await.is_err() {
                            break;
                        }
                    }
                }
                Ok(false) => {
                    // No event — send a tick so the UI redraws.
                    if key_tx.send(TuiEvent::Tick).await.is_err() {
                        break;
                    }
                }
                Err(_) => break,
            }
        }
    });

    // ── Message poll task ─────────────────────────────────────────────────────
    // Clone session state for the poll task (it needs its own SessionState).
    let poll_session = SessionState::load(
        &session.state_path.clone(),
        session.password.as_ref().map(|p| p.as_str()),
    )?;
    let poll_tx = event_tx.clone();
    tokio::task::spawn_local(poll_task(poll_session, client.clone(), poll_tx));

    // ── Main loop ─────────────────────────────────────────────────────────────
    loop {
        terminal.draw(|f| ui(f, &app)).context("draw")?;

        match event_rx.recv().await {
            // All senders dropped — nothing more can happen; exit.
            None => break,
            Some(TuiEvent::Tick) => {
                // Just redraw.
            }
            Some(TuiEvent::NewMessages(msgs)) => {
                app.append_messages(msgs);
            }
            Some(TuiEvent::Key(key)) => {
                match key.code {
                    KeyCode::Char('c') if key.modifiers.contains(KeyModifiers::CONTROL) => {
                        app.should_quit = true;
                    }
                    // 'q' quits only with an empty input buffer, so typing a
                    // message containing 'q' still works.
                    KeyCode::Char('q') if app.input.is_empty() => {
                        app.should_quit = true;
                    }
                    KeyCode::Enter => {
                        let text = app.input.trim().to_string();
                        if !text.is_empty() {
                            // Clear the buffer before the await so a redraw
                            // during sending shows an empty input bar.
                            app.input.clear();
                            if let Some(conv_id) = app.active_conv_id().cloned() {
                                match send_message(session, &client, &conv_id, &text).await {
                                    Ok(()) => {
                                        // Add to in-memory list immediately.
                                        app.messages.push(DisplayMessage {
                                            sender: format!("me({})", app.identity_short),
                                            body: text,
                                            timestamp_ms: now_ms(),
                                            is_outgoing: true,
                                        });
                                    }
                                    Err(_e) => {
                                        // Silently drop — user will see nothing happened.
                                        // NOTE(review): consider surfacing a
                                        // status-line error instead.
                                    }
                                }
                            }
                        }
                    }
                    KeyCode::Char(c) => {
                        app.input.push(c);
                    }
                    KeyCode::Backspace => {
                        app.input.pop();
                    }
                    KeyCode::Up => {
                        app.scroll_up();
                    }
                    KeyCode::Down => {
                        app.scroll_down();
                    }
                    KeyCode::Tab => {
                        // NOTE(review): crossterm typically reports Shift+Tab
                        // as `KeyCode::BackTab`, not Tab + SHIFT — the SHIFT
                        // branch here may be unreachable; verify and consider
                        // matching `KeyCode::BackTab` as well.
                        if key.modifiers.contains(KeyModifiers::SHIFT) {
                            app.select_prev_channel(session);
                        } else {
                            app.select_next_channel(session);
                        }
                        // NOTE(review): select_* already reloads (best-effort);
                        // this second reload is redundant but is the only path
                        // that propagates reload errors.
                        app.reload_messages(session)?;
                    }
                    _ => {}
                }
            }
        }

        if app.should_quit {
            break;
        }
    }

    Ok(())
}
|
||||
|
||||
// ── Startup helpers ───────────────────────────────────────────────────────────
|
||||
|
||||
async fn auto_upload_keys_tui(
|
||||
session: &SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<()> {
|
||||
let ks_path = session.state_path.with_extension("pending.ks");
|
||||
let ks = DiskKeyStore::persistent(&ks_path).unwrap_or_else(|_| DiskKeyStore::ephemeral());
|
||||
let mut member = GroupMember::new_with_state(
|
||||
Arc::clone(&session.identity),
|
||||
ks,
|
||||
None,
|
||||
false,
|
||||
);
|
||||
let kp_bytes = member.generate_key_package().context("generate KeyPackage")?;
|
||||
let id_key = session.identity.public_key_bytes();
|
||||
upload_key_package(client, &id_key, &kp_bytes).await?;
|
||||
if let Some(ref hkp) = session.hybrid_kp {
|
||||
upload_hybrid_key(client, &id_key, &hkp.public_key()).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn resolve_tui_access_token(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
state_password: Option<&str>,
|
||||
username: Option<&str>,
|
||||
opaque_password: Option<&str>,
|
||||
cli_access_token: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
if !cli_access_token.is_empty() {
|
||||
return Ok(cli_access_token.to_string());
|
||||
}
|
||||
|
||||
if let Some(cached) = load_cached_session(state_path, state_password) {
|
||||
return Ok(cached.token_hex);
|
||||
}
|
||||
|
||||
let username = match username {
|
||||
Some(u) => u.to_string(),
|
||||
None => {
|
||||
use std::io::Write;
|
||||
eprint!("Username: ");
|
||||
std::io::stderr().flush().ok();
|
||||
let mut input = String::new();
|
||||
std::io::stdin()
|
||||
.read_line(&mut input)
|
||||
.context("failed to read username")?;
|
||||
let trimmed = input.trim().to_string();
|
||||
anyhow::ensure!(!trimmed.is_empty(), "username is required");
|
||||
trimmed
|
||||
}
|
||||
};
|
||||
|
||||
let opaque_password = match opaque_password {
|
||||
Some(p) => p.to_string(),
|
||||
None => rpassword::read_password().context("failed to read password")?,
|
||||
};
|
||||
|
||||
let state = load_or_init_state(state_path, state_password)?;
|
||||
let identity = IdentityKeypair::from_seed(state.identity_seed);
|
||||
let identity_key = identity.public_key_bytes().to_vec();
|
||||
|
||||
let node_client = connect_node(server, ca_cert, server_name).await?;
|
||||
|
||||
match opaque_register(&node_client, &username, &opaque_password, Some(&identity_key)).await {
|
||||
Ok(()) | Err(_) => {}
|
||||
}
|
||||
|
||||
let token_bytes = opaque_login(&node_client, &username, &opaque_password, &identity_key)
|
||||
.await
|
||||
.context("OPAQUE login failed")?;
|
||||
let token_hex = hex::encode(&token_bytes);
|
||||
|
||||
save_cached_session(state_path, &username, &token_hex, state_password)?;
|
||||
|
||||
Ok(token_hex)
|
||||
}
|
||||
1126
crates/quicprochat-client/src/client/v2_repl.rs
Normal file
1126
crates/quicprochat-client/src/client/v2_repl.rs
Normal file
File diff suppressed because it is too large
Load Diff
1208
crates/quicprochat-client/src/client/v2_tui.rs
Normal file
1208
crates/quicprochat-client/src/client/v2_tui.rs
Normal file
File diff suppressed because it is too large
Load Diff
177
crates/quicprochat-client/src/lib.rs
Normal file
177
crates/quicprochat-client/src/lib.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
//! quicprochat CLI client library.
|
||||
//!
|
||||
//! # KeyPackage expiry and refresh
|
||||
//!
|
||||
//! KeyPackages are single-use (consumed when someone fetches them for an invite) and the server
|
||||
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `qpc refresh-keypackage`
|
||||
//! periodically (e.g. before the server TTL) or after your KeyPackage was consumed:
|
||||
//!
|
||||
//! ```bash
|
||||
//! qpc refresh-keypackage --state qpc-state.bin --server 127.0.0.1:7000
|
||||
//! ```
|
||||
//!
|
||||
//! Use the same `--access-token` (or `QPQ_ACCESS_TOKEN`) as for other authenticated
|
||||
//! commands. See the [running-the-client](https://docs.quicprochat.dev/getting-started/running-the-client)
|
||||
//! docs for details.
|
||||
|
||||
use std::sync::RwLock;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
pub mod client;
|
||||
#[cfg(feature = "v2")]
|
||||
pub mod v2_commands;
|
||||
|
||||
pub use client::commands::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
|
||||
cmd_fetch_key, cmd_health, cmd_health_json, cmd_invite, cmd_join, cmd_login, cmd_ping,
|
||||
cmd_recv, cmd_register, cmd_register_state, cmd_refresh_keypackage, cmd_register_user,
|
||||
cmd_send, cmd_whoami, opaque_login, receive_pending_plaintexts, whoami_json,
|
||||
};
|
||||
|
||||
pub use client::command_engine::{Command, CommandRegistry, CommandResult};
|
||||
#[cfg(feature = "playbook")]
|
||||
pub use client::playbook::{Playbook, PlaybookReport, PlaybookRunner};
|
||||
pub use client::repl::run_repl;
|
||||
pub use client::rpc::{connect_node, connect_node_opt, create_channel, enqueue, fetch_wait, resolve_user};
|
||||
|
||||
// ── ClientContext: structured holder for session-scoped auth + TLS config ────
|
||||
|
||||
/// Holds the authentication credentials and TLS policy for a client session.
|
||||
///
|
||||
/// Prefer constructing a `ClientContext` and passing it explicitly where
|
||||
/// possible. The global `AUTH_CONTEXT` / `INSECURE_SKIP_VERIFY` statics
|
||||
/// delegate to a `ClientContext` under the hood and exist only for backward
|
||||
/// compatibility with call-sites that have not yet been migrated.
|
||||
pub struct ClientContext {
    // Auth credentials for this session; `None` until set via
    // `with_auth`/`set_auth`.
    auth: RwLock<Option<ClientAuth>>,
    // When `true`, TLS certificate verification is skipped (development only).
    insecure_skip_verify: AtomicBool,
}
|
||||
|
||||
impl ClientContext {
|
||||
/// Create a new context with no auth and TLS verification enabled.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
auth: RwLock::new(None),
|
||||
insecure_skip_verify: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a context pre-loaded with auth credentials.
|
||||
pub fn with_auth(auth: ClientAuth) -> Self {
|
||||
Self {
|
||||
auth: RwLock::new(Some(auth)),
|
||||
insecure_skip_verify: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set (or replace) the auth credentials.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn set_auth(&self, ctx: ClientAuth) {
|
||||
let mut guard = self.auth.write().expect("ClientContext auth lock poisoned");
|
||||
*guard = Some(ctx);
|
||||
}
|
||||
|
||||
/// Read the current auth snapshot (cloned).
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn get_auth(&self) -> Option<ClientAuth> {
|
||||
let guard = self.auth.read().expect("ClientContext auth lock poisoned");
|
||||
guard.clone()
|
||||
}
|
||||
|
||||
/// Returns true if auth credentials have been set.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn is_authenticated(&self) -> bool {
|
||||
let guard = self.auth.read().expect("ClientContext auth lock poisoned");
|
||||
guard.is_some()
|
||||
}
|
||||
|
||||
/// Enable or disable insecure TLS mode.
|
||||
pub fn set_insecure_skip_verify(&self, enabled: bool) {
|
||||
self.insecure_skip_verify.store(enabled, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Read the current insecure-skip-verify flag.
|
||||
pub fn insecure_skip_verify(&self) -> bool {
|
||||
self.insecure_skip_verify.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ClientContext {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// ── Global statics (thin wrappers, kept for backward compat) ─────────────────
|
||||
|
||||
/// Global auth context — delegates to a process-wide `ClientContext`.
/// Prefer passing `&ClientContext` explicitly in new code.
/// Starts as `None`; populated by [`init_auth`].
pub(crate) static AUTH_CONTEXT: RwLock<Option<ClientAuth>> = RwLock::new(None);

/// When `true`, [`connect_node`] skips TLS certificate verification.
/// Prefer `ClientContext::set_insecure_skip_verify` in new code.
/// Development only — disables MITM protection.
pub(crate) static INSECURE_SKIP_VERIFY: AtomicBool = AtomicBool::new(false);
|
||||
|
||||
/// Enable or disable insecure (no-verify) TLS mode globally.
///
/// **Development only.** When enabled, all outgoing connections skip certificate
/// verification, making them vulnerable to MITM attacks.
pub fn set_insecure_skip_verify(enabled: bool) {
    // Relaxed suffices: this is an independent flag with no ordering
    // requirements relative to other state.
    INSECURE_SKIP_VERIFY.store(enabled, Ordering::Relaxed);
}
|
||||
|
||||
/// Authentication credentials attached to outgoing RPC requests.
#[derive(Clone, Debug)]
pub struct ClientAuth {
    // Format version of this auth context (set to 1 by both constructors).
    pub(crate) version: u16,
    /// Bearer or OPAQUE session token. Zeroized on drop. (M8)
    pub(crate) access_token: Zeroizing<Vec<u8>>,
    // Device identifier bytes; empty when no device id was supplied.
    pub(crate) device_id: Vec<u8>,
}
|
||||
|
||||
impl ClientAuth {
|
||||
/// Build a client auth context from optional token and device id.
|
||||
pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
|
||||
let token = access_token.into_bytes();
|
||||
let device = device_id.unwrap_or_default().into_bytes();
|
||||
Self {
|
||||
version: 1,
|
||||
access_token: Zeroizing::new(token),
|
||||
device_id: device,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build from raw token bytes (e.g. a 32-byte OPAQUE session token).
|
||||
pub fn from_raw(raw_token: Vec<u8>, device_id: Option<String>) -> Self {
|
||||
let device = device_id.unwrap_or_default().into_bytes();
|
||||
Self {
|
||||
version: 1,
|
||||
access_token: Zeroizing::new(raw_token),
|
||||
device_id: device,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set (or replace) the global auth context.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the RwLock is poisoned (a thread panicked while holding it).
|
||||
/// A poisoned lock indicates unrecoverable state corruption.
|
||||
#[allow(clippy::expect_used)]
|
||||
pub fn init_auth(ctx: ClientAuth) {
|
||||
let mut guard = AUTH_CONTEXT.write().expect("AUTH_CONTEXT poisoned");
|
||||
*guard = Some(ctx);
|
||||
}
|
||||
1037
crates/quicprochat-client/src/main.rs
Normal file
1037
crates/quicprochat-client/src/main.rs
Normal file
File diff suppressed because it is too large
Load Diff
228
crates/quicprochat-client/src/v2_commands.rs
Normal file
228
crates/quicprochat-client/src/v2_commands.rs
Normal file
@@ -0,0 +1,228 @@
|
||||
//! v2 CLI command implementations — thin wrappers over the SDK.
|
||||
|
||||
use quicprochat_sdk::client::QpqClient;
|
||||
use quicprochat_sdk::error::SdkError;
|
||||
|
||||
/// Register a new user account via OPAQUE.
|
||||
pub async fn cmd_register_user(
|
||||
client: &mut QpqClient,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
client.register(username, password).await?;
|
||||
let key = client.identity_key().unwrap_or_default();
|
||||
println!("registered user: {username}");
|
||||
println!("identity key : {}", hex::encode(key));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Log in via OPAQUE and print session info.
|
||||
pub async fn cmd_login(
|
||||
client: &mut QpqClient,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
client.login(username, password).await?;
|
||||
println!("logged in as: {username}");
|
||||
if let Some(key) = client.identity_key() {
|
||||
println!("identity key: {}", hex::encode(key));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Print local identity information.
|
||||
pub fn cmd_whoami(client: &QpqClient) {
|
||||
match client.username() {
|
||||
Some(u) => println!("username : {u}"),
|
||||
None => println!("username : (not logged in)"),
|
||||
}
|
||||
match client.identity_key() {
|
||||
Some(k) => println!("identity key: {}", hex::encode(k)),
|
||||
None => println!("identity key: (none)"),
|
||||
}
|
||||
println!("connected : {}", client.is_connected());
|
||||
println!("authenticated: {}", client.is_authenticated());
|
||||
}
|
||||
|
||||
/// Health check — connect to the server and report status.
|
||||
pub async fn cmd_health(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let start = std::time::Instant::now();
|
||||
// The SDK connect() already establishes a QUIC connection.
|
||||
// If we're already connected, just report success.
|
||||
if !client.is_connected() {
|
||||
client.connect().await?;
|
||||
}
|
||||
let rtt_ms = start.elapsed().as_millis();
|
||||
println!("status : ok");
|
||||
println!("rtt : {rtt_ms}ms");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Resolve a username to its identity key.
|
||||
pub async fn cmd_resolve(client: &mut QpqClient, username: &str) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
match quicprochat_sdk::users::resolve_user(rpc, username).await? {
|
||||
Some(key) => {
|
||||
println!("{username} -> {}", hex::encode(&key));
|
||||
}
|
||||
None => {
|
||||
println!("{username}: not found");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// List registered devices.
|
||||
pub async fn cmd_devices_list(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let devices = quicprochat_sdk::devices::list_devices(rpc).await?;
|
||||
if devices.is_empty() {
|
||||
println!("no devices registered");
|
||||
} else {
|
||||
println!("{:<36} {:<20} {}", "DEVICE ID", "NAME", "REGISTERED AT");
|
||||
for d in &devices {
|
||||
println!(
|
||||
"{:<36} {:<20} {}",
|
||||
hex::encode(&d.device_id),
|
||||
d.device_name,
|
||||
d.registered_at,
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Register a new device.
|
||||
pub async fn cmd_devices_register(
|
||||
client: &mut QpqClient,
|
||||
device_id: &str,
|
||||
device_name: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let id_bytes = hex::decode(device_id)
|
||||
.map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
|
||||
let was_new = quicprochat_sdk::devices::register_device(rpc, &id_bytes, device_name).await?;
|
||||
if was_new {
|
||||
println!("device registered: {device_name}");
|
||||
} else {
|
||||
println!("device already registered: {device_name}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Revoke a device.
|
||||
pub async fn cmd_devices_revoke(
|
||||
client: &mut QpqClient,
|
||||
device_id: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let id_bytes = hex::decode(device_id)
|
||||
.map_err(|e| SdkError::Other(anyhow::anyhow!("invalid device_id hex: {e}")))?;
|
||||
let revoked = quicprochat_sdk::devices::revoke_device(rpc, &id_bytes).await?;
|
||||
if revoked {
|
||||
println!("device revoked: {device_id}");
|
||||
} else {
|
||||
println!("device not found: {device_id}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Set up account recovery — generate codes and upload encrypted bundles.
|
||||
pub async fn cmd_recovery_setup(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
// Load identity seed from state file.
|
||||
let state_path = client.config_state_path();
|
||||
let stored = quicprochat_sdk::state::load_state(&state_path, None)
|
||||
.map_err(|e| SdkError::Crypto(format!("load identity for recovery: {e}")))?;
|
||||
|
||||
let rpc = client.rpc()?;
|
||||
let codes =
|
||||
quicprochat_sdk::recovery::setup_recovery(rpc, &stored.identity_seed, &[]).await?;
|
||||
|
||||
println!("=== RECOVERY CODES ===");
|
||||
println!("Save these codes securely. They will NOT be shown again.");
|
||||
println!("Each code can independently recover your account.");
|
||||
println!();
|
||||
for (i, code) in codes.iter().enumerate() {
|
||||
println!(" {}. {}", i + 1, code);
|
||||
}
|
||||
println!();
|
||||
println!("{} codes generated and uploaded.", codes.len());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Outbox commands ──────────────────────────────────────────────────────────
|
||||
|
||||
/// List pending outbox entries.
|
||||
pub fn cmd_outbox_list(client: &QpqClient) -> Result<(), SdkError> {
|
||||
let store = client.conversations()?;
|
||||
let entries = quicprochat_sdk::outbox::list_pending(store)?;
|
||||
if entries.is_empty() {
|
||||
println!("outbox is empty — no pending messages");
|
||||
} else {
|
||||
println!("{:<6} {:<34} {:<8} PAYLOAD SIZE", "ID", "CONVERSATION", "RETRIES");
|
||||
for e in &entries {
|
||||
println!(
|
||||
"{:<6} {:<34} {:<8} {} bytes",
|
||||
e.id,
|
||||
e.conversation_id.hex(),
|
||||
e.retry_count,
|
||||
e.payload.len(),
|
||||
);
|
||||
}
|
||||
println!("\n{} pending entries", entries.len());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retry sending all pending outbox entries.
|
||||
pub async fn cmd_outbox_retry(client: &mut QpqClient) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let store = client.conversations()?;
|
||||
let (sent, failed) = quicprochat_sdk::outbox::flush_outbox(rpc, store).await?;
|
||||
println!("outbox flush: {sent} sent, {failed} permanently failed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clear permanently failed outbox entries.
|
||||
pub fn cmd_outbox_clear(client: &QpqClient) -> Result<(), SdkError> {
|
||||
let store = client.conversations()?;
|
||||
let cleared = quicprochat_sdk::outbox::clear_failed(store)?;
|
||||
println!("cleared {cleared} failed outbox entries");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Recover an account from a recovery code.
|
||||
pub async fn cmd_recovery_restore(
|
||||
client: &mut QpqClient,
|
||||
code: &str,
|
||||
) -> Result<(), SdkError> {
|
||||
let rpc = client.rpc()?;
|
||||
let (identity_seed, conversation_ids) =
|
||||
quicprochat_sdk::recovery::recover_account(rpc, code).await?;
|
||||
|
||||
// Restore identity.
|
||||
let keypair = quicprochat_core::IdentityKeypair::from_seed(identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
|
||||
println!("account recovered successfully");
|
||||
println!("identity key: {}", hex::encode(keypair.public_key_bytes()));
|
||||
if !conversation_ids.is_empty() {
|
||||
println!(
|
||||
"{} conversations need rejoin (peers must re-invite this device)",
|
||||
conversation_ids.len()
|
||||
);
|
||||
}
|
||||
|
||||
// Save recovered state.
|
||||
let state = quicprochat_sdk::state::StoredState {
|
||||
identity_seed,
|
||||
group: None,
|
||||
hybrid_key: None,
|
||||
member_keys: Vec::new(),
|
||||
};
|
||||
let state_path = client.config_state_path();
|
||||
quicprochat_sdk::state::save_state(&state_path, &state, None)?;
|
||||
println!("state saved to {}", state_path.display());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
563
crates/quicprochat-client/src/v2_main.rs
Normal file
563
crates/quicprochat-client/src/v2_main.rs
Normal file
@@ -0,0 +1,563 @@
|
||||
//! v2 CLI entry point — thin shell over `quicprochat_sdk::QpqClient`.
|
||||
//!
|
||||
//! Activated via `--features v2`. Replaces the v1 Cap'n Proto RPC main
|
||||
//! with a simplified command surface backed by the SDK.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command as ProcessCommand;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use quicprochat_sdk::client::QpqClient;
|
||||
use quicprochat_sdk::config::ClientConfig;
|
||||
|
||||
use crate::v2_commands;
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
// NOTE: the `///` doc comments on these fields double as clap's --help text;
// edit them with the user-facing CLI surface in mind. All flags are `global`
// so they may appear before or after the subcommand, and every one can also
// be supplied via the listed environment variable.
#[derive(Debug, Parser)]
#[command(name = "qpc", about = "quicprochat CLI client (v2)", version)]
struct Args {
    /// Server address (host:port).
    #[arg(long, global = true, default_value = "127.0.0.1:7000", env = "QPQ_SERVER")]
    server: String,

    /// TLS server name (must match certificate SAN).
    #[arg(long, global = true, default_value = "localhost", env = "QPQ_SERVER_NAME")]
    server_name: String,

    /// Path to local conversation database.
    #[arg(long, global = true, default_value = "conversations.db", env = "QPQ_CONV_DB")]
    db_path: PathBuf,

    /// Password for encrypting the local database.
    #[arg(long, global = true, env = "QPQ_DB_PASSWORD")]
    db_password: Option<String>,

    /// Path to the client state file (identity key, MLS state).
    #[arg(long, global = true, default_value = "qpc-state.bin", env = "QPQ_STATE")]
    state: PathBuf,

    /// DANGER: Skip TLS certificate verification. Development only.
    #[arg(
        long = "danger-accept-invalid-certs",
        global = true,
        env = "QPQ_DANGER_ACCEPT_INVALID_CERTS"
    )]
    danger_accept_invalid_certs: bool,

    /// Do not auto-start a local qpc-server.
    #[arg(long, global = true, env = "QPQ_NO_SERVER")]
    no_server: bool,

    // The selected subcommand; dispatch happens in `run`.
    #[command(subcommand)]
    command: Cmd,
}
|
||||
|
||||
// Top-level subcommands. The `///` comments are clap help text — part of the
// CLI's observable behavior, so treat them as interface, not documentation.
#[derive(Debug, Subcommand)]
enum Cmd {
    /// Register a new user via OPAQUE (password never leaves the client).
    RegisterUser {
        /// Username for the new account.
        #[arg(long)]
        username: String,
        /// Password (used in OPAQUE PAKE; server never sees it).
        #[arg(long)]
        password: String,
    },

    /// Log in via OPAQUE and receive a session token.
    Login {
        #[arg(long)]
        username: String,
        #[arg(long)]
        password: String,
    },

    /// Show local identity info.
    Whoami,

    /// Server health check.
    Health,

    /// Send a message to a conversation.
    Send {
        /// Conversation name (group name or DM peer username).
        #[arg(long)]
        to: String,
        /// Message text.
        #[arg(long)]
        msg: String,
    },

    /// Receive pending messages from a conversation.
    Recv {
        /// Conversation name.
        #[arg(long)]
        from: String,
    },

    /// Start or resume a DM with a user.
    Dm {
        /// Peer username.
        username: String,
    },

    /// Group management commands.
    Group {
        #[command(subcommand)]
        action: GroupCmd,
    },

    /// Resolve a username to its identity key.
    Resolve {
        /// Username to look up.
        username: String,
    },

    /// Device management.
    Devices {
        #[command(subcommand)]
        action: DevicesCmd,
    },

    /// Account recovery management.
    Recovery {
        #[command(subcommand)]
        action: RecoveryCmd,
    },

    /// Offline outbox management.
    Outbox {
        #[command(subcommand)]
        action: OutboxCmd,
    },
}
|
||||
|
||||
// Subcommands under `qpc group …`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum GroupCmd {
    /// Create a new group.
    Create {
        /// Group name.
        name: String,
    },
    /// Invite a user to a group.
    Invite {
        /// Group name.
        #[arg(long)]
        group: String,
        /// Username to invite.
        #[arg(long)]
        user: String,
    },
}
|
||||
|
||||
// Subcommands under `qpc devices …`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum DevicesCmd {
    /// List registered devices.
    List,
    /// Register a new device.
    Register {
        /// Device ID (hex).
        #[arg(long)]
        id: String,
        /// Human-readable device name.
        #[arg(long)]
        name: String,
    },
    /// Revoke a device.
    Revoke {
        /// Device ID (hex).
        #[arg(long)]
        id: String,
    },
}
|
||||
|
||||
// Subcommands under `qpc recovery …`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum RecoveryCmd {
    /// Generate recovery codes and upload encrypted bundles.
    Setup,
    /// Recover account from a recovery code.
    Restore {
        /// Recovery code (e.g. "A3B7K9").
        code: String,
    },
}
|
||||
|
||||
// Subcommands under `qpc outbox …`. `///` comments are clap help text.
#[derive(Debug, Subcommand)]
enum OutboxCmd {
    /// Show pending outbox entries.
    List,
    /// Retry sending all pending outbox entries.
    Retry,
    /// Clear permanently failed outbox entries.
    Clear,
}
|
||||
|
||||
// ── Auto-server launch ───────────────────────────────────────────────────────
|
||||
|
||||
/// RAII guard that kills an auto-started server process on drop.
///
/// Holds `None` when no server was spawned (already running, or `--no-server`),
/// in which case drop is a no-op.
struct ServerGuard(Option<std::process::Child>);

impl Drop for ServerGuard {
    fn drop(&mut self) {
        // Take the child out so cleanup runs at most once; kill/wait errors
        // are ignored — the process may already have exited on its own.
        if let Some(mut child) = self.0.take() {
            let _ = child.kill();
            let _ = child.wait();
        }
    }
}
|
||||
|
||||
/// Find the `qpc-server` binary: same directory as current exe, then PATH.
///
/// Returns `None` when neither location has the binary.
fn find_server_binary() -> Option<PathBuf> {
    // 1. Prefer a sibling binary next to the running executable.
    let sibling = std::env::current_exe()
        .ok()
        .map(|exe| exe.with_file_name("qpc-server"))
        .filter(|candidate| candidate.exists());
    if sibling.is_some() {
        return sibling;
    }

    // 2. Fall back to scanning every directory on PATH, in order.
    let path_var = std::env::var_os("PATH")?;
    std::env::split_paths(&path_var)
        .map(|dir| dir.join("qpc-server"))
        .find(|candidate| candidate.exists())
}
|
||||
|
||||
/// Try a QUIC health probe to the server address.
|
||||
async fn probe_server(server_addr: &str) -> bool {
|
||||
use std::net::ToSocketAddrs;
|
||||
let addr = match server_addr.to_socket_addrs() {
|
||||
Ok(mut addrs) => match addrs.next() {
|
||||
Some(a) => a,
|
||||
None => return false,
|
||||
},
|
||||
Err(_) => return false,
|
||||
};
|
||||
// Simple TCP probe — if the port is open, the server is likely running.
|
||||
tokio::net::TcpStream::connect(addr)
|
||||
.await
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// Start a local qpc-server if one isn't already running.
/// Returns a guard that kills the child on drop (if we started one).
///
/// Flow: honour `--no-server`, probe the address, locate the binary, spawn it
/// with stdio detached, then poll with exponential backoff (100ms start,
/// doubling, capped at 1s) until the port answers or 3 seconds elapse.
async fn ensure_server_running(
    server_addr: &str,
    data_dir: &Path,
    no_server: bool,
) -> anyhow::Result<ServerGuard> {
    // Explicit opt-out: never spawn, even if nothing is listening.
    if no_server {
        return Ok(ServerGuard(None));
    }

    // Something already answers on the address — nothing for us to manage.
    if probe_server(server_addr).await {
        return Ok(ServerGuard(None));
    }

    let binary = find_server_binary().ok_or_else(|| {
        anyhow::anyhow!(
            "server at {server_addr} is not reachable and qpc-server binary not found; \
            start a server manually or install qpc-server"
        )
    })?;

    // Certificate material is expected alongside the state file's directory.
    let cert_path = data_dir.join("server-cert.der");
    let key_path = data_dir.join("server-key.der");

    eprintln!("starting server on {server_addr}...");

    // NOTE(review): the auto-started server is launched with
    // --allow-insecure-auth — presumably a dev-only convenience; confirm this
    // path is never taken in production deployments.
    let child = ProcessCommand::new(&binary)
        .args([
            "--allow-insecure-auth",
            "--listen",
            server_addr,
            "--tls-cert",
            &cert_path.to_string_lossy(),
            "--tls-key",
            &key_path.to_string_lossy(),
        ])
        // Detach the child's output so it doesn't interleave with ours.
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .spawn()
        .with_context(|| format!("failed to spawn {}", binary.display()))?;

    // Wrap immediately so the child is killed even if readiness polling
    // errors out below.
    let guard = ServerGuard(Some(child));

    // Poll until the server is ready.
    let mut delay = Duration::from_millis(100);
    let max_wait = Duration::from_secs(3);
    let start = std::time::Instant::now();

    loop {
        tokio::time::sleep(delay).await;

        if probe_server(server_addr).await {
            eprintln!("server ready");
            return Ok(guard);
        }

        if start.elapsed() > max_wait {
            // `guard` drops here, killing the unresponsive child.
            anyhow::bail!(
                "auto-started qpc-server but it did not become ready within {max_wait:?}"
            );
        }

        // Exponential backoff, capped at one second between probes.
        delay = (delay * 2).min(Duration::from_secs(1));
    }
}
|
||||
|
||||
// ── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Build a `ClientConfig` from CLI args.
|
||||
fn build_config(args: &Args) -> anyhow::Result<ClientConfig> {
|
||||
let server_addr = args
|
||||
.server
|
||||
.parse()
|
||||
.with_context(|| format!("invalid server address: {}", args.server))?;
|
||||
|
||||
Ok(ClientConfig {
|
||||
server_addr,
|
||||
server_name: args.server_name.clone(),
|
||||
db_path: args.db_path.clone(),
|
||||
db_password: args.db_password.clone(),
|
||||
state_path: args.state.clone(),
|
||||
accept_invalid_certs: args.danger_accept_invalid_certs,
|
||||
..ClientConfig::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// Build, connect, and return a `QpqClient`. Loads identity from state file
|
||||
/// if it exists.
|
||||
async fn connect_client(args: &Args) -> anyhow::Result<QpqClient> {
|
||||
let config = build_config(args)?;
|
||||
let mut client = QpqClient::new(config);
|
||||
|
||||
// Try loading identity from state file.
|
||||
if args.state.exists() {
|
||||
match quicprochat_sdk::state::load_state(&args.state, args.db_password.as_deref()) {
|
||||
Ok(stored) => {
|
||||
let keypair = quicprochat_core::IdentityKeypair::from_seed(stored.identity_seed);
|
||||
client.set_identity_key(keypair.public_key_bytes().to_vec());
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::debug!("could not load state from {}: {e}", args.state.display());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
client.connect().await.context("failed to connect to server")?;
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
// ── Entry point ──────────────────────────────────────────────────────────────
|
||||
|
||||
pub fn main() {
|
||||
// Install the rustls crypto provider before any TLS operations.
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap_or_else(|e| {
|
||||
eprintln!("fatal: {e}");
|
||||
std::process::exit(1);
|
||||
});
|
||||
|
||||
if let Err(e) = rt.block_on(run(args)) {
|
||||
eprintln!("error: {e:#}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Dispatch the parsed CLI command.
///
/// Auto-starts a local server first (except for `whoami`, which is local-only),
/// then routes each subcommand to its handler in `v2_commands`. Several
/// one-shot commands (`send`, `recv`, `group …`) are placeholders that only
/// connect and print a notice — full MLS-backed flows live in the REPL.
async fn run(args: Args) -> anyhow::Result<()> {
    // Auto-start server if needed (except for whoami which is local-only).
    // The guard kills any spawned child when `run` returns.
    let data_dir = args.state.parent().unwrap_or_else(|| Path::new("."));
    let _server_guard = match args.command {
        Cmd::Whoami => ServerGuard(None),
        _ => ensure_server_running(&args.server, data_dir, args.no_server).await?,
    };

    match args.command {
        Cmd::RegisterUser {
            ref username,
            ref password,
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_register_user(&mut client, username, password)
                .await
                .context("register-user failed")?;
        }

        Cmd::Login {
            ref username,
            ref password,
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_login(&mut client, username, password)
                .await
                .context("login failed")?;
        }

        Cmd::Whoami => {
            // Whoami is local-only — create client without connecting.
            let config = build_config(&args)?;
            let mut client = QpqClient::new(config);
            if args.state.exists() {
                match quicprochat_sdk::state::load_state(
                    &args.state,
                    args.db_password.as_deref(),
                ) {
                    Ok(stored) => {
                        let keypair =
                            quicprochat_core::IdentityKeypair::from_seed(stored.identity_seed);
                        client.set_identity_key(keypair.public_key_bytes().to_vec());
                    }
                    Err(e) => {
                        // Non-fatal: whoami still reports "not logged in".
                        eprintln!("warning: could not load state: {e}");
                    }
                }
            }
            v2_commands::cmd_whoami(&client);
        }

        Cmd::Health => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_health(&mut client)
                .await
                .context("health check failed")?;
        }

        Cmd::Resolve { ref username } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_resolve(&mut client, username)
                .await
                .context("resolve failed")?;
        }

        Cmd::Dm { ref username } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_resolve(&mut client, username)
                .await
                .context("dm setup failed")?;
            // For now, print the resolved key. Full DM creation requires
            // MLS group state, which will be handled in the REPL flow.
            println!("(DM creation with full MLS setup is available in the REPL)");
        }

        Cmd::Send { ref to, ref msg } => {
            let _ = (to, msg);
            let _client = connect_client(&args).await?;
            // Full send requires MLS group state restoration — deferred to REPL.
            println!("(send is currently available in the REPL; one-shot send coming soon)");
        }

        Cmd::Recv { ref from } => {
            let _ = from;
            let _client = connect_client(&args).await?;
            println!("(recv is currently available in the REPL; one-shot recv coming soon)");
        }

        Cmd::Group {
            action: GroupCmd::Create { ref name },
        } => {
            let _ = name;
            let _client = connect_client(&args).await?;
            println!("(group create is currently available in the REPL; one-shot coming soon)");
        }

        Cmd::Group {
            action:
                GroupCmd::Invite {
                    ref group,
                    ref user,
                },
        } => {
            let _ = (group, user);
            let _client = connect_client(&args).await?;
            println!("(group invite is currently available in the REPL; one-shot coming soon)");
        }

        Cmd::Devices {
            action: DevicesCmd::List,
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_devices_list(&mut client)
                .await
                .context("devices list failed")?;
        }

        Cmd::Devices {
            action: DevicesCmd::Register { ref id, ref name },
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_devices_register(&mut client, id, name)
                .await
                .context("device register failed")?;
        }

        Cmd::Devices {
            action: DevicesCmd::Revoke { ref id },
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_devices_revoke(&mut client, id)
                .await
                .context("device revoke failed")?;
        }

        Cmd::Recovery {
            action: RecoveryCmd::Setup,
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_recovery_setup(&mut client)
                .await
                .context("recovery setup failed")?;
        }

        Cmd::Recovery {
            action: RecoveryCmd::Restore { ref code },
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_recovery_restore(&mut client, code)
                .await
                .context("recovery restore failed")?;
        }

        Cmd::Outbox {
            action: OutboxCmd::List,
        } => {
            // Outbox list is read-only and sync, but a client (and thus a
            // reachable server) is still constructed by connect_client.
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_outbox_list(&client)
                .context("outbox list failed")?;
        }

        Cmd::Outbox {
            action: OutboxCmd::Retry,
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_outbox_retry(&mut client)
                .await
                .context("outbox retry failed")?;
        }

        Cmd::Outbox {
            action: OutboxCmd::Clear,
        } => {
            let mut client = connect_client(&args).await?;
            v2_commands::cmd_outbox_clear(&client)
                .context("outbox clear failed")?;
        }
    }

    Ok(())
}
|
||||
1956
crates/quicprochat-client/tests/e2e.rs
Normal file
1956
crates/quicprochat-client/tests/e2e.rs
Normal file
File diff suppressed because it is too large
Load Diff
90
crates/quicprochat-core/Cargo.toml
Normal file
90
crates/quicprochat-core/Cargo.toml
Normal file
@@ -0,0 +1,90 @@
|
||||
[package]
name = "quicprochat-core"
version = "0.1.0"
edition.workspace = true
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicprochat."
license = "Apache-2.0 OR MIT"
repository.workspace = true

[features]
default = ["native"]
# The "native" feature enables MLS (openmls), OPAQUE, Cap'n Proto, tokio, and
# filesystem-backed key storage. Disable it (--no-default-features) to compile
# the pure-crypto subset to wasm32-unknown-unknown.
native = [
    "dep:openmls",
    "dep:openmls_rust_crypto",
    "dep:openmls_traits",
    "dep:openmls_memory_storage",
    "dep:tls_codec",
    "dep:opaque-ke",
    "dep:bincode",
    "dep:capnp",
    "dep:quicprochat-proto",
    "dep:tokio",
]

[dependencies]
# Crypto — classical (always available, WASM-safe)
x25519-dalek = { workspace = true }
ed25519-dalek = { workspace = true }
sha2 = { workspace = true }
hmac = { workspace = true }
hkdf = { workspace = true }
ciborium = { workspace = true }
chacha20poly1305 = { workspace = true }
zeroize = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
argon2 = { workspace = true }
thiserror = { workspace = true }

# Crypto — post-quantum hybrid KEM (M7) — always available, WASM-safe
ml-kem = { workspace = true }

# Crypto — OPAQUE password-authenticated key exchange (native only)
opaque-ke = { workspace = true, optional = true }

# Crypto — MLS (M2) (native only)
openmls = { workspace = true, optional = true }
openmls_rust_crypto = { workspace = true, optional = true }
openmls_traits = { workspace = true, optional = true }
openmls_memory_storage = { workspace = true, optional = true }
tls_codec = { workspace = true, optional = true }
bincode = { workspace = true, optional = true }

# Serialisation (native only)
capnp = { workspace = true, optional = true }
quicprochat-proto = { path = "../quicprochat-proto", optional = true }

# Async runtime (native only)
tokio = { workspace = true, optional = true }

# WASM: provide getrandom with js backend
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2", features = ["js"] }

[lints]
workspace = true

[dev-dependencies]
tokio = { workspace = true }
criterion = { version = "0.5", features = ["html_reports"] }
prost = "0.13"

# Criterion benchmarks: harness = false hands main() to criterion's
# own entry point instead of the default libtest bench harness.
[[bench]]
name = "serialization"
harness = false

[[bench]]
name = "mls_operations"
harness = false

[[bench]]
name = "hybrid_kem_bench"
harness = false

[[bench]]
name = "crypto_benchmarks"
harness = false
||||
150
crates/quicprochat-core/benches/crypto_benchmarks.rs
Normal file
150
crates/quicprochat-core/benches/crypto_benchmarks.rs
Normal file
@@ -0,0 +1,150 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: Identity keypair operations, sealed sender, and message padding.
|
||||
//!
|
||||
//! Covers:
|
||||
//! - [`IdentityKeypair`] generation, signing, and signature verification
|
||||
//! - Sealed sender `seal` / `unseal` (Ed25519 sign + verify overhead)
|
||||
//! - Message padding `pad` / `unpad` at various payload sizes
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use quicprochat_core::{compute_safety_number, IdentityKeypair, padding};
|
||||
|
||||
// ── Identity keypair benchmarks ──────────────────────────────────────────────
|
||||
|
||||
/// Benchmark raw Ed25519 identity keypair generation.
fn bench_identity_keygen(c: &mut Criterion) {
    c.bench_function("identity_keygen", |b| {
        // black_box keeps the optimizer from eliding the keygen result.
        b.iter(|| black_box(IdentityKeypair::generate()));
    });
}
|
||||
|
||||
/// Benchmark signing a fixed realistic payload with a pre-generated identity.
fn bench_identity_sign(c: &mut Criterion) {
    // Keygen happens once, outside the timed loop.
    let identity = IdentityKeypair::generate();
    let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";

    c.bench_function("identity_sign", |b| {
        b.iter(|| black_box(identity.sign_raw(black_box(payload))));
    });
}
|
||||
|
||||
/// Benchmark signature verification against a fixed payload/signature pair.
fn bench_identity_verify(c: &mut Criterion) {
    // Key, signature, and public key are all prepared outside the loop so
    // only verify_raw is measured.
    let identity = IdentityKeypair::generate();
    let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";
    let sig = identity.sign_raw(payload);
    let pk = identity.public_key_bytes();

    c.bench_function("identity_verify", |b| {
        b.iter(|| {
            // unwrap: the signature was just produced over this payload,
            // so verification failure would be a bug in the crypto itself.
            IdentityKeypair::verify_raw(
                black_box(&pk),
                black_box(payload),
                black_box(&sig),
            )
            .unwrap();
        });
    });
}
|
||||
|
||||
// ── Sealed sender benchmarks ─────────────────────────────────────────────────
|
||||
|
||||
fn bench_sealed_sender(c: &mut Criterion) {
|
||||
use quicprochat_core::sealed_sender::{seal, unseal};
|
||||
|
||||
let sizes: &[(&str, usize)] = &[
|
||||
("32B", 32),
|
||||
("256B", 256),
|
||||
("1KB", 1024),
|
||||
("4KB", 4096),
|
||||
];
|
||||
|
||||
let identity = IdentityKeypair::generate();
|
||||
|
||||
let mut group = c.benchmark_group("sealed_sender_seal");
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
group.bench_with_input(
|
||||
BenchmarkId::from_parameter(label),
|
||||
&payload,
|
||||
|b, payload| {
|
||||
b.iter(|| black_box(seal(black_box(&identity), black_box(payload))));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
|
||||
let mut group = c.benchmark_group("sealed_sender_unseal");
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
let sealed = seal(&identity, &payload);
|
||||
group.bench_with_input(
|
||||
BenchmarkId::from_parameter(label),
|
||||
&sealed,
|
||||
|b, sealed| {
|
||||
b.iter(|| black_box(unseal(black_box(sealed)).unwrap()));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
// ── Message padding benchmarks ────────────────────────────────────────────────
|
||||
|
||||
fn bench_padding(c: &mut Criterion) {
|
||||
// Representative sizes: one per bucket + oversized
|
||||
let sizes: &[(&str, usize)] = &[
|
||||
("50B", 50), // → 256 bucket
|
||||
("512B", 512), // → 1024 bucket
|
||||
("2KB", 2048), // → 4096 bucket
|
||||
("8KB", 8192), // → 16384 bucket
|
||||
("20KB", 20480), // → 32768 (oversized)
|
||||
];
|
||||
|
||||
let mut group = c.benchmark_group("padding_pad");
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
group.bench_with_input(
|
||||
BenchmarkId::from_parameter(label),
|
||||
&payload,
|
||||
|b, payload| {
|
||||
b.iter(|| black_box(padding::pad(black_box(payload))));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
|
||||
let mut group = c.benchmark_group("padding_unpad");
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
let padded = padding::pad(&payload);
|
||||
group.bench_with_input(
|
||||
BenchmarkId::from_parameter(label),
|
||||
&padded,
|
||||
|b, padded| {
|
||||
b.iter(|| black_box(padding::unpad(black_box(padded)).unwrap()));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
// ── Safety number benchmarks ─────────────────────────────────────────────────
|
||||
|
||||
/// Benchmark safety-number derivation from a fixed pair of identity keys.
fn bench_safety_number(c: &mut Criterion) {
    // Arbitrary distinct 32-byte keys; the function is input-agnostic.
    let key_a = [0x1au8; 32];
    let key_b = [0x2bu8; 32];

    c.bench_function("safety_number", |b| {
        b.iter(|| black_box(compute_safety_number(black_box(&key_a), black_box(&key_b))));
    });
}
|
||||
|
||||
// Register every benchmark above with criterion's default configuration and
// generate the harness entry point (the manifest sets harness = false).
criterion_group!(
    benches,
    bench_identity_keygen,
    bench_identity_sign,
    bench_identity_verify,
    bench_sealed_sender,
    bench_padding,
    bench_safety_number,
);
criterion_main!(benches);
|
||||
153
crates/quicprochat-core/benches/hybrid_kem_bench.rs
Normal file
153
crates/quicprochat-core/benches/hybrid_kem_bench.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: Hybrid KEM (X25519 + ML-KEM-768) vs classical-only encryption.
|
||||
//!
|
||||
//! Compares keypair generation, encryption, and decryption times for the
|
||||
//! hybrid post-quantum scheme against classical X25519 + ChaCha20-Poly1305.
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use quicprochat_core::{hybrid_encrypt, hybrid_decrypt, HybridKeypair};
|
||||
|
||||
// ── Classical baseline (X25519 + ChaCha20-Poly1305) ─────────────────────────
|
||||
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use hkdf::Hkdf;
|
||||
use rand::{rngs::OsRng, RngCore};
|
||||
use sha2::Sha256;
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
|
||||
|
||||
/// Classical-only baseline keypair: a static X25519 secret with its public key.
struct ClassicalKeypair {
    // Long-term X25519 secret scalar; used by `classical_decrypt`.
    secret: StaticSecret,
    // Matching Curve25519 public key; passed to `classical_encrypt`.
    public: X25519Public,
}
|
||||
|
||||
impl ClassicalKeypair {
|
||||
fn generate() -> Self {
|
||||
let secret = StaticSecret::random_from_rng(OsRng);
|
||||
let public = X25519Public::from(&secret);
|
||||
Self { secret, public }
|
||||
}
|
||||
}
|
||||
|
||||
fn classical_encrypt(recipient_pk: &X25519Public, plaintext: &[u8]) -> Vec<u8> {
|
||||
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||
let eph_public = X25519Public::from(&eph_secret);
|
||||
let shared = eph_secret.diffie_hellman(recipient_pk);
|
||||
|
||||
let hk = Hkdf::<Sha256>::new(None, shared.as_bytes());
|
||||
let mut key_bytes = [0u8; 32];
|
||||
hk.expand(b"classical-bench", &mut key_bytes).unwrap();
|
||||
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key_bytes));
|
||||
let ct = cipher
|
||||
.encrypt(Nonce::from_slice(&nonce_bytes), plaintext)
|
||||
.unwrap();
|
||||
|
||||
// Wire: eph_pk(32) || nonce(12) || ciphertext
|
||||
let mut out = Vec::with_capacity(32 + 12 + ct.len());
|
||||
out.extend_from_slice(eph_public.as_bytes());
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
out.extend_from_slice(&ct);
|
||||
out
|
||||
}
|
||||
|
||||
fn classical_decrypt(keypair: &ClassicalKeypair, envelope: &[u8]) -> Vec<u8> {
|
||||
let eph_pk = X25519Public::from(<[u8; 32]>::try_from(&envelope[..32]).unwrap());
|
||||
let nonce_bytes: [u8; 12] = envelope[32..44].try_into().unwrap();
|
||||
let ct = &envelope[44..];
|
||||
|
||||
let shared = keypair.secret.diffie_hellman(&eph_pk);
|
||||
|
||||
let hk = Hkdf::<Sha256>::new(None, shared.as_bytes());
|
||||
let mut key_bytes = [0u8; 32];
|
||||
hk.expand(b"classical-bench", &mut key_bytes).unwrap();
|
||||
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key_bytes));
|
||||
cipher
|
||||
.decrypt(Nonce::from_slice(&nonce_bytes), ct)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
// ── Benchmarks ──────────────────────────────────────────────────────────────
|
||||
|
||||
fn bench_keygen(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("kem_keygen");
|
||||
group.bench_function("hybrid", |b| {
|
||||
b.iter(|| black_box(HybridKeypair::generate()));
|
||||
});
|
||||
group.bench_function("classical", |b| {
|
||||
b.iter(|| black_box(ClassicalKeypair::generate()));
|
||||
});
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_encrypt(c: &mut Criterion) {
|
||||
let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)];
|
||||
let mut group = c.benchmark_group("kem_encrypt");
|
||||
|
||||
let hybrid_kp = HybridKeypair::generate();
|
||||
let hybrid_pk = hybrid_kp.public_key();
|
||||
let classical_kp = ClassicalKeypair::generate();
|
||||
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("hybrid", label),
|
||||
&payload,
|
||||
|b, payload| {
|
||||
b.iter(|| hybrid_encrypt(&hybrid_pk, black_box(payload), b"", b"").unwrap());
|
||||
},
|
||||
);
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("classical", label),
|
||||
&payload,
|
||||
|b, payload| {
|
||||
b.iter(|| classical_encrypt(&classical_kp.public, black_box(payload)));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_decrypt(c: &mut Criterion) {
|
||||
let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)];
|
||||
let mut group = c.benchmark_group("kem_decrypt");
|
||||
|
||||
let hybrid_kp = HybridKeypair::generate();
|
||||
let hybrid_pk = hybrid_kp.public_key();
|
||||
let classical_kp = ClassicalKeypair::generate();
|
||||
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
let hybrid_ct = hybrid_encrypt(&hybrid_pk, &payload, b"", b"").unwrap();
|
||||
let classical_ct = classical_encrypt(&classical_kp.public, &payload);
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("hybrid", label),
|
||||
&hybrid_ct,
|
||||
|b, ct| {
|
||||
b.iter(|| hybrid_decrypt(&hybrid_kp, black_box(ct), b"", b"").unwrap());
|
||||
},
|
||||
);
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("classical", label),
|
||||
&classical_ct,
|
||||
|b, ct| {
|
||||
b.iter(|| classical_decrypt(&classical_kp, black_box(ct)));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_keygen, bench_encrypt, bench_decrypt);
|
||||
criterion_main!(benches);
|
||||
157
crates/quicprochat-core/benches/mls_operations.rs
Normal file
157
crates/quicprochat-core/benches/mls_operations.rs
Normal file
@@ -0,0 +1,157 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: MLS group operations at various group sizes.
|
||||
//!
|
||||
//! Measures KeyPackage generation, group creation, member addition,
|
||||
//! message encryption, and message decryption.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
|
||||
use quicprochat_core::{GroupMember, IdentityKeypair};
|
||||
|
||||
/// Create identities and a group of the given size.
/// Returns (creator, Vec<members>).
///
/// The creator makes the group, then each joiner generates a KeyPackage,
/// is added by the creator, and joins via the resulting Welcome message.
fn setup_group(size: usize) -> (GroupMember, Vec<GroupMember>) {
    let creator_id = Arc::new(IdentityKeypair::generate());
    let mut creator = GroupMember::new(creator_id);
    creator.create_group(b"bench-group").unwrap();

    // `size` counts the creator, so only size-1 joiners are created
    // (saturating_sub guards the degenerate size == 0 case).
    let mut members = Vec::with_capacity(size.saturating_sub(1));
    for _ in 1..size {
        let joiner_id = Arc::new(IdentityKeypair::generate());
        let mut joiner = GroupMember::new(joiner_id);
        let kp = joiner.generate_key_package().unwrap();

        // NOTE(review): the commit is discarded here — presumably
        // `add_member` merges it into the creator's state internally;
        // confirm against GroupMember's implementation.
        let (_commit, welcome) = creator.add_member(&kp).unwrap();
        joiner.join_group(&welcome).unwrap();
        members.push(joiner);
    }

    (creator, members)
}
|
||||
|
||||
fn bench_keygen(c: &mut Criterion) {
|
||||
c.bench_function("mls_keygen", |b| {
|
||||
b.iter_batched(
|
||||
|| {
|
||||
let id = Arc::new(IdentityKeypair::generate());
|
||||
GroupMember::new(id)
|
||||
},
|
||||
|mut member| {
|
||||
member.generate_key_package().unwrap();
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_group_create(c: &mut Criterion) {
|
||||
c.bench_function("mls_group_create", |b| {
|
||||
b.iter_batched(
|
||||
|| {
|
||||
let id = Arc::new(IdentityKeypair::generate());
|
||||
GroupMember::new(id)
|
||||
},
|
||||
|mut member| {
|
||||
member.create_group(b"bench-group").unwrap();
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_add_member(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("mls_add_member");
|
||||
group.sample_size(10);
|
||||
for size in [2, 10, 50, 100] {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
|
||||
b.iter_batched(
|
||||
|| {
|
||||
let (creator, members) = setup_group(size);
|
||||
let joiner_id = Arc::new(IdentityKeypair::generate());
|
||||
let mut joiner = GroupMember::new(joiner_id);
|
||||
let kp = joiner.generate_key_package().unwrap();
|
||||
(creator, members, joiner, kp)
|
||||
},
|
||||
|(mut creator, _members, _joiner, kp)| {
|
||||
creator.add_member(&kp).unwrap();
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
);
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_epoch_rotation(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("mls_epoch_rotation");
|
||||
group.sample_size(10);
|
||||
for size in [2, 10, 50] {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
|
||||
b.iter_batched(
|
||||
|| {
|
||||
let (mut creator, members) = setup_group(size);
|
||||
// Propose a self-update to simulate epoch rotation
|
||||
let proposal = creator.propose_self_update().unwrap();
|
||||
(creator, members, proposal)
|
||||
},
|
||||
|(mut creator, _members, _proposal)| {
|
||||
// Commit pending proposals (the self-update) to advance the epoch
|
||||
creator.commit_pending_proposals().unwrap();
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
);
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_send_message(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("mls_send_message");
|
||||
for size in [2, 10, 50] {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
|
||||
let (mut creator, _members) = setup_group(size);
|
||||
let payload = b"hello benchmark message";
|
||||
b.iter(|| {
|
||||
creator.send_message(payload).unwrap();
|
||||
});
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_receive_message(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("mls_receive_message");
|
||||
for size in [2, 10, 50] {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
|
||||
// For receive, we need a fresh ciphertext each iteration since
|
||||
// MLS message processing is destructive (epoch state changes).
|
||||
// We pre-generate a batch and consume them.
|
||||
let (mut creator, mut members) = setup_group(size);
|
||||
if members.is_empty() {
|
||||
return;
|
||||
}
|
||||
let payload = b"hello benchmark message";
|
||||
b.iter_batched(
|
||||
|| creator.send_message(payload).unwrap(),
|
||||
|ct| {
|
||||
// Receive on the first joiner
|
||||
let _ = members[0].receive_message(&ct);
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
);
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
bench_keygen,
|
||||
bench_group_create,
|
||||
bench_add_member,
|
||||
bench_epoch_rotation,
|
||||
bench_send_message,
|
||||
bench_receive_message,
|
||||
);
|
||||
criterion_main!(benches);
|
||||
171
crates/quicprochat-core/benches/serialization.rs
Normal file
171
crates/quicprochat-core/benches/serialization.rs
Normal file
@@ -0,0 +1,171 @@
|
||||
#![allow(clippy::unwrap_used)]
|
||||
//! Benchmark: Cap'n Proto vs Protobuf serialization for chat message envelopes.
|
||||
//!
|
||||
//! Compares serialization/deserialization speed and encoded size at three
|
||||
//! payload sizes (100 B, 1 KB, 4 KB) for a typical Envelope{seq, data} message.
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
// ── Cap'n Proto path ────────────────────────────────────────────────────────
|
||||
|
||||
fn capnp_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
|
||||
let mut msg = capnp::message::Builder::new_default();
|
||||
{
|
||||
let mut envelope = msg.init_root::<quicprochat_proto::node_capnp::envelope::Builder>();
|
||||
envelope.set_seq(seq);
|
||||
envelope.set_data(data);
|
||||
}
|
||||
quicprochat_proto::to_bytes(&msg).unwrap()
|
||||
}
|
||||
|
||||
/// Decode an Envelope from Cap'n Proto bytes, returning (seq, data).
///
/// The data bytes are copied out with `to_vec()` so the result does not
/// borrow the reader; note this copy is part of the measured cost.
fn capnp_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
    let reader = quicprochat_proto::from_bytes(bytes).unwrap();
    let envelope = reader
        .get_root::<quicprochat_proto::node_capnp::envelope::Reader>()
        .unwrap();
    (envelope.get_seq(), envelope.get_data().unwrap().to_vec())
}
|
||||
|
||||
// ── Protobuf path (hand-coded prost encoding to avoid build-dep) ────────────
|
||||
//
|
||||
// Envelope { seq: uint64 (field 1), data: bytes (field 2) }
|
||||
// Wire format: varint tag + varint seq + len-delimited data
|
||||
|
||||
fn protobuf_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
|
||||
// Build a prost message via raw encoding.
|
||||
// Field 1: uint64 seq, wire type 0 (varint), tag = (1 << 3) | 0 = 0x08
|
||||
// Field 2: bytes data, wire type 2 (length-delimited), tag = (2 << 3) | 2 = 0x12
|
||||
let mut buf = Vec::with_capacity(10 + data.len());
|
||||
// Encode field 1 (seq)
|
||||
prost::encoding::uint64::encode(1, &seq, &mut buf);
|
||||
// Encode field 2 (data)
|
||||
prost::encoding::bytes::encode(2, &data.to_vec(), &mut buf);
|
||||
buf
|
||||
}
|
||||
|
||||
/// Decode an Envelope from protobuf wire bytes, returning (seq, data).
///
/// Walks the buffer field by field with prost's low-level wire-format helpers;
/// unknown field numbers are skipped, so the decoder tolerates schema growth.
fn protobuf_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
    // Decode manually using prost wire format
    let mut seq: u64 = 0;
    let mut data: Vec<u8> = Vec::new();
    let mut buf = bytes;

    while !buf.is_empty() {
        // decode_key yields (field number, wire type) and advances `buf`.
        let (tag, wire_type) =
            prost::encoding::decode_key(&mut buf).expect("decode key");
        match tag {
            1 => {
                prost::encoding::uint64::merge(wire_type, &mut seq, &mut buf, Default::default())
                    .expect("decode seq");
            }
            2 => {
                prost::encoding::bytes::merge(wire_type, &mut data, &mut buf, Default::default())
                    .expect("decode data");
            }
            _ => {
                prost::encoding::skip_field(wire_type, tag, &mut buf, Default::default())
                    .expect("skip unknown field");
            }
        }
    }
    (seq, data)
}
|
||||
|
||||
// ── Benchmarks ──────────────────────────────────────────────────────────────
|
||||
|
||||
fn bench_serialize(c: &mut Criterion) {
|
||||
let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096)];
|
||||
let mut group = c.benchmark_group("serialize_envelope");
|
||||
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
let seq = 42u64;
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("capnp", label),
|
||||
&(&seq, &payload),
|
||||
|b, &(seq, payload)| {
|
||||
b.iter(|| capnp_serialize_envelope(black_box(*seq), black_box(payload)));
|
||||
},
|
||||
);
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("protobuf", label),
|
||||
&(&seq, &payload),
|
||||
|b, &(seq, payload)| {
|
||||
b.iter(|| protobuf_serialize_envelope(black_box(*seq), black_box(payload)));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_deserialize(c: &mut Criterion) {
|
||||
let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096)];
|
||||
let mut group = c.benchmark_group("deserialize_envelope");
|
||||
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
let seq = 42u64;
|
||||
|
||||
let capnp_bytes = capnp_serialize_envelope(seq, &payload);
|
||||
let proto_bytes = protobuf_serialize_envelope(seq, &payload);
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("capnp", label),
|
||||
&capnp_bytes,
|
||||
|b, bytes| {
|
||||
b.iter(|| capnp_deserialize_envelope(black_box(bytes)));
|
||||
},
|
||||
);
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("protobuf", label),
|
||||
&proto_bytes,
|
||||
|b, bytes| {
|
||||
b.iter(|| protobuf_deserialize_envelope(black_box(bytes)));
|
||||
},
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_encoded_sizes(c: &mut Criterion) {
|
||||
let sizes: &[(&str, usize)] = &[("100B", 100), ("1KB", 1024), ("4KB", 4096)];
|
||||
let mut group = c.benchmark_group("encoded_size");
|
||||
|
||||
for (label, size) in sizes {
|
||||
let payload = vec![0xABu8; *size];
|
||||
let capnp_bytes = capnp_serialize_envelope(42, &payload);
|
||||
let proto_bytes = protobuf_serialize_envelope(42, &payload);
|
||||
|
||||
// Use a trivial benchmark that just returns the size -- the point
|
||||
// is to get criterion to print the iteration count and allow
|
||||
// comparison. The real value is in the eprintln below.
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("capnp", label),
|
||||
&capnp_bytes,
|
||||
|b, bytes| {
|
||||
b.iter(|| black_box(bytes.len()));
|
||||
},
|
||||
);
|
||||
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("protobuf", label),
|
||||
&proto_bytes,
|
||||
|b, bytes| {
|
||||
b.iter(|| black_box(bytes.len()));
|
||||
},
|
||||
);
|
||||
|
||||
eprintln!(
|
||||
" {label}: capnp={} bytes, protobuf={} bytes, overhead={:+} bytes",
|
||||
capnp_bytes.len(),
|
||||
proto_bytes.len(),
|
||||
capnp_bytes.len() as isize - proto_bytes.len() as isize,
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_serialize, bench_deserialize, bench_encoded_sizes);
|
||||
criterion_main!(benches);
|
||||
21
crates/quicprochat-core/proto/chat_message.proto
Normal file
21
crates/quicprochat-core/proto/chat_message.proto
Normal file
@@ -0,0 +1,21 @@
|
||||
syntax = "proto3";
|
||||
package quicprochat.bench;
|
||||
|
||||
// Equivalent to the Envelope struct in delivery.capnp
|
||||
message Envelope {
|
||||
uint64 seq = 1;
|
||||
bytes data = 2;
|
||||
}
|
||||
|
||||
// Equivalent to a chat message payload (app_message.rs Chat variant)
|
||||
message ChatMessage {
|
||||
bytes message_id = 1; // 16 bytes
|
||||
string body = 2; // UTF-8 text
|
||||
uint64 timestamp_ms = 3;
|
||||
bytes sender_key = 4; // 32 bytes Ed25519 public key
|
||||
}
|
||||
|
||||
// Batch fetch response (equivalent to fetch returning List(Envelope))
|
||||
message FetchResponse {
|
||||
repeated Envelope payloads = 1;
|
||||
}
|
||||
524
crates/quicprochat-core/src/app_message.rs
Normal file
524
crates/quicprochat-core/src/app_message.rs
Normal file
@@ -0,0 +1,524 @@
|
||||
//! Rich application-layer message format for MLS application payloads.
|
||||
//!
|
||||
//! The server sees only opaque ciphertext; structure lives in this client-defined
|
||||
//! plaintext schema. All messages use: version byte (1) + message_type byte + type-specific payload.
|
||||
//!
|
||||
//! # Message ID
|
||||
//!
|
||||
//! `message_id` is assigned by the sender (16 random bytes) and included in the
|
||||
//! serialized payload for Chat (and implied for Reply/Reaction/ReadReceipt via ref_msg_id).
|
||||
//! Recipients can store message_ids to reference them in replies or reactions.
|
||||
|
||||
use crate::error::CoreError;
|
||||
use rand::RngCore;
|
||||
|
||||
/// Current schema version.
|
||||
pub const VERSION: u8 = 1;
|
||||
|
||||
/// Message type discriminant (one byte).
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub enum MessageType {
|
||||
Chat = 0x01,
|
||||
Reply = 0x02,
|
||||
Reaction = 0x03,
|
||||
ReadReceipt = 0x04,
|
||||
Typing = 0x05,
|
||||
Edit = 0x06,
|
||||
Delete = 0x07,
|
||||
FileRef = 0x08,
|
||||
Dummy = 0x09,
|
||||
}
|
||||
|
||||
impl MessageType {
|
||||
fn from_byte(b: u8) -> Option<Self> {
|
||||
match b {
|
||||
0x01 => Some(MessageType::Chat),
|
||||
0x02 => Some(MessageType::Reply),
|
||||
0x03 => Some(MessageType::Reaction),
|
||||
0x04 => Some(MessageType::ReadReceipt),
|
||||
0x05 => Some(MessageType::Typing),
|
||||
0x06 => Some(MessageType::Edit),
|
||||
0x07 => Some(MessageType::Delete),
|
||||
0x08 => Some(MessageType::FileRef),
|
||||
0x09 => Some(MessageType::Dummy),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parsed application message (one of the rich types).
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum AppMessage {
|
||||
/// Plain chat: body (UTF-8). message_id is included so recipients can store and reference it.
|
||||
Chat {
|
||||
message_id: [u8; 16],
|
||||
body: Vec<u8>,
|
||||
},
|
||||
Reply {
|
||||
ref_msg_id: [u8; 16],
|
||||
body: Vec<u8>,
|
||||
},
|
||||
Reaction {
|
||||
ref_msg_id: [u8; 16],
|
||||
emoji: Vec<u8>,
|
||||
},
|
||||
ReadReceipt {
|
||||
msg_id: [u8; 16],
|
||||
},
|
||||
Typing {
|
||||
/// 0 = stopped, 1 = typing
|
||||
active: u8,
|
||||
},
|
||||
/// Edit a previously sent message (identified by ref_msg_id).
|
||||
Edit {
|
||||
ref_msg_id: [u8; 16],
|
||||
body: Vec<u8>,
|
||||
},
|
||||
/// Delete a previously sent message (identified by ref_msg_id).
|
||||
Delete {
|
||||
ref_msg_id: [u8; 16],
|
||||
},
|
||||
/// File reference: metadata pointing to a blob stored on the server.
|
||||
FileRef {
|
||||
blob_id: [u8; 32],
|
||||
filename: Vec<u8>,
|
||||
file_size: u64,
|
||||
mime_type: Vec<u8>,
|
||||
},
|
||||
/// Dummy message for traffic analysis resistance (no user-visible content).
|
||||
Dummy,
|
||||
}
|
||||
|
||||
/// Generate a new 16-byte message ID (e.g. for Chat/Reply so recipients can reference it).
|
||||
pub fn generate_message_id() -> [u8; 16] {
|
||||
let mut id = [0u8; 16];
|
||||
rand::rngs::OsRng.fill_bytes(&mut id);
|
||||
id
|
||||
}
|
||||
|
||||
// ── Layout (minimal, no Cap'n Proto) ─────────────────────────────────────────
|
||||
//
|
||||
// All messages: [version: 1][type: 1][payload...]
|
||||
//
|
||||
// Chat: [msg_id: 16][body_len: 2 BE][body]
|
||||
// Reply: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||
// Reaction: [ref_msg_id: 16][emoji_len: 1][emoji]
|
||||
// ReadReceipt: [msg_id: 16]
|
||||
// Typing: [active: 1] 0 = stopped, 1 = typing
|
||||
// Edit: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||
// Delete: [ref_msg_id: 16]
|
||||
// FileRef: [blob_id: 32][filename_len: 2 BE][filename][file_size: 8 BE][mime_len: 2 BE][mime_type]
|
||||
|
||||
/// Serialize a rich message into the application payload format.
|
||||
pub fn serialize(msg_type: MessageType, payload: &[u8]) -> Vec<u8> {
|
||||
let mut out = Vec::with_capacity(2 + payload.len());
|
||||
out.push(VERSION);
|
||||
out.push(msg_type as u8);
|
||||
out.extend_from_slice(payload);
|
||||
out
|
||||
}
|
||||
|
||||
/// Serialize a Chat message (generates message_id internally; pass None to generate, or Some(id) when replying with a known id).
|
||||
pub fn serialize_chat(body: &[u8], message_id: Option<[u8; 16]>) -> Result<Vec<u8>, CoreError> {
|
||||
if body.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("chat body exceeds maximum length (65535 bytes)".into()));
|
||||
}
|
||||
let id = message_id.unwrap_or_else(generate_message_id);
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
Ok(serialize(MessageType::Chat, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Reply message.
|
||||
pub fn serialize_reply(ref_msg_id: [u8; 16], body: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if body.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("reply body exceeds maximum length (65535 bytes)".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
Ok(serialize(MessageType::Reply, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Reaction message.
|
||||
pub fn serialize_reaction(ref_msg_id: [u8; 16], emoji: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if emoji.len() > 255 {
|
||||
return Err(CoreError::AppMessage("emoji length > 255".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 1 + emoji.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.push(emoji.len() as u8);
|
||||
payload.extend_from_slice(emoji);
|
||||
Ok(serialize(MessageType::Reaction, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a ReadReceipt message.
|
||||
pub fn serialize_read_receipt(msg_id: [u8; 16]) -> Vec<u8> {
|
||||
serialize(MessageType::ReadReceipt, &msg_id)
|
||||
}
|
||||
|
||||
/// Serialize a Typing message (active: 0 = stopped, 1 = typing).
|
||||
pub fn serialize_typing(active: u8) -> Vec<u8> {
|
||||
let payload = [active];
|
||||
serialize(MessageType::Typing, &payload)
|
||||
}
|
||||
|
||||
/// Serialize an Edit message (replaces body of a previously sent message).
|
||||
pub fn serialize_edit(ref_msg_id: &[u8; 16], body: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if body.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("edit body exceeds maximum length (65535 bytes)".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(ref_msg_id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
Ok(serialize(MessageType::Edit, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Delete message (marks a previously sent message as deleted).
|
||||
pub fn serialize_delete(ref_msg_id: &[u8; 16]) -> Vec<u8> {
|
||||
serialize(MessageType::Delete, ref_msg_id)
|
||||
}
|
||||
|
||||
/// Serialize a FileRef message (metadata pointing to a blob on the server).
|
||||
pub fn serialize_file_ref(
|
||||
blob_id: &[u8; 32],
|
||||
filename: &[u8],
|
||||
file_size: u64,
|
||||
mime_type: &[u8],
|
||||
) -> Result<Vec<u8>, CoreError> {
|
||||
if filename.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("filename exceeds maximum length".into()));
|
||||
}
|
||||
if mime_type.len() > u16::MAX as usize {
|
||||
return Err(CoreError::AppMessage("mime_type exceeds maximum length".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(32 + 2 + filename.len() + 8 + 2 + mime_type.len());
|
||||
payload.extend_from_slice(blob_id);
|
||||
payload.extend_from_slice(&(filename.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(filename);
|
||||
payload.extend_from_slice(&file_size.to_be_bytes());
|
||||
payload.extend_from_slice(&(mime_type.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(mime_type);
|
||||
Ok(serialize(MessageType::FileRef, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a Dummy message (traffic padding — no user content).
|
||||
pub fn serialize_dummy() -> Vec<u8> {
|
||||
serialize(MessageType::Dummy, &[])
|
||||
}
|
||||
|
||||
/// Parse bytes into (MessageType, AppMessage). Fails if version/type unknown or payload too short.
|
||||
pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
|
||||
if bytes.len() < 2 {
|
||||
return Err(CoreError::AppMessage("payload too short (need version + type)".into()));
|
||||
}
|
||||
let version = bytes[0];
|
||||
if version != VERSION {
|
||||
return Err(CoreError::AppMessage(format!("unsupported version {version}")));
|
||||
}
|
||||
let msg_type = MessageType::from_byte(bytes[1])
|
||||
.ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1])))?;
|
||||
let payload = &bytes[2..];
|
||||
|
||||
let app = match msg_type {
|
||||
MessageType::Chat => parse_chat(payload)?,
|
||||
MessageType::Reply => parse_reply(payload)?,
|
||||
MessageType::Reaction => parse_reaction(payload)?,
|
||||
MessageType::ReadReceipt => parse_read_receipt(payload)?,
|
||||
MessageType::Typing => parse_typing(payload)?,
|
||||
MessageType::Edit => parse_edit(payload)?,
|
||||
MessageType::Delete => parse_delete(payload)?,
|
||||
MessageType::FileRef => parse_file_ref(payload)?,
|
||||
MessageType::Dummy => AppMessage::Dummy,
|
||||
};
|
||||
Ok((msg_type, app))
|
||||
}
|
||||
|
||||
fn parse_chat(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Chat payload too short".into()));
|
||||
}
|
||||
let mut message_id = [0u8; 16];
|
||||
message_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Chat body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Chat { message_id, body })
|
||||
}
|
||||
|
||||
fn parse_reply(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Reply payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Reply body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Reply { ref_msg_id, body })
|
||||
}
|
||||
|
||||
fn parse_reaction(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 1 {
|
||||
return Err(CoreError::AppMessage("Reaction payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let emoji_len = payload[16] as usize;
|
||||
if payload.len() < 17 + emoji_len {
|
||||
return Err(CoreError::AppMessage("Reaction emoji length exceeds payload".into()));
|
||||
}
|
||||
let emoji = payload[17..17 + emoji_len].to_vec();
|
||||
Ok(AppMessage::Reaction { ref_msg_id, emoji })
|
||||
}
|
||||
|
||||
fn parse_read_receipt(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 {
|
||||
return Err(CoreError::AppMessage("ReadReceipt payload too short".into()));
|
||||
}
|
||||
let mut msg_id = [0u8; 16];
|
||||
msg_id.copy_from_slice(&payload[..16]);
|
||||
Ok(AppMessage::ReadReceipt { msg_id })
|
||||
}
|
||||
|
||||
fn parse_typing(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.is_empty() {
|
||||
return Err(CoreError::AppMessage("Typing payload empty".into()));
|
||||
}
|
||||
Ok(AppMessage::Typing { active: payload[0] })
|
||||
}
|
||||
|
||||
fn parse_edit(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Edit payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Edit body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Edit { ref_msg_id, body })
|
||||
}
|
||||
|
||||
fn parse_delete(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 {
|
||||
return Err(CoreError::AppMessage("Delete payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
Ok(AppMessage::Delete { ref_msg_id })
|
||||
}
|
||||
|
||||
/// Decode a FileRef payload:
/// [blob_id: 32][filename_len: 2 BE][filename][file_size: 8 BE][mime_len: 2 BE][mime_type].
fn parse_file_ref(payload: &[u8]) -> Result<AppMessage, CoreError> {
    // blob_id(32) + filename_len(2) minimum
    if payload.len() < 34 {
        return Err(CoreError::AppMessage("FileRef payload too short".into()));
    }
    let mut blob_id = [0u8; 32];
    blob_id.copy_from_slice(&payload[..32]);
    let filename_len = u16::from_be_bytes([payload[32], payload[33]]) as usize;
    let pos = 34;
    // One bounds check covers filename + file_size(8) + mime_len(2) before
    // any of them are read below.
    if payload.len() < pos + filename_len + 8 + 2 {
        return Err(CoreError::AppMessage("FileRef payload truncated after filename_len".into()));
    }
    let filename = payload[pos..pos + filename_len].to_vec();
    let pos = pos + filename_len;
    let file_size = u64::from_be_bytes([
        payload[pos], payload[pos + 1], payload[pos + 2], payload[pos + 3],
        payload[pos + 4], payload[pos + 5], payload[pos + 6], payload[pos + 7],
    ]);
    let pos = pos + 8;
    let mime_len = u16::from_be_bytes([payload[pos], payload[pos + 1]]) as usize;
    let pos = pos + 2;
    // mime_type needs its own check: its length was unknown at the first check.
    // NOTE(review): trailing bytes after mime_type are silently ignored —
    // presumably for forward compatibility; confirm this is intended.
    if payload.len() < pos + mime_len {
        return Err(CoreError::AppMessage("FileRef payload truncated after mime_len".into()));
    }
    let mime_type = payload[pos..pos + mime_len].to_vec();
    Ok(AppMessage::FileRef { blob_id, filename, file_size, mime_type })
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Chat body survives serialize -> parse unchanged; message_id is ignored
    /// here because serialize_chat generates it internally.
    #[test]
    fn roundtrip_chat() {
        let body = b"hello";
        let encoded = serialize_chat(body, None).unwrap();
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::Chat);
        match &msg {
            AppMessage::Chat { message_id: _, body: b } => assert_eq!(b.as_slice(), body),
            _ => panic!("expected Chat"),
        }
    }

    /// Reply round-trip preserves both the referenced message id and the body.
    #[test]
    fn roundtrip_reply() {
        let ref_id = [1u8; 16];
        let body = b"reply text";
        let encoded = serialize_reply(ref_id, body).unwrap();
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::Reply);
        match &msg {
            AppMessage::Reply { ref_msg_id, body: b } => {
                assert_eq!(ref_msg_id, &ref_id);
                assert_eq!(b.as_slice(), body);
            }
            _ => panic!("expected Reply"),
        }
    }

    /// Typing indicator round-trip preserves the `active` flag byte.
    #[test]
    fn roundtrip_typing() {
        let encoded = serialize_typing(1);
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::Typing);
        match &msg {
            AppMessage::Typing { active } => assert_eq!(*active, 1),
            _ => panic!("expected Typing"),
        }
    }

    /// Reaction round-trip with a multi-byte UTF-8 emoji (thumbs-up).
    #[test]
    fn roundtrip_reaction() {
        let ref_id = [2u8; 16];
        let emoji = "\u{1f44d}".as_bytes();
        let encoded = serialize_reaction(ref_id, emoji).unwrap();
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::Reaction);
        match &msg {
            AppMessage::Reaction { ref_msg_id, emoji: e } => {
                assert_eq!(ref_msg_id, &ref_id);
                assert_eq!(e.as_slice(), emoji);
            }
            _ => panic!("expected Reaction"),
        }
    }

    /// Read receipt round-trip preserves the acknowledged message id.
    #[test]
    fn roundtrip_read_receipt() {
        let msg_id = [3u8; 16];
        let encoded = serialize_read_receipt(msg_id);
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::ReadReceipt);
        match &msg {
            AppMessage::ReadReceipt { msg_id: id } => assert_eq!(id, &msg_id),
            _ => panic!("expected ReadReceipt"),
        }
    }

    /// Edit round-trip preserves both the referenced id and the new body.
    #[test]
    fn roundtrip_edit() {
        let ref_id = [4u8; 16];
        let body = b"edited text";
        let encoded = serialize_edit(&ref_id, body).unwrap();
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::Edit);
        match &msg {
            AppMessage::Edit { ref_msg_id, body: b } => {
                assert_eq!(ref_msg_id, &ref_id);
                assert_eq!(b.as_slice(), body);
            }
            _ => panic!("expected Edit"),
        }
    }

    /// Delete round-trip preserves the referenced message id.
    #[test]
    fn roundtrip_delete() {
        let ref_id = [5u8; 16];
        let encoded = serialize_delete(&ref_id);
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::Delete);
        match &msg {
            AppMessage::Delete { ref_msg_id } => assert_eq!(ref_msg_id, &ref_id),
            _ => panic!("expected Delete"),
        }
    }

    /// Edit bodies are length-prefixed with u16, so 65536 bytes must be rejected.
    #[test]
    fn edit_body_too_long() {
        let body = vec![0u8; 65536];
        assert!(serialize_edit(&[0; 16], &body).is_err());
    }

    /// An empty buffer has no version/type header and must fail to parse.
    #[test]
    fn parse_empty_fails() {
        assert!(parse(&[]).is_err());
    }

    /// Unknown protocol version byte must be rejected.
    #[test]
    fn parse_bad_version_fails() {
        assert!(parse(&[99, 0x01]).is_err());
    }

    /// Unknown message-type byte must be rejected.
    #[test]
    fn parse_bad_type_fails() {
        assert!(parse(&[1, 0xFF]).is_err());
    }

    /// Chat bodies are length-prefixed with u16, so 65536 bytes must be rejected.
    #[test]
    fn chat_body_too_long() {
        let body = vec![0u8; 65536]; // exceeds u16::MAX
        assert!(serialize_chat(&body, None).is_err());
    }

    /// Reaction emoji field is length-limited; 256 bytes must be rejected.
    #[test]
    fn reaction_emoji_too_long() {
        let emoji = vec![0u8; 256];
        assert!(serialize_reaction([0; 16], &emoji).is_err());
    }

    /// Truncated Chat payload must be rejected rather than read out of bounds.
    #[test]
    fn parse_truncated_chat_payload() {
        // Version + type + only 10 bytes of payload (needs 18 minimum for chat)
        let mut data = vec![1, 0x01];
        data.extend_from_slice(&[0u8; 10]);
        assert!(parse(&data).is_err());
    }

    /// FileRef round-trip preserves blob id, filename, size, and MIME type.
    #[test]
    fn roundtrip_file_ref() {
        let blob_id = [7u8; 32];
        let filename = b"report.pdf";
        let file_size = 123456u64;
        let mime_type = b"application/pdf";
        let encoded = serialize_file_ref(&blob_id, filename, file_size, mime_type).unwrap();
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::FileRef);
        match &msg {
            AppMessage::FileRef {
                blob_id: bid,
                filename: fname,
                file_size: fsize,
                mime_type: mtype,
            } => {
                assert_eq!(bid, &blob_id);
                assert_eq!(fname.as_slice(), filename);
                assert_eq!(*fsize, file_size);
                assert_eq!(mtype.as_slice(), mime_type);
            }
            _ => panic!("expected FileRef"),
        }
    }

    /// Dummy (cover-traffic) message round-trips to the unit variant.
    #[test]
    fn roundtrip_dummy() {
        let encoded = serialize_dummy();
        let (t, msg) = parse(&encoded).unwrap();
        assert_eq!(t, MessageType::Dummy);
        assert_eq!(msg, AppMessage::Dummy);
    }
}
|
||||
38
crates/quicprochat-core/src/error.rs
Normal file
38
crates/quicprochat-core/src/error.rs
Normal file
@@ -0,0 +1,38 @@
|
||||
//! Error types for `quicprochat-core`.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors produced by core cryptographic and MLS operations.
#[derive(Debug, Error)]
pub enum CoreError {
    /// Cap'n Proto serialisation or deserialisation failed.
    ///
    /// Only compiled with the `native` feature, which brings in the `capnp`
    /// dependency.
    #[cfg(feature = "native")]
    #[error("Cap'n Proto error: {0}")]
    Capnp(#[from] capnp::Error),

    /// An MLS operation failed (string description).
    ///
    /// Preserved for backward compatibility. Prefer [`CoreError::MlsError`]
    /// for new code that wraps typed openmls errors.
    #[error("MLS error: {0}")]
    Mls(String),

    /// An MLS operation failed (typed, boxed error).
    ///
    /// Wraps the underlying openmls error so callers can downcast to specific
    /// error types when needed.
    #[error("MLS error: {0}")]
    MlsError(Box<dyn std::error::Error + Send + Sync>),

    /// A hybrid KEM (X25519 + ML-KEM-768) operation failed.
    #[error("hybrid KEM error: {0}")]
    HybridKem(#[from] crate::hybrid_kem::HybridKemError),

    /// IO or persistence failure.
    #[error("io error: {0}")]
    Io(String),

    /// Application message (rich payload) parse or serialisation error.
    #[error("app message: {0}")]
    AppMessage(String),
}
|
||||
1082
crates/quicprochat-core/src/group.rs
Normal file
1082
crates/quicprochat-core/src/group.rs
Normal file
File diff suppressed because it is too large
Load Diff
542
crates/quicprochat-core/src/hybrid_crypto.rs
Normal file
542
crates/quicprochat-core/src/hybrid_crypto.rs
Normal file
@@ -0,0 +1,542 @@
|
||||
//! Post-quantum hybrid crypto provider for OpenMLS (M7 PoC).
|
||||
//!
|
||||
//! Uses X25519 + ML-KEM-768 hybrid KEM for HPKE operations where openmls
|
||||
//! would use DHKEM(X25519), and delegates all other operations (AEAD, hash,
|
||||
//! signatures, KDF, randomness) to `openmls_rust_crypto::RustCrypto`.
|
||||
//!
|
||||
//! # Key format
|
||||
//!
|
||||
//! When the provider sees a **hybrid public key** (length `HYBRID_PUBLIC_KEY_LEN` =
|
||||
//! 32 + 1184 bytes) or **hybrid private key** (length `HYBRID_PRIVATE_KEY_LEN` =
|
||||
//! 32 + 2400 bytes), it uses `hybrid_kem` for HPKE. Otherwise it delegates to
|
||||
//! RustCrypto (classical X25519 HPKE).
|
||||
//!
|
||||
//! # MLS compatibility
|
||||
//!
|
||||
//! The current MLS ciphersuite (MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519)
|
||||
//! uses 32-byte X25519 init keys in the wire format. This provider can produce
|
||||
//! and consume **hybrid** init keys (1216-byte public, 2432-byte private), but
|
||||
//! that is a non-standard extension: other MLS implementations will not
|
||||
//! accept KeyPackages with hybrid init keys unless they implement the same
|
||||
//! extension. This PoC validates that the OpenMLS trait surface is satisfiable
|
||||
//! with a custom HPKE backend; full interoperability would require a new
|
||||
//! ciphersuite or protocol extension.
|
||||
|
||||
use openmls_rust_crypto::RustCrypto;
|
||||
use openmls_traits::{
|
||||
crypto::OpenMlsCrypto,
|
||||
types::{
|
||||
CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType,
|
||||
KemOutput,
|
||||
},
|
||||
OpenMlsProvider,
|
||||
};
|
||||
use tls_codec::SecretVLBytes;
|
||||
|
||||
use crate::hybrid_kem::{
|
||||
hybrid_decapsulate_only, hybrid_decrypt, hybrid_encapsulate_only, hybrid_encrypt,
|
||||
hybrid_export, HybridKeypair, HybridPublicKey,
|
||||
HYBRID_KEM_OUTPUT_LEN, HYBRID_PRIVATE_KEY_LEN, HYBRID_PUBLIC_KEY_LEN,
|
||||
};
|
||||
use crate::keystore::DiskKeyStore;
|
||||
|
||||
// Re-export types used by OpenMlsCrypto (full path for clarity).
|
||||
use openmls_traits::types::{
|
||||
AeadType, Ciphersuite, HashType, SignatureScheme,
|
||||
};
|
||||
|
||||
/// Crypto backend that uses hybrid KEM for HPKE when keys are in hybrid format,
/// and delegates everything else to RustCrypto.
///
/// When `hybrid_enabled` is `true`, `derive_hpke_keypair` produces hybrid keys
/// (1216-byte public, 2432-byte private). When `false`, it delegates to
/// RustCrypto and produces classical 32-byte X25519 keys.
///
/// The `hpke_seal` / `hpke_open` methods always detect the key format by length,
/// so they work correctly regardless of the flag — a hybrid-length key will use
/// hybrid KEM, a classical-length key will use RustCrypto.
#[derive(Debug)]
pub struct HybridCrypto {
    // Delegate for all non-HPKE operations (AEAD, hash, signatures, KDF, rand).
    rust_crypto: RustCrypto,
    /// When true, `derive_hpke_keypair` produces hybrid (X25519 + ML-KEM-768)
    /// keys. When false, it produces classical X25519 keys via RustCrypto.
    hybrid_enabled: bool,
}
|
||||
|
||||
impl HybridCrypto {
|
||||
/// Create a hybrid-enabled crypto backend (derive_hpke_keypair produces hybrid keys).
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rust_crypto: RustCrypto::default(),
|
||||
hybrid_enabled: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Alias for `new()` — hybrid mode enabled.
|
||||
pub fn new_hybrid() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
|
||||
/// Create a classical crypto backend (derive_hpke_keypair produces standard
|
||||
/// X25519 keys, but seal/open still accept hybrid keys by length detection).
|
||||
pub fn new_classical() -> Self {
|
||||
Self {
|
||||
rust_crypto: RustCrypto::default(),
|
||||
hybrid_enabled: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this backend produces hybrid keys from `derive_hpke_keypair`.
|
||||
pub fn is_hybrid_enabled(&self) -> bool {
|
||||
self.hybrid_enabled
|
||||
}
|
||||
|
||||
/// Expose the underlying RustCrypto for rand() and delegation.
|
||||
pub fn rust_crypto(&self) -> &RustCrypto {
|
||||
&self.rust_crypto
|
||||
}
|
||||
|
||||
fn is_hybrid_public_key(pk_r: &[u8]) -> bool {
|
||||
pk_r.len() == HYBRID_PUBLIC_KEY_LEN
|
||||
}
|
||||
|
||||
fn is_hybrid_private_key(sk_r: &[u8]) -> bool {
|
||||
sk_r.len() == HYBRID_PRIVATE_KEY_LEN
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for HybridCrypto {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenMlsCrypto for HybridCrypto {
    // ── Pure delegation: everything except HPKE goes straight to RustCrypto. ──

    fn supports(&self, ciphersuite: Ciphersuite) -> Result<(), CryptoError> {
        self.rust_crypto.supports(ciphersuite)
    }

    fn supported_ciphersuites(&self) -> Vec<Ciphersuite> {
        self.rust_crypto.supported_ciphersuites()
    }

    fn hkdf_extract(
        &self,
        hash_type: HashType,
        salt: &[u8],
        ikm: &[u8],
    ) -> Result<SecretVLBytes, CryptoError> {
        self.rust_crypto.hkdf_extract(hash_type, salt, ikm)
    }

    fn hmac(
        &self,
        hash_type: HashType,
        key: &[u8],
        message: &[u8],
    ) -> Result<SecretVLBytes, CryptoError> {
        self.rust_crypto.hmac(hash_type, key, message)
    }

    fn hkdf_expand(
        &self,
        hash_type: HashType,
        prk: &[u8],
        info: &[u8],
        okm_len: usize,
    ) -> Result<SecretVLBytes, CryptoError> {
        self.rust_crypto.hkdf_expand(hash_type, prk, info, okm_len)
    }

    fn hash(&self, hash_type: HashType, data: &[u8]) -> Result<Vec<u8>, CryptoError> {
        self.rust_crypto.hash(hash_type, data)
    }

    fn aead_encrypt(
        &self,
        alg: AeadType,
        key: &[u8],
        data: &[u8],
        nonce: &[u8],
        aad: &[u8],
    ) -> Result<Vec<u8>, CryptoError> {
        self.rust_crypto.aead_encrypt(alg, key, data, nonce, aad)
    }

    fn aead_decrypt(
        &self,
        alg: AeadType,
        key: &[u8],
        ct_tag: &[u8],
        nonce: &[u8],
        aad: &[u8],
    ) -> Result<Vec<u8>, CryptoError> {
        self.rust_crypto.aead_decrypt(alg, key, ct_tag, nonce, aad)
    }

    fn signature_key_gen(&self, alg: SignatureScheme) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
        self.rust_crypto.signature_key_gen(alg)
    }

    fn verify_signature(
        &self,
        alg: SignatureScheme,
        data: &[u8],
        pk: &[u8],
        signature: &[u8],
    ) -> Result<(), CryptoError> {
        self.rust_crypto.verify_signature(alg, data, pk, signature)
    }

    fn sign(&self, alg: SignatureScheme, data: &[u8], key: &[u8]) -> Result<Vec<u8>, CryptoError> {
        self.rust_crypto.sign(alg, data, key)
    }

    // ── HPKE: dispatch on key length — hybrid-length keys use the hybrid KEM,
    //    anything else falls through to classical RustCrypto HPKE. ──

    fn hpke_seal(
        &self,
        config: HpkeConfig,
        pk_r: &[u8],
        info: &[u8],
        aad: &[u8],
        ptxt: &[u8],
    ) -> Result<HpkeCiphertext, CryptoError> {
        if Self::is_hybrid_public_key(pk_r) {
            let recipient_pk = HybridPublicKey::from_bytes(pk_r)
                .map_err(|_| CryptoError::CryptoLibraryError)?;
            let envelope = hybrid_encrypt(&recipient_pk, ptxt, info, aad)
                .map_err(|_| CryptoError::CryptoLibraryError)?;
            // The hybrid envelope is kem_output || aead_ciphertext; split it to
            // fit openmls's HpkeCiphertext shape.
            let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
            let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
            Ok(HpkeCiphertext {
                kem_output: kem_output.into(),
                ciphertext: ciphertext.into(),
            })
        } else {
            self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
        }
    }

    fn hpke_open(
        &self,
        config: HpkeConfig,
        input: &HpkeCiphertext,
        sk_r: &[u8],
        info: &[u8],
        aad: &[u8],
    ) -> Result<Vec<u8>, CryptoError> {
        if Self::is_hybrid_private_key(sk_r) {
            let keypair = HybridKeypair::from_private_bytes(sk_r)
                .map_err(|_| CryptoError::HpkeDecryptionError)?;
            // Reassemble the envelope (kem_output || ciphertext) that
            // hpke_seal split apart.
            let envelope: Vec<u8> = input
                .kem_output.as_slice()
                .iter()
                .chain(input.ciphertext.as_slice())
                .copied()
                .collect();
            // Pass HPKE info and aad through for proper context binding (RFC 9180).
            hybrid_decrypt(&keypair, &envelope, info, aad)
                .map_err(|_| CryptoError::HpkeDecryptionError)
        } else {
            self.rust_crypto.hpke_open(config, input, sk_r, info, aad)
        }
    }

    fn hpke_setup_sender_and_export(
        &self,
        config: HpkeConfig,
        pk_r: &[u8],
        info: &[u8],
        exporter_context: &[u8],
        exporter_length: usize,
    ) -> Result<(KemOutput, ExporterSecret), CryptoError> {
        if Self::is_hybrid_public_key(pk_r) {
            // A key that passes the hybrid length check but fails deserialization
            // is corrupted — return an error instead of silently downgrading to
            // classical crypto (which would defeat PQ protection).
            let recipient_pk = HybridPublicKey::from_bytes(pk_r)
                .map_err(|_| CryptoError::SenderSetupError)?;
            let (kem_output, shared_secret) =
                hybrid_encapsulate_only(&recipient_pk).map_err(|_| CryptoError::SenderSetupError)?;
            let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
            Ok((kem_output, exported.into()))
        } else {
            self.rust_crypto.hpke_setup_sender_and_export(
                config, pk_r, info, exporter_context, exporter_length,
            )
        }
    }

    fn hpke_setup_receiver_and_export(
        &self,
        config: HpkeConfig,
        enc: &[u8],
        sk_r: &[u8],
        info: &[u8],
        exporter_context: &[u8],
        exporter_length: usize,
    ) -> Result<ExporterSecret, CryptoError> {
        if Self::is_hybrid_private_key(sk_r) {
            let keypair = HybridKeypair::from_private_bytes(sk_r)
                .map_err(|_| CryptoError::ReceiverSetupError)?;
            let shared_secret =
                hybrid_decapsulate_only(&keypair, enc).map_err(|_| CryptoError::ReceiverSetupError)?;
            let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
            Ok(exported.into())
        } else {
            self.rust_crypto.hpke_setup_receiver_and_export(
                config, enc, sk_r, info, exporter_context, exporter_length,
            )
        }
    }

    fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> Result<HpkeKeyPair, CryptoError> {
        // Only produce hybrid keys when the flag is set AND the requested KEM is
        // the X25519 one this extension replaces; other KEM types delegate.
        if self.hybrid_enabled && config.0 == HpkeKemType::DhKem25519 {
            let kp = HybridKeypair::derive_from_ikm(ikm);
            let private_bytes = kp.private_to_bytes();
            Ok(HpkeKeyPair {
                private: private_bytes.as_slice().into(),
                public: kp.public_key().to_bytes(),
            })
        } else {
            self.rust_crypto.derive_hpke_keypair(config, ikm)
        }
    }
}
|
||||
|
||||
/// OpenMLS crypto provider that uses hybrid KEM for HPKE (when keys are in
/// hybrid format) and delegates the rest to RustCrypto.
#[derive(Debug)]
pub struct HybridCryptoProvider {
    // HPKE/crypto backend; hybrid vs classical behaviour is configured here.
    crypto: HybridCrypto,
    // Persistent MLS key/state store handed to openmls as the StorageProvider.
    key_store: DiskKeyStore,
}
|
||||
|
||||
impl HybridCryptoProvider {
|
||||
/// Create a hybrid-enabled provider (KeyPackages will contain hybrid init keys).
|
||||
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: HybridCrypto::new_hybrid(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
|
||||
/// Alias for `new()` — hybrid mode enabled.
|
||||
pub fn new_hybrid(key_store: DiskKeyStore) -> Self {
|
||||
Self::new(key_store)
|
||||
}
|
||||
|
||||
/// Create a classical-mode provider (KeyPackages use standard X25519 init keys,
|
||||
/// but seal/open still accept hybrid keys by length detection).
|
||||
pub fn new_classical(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: HybridCrypto::new_classical(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this provider produces hybrid keys from `derive_hpke_keypair`.
|
||||
pub fn is_hybrid_enabled(&self) -> bool {
|
||||
self.crypto.is_hybrid_enabled()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for HybridCryptoProvider {
|
||||
fn default() -> Self {
|
||||
Self::new(DiskKeyStore::ephemeral())
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenMlsProvider for HybridCryptoProvider {
    // HPKE-aware backend; delegates non-HPKE work to RustCrypto internally.
    type CryptoProvider = HybridCrypto;
    // Randomness comes straight from the wrapped RustCrypto instance.
    type RandProvider = RustCrypto;
    // MLS state persistence.
    type StorageProvider = DiskKeyStore;

    fn crypto(&self) -> &Self::CryptoProvider {
        &self.crypto
    }

    fn rand(&self) -> &Self::RandProvider {
        self.crypto.rust_crypto()
    }

    fn storage(&self) -> &Self::StorageProvider {
        &self.key_store
    }
}
|
||||
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use openmls_traits::types::HpkeKdfType;

    // Standard MLS HPKE config for the X25519 KEM slot the hybrid extension replaces.
    fn hpke_config_dhkem_x25519() -> HpkeConfig {
        HpkeConfig(
            HpkeKemType::DhKem25519,
            HpkeKdfType::HkdfSha256,
            openmls_traits::types::HpkeAeadType::AesGcm128,
        )
    }

    /// HPKE path with hybrid keys: derive_hpke_keypair (hybrid) -> hpke_seal -> hpke_open.
    #[test]
    fn hybrid_hpke_seal_open_round_trip() {
        let crypto = HybridCrypto::new();
        let ikm = b"test-ikm-for-hybrid-hpke-keypair";

        // Hybrid mode must yield hybrid-length keys (this is what hpke_seal keys on).
        let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
        assert_eq!(keypair.public.len(), HYBRID_PUBLIC_KEY_LEN);
        assert_eq!(keypair.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN);

        let plaintext = b"hello post-quantum MLS";
        let info = b"mls 1.0 test";
        let aad = b"additional data";

        let ct = crypto.hpke_seal(
            hpke_config_dhkem_x25519(),
            &keypair.public,
            info,
            aad,
            plaintext,
        ).unwrap();
        assert!(!ct.kem_output.as_slice().is_empty());
        assert!(!ct.ciphertext.as_slice().is_empty());

        let decrypted = crypto
            .hpke_open(
                hpke_config_dhkem_x25519(),
                &ct,
                keypair.private.as_ref(),
                info,
                aad,
            )
            .expect("hpke_open with hybrid keys");
        assert_eq!(decrypted.as_slice(), plaintext);
    }

    /// HPKE exporter path: setup_sender_and_export then setup_receiver_and_export.
    #[test]
    fn hybrid_hpke_setup_sender_receiver_export() {
        let crypto = HybridCrypto::new();
        let ikm = b"exporter-ikm";

        let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
        let info = b"";
        let exporter_context = b"MLS 1.0 external init";
        let exporter_length = 32;

        let (kem_output, sender_exported) = crypto
            .hpke_setup_sender_and_export(
                hpke_config_dhkem_x25519(),
                &keypair.public,
                info,
                exporter_context,
                exporter_length,
            )
            .expect("sender and export");

        assert_eq!(kem_output.len(), HYBRID_KEM_OUTPUT_LEN);
        assert_eq!(sender_exported.as_ref().len(), exporter_length);

        let receiver_exported = crypto
            .hpke_setup_receiver_and_export(
                hpke_config_dhkem_x25519(),
                &kem_output,
                keypair.private.as_ref(),
                info,
                exporter_context,
                exporter_length,
            )
            .expect("receiver and export");

        // Both sides of the exporter handshake must derive the same secret.
        assert_eq!(sender_exported.as_ref(), receiver_exported.as_ref());
    }

    /// Classical mode: derive_hpke_keypair produces standard 32-byte X25519 keys.
    #[test]
    fn classical_mode_produces_standard_keys() {
        let crypto = HybridCrypto::new_classical();
        let ikm = b"test-ikm-for-classical-hpke";

        let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
        // Classical X25519 keys are 32 bytes
        assert_eq!(keypair.public.len(), 32);
        assert_eq!(keypair.private.as_ref().len(), 32);
    }

    /// Classical mode round-trip: seal/open works with classical keys.
    #[test]
    fn classical_mode_seal_open_round_trip() {
        let crypto = HybridCrypto::new_classical();
        let ikm = b"test-ikm-for-classical-round-trip";

        let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
        assert_eq!(keypair.public.len(), 32); // classical key

        let plaintext = b"hello classical MLS";
        let info = b"mls 1.0 test";
        let aad = b"additional data";

        let ct = crypto.hpke_seal(
            hpke_config_dhkem_x25519(),
            &keypair.public,
            info,
            aad,
            plaintext,
        ).unwrap();
        assert!(!ct.kem_output.as_slice().is_empty());

        let decrypted = crypto
            .hpke_open(
                hpke_config_dhkem_x25519(),
                &ct,
                keypair.private.as_ref(),
                info,
                aad,
            )
            .expect("hpke_open with classical keys");
        assert_eq!(decrypted.as_slice(), plaintext);
    }

    /// KeyPackage generation with HybridCryptoProvider (validates full HPKE path in MLS).
    #[test]
    fn key_package_generation_with_hybrid_provider() {
        use openmls::prelude::{
            BasicCredential, CredentialWithKey, KeyPackage,
        };
        use std::sync::Arc;
        use tls_codec::Serialize;

        use crate::identity::IdentityKeypair;

        const CIPHERSUITE: Ciphersuite =
            Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;

        let provider = HybridCryptoProvider::default();
        let identity = Arc::new(IdentityKeypair::generate());

        let credential: openmls::prelude::Credential =
            BasicCredential::new(identity.public_key_bytes().to_vec()).into();
        let credential_with_key = CredentialWithKey {
            credential,
            signature_key: identity.public_key_bytes().to_vec().into(),
        };

        let key_package_bundle = KeyPackage::builder()
            .build(
                CIPHERSUITE,
                &provider,
                identity.as_ref(),
                credential_with_key,
            )
            .expect("KeyPackage with hybrid HPKE");

        // Serialisability is the success criterion: the bundle went through the
        // provider's (hybrid) HPKE key derivation without error.
        let bytes = key_package_bundle
            .key_package()
            .tls_serialize_detached()
            .expect("serialize KeyPackage");
        assert!(!bytes.is_empty());
    }
}
|
||||
@@ -28,7 +28,7 @@ use ml_kem::{
|
||||
kem::{Decapsulate, Encapsulate},
|
||||
EncodedSizeUser, KemCore, MlKem768, MlKem768Params,
|
||||
};
|
||||
use rand::{rngs::OsRng, RngCore};
|
||||
use rand::{rngs::OsRng, rngs::StdRng, CryptoRng, RngCore, SeedableRng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::Sha256;
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
|
||||
@@ -41,8 +41,14 @@ use ml_kem::kem::{DecapsulationKey, EncapsulationKey};
|
||||
const HYBRID_VERSION: u8 = 0x01;
|
||||
|
||||
/// HKDF info string for domain separation.
|
||||
/// Frozen at the original project name for backward compatibility with existing
|
||||
/// encrypted state files and messages. Do not change.
|
||||
const HKDF_INFO: &[u8] = b"quicnprotochat-hybrid-v1";
|
||||
|
||||
/// HKDF salt for domain separation (defence-in-depth; IKM already has 64 bytes of entropy).
|
||||
/// Frozen — see [`HKDF_INFO`].
|
||||
const HKDF_SALT: &[u8] = b"quicnprotochat-hybrid-v1-salt";
|
||||
|
||||
/// ML-KEM-768 ciphertext size in bytes.
|
||||
const MLKEM_CT_LEN: usize = 1088;
|
||||
|
||||
@@ -55,6 +61,15 @@ pub const MLKEM_DK_LEN: usize = 2400;
|
||||
/// Envelope header: version(1) + x25519 eph pk(32) + mlkem ct(1088) + nonce(12).
|
||||
const HEADER_LEN: usize = 1 + 32 + MLKEM_CT_LEN + 12;
|
||||
|
||||
/// KEM output length (version + x25519 eph pk + mlkem ct) for HPKE adapter.
|
||||
pub const HYBRID_KEM_OUTPUT_LEN: usize = 1 + 32 + MLKEM_CT_LEN;
|
||||
|
||||
/// Hybrid public key length: x25519(32) + mlkem_ek(1184). Used to detect hybrid keys in MLS.
|
||||
pub const HYBRID_PUBLIC_KEY_LEN: usize = 32 + MLKEM_EK_LEN;
|
||||
|
||||
/// Hybrid private key length: x25519(32) + mlkem_dk(2400). Used to detect hybrid keys in MLS.
|
||||
pub const HYBRID_PRIVATE_KEY_LEN: usize = 32 + MLKEM_DK_LEN;
|
||||
|
||||
// ── Error type ──────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
@@ -109,12 +124,21 @@ pub struct HybridPublicKey {
|
||||
pub mlkem_ek: Vec<u8>,
|
||||
}
|
||||
|
||||
/// HKDF info for deriving HPKE keypair seed from IKM (MLS compatibility).
|
||||
/// Frozen — see [`HKDF_INFO`].
|
||||
const HKDF_INFO_HPKE_KEYPAIR: &[u8] = b"quicnprotochat-hybrid-hpke-keypair-v1";
|
||||
|
||||
impl HybridKeypair {
|
||||
/// Generate a fresh hybrid keypair from OS CSPRNG.
|
||||
pub fn generate() -> Self {
|
||||
let x25519_sk = StaticSecret::random_from_rng(OsRng);
|
||||
Self::generate_from_rng(&mut OsRng)
|
||||
}
|
||||
|
||||
/// Generate a hybrid keypair from a seeded RNG (deterministic).
|
||||
pub fn generate_from_rng<R: RngCore + CryptoRng>(rng: &mut R) -> Self {
|
||||
let x25519_sk = StaticSecret::random_from_rng(&mut *rng);
|
||||
let x25519_pk = X25519Public::from(&x25519_sk);
|
||||
let (mlkem_dk, mlkem_ek) = MlKem768::generate(&mut OsRng);
|
||||
let (mlkem_dk, mlkem_ek) = MlKem768::generate(rng);
|
||||
|
||||
Self {
|
||||
x25519_sk,
|
||||
@@ -124,6 +148,49 @@ impl HybridKeypair {
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive a deterministic hybrid keypair from IKM (for MLS HPKE key schedule).
|
||||
pub fn derive_from_ikm(ikm: &[u8]) -> Self {
|
||||
let mut seed = [0u8; 32];
|
||||
let hk = Hkdf::<Sha256>::new(None, ikm);
|
||||
hk.expand(HKDF_INFO_HPKE_KEYPAIR, &mut seed)
|
||||
.expect("32 bytes is valid HKDF output");
|
||||
let mut rng = StdRng::from_seed(seed);
|
||||
Self::generate_from_rng(&mut rng)
|
||||
}
|
||||
|
||||
/// Serialise private key for MLS key store: x25519_sk(32) || mlkem_dk(2400).
|
||||
///
|
||||
/// The returned value is wrapped in [`Zeroizing`] so secret key material
|
||||
/// is securely erased when dropped.
|
||||
pub fn private_to_bytes(&self) -> Zeroizing<Vec<u8>> {
|
||||
let mut out = Vec::with_capacity(HYBRID_PRIVATE_KEY_LEN);
|
||||
out.extend_from_slice(self.x25519_sk.as_bytes());
|
||||
out.extend_from_slice(self.mlkem_dk.as_bytes().as_slice());
|
||||
Zeroizing::new(out)
|
||||
}
|
||||
|
||||
/// Reconstruct a hybrid keypair from private key bytes (from MLS key store).
|
||||
pub fn from_private_bytes(bytes: &[u8]) -> Result<Self, HybridKemError> {
|
||||
if bytes.len() != HYBRID_PRIVATE_KEY_LEN {
|
||||
return Err(HybridKemError::TooShort(bytes.len()));
|
||||
}
|
||||
let x25519_sk = StaticSecret::from(<[u8; 32]>::try_from(&bytes[0..32])
|
||||
.expect("slice is exactly 32 bytes (guaranteed by HYBRID_PRIVATE_KEY_LEN check)"));
|
||||
let x25519_pk = X25519Public::from(&x25519_sk);
|
||||
|
||||
let mlkem_dk_arr = Array::try_from(&bytes[32..32 + MLKEM_DK_LEN])
|
||||
.map_err(|_| HybridKemError::InvalidMlKemKey)?;
|
||||
let mlkem_dk = DecapsulationKey::<MlKem768Params>::from_bytes(&mlkem_dk_arr);
|
||||
let mlkem_ek = mlkem_dk.encapsulation_key().clone();
|
||||
|
||||
Ok(Self {
|
||||
x25519_sk,
|
||||
x25519_pk,
|
||||
mlkem_dk,
|
||||
mlkem_ek,
|
||||
})
|
||||
}
|
||||
|
||||
/// Reconstruct from serialised bytes.
|
||||
pub fn from_bytes(bytes: &HybridKeypairBytes) -> Result<Self, HybridKemError> {
|
||||
let x25519_sk = StaticSecret::from(*bytes.x25519_sk);
|
||||
@@ -191,10 +258,15 @@ impl HybridPublicKey {
|
||||
|
||||
/// Encrypt `plaintext` to `recipient_pk` using X25519 + ML-KEM-768 hybrid KEM.
|
||||
///
|
||||
/// `info` is optional HPKE context info incorporated into key derivation.
|
||||
/// `aad` is optional additional authenticated data bound to the AEAD ciphertext.
|
||||
///
|
||||
/// Returns the complete hybrid envelope as a byte vector.
|
||||
pub fn hybrid_encrypt(
|
||||
recipient_pk: &HybridPublicKey,
|
||||
plaintext: &[u8],
|
||||
info: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, HybridKemError> {
|
||||
// 1. Ephemeral X25519 DH
|
||||
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||
@@ -210,18 +282,19 @@ pub fn hybrid_encrypt(
|
||||
.encapsulate(&mut OsRng)
|
||||
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||
|
||||
// 3. Derive AEAD key from combined shared secrets
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
// 3. Derive AEAD key from combined shared secrets (with caller info for context binding)
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), info);
|
||||
|
||||
// Generate a random 12-byte nonce (not derived from HKDF).
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
let aead_nonce = *Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
// 4. AEAD encrypt
|
||||
// 4. AEAD encrypt with caller-supplied AAD
|
||||
let cipher = ChaCha20Poly1305::new(&aead_key);
|
||||
let aead_payload = chacha20poly1305::aead::Payload { msg: plaintext, aad };
|
||||
let ct = cipher
|
||||
.encrypt(&aead_nonce, plaintext)
|
||||
.encrypt(&aead_nonce, aead_payload)
|
||||
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||
|
||||
// 5. Assemble envelope: version || x25519_eph_pk || mlkem_ct || nonce || aead_ct
|
||||
@@ -236,9 +309,13 @@ pub fn hybrid_encrypt(
|
||||
}
|
||||
|
||||
/// Decrypt a hybrid envelope using the recipient's private key.
|
||||
///
|
||||
/// `info` and `aad` must match what was passed to `hybrid_encrypt`.
|
||||
pub fn hybrid_decrypt(
|
||||
keypair: &HybridKeypair,
|
||||
envelope: &[u8],
|
||||
info: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, HybridKemError> {
|
||||
if envelope.len() < HEADER_LEN + 16 {
|
||||
// 16 = minimum AEAD tag
|
||||
@@ -274,39 +351,123 @@ pub fn hybrid_decrypt(
|
||||
|
||||
// 2. ML-KEM decapsulation — convert bytes to the ciphertext array type
|
||||
// that `DecapsulationKey::decapsulate` expects.
|
||||
let mlkem_ct_arr = Array::try_from(mlkem_ct_bytes)
|
||||
let mlkem_ct_arr =
|
||||
Array::try_from(mlkem_ct_bytes).map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
let mlkem_ss = keypair
|
||||
.mlkem_dk
|
||||
.decapsulate(&mlkem_ct_arr)
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
|
||||
// 3. Derive AEAD key (with caller info for context binding)
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), info);
|
||||
|
||||
// 4. Decrypt with caller-supplied AAD
|
||||
let cipher = ChaCha20Poly1305::new(&aead_key);
|
||||
let aead_payload = chacha20poly1305::aead::Payload { msg: aead_ct, aad };
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, aead_payload)
|
||||
.map_err(|_| HybridKemError::DecryptionFailed)?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
/// Encapsulate only: compute shared secret and KEM output (no AEAD).
|
||||
/// Returns `(kem_output, shared_secret)` where `kem_output` is the first
|
||||
/// `HYBRID_KEM_OUTPUT_LEN` bytes of the hybrid envelope and `shared_secret`
|
||||
/// is the 32-byte derived key (same as used for AEAD in `hybrid_encrypt`).
|
||||
/// Used by MLS HPKE exporter (setup_sender_and_export).
|
||||
pub fn hybrid_encapsulate_only(
|
||||
recipient_pk: &HybridPublicKey,
|
||||
) -> Result<(Vec<u8>, [u8; 32]), HybridKemError> {
|
||||
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||
let eph_public = X25519Public::from(&eph_secret);
|
||||
let x25519_recipient = X25519Public::from(recipient_pk.x25519_pk);
|
||||
let x25519_ss = eph_secret.diffie_hellman(&x25519_recipient);
|
||||
|
||||
let mlkem_ek_arr = Array::try_from(recipient_pk.mlkem_ek.as_slice())
|
||||
.map_err(|_| HybridKemError::InvalidMlKemKey)?;
|
||||
let mlkem_ek = EncapsulationKey::<MlKem768Params>::from_bytes(&mlkem_ek_arr);
|
||||
let (mlkem_ct, mlkem_ss) = mlkem_ek
|
||||
.encapsulate(&mut OsRng)
|
||||
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), b"");
|
||||
let shared_secret: [u8; 32] = aead_key.as_slice().try_into()
|
||||
.expect("AEAD key is always exactly 32 bytes");
|
||||
|
||||
let mut kem_output = Vec::with_capacity(HYBRID_KEM_OUTPUT_LEN);
|
||||
kem_output.push(HYBRID_VERSION);
|
||||
kem_output.extend_from_slice(&eph_public.to_bytes());
|
||||
kem_output.extend_from_slice(mlkem_ct.as_slice());
|
||||
|
||||
Ok((kem_output, shared_secret))
|
||||
}
|
||||
|
||||
/// Decapsulate only: recover shared secret from KEM output (no AEAD).
|
||||
/// Used by MLS HPKE exporter (setup_receiver_and_export).
|
||||
pub fn hybrid_decapsulate_only(
|
||||
keypair: &HybridKeypair,
|
||||
kem_output: &[u8],
|
||||
) -> Result<[u8; 32], HybridKemError> {
|
||||
if kem_output.len() < HYBRID_KEM_OUTPUT_LEN {
|
||||
return Err(HybridKemError::TooShort(kem_output.len()));
|
||||
}
|
||||
if kem_output[0] != HYBRID_VERSION {
|
||||
return Err(HybridKemError::UnsupportedVersion(kem_output[0]));
|
||||
}
|
||||
|
||||
let eph_pk_bytes: [u8; 32] = kem_output[1..33].try_into()
|
||||
.expect("slice is exactly 32 bytes (guaranteed by HYBRID_KEM_OUTPUT_LEN check)");
|
||||
let eph_pk = X25519Public::from(eph_pk_bytes);
|
||||
let x25519_ss = keypair.x25519_sk.diffie_hellman(&eph_pk);
|
||||
|
||||
let mlkem_ct_arr = Array::try_from(&kem_output[33..33 + MLKEM_CT_LEN])
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
let mlkem_ss = keypair
|
||||
.mlkem_dk
|
||||
.decapsulate(&mlkem_ct_arr)
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
|
||||
// 3. Derive AEAD key
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice(), b"");
|
||||
Ok(aead_key.as_slice().try_into()
|
||||
.expect("AEAD key is always exactly 32 bytes"))
|
||||
}
|
||||
|
||||
// 4. Decrypt
|
||||
let cipher = ChaCha20Poly1305::new(&aead_key);
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, aead_ct)
|
||||
.map_err(|_| HybridKemError::DecryptionFailed)?;
|
||||
|
||||
Ok(plaintext)
|
||||
/// Export a secret from shared secret (MLS HPKE exporter compatibility).
|
||||
/// Uses HKDF-Expand(prk, exporter_context, length) with prk = HKDF-Extract(0, shared_secret).
|
||||
pub fn hybrid_export(
|
||||
shared_secret: &[u8; 32],
|
||||
exporter_context: &[u8],
|
||||
length: usize,
|
||||
) -> Vec<u8> {
|
||||
let hk = Hkdf::<Sha256>::new(Some(HKDF_SALT), shared_secret);
|
||||
let mut out = vec![0u8; length];
|
||||
hk.expand(exporter_context, &mut out).expect("valid length");
|
||||
out
|
||||
}
|
||||
|
||||
/// Derive AEAD key from the combined X25519 + ML-KEM shared secrets.
|
||||
///
|
||||
/// `extra_info` is optional caller-supplied context (e.g. HPKE `info`) that is
|
||||
/// appended to the domain-separation label for additional binding.
|
||||
///
|
||||
/// The nonce is generated randomly per-encryption rather than derived from
|
||||
/// HKDF, preventing nonce reuse when the same shared secret is (accidentally)
|
||||
/// used more than once.
|
||||
fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8]) -> Key {
|
||||
fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8], extra_info: &[u8]) -> Key {
|
||||
let mut ikm = Zeroizing::new(vec![0u8; x25519_ss.len() + mlkem_ss.len()]);
|
||||
ikm[..x25519_ss.len()].copy_from_slice(x25519_ss);
|
||||
ikm[x25519_ss.len()..].copy_from_slice(mlkem_ss);
|
||||
|
||||
let hk = Hkdf::<Sha256>::new(None, &ikm);
|
||||
let hk = Hkdf::<Sha256>::new(Some(HKDF_SALT), &ikm);
|
||||
|
||||
// Combine domain-separation label with caller-supplied context.
|
||||
let mut info = Vec::with_capacity(HKDF_INFO.len() + extra_info.len());
|
||||
info.extend_from_slice(HKDF_INFO);
|
||||
info.extend_from_slice(extra_info);
|
||||
|
||||
let mut key_bytes = Zeroizing::new([0u8; 32]);
|
||||
hk.expand(HKDF_INFO, &mut *key_bytes)
|
||||
hk.expand(&info, &mut *key_bytes)
|
||||
.expect("32 bytes is valid HKDF-SHA256 output length");
|
||||
|
||||
*Key::from_slice(&*key_bytes)
|
||||
@@ -315,6 +476,7 @@ fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8]) -> Key {
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -332,21 +494,39 @@ mod tests {
|
||||
let pk = kp.public_key();
|
||||
let plaintext = b"hello post-quantum world!";
|
||||
|
||||
let envelope = hybrid_encrypt(&pk, plaintext).unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope).unwrap();
|
||||
let envelope = hybrid_encrypt(&pk, plaintext, b"", b"").unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope, b"", b"").unwrap();
|
||||
|
||||
assert_eq!(recovered, plaintext);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn encrypt_decrypt_with_info_aad() {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
let plaintext = b"context-bound payload";
|
||||
let info = b"mls epoch 42";
|
||||
let aad = b"group-id-abc";
|
||||
|
||||
let envelope = hybrid_encrypt(&pk, plaintext, info, aad).unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope, info, aad).unwrap();
|
||||
assert_eq!(recovered, plaintext);
|
||||
|
||||
// Mismatched info must fail
|
||||
assert!(hybrid_decrypt(&kp, &envelope, b"wrong info", aad).is_err());
|
||||
// Mismatched aad must fail
|
||||
assert!(hybrid_decrypt(&kp, &envelope, info, b"wrong aad").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrong_key_decryption_fails() {
|
||||
let kp_sender_target = HybridKeypair::generate();
|
||||
let kp_wrong = HybridKeypair::generate();
|
||||
|
||||
let pk = kp_sender_target.public_key();
|
||||
let envelope = hybrid_encrypt(&pk, b"secret").unwrap();
|
||||
let envelope = hybrid_encrypt(&pk, b"secret", b"", b"").unwrap();
|
||||
|
||||
let result = hybrid_decrypt(&kp_wrong, &envelope);
|
||||
let result = hybrid_decrypt(&kp_wrong, &envelope, b"", b"");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
@@ -355,12 +535,12 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
let last = envelope.len() - 1;
|
||||
envelope[last] ^= 0x01;
|
||||
|
||||
assert!(matches!(
|
||||
hybrid_decrypt(&kp, &envelope),
|
||||
hybrid_decrypt(&kp, &envelope, b"", b""),
|
||||
Err(HybridKemError::DecryptionFailed)
|
||||
));
|
||||
}
|
||||
@@ -370,11 +550,11 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
// Flip a byte in the ML-KEM ciphertext region (starts at offset 33)
|
||||
envelope[40] ^= 0xFF;
|
||||
|
||||
assert!(hybrid_decrypt(&kp, &envelope).is_err());
|
||||
assert!(hybrid_decrypt(&kp, &envelope, b"", b"").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -382,11 +562,11 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
// Flip a byte in the X25519 ephemeral pk region (offset 1..33)
|
||||
envelope[5] ^= 0xFF;
|
||||
|
||||
assert!(hybrid_decrypt(&kp, &envelope).is_err());
|
||||
assert!(hybrid_decrypt(&kp, &envelope, b"", b"").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -394,11 +574,11 @@ mod tests {
|
||||
let kp = HybridKeypair::generate();
|
||||
let pk = kp.public_key();
|
||||
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload").unwrap();
|
||||
let mut envelope = hybrid_encrypt(&pk, b"payload", b"", b"").unwrap();
|
||||
envelope[0] = 0xFF;
|
||||
|
||||
assert!(matches!(
|
||||
hybrid_decrypt(&kp, &envelope),
|
||||
hybrid_decrypt(&kp, &envelope, b"", b""),
|
||||
Err(HybridKemError::UnsupportedVersion(0xFF))
|
||||
));
|
||||
}
|
||||
@@ -407,7 +587,7 @@ mod tests {
|
||||
fn envelope_too_short_rejected() {
|
||||
let kp = HybridKeypair::generate();
|
||||
assert!(matches!(
|
||||
hybrid_decrypt(&kp, &[0x01; 10]),
|
||||
hybrid_decrypt(&kp, &[0x01; 10], b"", b""),
|
||||
Err(HybridKemError::TooShort(10))
|
||||
));
|
||||
}
|
||||
@@ -419,15 +599,12 @@ mod tests {
|
||||
let restored = HybridKeypair::from_bytes(&bytes).unwrap();
|
||||
|
||||
assert_eq!(kp.x25519_pk.to_bytes(), restored.x25519_pk.to_bytes());
|
||||
assert_eq!(
|
||||
kp.public_key().mlkem_ek,
|
||||
restored.public_key().mlkem_ek
|
||||
);
|
||||
assert_eq!(kp.public_key().mlkem_ek, restored.public_key().mlkem_ek);
|
||||
|
||||
// Verify restored keypair can decrypt
|
||||
let pk = kp.public_key();
|
||||
let ct = hybrid_encrypt(&pk, b"test").unwrap();
|
||||
let pt = hybrid_decrypt(&restored, &ct).unwrap();
|
||||
let ct = hybrid_encrypt(&pk, b"test", b"", b"").unwrap();
|
||||
let pt = hybrid_decrypt(&restored, &ct, b"", b"").unwrap();
|
||||
assert_eq!(pt, b"test");
|
||||
}
|
||||
|
||||
@@ -448,8 +625,8 @@ mod tests {
|
||||
let pk = kp.public_key();
|
||||
let plaintext = vec![0xAB; 50_000]; // 50 KB
|
||||
|
||||
let envelope = hybrid_encrypt(&pk, &plaintext).unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope).unwrap();
|
||||
let envelope = hybrid_encrypt(&pk, &plaintext, b"", b"").unwrap();
|
||||
let recovered = hybrid_decrypt(&kp, &envelope, b"", b"").unwrap();
|
||||
|
||||
assert_eq!(recovered, plaintext);
|
||||
}
|
||||
245
crates/quicprochat-core/src/identity.rs
Normal file
245
crates/quicprochat-core/src/identity.rs
Normal file
@@ -0,0 +1,245 @@
|
||||
//! Ed25519 identity keypair for MLS credentials and AS registration.
|
||||
//!
|
||||
//! The [`IdentityKeypair`] is the long-term identity key embedded in MLS
|
||||
//! `BasicCredential`s. It is used for signing MLS messages and as the
|
||||
//! indexing key for the Authentication Service.
|
||||
//!
|
||||
//! # Zeroize
|
||||
//!
|
||||
//! The 32-byte private seed is stored as `Zeroizing<[u8; 32]>`, which zeroes
|
||||
//! the bytes on drop. `[u8; 32]` is `Copy + Default` and satisfies zeroize's
|
||||
//! `DefaultIsZeroes` constraint, avoiding a conflict with ed25519-dalek's
|
||||
//! `SigningKey` zeroize impl.
|
||||
//!
|
||||
//! # Fingerprint
|
||||
//!
|
||||
//! A 32-byte SHA-256 digest of the raw public key bytes is used as a compact,
|
||||
//! collision-resistant identifier for logging.
|
||||
|
||||
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
/// An Ed25519 identity keypair.
|
||||
///
|
||||
/// Created with [`IdentityKeypair::generate`]. The private signing key seed
|
||||
/// is zeroed when this struct is dropped.
|
||||
pub struct IdentityKeypair {
|
||||
/// Raw 32-byte private seed — zeroized on drop.
|
||||
///
|
||||
/// Stored as bytes rather than `SigningKey` to satisfy zeroize's
|
||||
/// `DefaultIsZeroes` bound on `Zeroizing<T>`.
|
||||
seed: Zeroizing<[u8; 32]>,
|
||||
/// Corresponding 32-byte public verifying key.
|
||||
verifying: VerifyingKey,
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Recreate an identity keypair from a 32-byte seed.
|
||||
pub fn from_seed(seed: [u8; 32]) -> Self {
|
||||
let signing = SigningKey::from_bytes(&seed);
|
||||
let verifying = signing.verifying_key();
|
||||
Self {
|
||||
seed: Zeroizing::new(seed),
|
||||
verifying,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte private seed (for persistence).
|
||||
///
|
||||
/// The returned value is wrapped in [`Zeroizing`] so it is securely
|
||||
/// erased when dropped, preventing the seed from lingering in memory.
|
||||
pub fn seed_bytes(&self) -> Zeroizing<[u8; 32]> {
|
||||
Zeroizing::new(*self.seed)
|
||||
}
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Generate a fresh random Ed25519 identity keypair.
|
||||
pub fn generate() -> Self {
|
||||
use rand::rngs::OsRng;
|
||||
let signing = SigningKey::generate(&mut OsRng);
|
||||
let verifying = signing.verifying_key();
|
||||
let seed = Zeroizing::new(signing.to_bytes());
|
||||
Self { seed, verifying }
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte Ed25519 public key.
|
||||
///
|
||||
/// This is the byte array used as `identityKey` in `auth.capnp` calls.
|
||||
pub fn public_key_bytes(&self) -> [u8; 32] {
|
||||
self.verifying.to_bytes()
|
||||
}
|
||||
|
||||
/// Return the SHA-256 fingerprint of the public key (32 bytes).
|
||||
pub fn fingerprint(&self) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(self.verifying.to_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Reconstruct the `SigningKey` from the stored seed bytes.
|
||||
fn signing_key(&self) -> SigningKey {
|
||||
SigningKey::from_bytes(&self.seed)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement the openmls `Signer` trait so `IdentityKeypair` can be passed
|
||||
/// directly to `KeyPackage::builder().build(...)` without needing the external
|
||||
/// `openmls_basic_credential` crate.
|
||||
#[cfg(feature = "native")]
|
||||
impl openmls_traits::signatures::Signer for IdentityKeypair {
|
||||
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::signatures::SignerError> {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
Ok(sig.to_bytes().to_vec())
|
||||
}
|
||||
|
||||
fn signature_scheme(&self) -> openmls_traits::types::SignatureScheme {
|
||||
openmls_traits::types::SignatureScheme::ED25519
|
||||
}
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Sign arbitrary bytes with the Ed25519 key and return the 64-byte signature.
|
||||
///
|
||||
/// Used by sealed sender to sign the inner payload for recipient verification.
|
||||
pub fn sign_raw(&self, payload: &[u8]) -> [u8; 64] {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
sig.to_bytes()
|
||||
}
|
||||
|
||||
/// Verify an Ed25519 signature over `payload` using the given public key.
|
||||
pub fn verify_raw(
|
||||
public_key: &[u8; 32],
|
||||
payload: &[u8],
|
||||
signature: &[u8; 64],
|
||||
) -> Result<(), crate::error::CoreError> {
|
||||
use ed25519_dalek::Verifier;
|
||||
|
||||
let vk = VerifyingKey::from_bytes(public_key)
|
||||
.map_err(|e| crate::error::CoreError::Mls(format!("invalid public key: {e}")))?;
|
||||
let sig = ed25519_dalek::Signature::from_bytes(signature);
|
||||
vk.verify(payload, &sig)
|
||||
.map_err(|e| crate::error::CoreError::Mls(format!("signature verification failed: {e}")))
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify a 96-byte delivery proof produced by the server's `build_delivery_proof`.
|
||||
///
|
||||
/// # Layout
|
||||
/// ```text
|
||||
/// bytes 0..32 — SHA-256(seq_le || recipient_key || timestamp_ms_le)
|
||||
/// bytes 32..96 — Ed25519 signature over those 32 bytes
|
||||
/// ```
|
||||
///
|
||||
/// Returns `Ok(true)` when the proof is structurally valid and the signature verifies,
|
||||
/// `Ok(false)` when the proof length is wrong (graceful degradation for old servers),
|
||||
/// or `Err` when the signature is structurally invalid / verification fails.
|
||||
pub fn verify_delivery_proof(
|
||||
server_pubkey: &[u8; 32],
|
||||
proof: &[u8],
|
||||
) -> Result<bool, crate::error::CoreError> {
|
||||
if proof.len() != 96 {
|
||||
return Ok(false);
|
||||
}
|
||||
let hash: [u8; 32] = proof[..32].try_into().expect("slice is 32 bytes");
|
||||
let sig: [u8; 64] = proof[32..96].try_into().expect("slice is 64 bytes");
|
||||
IdentityKeypair::verify_raw(server_pubkey, &hash, &sig)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
impl Serialize for IdentityKeypair {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
serializer.serialize_bytes(&self.seed[..])
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for IdentityKeypair {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let bytes: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
|
||||
let seed: [u8; 32] = bytes
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
|
||||
Ok(IdentityKeypair::from_seed(seed))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for IdentityKeypair {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let fp = self.fingerprint();
|
||||
f.debug_struct("IdentityKeypair")
|
||||
.field(
|
||||
"fingerprint",
|
||||
&format!("{:02x}{:02x}{:02x}{:02x}…", fp[0], fp[1], fp[2], fp[3]),
|
||||
)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod proof_tests {
|
||||
use super::*;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
fn make_proof(kp: &IdentityKeypair, seq: u64, recipient_key: &[u8], timestamp_ms: u64) -> Vec<u8> {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(seq.to_le_bytes());
|
||||
hasher.update(recipient_key);
|
||||
hasher.update(timestamp_ms.to_le_bytes());
|
||||
let hash: [u8; 32] = hasher.finalize().into();
|
||||
let sig = kp.sign_raw(&hash);
|
||||
let mut proof = vec![0u8; 96];
|
||||
proof[..32].copy_from_slice(&hash);
|
||||
proof[32..].copy_from_slice(&sig);
|
||||
proof
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn verify_valid_proof() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let pk = kp.public_key_bytes();
|
||||
let rk = [0xabu8; 32];
|
||||
let proof = make_proof(&kp, 42, &rk, 1_700_000_000_000);
|
||||
assert!(verify_delivery_proof(&pk, &proof).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reject_wrong_length() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let pk = kp.public_key_bytes();
|
||||
assert!(!verify_delivery_proof(&pk, &[0u8; 64]).unwrap());
|
||||
assert!(!verify_delivery_proof(&pk, &[]).unwrap());
|
||||
assert!(!verify_delivery_proof(&pk, &[0u8; 97]).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reject_tampered_hash() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let pk = kp.public_key_bytes();
|
||||
let rk = [0x01u8; 32];
|
||||
let mut proof = make_proof(&kp, 1, &rk, 999);
|
||||
proof[0] ^= 0xff; // corrupt the hash bytes
|
||||
assert!(verify_delivery_proof(&pk, &proof).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reject_wrong_pubkey() {
|
||||
let kp = IdentityKeypair::generate();
|
||||
let other = IdentityKeypair::generate();
|
||||
let pk = other.public_key_bytes();
|
||||
let rk = [0x02u8; 32];
|
||||
let proof = make_proof(&kp, 5, &rk, 0);
|
||||
assert!(verify_delivery_proof(&pk, &proof).is_err());
|
||||
}
|
||||
}
|
||||
@@ -14,19 +14,48 @@
|
||||
//! # Wire format
|
||||
//!
|
||||
//! KeyPackages are TLS-encoded using `tls_codec` (same version as openmls).
|
||||
//! The resulting bytes are opaque to the quicnprotochat transport layer.
|
||||
//! The resulting bytes are opaque to the quicprochat transport layer.
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||
TlsSerializeTrait,
|
||||
BasicCredential, Ciphersuite, CredentialWithKey, KeyPackage, KeyPackageIn,
|
||||
};
|
||||
use openmls_rust_crypto::OpenMlsRustCrypto;
|
||||
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::{error::CoreError, identity::IdentityKeypair};
|
||||
|
||||
/// The MLS ciphersuite used throughout quicnprotochat.
|
||||
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
/// The MLS ciphersuite used throughout quicprochat (RFC 9420 §17.1).
|
||||
pub const ALLOWED_CIPHERSUITE: Ciphersuite =
|
||||
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
/// Wire value of the allowed ciphersuite (KeyPackage TLS encoding: version 2B, ciphersuite 2B).
|
||||
const ALLOWED_CIPHERSUITE_WIRE: u16 = 0x0001;
|
||||
|
||||
const CIPHERSUITE: Ciphersuite = ALLOWED_CIPHERSUITE;
|
||||
|
||||
/// Validates that the KeyPackage bytes use an allowed ciphersuite (Phase 2: ciphersuite allowlist).
|
||||
///
|
||||
/// Parses the TLS-encoded KeyPackage and rejects if the ciphersuite is not
|
||||
/// `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519`. Does not verify signatures;
|
||||
/// the server uses this only to enforce policy before storing.
|
||||
pub fn validate_keypackage_ciphersuite(bytes: &[u8]) -> Result<(), CoreError> {
|
||||
if bytes.len() < 4 {
|
||||
return Err(CoreError::Mls("KeyPackage too short for version+ciphersuite".into()));
|
||||
}
|
||||
let cs_wire = u16::from_be_bytes([bytes[2], bytes[3]]);
|
||||
if cs_wire != ALLOWED_CIPHERSUITE_WIRE {
|
||||
return Err(CoreError::Mls(format!(
|
||||
"KeyPackage ciphersuite {:#06x} not in allowlist (only {:#06x} allowed)",
|
||||
cs_wire, ALLOWED_CIPHERSUITE_WIRE
|
||||
)));
|
||||
}
|
||||
// Optionally confirm full parse so we don't accept garbage that happens to have 0x0001 at offset 2.
|
||||
let mut cursor = bytes;
|
||||
let _kp = KeyPackageIn::tls_deserialize(&mut cursor)
|
||||
.map_err(|e| CoreError::Mls(format!("KeyPackage parse: {e:?}")))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
|
||||
///
|
||||
@@ -45,8 +74,8 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
|
||||
|
||||
// Build a BasicCredential using the raw Ed25519 public key bytes as the
|
||||
// MLS identity. Per RFC 9420, any byte string may serve as the identity.
|
||||
let credential = Credential::new(identity.public_key_bytes().to_vec(), CredentialType::Basic)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
let credential: openmls::prelude::Credential =
|
||||
BasicCredential::new(identity.public_key_bytes().to_vec()).into();
|
||||
|
||||
// The `signature_key` in CredentialWithKey is the Ed25519 public key that
|
||||
// will be used to verify the KeyPackage's leaf node signature.
|
||||
@@ -58,19 +87,13 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
|
||||
|
||||
// `IdentityKeypair` implements `openmls_traits::signatures::Signer`
|
||||
// so it can be passed directly to the builder.
|
||||
let key_package = KeyPackage::builder()
|
||||
.build(
|
||||
CryptoConfig::with_default_version(CIPHERSUITE),
|
||||
&backend,
|
||||
identity,
|
||||
credential_with_key,
|
||||
)
|
||||
let key_package_bundle = KeyPackage::builder()
|
||||
.build(CIPHERSUITE, &backend, identity, credential_with_key)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
// TLS-encode the KeyPackage using the trait from the openmls prelude.
|
||||
// This uses tls_codec 0.3 (the same version openmls uses internally),
|
||||
// avoiding a duplicate-trait conflict with tls_codec 0.4.
|
||||
let tls_bytes = key_package
|
||||
// TLS-encode the KeyPackage.
|
||||
let tls_bytes = key_package_bundle
|
||||
.key_package()
|
||||
.tls_serialize_detached()
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
713
crates/quicprochat-core/src/keystore.rs
Normal file
713
crates/quicprochat-core/src/keystore.rs
Normal file
@@ -0,0 +1,713 @@
|
||||
use std::{
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use openmls_memory_storage::MemoryStorage;
|
||||
use openmls_traits::storage::{traits, StorageProvider, CURRENT_VERSION};
|
||||
|
||||
/// A disk-backed storage provider implementing `StorageProvider`.
|
||||
///
|
||||
/// Wraps `openmls_memory_storage::MemoryStorage` and flushes to disk on every
|
||||
/// write so that HPKE init keys and group state survive process restarts.
|
||||
///
|
||||
/// # Serialization
|
||||
///
|
||||
/// Uses bincode for the outer `HashMap<Vec<u8>, Vec<u8>>` container when
|
||||
/// persisting to disk. The inner values use serde_json (matching
|
||||
/// `MemoryStorage`'s serialization format).
|
||||
///
|
||||
/// # Persistence security
|
||||
///
|
||||
/// When `path` is set, file permissions are restricted to owner-only (0o600)
|
||||
/// on Unix platforms, since the store may contain HPKE private keys.
|
||||
#[derive(Debug)]
|
||||
pub struct DiskKeyStore {
|
||||
path: Option<PathBuf>,
|
||||
storage: MemoryStorage,
|
||||
}
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum DiskKeyStoreError {
|
||||
#[error("serialization error")]
|
||||
Serialization,
|
||||
#[error("io error: {0}")]
|
||||
Io(String),
|
||||
#[error("memory storage error: {0}")]
|
||||
MemoryStorage(#[from] openmls_memory_storage::MemoryStorageError),
|
||||
}
|
||||
|
||||
impl DiskKeyStore {
|
||||
/// In-memory keystore (no persistence).
|
||||
pub fn ephemeral() -> Self {
|
||||
Self {
|
||||
path: None,
|
||||
storage: MemoryStorage::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Persistent keystore backed by `path`. Creates an empty store if missing.
|
||||
pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
|
||||
let path = path.as_ref().to_path_buf();
|
||||
let storage = if path.exists() {
|
||||
let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
MemoryStorage::default()
|
||||
} else {
|
||||
let map: std::collections::HashMap<Vec<u8>, Vec<u8>> =
|
||||
bincode::deserialize(&bytes)
|
||||
.map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
let storage = MemoryStorage::default();
|
||||
let mut values = storage.values.write()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
*values = map;
|
||||
drop(values);
|
||||
storage
|
||||
}
|
||||
} else {
|
||||
MemoryStorage::default()
|
||||
};
|
||||
|
||||
let store = Self {
|
||||
path: Some(path),
|
||||
storage,
|
||||
};
|
||||
|
||||
// Set restrictive file permissions on the keystore file.
|
||||
store.set_file_permissions()?;
|
||||
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
fn flush(&self) -> Result<(), DiskKeyStoreError> {
|
||||
let Some(path) = &self.path else {
|
||||
return Ok(());
|
||||
};
|
||||
let values = self.storage.values.read()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
let bytes = bincode::serialize(&*values)
|
||||
.map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, &bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
self.set_file_permissions()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Serialize the backing storage to bytes (bincode).
|
||||
///
|
||||
/// This captures all key material *and* MLS group state held by the
|
||||
/// `StorageProvider`, allowing the caller to persist it in a database
|
||||
/// column instead of (or in addition to) on-disk files.
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, DiskKeyStoreError> {
|
||||
let values = self.storage.values.read()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)
|
||||
}
|
||||
|
||||
/// Restore a `DiskKeyStore` from bytes previously produced by [`to_bytes`].
|
||||
pub fn from_bytes(bytes: &[u8]) -> Result<Self, DiskKeyStoreError> {
|
||||
let map: std::collections::HashMap<Vec<u8>, Vec<u8>> =
|
||||
bincode::deserialize(bytes).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
let storage = MemoryStorage::default();
|
||||
let mut values = storage.values.write()
|
||||
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
|
||||
*values = map;
|
||||
drop(values);
|
||||
Ok(Self {
|
||||
path: None,
|
||||
storage,
|
||||
})
|
||||
}
|
||||
|
||||
/// Restrict file permissions to owner-only (0o600) on Unix.
|
||||
#[cfg(unix)]
|
||||
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
if let Some(path) = &self.path {
|
||||
if path.exists() {
|
||||
let perms = std::fs::Permissions::from_mode(0o600);
|
||||
fs::set_permissions(path, perms)
|
||||
.map_err(|e| DiskKeyStoreError::Io(format!("set permissions: {e}")))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DiskKeyStore {
|
||||
fn default() -> Self {
|
||||
Self::ephemeral()
|
||||
}
|
||||
}
|
||||
|
||||
/// Delegate all `StorageProvider` methods to the inner `MemoryStorage`,
|
||||
/// flushing to disk after every write/delete operation.
|
||||
///
|
||||
/// The flush errors are mapped to `DiskKeyStoreError` via the
|
||||
/// `MemoryStorageError` conversion. If a flush fails, the in-memory state
|
||||
/// is still updated (matching the old DiskKeyStore behavior).
|
||||
impl StorageProvider<CURRENT_VERSION> for DiskKeyStore {
    // `MemoryStorageError` converts into this via `From`, which is what the
    // bare `?` on every delegated call relies on.
    type Error = DiskKeyStoreError;

    // ── Writers: update in-memory state, then persist everything. ──
    // Note: if `flush()` fails the in-memory write has already happened
    // (matching the old DiskKeyStore behavior documented above).

    fn write_mls_join_config<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        config: &MlsGroupJoinConfig,
    ) -> Result<(), Self::Error> {
        self.storage.write_mls_join_config(group_id, config)?;
        self.flush()
    }

    fn append_own_leaf_node<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        LeafNode: traits::LeafNode<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        leaf_node: &LeafNode,
    ) -> Result<(), Self::Error> {
        self.storage.append_own_leaf_node(group_id, leaf_node)?;
        self.flush()
    }

    fn queue_proposal<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
        QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        proposal_ref: &ProposalRef,
        proposal: &QueuedProposal,
    ) -> Result<(), Self::Error> {
        self.storage.queue_proposal(group_id, proposal_ref, proposal)?;
        self.flush()
    }

    fn write_tree<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        TreeSync: traits::TreeSync<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        tree: &TreeSync,
    ) -> Result<(), Self::Error> {
        self.storage.write_tree(group_id, tree)?;
        self.flush()
    }

    fn write_interim_transcript_hash<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        interim_transcript_hash: &InterimTranscriptHash,
    ) -> Result<(), Self::Error> {
        self.storage.write_interim_transcript_hash(group_id, interim_transcript_hash)?;
        self.flush()
    }

    fn write_context<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        GroupContext: traits::GroupContext<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        group_context: &GroupContext,
    ) -> Result<(), Self::Error> {
        self.storage.write_context(group_id, group_context)?;
        self.flush()
    }

    fn write_confirmation_tag<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        confirmation_tag: &ConfirmationTag,
    ) -> Result<(), Self::Error> {
        self.storage.write_confirmation_tag(group_id, confirmation_tag)?;
        self.flush()
    }

    fn write_group_state<
        GroupState: traits::GroupState<CURRENT_VERSION>,
        GroupId: traits::GroupId<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        group_state: &GroupState,
    ) -> Result<(), Self::Error> {
        self.storage.write_group_state(group_id, group_state)?;
        self.flush()
    }

    fn write_message_secrets<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        message_secrets: &MessageSecrets,
    ) -> Result<(), Self::Error> {
        self.storage.write_message_secrets(group_id, message_secrets)?;
        self.flush()
    }

    fn write_resumption_psk_store<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        resumption_psk_store: &ResumptionPskStore,
    ) -> Result<(), Self::Error> {
        self.storage.write_resumption_psk_store(group_id, resumption_psk_store)?;
        self.flush()
    }

    fn write_own_leaf_index<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        own_leaf_index: &LeafNodeIndex,
    ) -> Result<(), Self::Error> {
        self.storage.write_own_leaf_index(group_id, own_leaf_index)?;
        self.flush()
    }

    fn write_group_epoch_secrets<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        group_epoch_secrets: &GroupEpochSecrets,
    ) -> Result<(), Self::Error> {
        self.storage.write_group_epoch_secrets(group_id, group_epoch_secrets)?;
        self.flush()
    }

    fn write_signature_key_pair<
        SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
        SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
    >(
        &self,
        public_key: &SignaturePublicKey,
        signature_key_pair: &SignatureKeyPair,
    ) -> Result<(), Self::Error> {
        self.storage.write_signature_key_pair(public_key, signature_key_pair)?;
        self.flush()
    }

    fn write_encryption_key_pair<
        EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
        HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
    >(
        &self,
        public_key: &EncryptionKey,
        key_pair: &HpkeKeyPair,
    ) -> Result<(), Self::Error> {
        self.storage.write_encryption_key_pair(public_key, key_pair)?;
        self.flush()
    }

    fn write_encryption_epoch_key_pairs<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        EpochKey: traits::EpochKey<CURRENT_VERSION>,
        HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        epoch: &EpochKey,
        leaf_index: u32,
        key_pairs: &[HpkeKeyPair],
    ) -> Result<(), Self::Error> {
        self.storage.write_encryption_epoch_key_pairs(group_id, epoch, leaf_index, key_pairs)?;
        self.flush()
    }

    fn write_key_package<
        HashReference: traits::HashReference<CURRENT_VERSION>,
        KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
    >(
        &self,
        hash_ref: &HashReference,
        key_package: &KeyPackage,
    ) -> Result<(), Self::Error> {
        self.storage.write_key_package(hash_ref, key_package)?;
        self.flush()
    }

    fn write_psk<
        PskId: traits::PskId<CURRENT_VERSION>,
        PskBundle: traits::PskBundle<CURRENT_VERSION>,
    >(
        &self,
        psk_id: &PskId,
        psk: &PskBundle,
    ) -> Result<(), Self::Error> {
        self.storage.write_psk(psk_id, psk)?;
        self.flush()
    }

    // --- getters (no flush needed) ---
    // Pure reads from the in-memory map; the `?` only converts the error type.

    fn mls_group_join_config<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<MlsGroupJoinConfig>, Self::Error> {
        Ok(self.storage.mls_group_join_config(group_id)?)
    }

    fn own_leaf_nodes<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        LeafNode: traits::LeafNode<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Vec<LeafNode>, Self::Error> {
        Ok(self.storage.own_leaf_nodes(group_id)?)
    }

    fn queued_proposal_refs<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Vec<ProposalRef>, Self::Error> {
        Ok(self.storage.queued_proposal_refs(group_id)?)
    }

    fn queued_proposals<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
        QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Vec<(ProposalRef, QueuedProposal)>, Self::Error> {
        Ok(self.storage.queued_proposals(group_id)?)
    }

    fn tree<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        TreeSync: traits::TreeSync<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<TreeSync>, Self::Error> {
        Ok(self.storage.tree(group_id)?)
    }

    fn group_context<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        GroupContext: traits::GroupContext<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<GroupContext>, Self::Error> {
        Ok(self.storage.group_context(group_id)?)
    }

    fn interim_transcript_hash<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<InterimTranscriptHash>, Self::Error> {
        Ok(self.storage.interim_transcript_hash(group_id)?)
    }

    fn confirmation_tag<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<ConfirmationTag>, Self::Error> {
        Ok(self.storage.confirmation_tag(group_id)?)
    }

    fn group_state<
        GroupState: traits::GroupState<CURRENT_VERSION>,
        GroupId: traits::GroupId<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<GroupState>, Self::Error> {
        Ok(self.storage.group_state(group_id)?)
    }

    fn message_secrets<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<MessageSecrets>, Self::Error> {
        Ok(self.storage.message_secrets(group_id)?)
    }

    fn resumption_psk_store<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<ResumptionPskStore>, Self::Error> {
        Ok(self.storage.resumption_psk_store(group_id)?)
    }

    fn own_leaf_index<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<LeafNodeIndex>, Self::Error> {
        Ok(self.storage.own_leaf_index(group_id)?)
    }

    fn group_epoch_secrets<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<Option<GroupEpochSecrets>, Self::Error> {
        Ok(self.storage.group_epoch_secrets(group_id)?)
    }

    fn signature_key_pair<
        SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
        SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
    >(
        &self,
        public_key: &SignaturePublicKey,
    ) -> Result<Option<SignatureKeyPair>, Self::Error> {
        Ok(self.storage.signature_key_pair(public_key)?)
    }

    fn encryption_key_pair<
        HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
        EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
    >(
        &self,
        public_key: &EncryptionKey,
    ) -> Result<Option<HpkeKeyPair>, Self::Error> {
        Ok(self.storage.encryption_key_pair(public_key)?)
    }

    fn encryption_epoch_key_pairs<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        EpochKey: traits::EpochKey<CURRENT_VERSION>,
        HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        epoch: &EpochKey,
        leaf_index: u32,
    ) -> Result<Vec<HpkeKeyPair>, Self::Error> {
        Ok(self.storage.encryption_epoch_key_pairs(group_id, epoch, leaf_index)?)
    }

    fn key_package<
        KeyPackageRef: traits::HashReference<CURRENT_VERSION>,
        KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
    >(
        &self,
        hash_ref: &KeyPackageRef,
    ) -> Result<Option<KeyPackage>, Self::Error> {
        Ok(self.storage.key_package(hash_ref)?)
    }

    fn psk<
        PskBundle: traits::PskBundle<CURRENT_VERSION>,
        PskId: traits::PskId<CURRENT_VERSION>,
    >(
        &self,
        psk_id: &PskId,
    ) -> Result<Option<PskBundle>, Self::Error> {
        Ok(self.storage.psk(psk_id)?)
    }

    // --- deleters (flush needed) ---
    // Like the writers: mutate in memory first, then persist to disk.

    fn remove_proposal<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        proposal_ref: &ProposalRef,
    ) -> Result<(), Self::Error> {
        self.storage.remove_proposal(group_id, proposal_ref)?;
        self.flush()
    }

    fn delete_own_leaf_nodes<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_own_leaf_nodes(group_id)?;
        self.flush()
    }

    fn delete_group_config<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_group_config(group_id)?;
        self.flush()
    }

    fn delete_tree<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_tree(group_id)?;
        self.flush()
    }

    fn delete_confirmation_tag<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_confirmation_tag(group_id)?;
        self.flush()
    }

    fn delete_group_state<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_group_state(group_id)?;
        self.flush()
    }

    fn delete_context<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_context(group_id)?;
        self.flush()
    }

    fn delete_interim_transcript_hash<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_interim_transcript_hash(group_id)?;
        self.flush()
    }

    fn delete_message_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_message_secrets(group_id)?;
        self.flush()
    }

    fn delete_all_resumption_psk_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_all_resumption_psk_secrets(group_id)?;
        self.flush()
    }

    fn delete_own_leaf_index<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_own_leaf_index(group_id)?;
        self.flush()
    }

    fn delete_group_epoch_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        self.storage.delete_group_epoch_secrets(group_id)?;
        self.flush()
    }

    fn clear_proposal_queue<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
    ) -> Result<(), Self::Error> {
        // Turbofish needed: `ProposalRef` does not appear in the argument list.
        self.storage.clear_proposal_queue::<GroupId, ProposalRef>(group_id)?;
        self.flush()
    }

    fn delete_signature_key_pair<
        SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
    >(
        &self,
        public_key: &SignaturePublicKey,
    ) -> Result<(), Self::Error> {
        self.storage.delete_signature_key_pair(public_key)?;
        self.flush()
    }

    fn delete_encryption_key_pair<EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>>(
        &self,
        public_key: &EncryptionKey,
    ) -> Result<(), Self::Error> {
        self.storage.delete_encryption_key_pair(public_key)?;
        self.flush()
    }

    fn delete_encryption_epoch_key_pairs<
        GroupId: traits::GroupId<CURRENT_VERSION>,
        EpochKey: traits::EpochKey<CURRENT_VERSION>,
    >(
        &self,
        group_id: &GroupId,
        epoch: &EpochKey,
        leaf_index: u32,
    ) -> Result<(), Self::Error> {
        self.storage.delete_encryption_epoch_key_pairs(group_id, epoch, leaf_index)?;
        self.flush()
    }

    fn delete_key_package<KeyPackageRef: traits::HashReference<CURRENT_VERSION>>(
        &self,
        hash_ref: &KeyPackageRef,
    ) -> Result<(), Self::Error> {
        self.storage.delete_key_package(hash_ref)?;
        self.flush()
    }

    fn delete_psk<PskKey: traits::PskId<CURRENT_VERSION>>(
        &self,
        psk_id: &PskKey,
    ) -> Result<(), Self::Error> {
        self.storage.delete_psk(psk_id)?;
        self.flush()
    }
}
|
||||
99
crates/quicprochat-core/src/lib.rs
Normal file
99
crates/quicprochat-core/src/lib.rs
Normal file
@@ -0,0 +1,99 @@
|
||||
//! Core cryptographic primitives, MLS group state machine, and hybrid
|
||||
//! post-quantum KEM for quicprochat.
|
||||
//!
|
||||
//! # WASM support
|
||||
//!
|
||||
//! When compiled with `--no-default-features` (disabling the `native` feature),
|
||||
//! the following modules are available for `wasm32-unknown-unknown`:
|
||||
//!
|
||||
//! - `identity` — Ed25519 identity keypair (generate, sign, verify)
|
||||
//! - `hybrid_kem` — X25519 + ML-KEM-768 hybrid key encapsulation
|
||||
//! - `safety_numbers` — Signal-style safety number computation
|
||||
//! - `sealed_sender` — sender identity + Ed25519 signature envelope
|
||||
//! - `app_message` — rich application message serialisation/parsing
|
||||
//! - `padding` — message padding to hide plaintext lengths
|
||||
//! - `transcript` — encrypted tamper-evident message transcript
|
||||
//! - `error` — `CoreError` type
|
||||
//!
|
||||
//! The following modules require the `native` feature (MLS, OPAQUE, Cap'n Proto):
|
||||
//!
|
||||
//! - `group` — MLS group state machine (openmls)
|
||||
//! - `keypackage` — MLS KeyPackage generation
|
||||
//! - `hybrid_crypto` — hybrid HPKE provider for OpenMLS
|
||||
//! - `keystore` — OpenMLS key store with optional disk persistence
|
||||
//! - `opaque_auth` — OPAQUE cipher suite configuration
|
||||
//!
|
||||
//! # Module layout
|
||||
//!
|
||||
//! | Module | Responsibility |
|
||||
//! |---------------|------------------------------------------------------------------|
|
||||
//! | `app_message` | Rich application payload (Chat, Reply, Reaction, ReadReceipt, Typing) |
|
||||
//! | `error` | [`CoreError`] type |
|
||||
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
|
||||
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
|
||||
//! | `group` | [`GroupMember`] — MLS group lifecycle (create/join/send/recv) |
|
||||
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
|
||||
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
|
||||
|
||||
mod app_message;
|
||||
mod error;
|
||||
mod hybrid_kem;
|
||||
mod identity;
|
||||
pub mod padding;
|
||||
pub mod pq_noise;
|
||||
#[cfg(feature = "native")]
|
||||
pub mod recovery;
|
||||
pub mod safety_numbers;
|
||||
pub mod sealed_sender;
|
||||
pub mod transcript;
|
||||
|
||||
// ── Native-only modules (MLS, OPAQUE, filesystem) ───────────────────────────
|
||||
#[cfg(feature = "native")]
|
||||
mod group;
|
||||
#[cfg(feature = "native")]
|
||||
mod hybrid_crypto;
|
||||
#[cfg(feature = "native")]
|
||||
mod keypackage;
|
||||
#[cfg(feature = "native")]
|
||||
mod keystore;
|
||||
#[cfg(feature = "native")]
|
||||
pub mod opaque_auth;
|
||||
|
||||
// ── Public API (always available) ───────────────────────────────────────────
|
||||
|
||||
pub use app_message::{
|
||||
serialize, serialize_chat, serialize_delete, serialize_dummy, serialize_edit,
|
||||
serialize_file_ref, serialize_reaction, serialize_read_receipt, serialize_reply,
|
||||
serialize_typing, parse, generate_message_id,
|
||||
AppMessage, MessageType, VERSION as APP_MESSAGE_VERSION,
|
||||
};
|
||||
pub use error::CoreError;
|
||||
pub use hybrid_kem::{
|
||||
hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
|
||||
HybridPublicKey,
|
||||
};
|
||||
pub use identity::{verify_delivery_proof, IdentityKeypair};
|
||||
#[cfg(feature = "native")]
|
||||
pub use recovery::{
|
||||
constant_time_eq, generate_recovery_codes, recover_from_bundle, recovery_token_hash,
|
||||
RecoveryBundle, RecoveryPayload, RecoverySetup, MAX_BUNDLE_SIZE, RECOVERY_CODE_COUNT,
|
||||
};
|
||||
pub use safety_numbers::compute_safety_number;
|
||||
pub use transcript::{
|
||||
read_transcript, validate_transcript_structure, ChainVerdict, DecodedRecord, TranscriptRecord,
|
||||
TranscriptWriter,
|
||||
};
|
||||
// Deprecated re-export for backward compatibility.
|
||||
#[allow(deprecated)]
|
||||
pub use transcript::verify_transcript_chain;
|
||||
|
||||
// ── Public API (native only) ────────────────────────────────────────────────
|
||||
|
||||
#[cfg(feature = "native")]
|
||||
pub use group::{GroupMember, ReceivedMessage, ReceivedMessageWithSender};
|
||||
#[cfg(feature = "native")]
|
||||
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
|
||||
#[cfg(feature = "native")]
|
||||
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
|
||||
#[cfg(feature = "native")]
|
||||
pub use keystore::DiskKeyStore;
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
use opaque_ke::CipherSuite;
|
||||
|
||||
/// OPAQUE cipher suite for quicnprotochat.
|
||||
/// OPAQUE cipher suite for quicprochat.
|
||||
///
|
||||
/// - **OPRF**: Ristretto255 (curve25519-based, ~128-bit security)
|
||||
/// - **Key exchange**: Triple-DH (3DH) over Ristretto255 with SHA-512
|
||||
@@ -14,9 +14,7 @@ pub struct OpaqueSuite;
|
||||
|
||||
impl CipherSuite for OpaqueSuite {
|
||||
type OprfCs = opaque_ke::Ristretto255;
|
||||
type KeyExchange = opaque_ke::key_exchange::tripledh::TripleDh<
|
||||
opaque_ke::Ristretto255,
|
||||
sha2::Sha512,
|
||||
>;
|
||||
type KeyExchange =
|
||||
opaque_ke::key_exchange::tripledh::TripleDh<opaque_ke::Ristretto255, sha2::Sha512>;
|
||||
type Ksf = argon2::Argon2<'static>;
|
||||
}
|
||||
265
crates/quicprochat-core/src/padding.rs
Normal file
265
crates/quicprochat-core/src/padding.rs
Normal file
@@ -0,0 +1,265 @@
|
||||
//! Message padding to hide plaintext lengths from the server.
|
||||
//!
|
||||
//! Pads payloads to fixed bucket sizes before MLS encryption so that the
|
||||
//! ciphertext does not reveal the actual message length.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! ```text
|
||||
//! [real_length: 4 bytes LE (u32)][payload: real_length bytes][random padding]
|
||||
//! ```
|
||||
//!
|
||||
//! The total padded output is always one of the bucket sizes: 256, 1024, 4096, 16384 bytes.
|
||||
//! For payloads larger than 16380 bytes, rounds up to the nearest 16384-byte multiple.
|
||||
//!
|
||||
//! ## Uniform boundary padding (traffic analysis resistance)
|
||||
//!
|
||||
//! [`pad_uniform`] / [`unpad_uniform`] pad to a configurable byte boundary
|
||||
//! (default 256) instead of exponential buckets. This produces more uniform
|
||||
//! ciphertext sizes at the cost of slightly more padding overhead.
|
||||
|
||||
use rand::RngCore;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
/// Default uniform padding boundary in bytes.
|
||||
pub const DEFAULT_PADDING_BOUNDARY: usize = 256;
|
||||
|
||||
/// Bucket sizes in bytes. The smallest (256) accommodates a sealed sender
/// envelope (99 bytes overhead) plus a short message.
const BUCKETS: &[usize] = &[256, 1024, 4096, 16384];

/// Select the smallest bucket that fits `content_len + 4` (the 4-byte length prefix).
///
/// Payloads too large for the biggest bucket are rounded up to the nearest
/// 16384-byte multiple instead.
fn bucket_for(content_len: usize) -> usize {
    let needed = content_len + 4;
    match BUCKETS.iter().copied().find(|&bucket| needed <= bucket) {
        Some(bucket) => bucket,
        // Oversized: round up to the next multiple of the largest bucket.
        None => needed.div_ceil(16384) * 16384,
    }
}
|
||||
|
||||
/// Pad a payload to the next bucket boundary with cryptographic random bytes.
|
||||
pub fn pad(payload: &[u8]) -> Vec<u8> {
|
||||
let bucket = bucket_for(payload.len());
|
||||
let mut out = Vec::with_capacity(bucket);
|
||||
out.extend_from_slice(&(payload.len() as u32).to_le_bytes());
|
||||
out.extend_from_slice(payload);
|
||||
let pad_len = bucket - 4 - payload.len();
|
||||
if pad_len > 0 {
|
||||
let mut padding = vec![0u8; pad_len];
|
||||
rand::rngs::OsRng.fill_bytes(&mut padding);
|
||||
out.extend_from_slice(&padding);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Remove padding and return the original payload.
|
||||
pub fn unpad(padded: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if padded.len() < 4 {
|
||||
return Err(CoreError::AppMessage("padded message too short".into()));
|
||||
}
|
||||
let real_len = u32::from_le_bytes([padded[0], padded[1], padded[2], padded[3]]) as usize;
|
||||
if 4 + real_len > padded.len() {
|
||||
return Err(CoreError::AppMessage(
|
||||
"padded real_length exceeds buffer".into(),
|
||||
));
|
||||
}
|
||||
Ok(padded[4..4 + real_len].to_vec())
|
||||
}
|
||||
|
||||
/// Pad a payload to the nearest multiple of `boundary` bytes.
|
||||
///
|
||||
/// Uses the same wire format as [`pad`]: `[real_length: 4 bytes LE][payload][random padding]`.
|
||||
/// The total output length is always a multiple of `boundary`. A `boundary` of 0 is
|
||||
/// treated as [`DEFAULT_PADDING_BOUNDARY`].
|
||||
pub fn pad_uniform(payload: &[u8], boundary: usize) -> Vec<u8> {
|
||||
let boundary = if boundary == 0 { DEFAULT_PADDING_BOUNDARY } else { boundary };
|
||||
let total = payload.len() + 4; // 4-byte length prefix
|
||||
let padded_len = total.div_ceil(boundary) * boundary;
|
||||
|
||||
let mut out = Vec::with_capacity(padded_len);
|
||||
out.extend_from_slice(&(payload.len() as u32).to_le_bytes());
|
||||
out.extend_from_slice(payload);
|
||||
let pad_len = padded_len - total;
|
||||
if pad_len > 0 {
|
||||
let mut padding = vec![0u8; pad_len];
|
||||
rand::rngs::OsRng.fill_bytes(&mut padding);
|
||||
out.extend_from_slice(&padding);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Remove uniform padding. Wire format is identical to [`unpad`].
///
/// Thin alias for [`unpad`]: both padding schemes share the
/// `[len: u32 LE][payload][padding]` layout, so one parser serves both.
pub fn unpad_uniform(padded: &[u8]) -> Result<Vec<u8>, CoreError> {
    unpad(padded)
}
|
||||
|
||||
/// Generate a decoy payload that looks identical to a real padded message.
|
||||
///
|
||||
/// Returns random bytes of length equal to a `boundary`-aligned padded message.
|
||||
/// The 4-byte length prefix is set to 0, so [`unpad_uniform`] returns an empty payload.
|
||||
pub fn generate_decoy(boundary: usize) -> Vec<u8> {
|
||||
let boundary = if boundary == 0 { DEFAULT_PADDING_BOUNDARY } else { boundary };
|
||||
let mut out = vec![0u8; boundary];
|
||||
// Length prefix = 0 (decoy carries no real payload).
|
||||
// Fill the rest with random bytes so it is indistinguishable from padding.
|
||||
rand::rngs::OsRng.fill_bytes(&mut out[4..]);
|
||||
out
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn round_trip_small() {
|
||||
let msg = b"hello";
|
||||
let padded = pad(msg);
|
||||
assert_eq!(padded.len(), 256); // smallest bucket
|
||||
let unpadded = unpad(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn round_trip_medium() {
|
||||
let msg = vec![0xAB; 300];
|
||||
let padded = pad(&msg);
|
||||
assert_eq!(padded.len(), 1024); // second bucket
|
||||
let unpadded = unpad(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn round_trip_large() {
|
||||
let msg = vec![0xCD; 2000];
|
||||
let padded = pad(&msg);
|
||||
assert_eq!(padded.len(), 4096); // third bucket
|
||||
let unpadded = unpad(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn round_trip_very_large() {
|
||||
let msg = vec![0xEF; 10000];
|
||||
let padded = pad(&msg);
|
||||
assert_eq!(padded.len(), 16384); // largest bucket
|
||||
let unpadded = unpad(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn round_trip_oversized() {
|
||||
let msg = vec![0xFF; 20000];
|
||||
let padded = pad(&msg);
|
||||
assert_eq!(padded.len(), 32768); // 2 * 16384
|
||||
let unpadded = unpad(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn round_trip_empty() {
|
||||
let msg = b"";
|
||||
let padded = pad(msg);
|
||||
assert_eq!(padded.len(), 256); // smallest bucket
|
||||
let unpadded = unpad(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exactly_at_bucket_boundary() {
|
||||
// 252 + 4 = 256 → fits in 256 bucket exactly
|
||||
let msg = vec![0x42; 252];
|
||||
let padded = pad(&msg);
|
||||
assert_eq!(padded.len(), 256);
|
||||
let unpadded = unpad(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unpad_too_short_fails() {
|
||||
assert!(unpad(&[0, 0]).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unpad_invalid_length_fails() {
|
||||
// Claims 1000 bytes but only has 10
|
||||
let mut bad = (1000u32).to_le_bytes().to_vec();
|
||||
bad.extend_from_slice(&[0u8; 10]);
|
||||
assert!(unpad(&bad).is_err());
|
||||
}
|
||||
|
||||
// ── Uniform padding tests ──────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn uniform_round_trip_default_boundary() {
|
||||
let msg = b"uniform padding test";
|
||||
let padded = pad_uniform(msg, DEFAULT_PADDING_BOUNDARY);
|
||||
assert_eq!(padded.len() % DEFAULT_PADDING_BOUNDARY, 0);
|
||||
assert_eq!(padded.len(), 256); // 20 + 4 = 24, rounds up to 256
|
||||
let unpadded = unpad_uniform(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn uniform_custom_boundary() {
|
||||
let msg = vec![0xAA; 100];
|
||||
let padded = pad_uniform(&msg, 128);
|
||||
assert_eq!(padded.len() % 128, 0);
|
||||
assert_eq!(padded.len(), 128); // 100 + 4 = 104, rounds up to 128
|
||||
let unpadded = unpad_uniform(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn uniform_exact_boundary() {
|
||||
// 252 + 4 = 256, exactly on boundary
|
||||
let msg = vec![0xBB; 252];
|
||||
let padded = pad_uniform(&msg, 256);
|
||||
assert_eq!(padded.len(), 256);
|
||||
let unpadded = unpad_uniform(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn uniform_one_over_boundary() {
|
||||
// 253 + 4 = 257, rounds up to 512
|
||||
let msg = vec![0xCC; 253];
|
||||
let padded = pad_uniform(&msg, 256);
|
||||
assert_eq!(padded.len(), 512);
|
||||
let unpadded = unpad_uniform(&padded).unwrap();
|
||||
assert_eq!(unpadded, msg);
|
||||
}
|
||||
|
||||
#[test]
fn uniform_zero_boundary_uses_default() {
    // A zero boundary must fall back to DEFAULT_PADDING_BOUNDARY.
    let message = b"zero boundary";
    let wire = pad_uniform(message, 0);
    assert_eq!(wire.len() % DEFAULT_PADDING_BOUNDARY, 0);
    assert_eq!(unpad_uniform(&wire).unwrap(), message);
}
|
||||
|
||||
#[test]
fn decoy_is_boundary_aligned() {
    // A decoy generated for a 256-byte boundary is exactly one block long.
    let decoy = generate_decoy(256);
    assert_eq!(decoy.len(), 256);
    assert_eq!(decoy.len() % 256, 0);
}
|
||||
|
||||
#[test]
fn decoy_unpads_to_empty() {
    // Decoys carry no payload: unpadding must yield an empty message.
    let decoy = generate_decoy(256);
    let payload = unpad_uniform(&decoy).unwrap();
    assert!(payload.is_empty());
}
|
||||
|
||||
#[test]
fn decoy_default_boundary() {
    // Boundary 0 falls back to the default padding boundary.
    assert_eq!(generate_decoy(0).len(), DEFAULT_PADDING_BOUNDARY);
}
|
||||
}
|
||||
689
crates/quicprochat-core/src/pq_noise.rs
Normal file
689
crates/quicprochat-core/src/pq_noise.rs
Normal file
@@ -0,0 +1,689 @@
|
||||
//! Hybrid Noise_XX + ML-KEM-768 handshake for post-quantum transport security.
|
||||
//!
|
||||
//! Implements a three-message Noise_XX pattern with an embedded ML-KEM-768
|
||||
//! encapsulation to produce a hybrid shared secret that is secure against
|
||||
//! both classical and quantum adversaries.
|
||||
//!
|
||||
//! # Handshake pattern
|
||||
//!
|
||||
//! ```text
|
||||
//! XX(s, rs):
|
||||
//! -> e (initiator ephemeral)
|
||||
//! <- e, ee, s, es, mlkem_ct (responder ephemeral + static + ML-KEM ciphertext)
|
||||
//! -> s, se (initiator static)
|
||||
//! ```
|
||||
//!
|
||||
//! After message 2, the ML-KEM shared secret is mixed into the chaining key
|
||||
//! via HKDF. The final transport keys incorporate both the X25519 DH chain
|
||||
//! and the ML-KEM shared secret.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! Each handshake message is a simple length-prefixed blob:
|
||||
//! ```text
|
||||
//! [msg_len: u32 BE][handshake message bytes]
|
||||
//! ```
|
||||
//!
|
||||
//! # Feature gate
|
||||
//!
|
||||
//! This module is always compiled but the `pq-noise` feature enables it
|
||||
//! in the RPC layer for server/client negotiation.
|
||||
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit, Payload},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use hkdf::Hkdf;
|
||||
use ml_kem::{
|
||||
array::Array,
|
||||
kem::{Decapsulate, Encapsulate},
|
||||
EncodedSizeUser, KemCore, MlKem768, MlKem768Params,
|
||||
};
|
||||
use ml_kem::kem::{DecapsulationKey, EncapsulationKey};
|
||||
use rand::rngs::OsRng;
|
||||
use sha2::Sha256;
|
||||
use x25519_dalek::{PublicKey as X25519Public, StaticSecret};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
/// Domain separation label for the hybrid Noise handshake.
///
/// Hashed into the initial transcript hash / chaining key (see
/// `HandshakeState::new`), so transcripts from other protocols can never
/// collide with ours.
const PROTOCOL_NAME: &[u8] = b"quicprochat-pq-noise-v1";

/// ML-KEM-768 encapsulation key length.
const MLKEM_EK_LEN: usize = 1184;

/// ML-KEM-768 ciphertext length.
const MLKEM_CT_LEN: usize = 1088;

/// AEAD tag length (ChaCha20-Poly1305).
const TAG_LEN: usize = 16;
|
||||
|
||||
// ── Keypair ──────────────────────────────────────────────────────────────────
|
||||
|
||||
/// A static keypair for the hybrid Noise handshake.
///
/// Contains both an X25519 static key and an ML-KEM-768 key pair.
pub struct NoiseKeypair {
    /// X25519 static secret, used for the `es`/`se` DH operations.
    x25519_sk: StaticSecret,
    /// X25519 static public key corresponding to `x25519_sk`.
    x25519_pk: X25519Public,
    /// ML-KEM-768 decapsulation (private) key.
    mlkem_dk: DecapsulationKey<MlKem768Params>,
    /// ML-KEM-768 encapsulation (public) key.
    mlkem_ek: EncapsulationKey<MlKem768Params>,
}
|
||||
|
||||
impl NoiseKeypair {
    /// Generate a fresh keypair from OS CSPRNG.
    pub fn generate() -> Self {
        let x25519_sk = StaticSecret::random_from_rng(OsRng);
        let x25519_pk = X25519Public::from(&x25519_sk);
        let (mlkem_dk, mlkem_ek) = MlKem768::generate(&mut OsRng);
        Self {
            x25519_sk,
            x25519_pk,
            mlkem_dk,
            mlkem_ek,
        }
    }

    /// Return the X25519 public key bytes.
    pub fn x25519_public(&self) -> [u8; 32] {
        self.x25519_pk.to_bytes()
    }

    /// Return the ML-KEM-768 encapsulation key bytes.
    ///
    /// This is the key a peer encapsulates to in handshake message 2; it is
    /// shared out-of-band (or in a pre-message) before the handshake starts.
    pub fn mlkem_public(&self) -> Vec<u8> {
        self.mlkem_ek.as_bytes().to_vec()
    }
}
|
||||
|
||||
// ── Chaining key state ───────────────────────────────────────────────────────
|
||||
|
||||
/// Internal handshake state tracking the Noise chaining key and handshake hash.
///
/// A simplified Noise SymmetricState: `ck`/`h` evolve via MixKey/MixHash and
/// `k`/`n` provide AEAD encryption during the handshake itself.
struct HandshakeState {
    /// Chaining key — evolved by each MixKey operation.
    ck: Zeroizing<[u8; 32]>,
    /// Handshake hash — commits to all handshake transcript data.
    h: [u8; 32],
    /// Current encryption key (derived from ck after MixKey).
    /// `None` until the first MixKey has run.
    k: Option<Zeroizing<[u8; 32]>>,
    /// Nonce counter for in-handshake encryption; reset to 0 on each MixKey.
    n: u64,
}
|
||||
|
||||
impl HandshakeState {
|
||||
fn new() -> Self {
|
||||
// Initialize h = SHA-256(protocol_name), ck = h.
|
||||
use sha2::{Digest, Sha256};
|
||||
let h: [u8; 32] = Sha256::digest(PROTOCOL_NAME).into();
|
||||
Self {
|
||||
ck: Zeroizing::new(h),
|
||||
h,
|
||||
k: None,
|
||||
n: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// MixHash: h = SHA-256(h || data)
|
||||
fn mix_hash(&mut self, data: &[u8]) {
|
||||
use sha2::{Digest, Sha256};
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(self.h);
|
||||
hasher.update(data);
|
||||
self.h = hasher.finalize().into();
|
||||
}
|
||||
|
||||
/// MixKey: (ck, k) = HKDF(ck, input_key_material)
|
||||
fn mix_key(&mut self, ikm: &[u8]) {
|
||||
let hk = Hkdf::<Sha256>::new(Some(&*self.ck), ikm);
|
||||
let mut ck = Zeroizing::new([0u8; 32]);
|
||||
let mut k = Zeroizing::new([0u8; 32]);
|
||||
hk.expand(b"ck", &mut *ck)
|
||||
.expect("32 bytes is valid HKDF output");
|
||||
hk.expand(b"k", &mut *k)
|
||||
.expect("32 bytes is valid HKDF output");
|
||||
self.ck = ck;
|
||||
self.k = Some(k);
|
||||
self.n = 0;
|
||||
}
|
||||
|
||||
/// Encrypt plaintext with the current key and nonce, using h as AAD.
|
||||
fn encrypt_and_hash(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
let key = self
|
||||
.k
|
||||
.as_ref()
|
||||
.ok_or_else(|| CoreError::Mls("pq_noise: no encryption key set".into()))?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&**key));
|
||||
let nonce = nonce_from_counter(self.n);
|
||||
let ct = cipher
|
||||
.encrypt(
|
||||
Nonce::from_slice(&nonce),
|
||||
Payload {
|
||||
msg: plaintext,
|
||||
aad: &self.h,
|
||||
},
|
||||
)
|
||||
.map_err(|_| CoreError::Mls("pq_noise: encrypt failed".into()))?;
|
||||
self.mix_hash(&ct);
|
||||
self.n += 1;
|
||||
Ok(ct)
|
||||
}
|
||||
|
||||
/// Decrypt ciphertext with the current key and nonce, using h as AAD.
|
||||
fn decrypt_and_hash(&mut self, ciphertext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
let key = self
|
||||
.k
|
||||
.as_ref()
|
||||
.ok_or_else(|| CoreError::Mls("pq_noise: no decryption key set".into()))?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&**key));
|
||||
let nonce = nonce_from_counter(self.n);
|
||||
let ct_for_hash = ciphertext.to_vec();
|
||||
let pt = cipher
|
||||
.decrypt(
|
||||
Nonce::from_slice(&nonce),
|
||||
Payload {
|
||||
msg: ciphertext,
|
||||
aad: &self.h,
|
||||
},
|
||||
)
|
||||
.map_err(|_| CoreError::Mls("pq_noise: decrypt failed".into()))?;
|
||||
self.mix_hash(&ct_for_hash);
|
||||
self.n += 1;
|
||||
Ok(pt)
|
||||
}
|
||||
|
||||
/// Split the handshake state into two transport keys (initiator->responder, responder->initiator).
|
||||
fn split(&self) -> (TransportKey, TransportKey) {
|
||||
let hk = Hkdf::<Sha256>::new(Some(&*self.ck), &[]);
|
||||
let mut k1 = Zeroizing::new([0u8; 32]);
|
||||
let mut k2 = Zeroizing::new([0u8; 32]);
|
||||
hk.expand(b"initiator", &mut *k1)
|
||||
.expect("32 bytes is valid HKDF output");
|
||||
hk.expand(b"responder", &mut *k2)
|
||||
.expect("32 bytes is valid HKDF output");
|
||||
(
|
||||
TransportKey { key: k1, nonce: 0 },
|
||||
TransportKey { key: k2, nonce: 0 },
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a 12-byte ChaCha20-Poly1305 nonce from a 64-bit counter: four zero
/// bytes followed by the counter in little-endian order.
fn nonce_from_counter(n: u64) -> [u8; 12] {
    let mut out = [0u8; 12];
    out[4..12].copy_from_slice(&n.to_le_bytes());
    out
}
|
||||
|
||||
// ── Transport ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// A transport encryption key with a nonce counter.
///
/// One instance per direction; the counter makes every AEAD nonce unique as
/// long as messages are encrypted and decrypted strictly in order.
pub struct TransportKey {
    /// 32-byte ChaCha20-Poly1305 key (zeroized on drop).
    key: Zeroizing<[u8; 32]>,
    /// Monotonic message counter; becomes the low 8 bytes of the nonce.
    nonce: u64,
}
|
||||
|
||||
impl TransportKey {
|
||||
/// Encrypt a message for transport.
|
||||
pub fn encrypt(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*self.key));
|
||||
let nonce = nonce_from_counter(self.nonce);
|
||||
let ct = cipher
|
||||
.encrypt(Nonce::from_slice(&nonce), plaintext)
|
||||
.map_err(|_| CoreError::Mls("pq_noise transport: encrypt failed".into()))?;
|
||||
self.nonce += 1;
|
||||
Ok(ct)
|
||||
}
|
||||
|
||||
/// Decrypt a transport message.
|
||||
pub fn decrypt(&mut self, ciphertext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*self.key));
|
||||
let nonce = nonce_from_counter(self.nonce);
|
||||
let pt = cipher
|
||||
.decrypt(Nonce::from_slice(&nonce), ciphertext)
|
||||
.map_err(|_| CoreError::Mls("pq_noise transport: decrypt failed".into()))?;
|
||||
self.nonce += 1;
|
||||
Ok(pt)
|
||||
}
|
||||
}
|
||||
|
||||
// ── Initiator ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Initiator side of the hybrid Noise_XX handshake.
pub struct Initiator {
    /// Symmetric transcript/chaining-key state, evolved in lockstep with the
    /// responder.
    state: HandshakeState,
    /// Ephemeral secret stored as StaticSecret so DH doesn't consume it.
    /// Generated from OsRng; we use StaticSecret purely for the non-consuming
    /// `diffie_hellman(&self, ...)` API — the key is still ephemeral.
    e_sk: StaticSecret,
    /// Public half of the ephemeral key, sent in message 1.
    e_pk: X25519Public,
    /// Our static keypair (X25519 + ML-KEM-768).
    s: NoiseKeypair,
    /// Stored after reading message 2 so we can compute se = DH(s, re) in msg3.
    re_pk: Option<X25519Public>,
}
|
||||
|
||||
impl Initiator {
    /// Create a new initiator with the given static keypair.
    ///
    /// A fresh X25519 ephemeral key is generated immediately from the OS CSPRNG.
    pub fn new(static_keypair: NoiseKeypair) -> Self {
        let e_sk = StaticSecret::random_from_rng(OsRng);
        let e_pk = X25519Public::from(&e_sk);
        Self {
            state: HandshakeState::new(),
            e_sk,
            e_pk,
            s: static_keypair,
            re_pk: None,
        }
    }

    /// Write message 1: `-> e`
    ///
    /// Returns the initiator's ephemeral X25519 public key (32 bytes).
    /// The key is also mixed into the transcript hash.
    pub fn write_message_1(&mut self) -> Vec<u8> {
        let e_pk_bytes = self.e_pk.to_bytes();
        self.state.mix_hash(&e_pk_bytes);
        e_pk_bytes.to_vec()
    }

    /// Read message 2 from responder: `<- e, ee, s, es, mlkem_ct`
    ///
    /// Expects: `re_pk(32) || encrypted_rs_pk(32+TAG) || mlkem_ct(1088)`
    ///
    /// Returns the responder's static X25519 public key.
    ///
    /// # Errors
    /// Fails on wrong message length, AEAD authentication failure, or an
    /// invalid ML-KEM ciphertext.
    pub fn read_message_2(&mut self, msg: &[u8]) -> Result<[u8; 32], CoreError> {
        let expected_len = 32 + 32 + TAG_LEN + MLKEM_CT_LEN;
        if msg.len() != expected_len {
            return Err(CoreError::Mls(format!(
                "pq_noise msg2: expected {expected_len} bytes, got {}",
                msg.len()
            )));
        }

        let mut cursor = 0;

        // re = responder ephemeral public key
        let mut re_pk_bytes = [0u8; 32];
        re_pk_bytes.copy_from_slice(&msg[cursor..cursor + 32]);
        cursor += 32;
        let re_pk = X25519Public::from(re_pk_bytes);
        self.state.mix_hash(&re_pk_bytes);
        // Remember re so write_message_3 can compute se = DH(s, re).
        self.re_pk = Some(re_pk);

        // ee = DH(e, re)
        let ee_ss = self.e_sk.diffie_hellman(&re_pk);
        self.state.mix_key(ee_ss.as_bytes());

        // Decrypt responder's static key: s = Dec(encrypted_rs_pk)
        let encrypted_rs = &msg[cursor..cursor + 32 + TAG_LEN];
        cursor += 32 + TAG_LEN;
        let rs_pk_bytes = self.state.decrypt_and_hash(encrypted_rs)?;
        let mut rs_pk_arr = [0u8; 32];
        if rs_pk_bytes.len() != 32 {
            return Err(CoreError::Mls("pq_noise: decrypted rs not 32 bytes".into()));
        }
        rs_pk_arr.copy_from_slice(&rs_pk_bytes);
        let rs_pk = X25519Public::from(rs_pk_arr);

        // es = DH(e, rs)
        let es_ss = self.e_sk.diffie_hellman(&rs_pk);
        self.state.mix_key(es_ss.as_bytes());

        // ML-KEM: decapsulate the ciphertext from the responder using our own
        // decapsulation key (the responder encapsulated to our public EK).
        let mlkem_ct = &msg[cursor..cursor + MLKEM_CT_LEN];
        let mlkem_ct_arr = Array::try_from(mlkem_ct)
            .map_err(|_| CoreError::Mls("pq_noise: invalid ML-KEM ciphertext".into()))?;
        let mlkem_ss: ml_kem::SharedKey<MlKem768> = self
            .s
            .mlkem_dk
            .decapsulate(&mlkem_ct_arr)
            .map_err(|_| CoreError::Mls("pq_noise: ML-KEM decapsulation failed".into()))?;
        self.state.mix_key(&mlkem_ss);

        Ok(rs_pk_arr)
    }

    /// Write message 3: `-> s, se`
    ///
    /// Returns the encrypted initiator static key.
    ///
    /// # Errors
    /// Fails if called before `read_message_2`, or if encryption fails.
    pub fn write_message_3(&mut self) -> Result<Vec<u8>, CoreError> {
        let re_pk = self
            .re_pk
            .ok_or_else(|| CoreError::Mls("pq_noise: must read msg2 before writing msg3".into()))?;

        // Encrypt our static key
        let s_pk_bytes = self.s.x25519_pk.to_bytes();
        let encrypted_s = self.state.encrypt_and_hash(&s_pk_bytes)?;

        // se = DH(s, re)
        let se_ss = self.s.x25519_sk.diffie_hellman(&re_pk);
        self.state.mix_key(se_ss.as_bytes());

        Ok(encrypted_s)
    }

    /// Finalize the handshake and return transport keys.
    ///
    /// Returns `(send_key, recv_key)` — initiator sends with send_key.
    pub fn finalize(self) -> (TransportKey, TransportKey) {
        self.state.split()
    }
}
|
||||
|
||||
// ── Responder ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Responder side of the hybrid Noise_XX handshake.
pub struct Responder {
    /// Symmetric transcript/chaining-key state, evolved in lockstep with the
    /// initiator.
    state: HandshakeState,
    /// Ephemeral secret stored as StaticSecret so DH doesn't consume it.
    e_sk: StaticSecret,
    /// Public half of the ephemeral key, sent in message 2.
    e_pk: X25519Public,
    /// Our static keypair (X25519 + ML-KEM-768).
    s: NoiseKeypair,
}
|
||||
|
||||
impl Responder {
|
||||
/// Create a new responder with the given static keypair.
|
||||
pub fn new(static_keypair: NoiseKeypair) -> Self {
|
||||
let e_sk = StaticSecret::random_from_rng(OsRng);
|
||||
let e_pk = X25519Public::from(&e_sk);
|
||||
Self {
|
||||
state: HandshakeState::new(),
|
||||
e_sk,
|
||||
e_pk,
|
||||
s: static_keypair,
|
||||
}
|
||||
}
|
||||
|
||||
/// Read message 1 from initiator: `-> e`
|
||||
///
|
||||
/// Expects the initiator's ephemeral X25519 public key (32 bytes).
|
||||
pub fn read_message_1(&mut self, msg: &[u8]) -> Result<(), CoreError> {
|
||||
if msg.len() != 32 {
|
||||
return Err(CoreError::Mls(format!(
|
||||
"pq_noise msg1: expected 32 bytes, got {}",
|
||||
msg.len()
|
||||
)));
|
||||
}
|
||||
self.state.mix_hash(msg);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write message 2: `<- e, ee, s, es, mlkem_ct`
|
||||
///
|
||||
/// `initiator_ek` is the initiator's ML-KEM encapsulation key.
|
||||
///
|
||||
/// Returns the message bytes.
|
||||
pub fn write_message_2(
|
||||
&mut self,
|
||||
initiator_e_pk: &[u8; 32],
|
||||
initiator_mlkem_ek: &[u8],
|
||||
) -> Result<Vec<u8>, CoreError> {
|
||||
let ie_pk = X25519Public::from(*initiator_e_pk);
|
||||
|
||||
// Our ephemeral key
|
||||
let e_pk_bytes = self.e_pk.to_bytes();
|
||||
self.state.mix_hash(&e_pk_bytes);
|
||||
|
||||
// ee = DH(e, ie)
|
||||
let ee_ss = self.e_sk.diffie_hellman(&ie_pk);
|
||||
self.state.mix_key(ee_ss.as_bytes());
|
||||
|
||||
// Encrypt our static key
|
||||
let s_pk_bytes = self.s.x25519_pk.to_bytes();
|
||||
let encrypted_s = self.state.encrypt_and_hash(&s_pk_bytes)?;
|
||||
|
||||
// es = DH(s, ie)
|
||||
let es_ss = self.s.x25519_sk.diffie_hellman(&ie_pk);
|
||||
self.state.mix_key(es_ss.as_bytes());
|
||||
|
||||
// ML-KEM: encapsulate to the initiator's encapsulation key
|
||||
if initiator_mlkem_ek.len() != MLKEM_EK_LEN {
|
||||
return Err(CoreError::Mls(format!(
|
||||
"pq_noise: expected ML-KEM EK {} bytes, got {}",
|
||||
MLKEM_EK_LEN,
|
||||
initiator_mlkem_ek.len()
|
||||
)));
|
||||
}
|
||||
let ek_arr = Array::try_from(initiator_mlkem_ek)
|
||||
.map_err(|_| CoreError::Mls("pq_noise: invalid ML-KEM encapsulation key".into()))?;
|
||||
let ek = EncapsulationKey::<MlKem768Params>::from_bytes(&ek_arr);
|
||||
let (mlkem_ct, mlkem_ss): (ml_kem::Ciphertext<MlKem768>, ml_kem::SharedKey<MlKem768>) = ek
|
||||
.encapsulate(&mut OsRng)
|
||||
.map_err(|_| CoreError::Mls("pq_noise: ML-KEM encapsulation failed".into()))?;
|
||||
self.state.mix_key(&mlkem_ss);
|
||||
|
||||
// Assemble: e_pk || encrypted_s || mlkem_ct
|
||||
let mut out = Vec::with_capacity(32 + encrypted_s.len() + MLKEM_CT_LEN);
|
||||
out.extend_from_slice(&e_pk_bytes);
|
||||
out.extend_from_slice(&encrypted_s);
|
||||
out.extend_from_slice(&mlkem_ct);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Read message 3 from initiator: `-> s, se`
|
||||
///
|
||||
/// Returns the initiator's static X25519 public key.
|
||||
pub fn read_message_3(&mut self, msg: &[u8]) -> Result<[u8; 32], CoreError> {
|
||||
if msg.len() != 32 + TAG_LEN {
|
||||
return Err(CoreError::Mls(format!(
|
||||
"pq_noise msg3: expected {} bytes, got {}",
|
||||
32 + TAG_LEN,
|
||||
msg.len()
|
||||
)));
|
||||
}
|
||||
|
||||
// Decrypt initiator's static key
|
||||
let is_pk_bytes = self.state.decrypt_and_hash(msg)?;
|
||||
let mut is_pk_arr = [0u8; 32];
|
||||
if is_pk_bytes.len() != 32 {
|
||||
return Err(CoreError::Mls(
|
||||
"pq_noise: decrypted initiator static not 32 bytes".into(),
|
||||
));
|
||||
}
|
||||
is_pk_arr.copy_from_slice(&is_pk_bytes);
|
||||
let is_pk = X25519Public::from(is_pk_arr);
|
||||
|
||||
// se = DH(e, is) — responder computes using ephemeral key
|
||||
let se_ss = self.e_sk.diffie_hellman(&is_pk);
|
||||
self.state.mix_key(se_ss.as_bytes());
|
||||
|
||||
Ok(is_pk_arr)
|
||||
}
|
||||
|
||||
/// Finalize the handshake and return transport keys.
|
||||
///
|
||||
/// Returns `(recv_key, send_key)` — responder receives with recv_key.
|
||||
pub fn finalize(self) -> (TransportKey, TransportKey) {
|
||||
let (i2r, r2i) = self.state.split();
|
||||
(i2r, r2i)
|
||||
}
|
||||
}
|
||||
|
||||
// ── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Happy path: all three handshake messages round-trip and both transport
    /// directions encrypt/decrypt correctly.
    #[test]
    fn full_handshake_round_trip() {
        let initiator_kp = NoiseKeypair::generate();
        let responder_kp = NoiseKeypair::generate();

        // Initiator's ML-KEM public key is sent out-of-band (or in a pre-message).
        let initiator_mlkem_ek = initiator_kp.mlkem_public();

        let mut initiator = Initiator::new(initiator_kp);
        let mut responder = Responder::new(responder_kp);

        // Message 1: initiator -> responder
        let msg1 = initiator.write_message_1();
        assert_eq!(msg1.len(), 32);
        responder.read_message_1(&msg1).unwrap();

        // Message 2: responder -> initiator
        let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
        let msg2 = responder
            .write_message_2(&ie_pk, &initiator_mlkem_ek)
            .unwrap();
        let _responder_static = initiator.read_message_2(&msg2).unwrap();

        // Message 3: initiator -> responder
        let msg3 = initiator.write_message_3().unwrap();
        let _initiator_static = responder.read_message_3(&msg3).unwrap();

        // Derive transport keys
        let (mut i_send, mut i_recv) = initiator.finalize();
        let (mut r_recv, mut r_send) = responder.finalize();

        // Test transport: initiator -> responder
        let plaintext = b"hello post-quantum world!";
        let ct = i_send.encrypt(plaintext).unwrap();
        let pt = r_recv.decrypt(&ct).unwrap();
        assert_eq!(pt, plaintext);

        // Test transport: responder -> initiator
        let plaintext2 = b"reply from responder";
        let ct2 = r_send.encrypt(plaintext2).unwrap();
        let pt2 = i_recv.decrypt(&ct2).unwrap();
        assert_eq!(pt2, plaintext2);
    }

    /// Flipping a bit inside the AEAD-protected region of message 2 must make
    /// the initiator's decrypt fail.
    #[test]
    fn tampered_msg2_fails() {
        let initiator_kp = NoiseKeypair::generate();
        let responder_kp = NoiseKeypair::generate();
        let initiator_mlkem_ek = initiator_kp.mlkem_public();

        let mut initiator = Initiator::new(initiator_kp);
        let mut responder = Responder::new(responder_kp);

        let msg1 = initiator.write_message_1();
        responder.read_message_1(&msg1).unwrap();

        let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
        let mut msg2 = responder
            .write_message_2(&ie_pk, &initiator_mlkem_ek)
            .unwrap();

        // Tamper with the encrypted static key region
        // (bytes 32..32+32+TAG hold the AEAD-protected responder static key;
        // offset 40 is safely inside it).
        msg2[40] ^= 0xFF;

        let result = initiator.read_message_2(&msg2);
        assert!(result.is_err());
    }

    /// Encapsulating to the wrong ML-KEM key must abort the handshake, at the
    /// latest when the responder processes message 3.
    #[test]
    fn wrong_mlkem_key_fails() {
        let initiator_kp = NoiseKeypair::generate();
        let responder_kp = NoiseKeypair::generate();

        // Use a different keypair's ML-KEM key — decapsulation will use
        // implicit rejection, producing a pseudorandom (wrong) shared secret.
        let wrong_kp = NoiseKeypair::generate();
        let wrong_mlkem_ek = wrong_kp.mlkem_public();

        let mut initiator = Initiator::new(initiator_kp);
        let mut responder = Responder::new(responder_kp);

        let msg1 = initiator.write_message_1();
        responder.read_message_1(&msg1).unwrap();

        let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
        let msg2 = responder
            .write_message_2(&ie_pk, &wrong_mlkem_ek)
            .unwrap();

        // ML-KEM implicit rejection: decap succeeds but returns wrong ss.
        // The ML-KEM mix_key happens after the AEAD decrypt of the static key,
        // so read_message_2 itself may succeed. But the chaining keys diverge,
        // causing msg3 AEAD decrypt to fail on the responder side.
        let read2 = initiator.read_message_2(&msg2);
        if read2.is_err() {
            // If msg2 processing itself failed, the test passes.
            return;
        }

        // msg2 succeeded — chaining keys now diverge due to wrong ML-KEM ss.
        // msg3 from initiator will use the wrong key, so responder can't decrypt.
        let msg3 = initiator.write_message_3().unwrap();
        let result = responder.read_message_3(&msg3);
        assert!(result.is_err(), "msg3 should fail due to ML-KEM shared secret mismatch");
    }

    /// The transport keys must stay in sync across many messages in both
    /// directions (nonce counters advance identically on both sides).
    #[test]
    fn multiple_transport_messages() {
        let initiator_kp = NoiseKeypair::generate();
        let responder_kp = NoiseKeypair::generate();
        let initiator_mlkem_ek = initiator_kp.mlkem_public();

        let mut initiator = Initiator::new(initiator_kp);
        let mut responder = Responder::new(responder_kp);

        let msg1 = initiator.write_message_1();
        responder.read_message_1(&msg1).unwrap();

        let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
        let msg2 = responder
            .write_message_2(&ie_pk, &initiator_mlkem_ek)
            .unwrap();
        initiator.read_message_2(&msg2).unwrap();

        let msg3 = initiator.write_message_3().unwrap();
        responder.read_message_3(&msg3).unwrap();

        let (mut i_send, mut i_recv) = initiator.finalize();
        let (mut r_recv, mut r_send) = responder.finalize();

        // Send multiple messages in each direction
        for i in 0..10u32 {
            let msg = format!("initiator message {i}");
            let ct = i_send.encrypt(msg.as_bytes()).unwrap();
            let pt = r_recv.decrypt(&ct).unwrap();
            assert_eq!(pt, msg.as_bytes());

            let reply = format!("responder reply {i}");
            let ct2 = r_send.encrypt(reply.as_bytes()).unwrap();
            let pt2 = i_recv.decrypt(&ct2).unwrap();
            assert_eq!(pt2, reply.as_bytes());
        }
    }

    /// Replaying an old ciphertext at a later nonce must fail to decrypt.
    #[test]
    fn nonce_reuse_detected() {
        let initiator_kp = NoiseKeypair::generate();
        let responder_kp = NoiseKeypair::generate();
        let initiator_mlkem_ek = initiator_kp.mlkem_public();

        let mut initiator = Initiator::new(initiator_kp);
        let mut responder = Responder::new(responder_kp);

        let msg1 = initiator.write_message_1();
        responder.read_message_1(&msg1).unwrap();

        let ie_pk: [u8; 32] = msg1.as_slice().try_into().unwrap();
        let msg2 = responder
            .write_message_2(&ie_pk, &initiator_mlkem_ek)
            .unwrap();
        initiator.read_message_2(&msg2).unwrap();

        let msg3 = initiator.write_message_3().unwrap();
        responder.read_message_3(&msg3).unwrap();

        let (mut i_send, _) = initiator.finalize();
        let (mut r_recv, _) = responder.finalize();

        // Encrypt two messages
        let ct1 = i_send.encrypt(b"msg1").unwrap();
        let _ct2 = i_send.encrypt(b"msg2").unwrap();

        // Decrypt in order works
        r_recv.decrypt(&ct1).unwrap();

        // Replaying ct1 (wrong nonce) should fail
        let result = r_recv.decrypt(&ct1);
        assert!(result.is_err());

        // NOTE(review): a failed decrypt returns before the receiver's nonce
        // counter is incremented, so the counter is still 1 here and ct2
        // would decrypt cleanly. The property exercised above is that the
        // monotonic nonce counter rejects replays of earlier ciphertexts.
    }
}
|
||||
342
crates/quicprochat-core/src/recovery.rs
Normal file
342
crates/quicprochat-core/src/recovery.rs
Normal file
@@ -0,0 +1,342 @@
|
||||
//! Account recovery — recovery code generation and encrypted backup bundles.
|
||||
//!
|
||||
//! # Design
|
||||
//!
|
||||
//! Recovery codes are 8 alphanumeric strings of 6 characters each (~31 bits
|
||||
//! entropy per code). Any single code is sufficient to recover the account.
|
||||
//!
|
||||
//! A recovery key is derived from each code via Argon2id. The identity seed
|
||||
//! and conversation metadata are encrypted into a [`RecoveryBundle`] using
|
||||
//! ChaCha20-Poly1305. The bundle is uploaded to the server, keyed by
|
||||
//! `SHA-256(recovery_token)` — the server never sees plaintext codes.
|
||||
//!
|
||||
//! # Security properties
|
||||
//!
|
||||
//! - Recovery codes are shown once and never stored in plaintext.
|
||||
//! - The server is zero-knowledge — it stores only encrypted blobs.
|
||||
//! - Code validation uses constant-time comparison.
|
||||
//! - All key material is zeroized on drop.
|
||||
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
/// Number of recovery codes generated per setup.
pub const RECOVERY_CODE_COUNT: usize = 8;

/// Length of each recovery code (alphanumeric characters).
const CODE_LENGTH: usize = 6;

/// Maximum bundle size (64 KiB).
pub const MAX_BUNDLE_SIZE: usize = 64 * 1024;

/// Argon2id parameters for recovery key derivation.
/// (m = 19 MiB, t = 2, p = 1 — a low-memory interactive profile.)
const ARGON2_M_COST: u32 = 19 * 1024; // 19 MiB
const ARGON2_T_COST: u32 = 2;
const ARGON2_P_COST: u32 = 1;

/// Alphanumeric character set for recovery codes (uppercase + digits, no
/// ambiguous characters 0/O, 1/I/L). 31 characters total.
const CODE_ALPHABET: &[u8] = b"23456789ABCDEFGHJKMNPQRSTUVWXYZ";
|
||||
|
||||
/// An encrypted recovery bundle stored on the server.
///
/// The server stores this keyed by `token_hash` (SHA-256 of a recovery token
/// derived from the code). The server cannot decrypt it.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecoveryBundle {
    /// SHA-256 of the recovery token (used as server-side lookup key).
    pub token_hash: Vec<u8>,
    /// Random 16-byte salt for Argon2id key derivation.
    pub salt: Vec<u8>,
    /// Random 12-byte nonce for ChaCha20-Poly1305.
    pub nonce: Vec<u8>,
    /// Encrypted payload: bincode-serialised `RecoveryPayload`.
    pub ciphertext: Vec<u8>,
}
|
||||
|
||||
/// The plaintext payload inside a recovery bundle.
///
/// Only ever exists in plaintext on the client; the server sees it solely as
/// AEAD ciphertext inside a [`RecoveryBundle`].
#[derive(Debug, Serialize, Deserialize)]
pub struct RecoveryPayload {
    /// Ed25519 identity seed (32 bytes).
    pub identity_seed: [u8; 32],
    /// List of conversation/group IDs the user was part of (for rejoin).
    pub conversation_ids: Vec<Vec<u8>>,
}
|
||||
|
||||
/// Result of recovery code generation.
pub struct RecoverySetup {
    /// The 8 recovery codes to show to the user (shown once, never stored).
    pub codes: Vec<String>,
    /// Encrypted bundles — one per code — to upload to the server.
    /// `bundles[i]` is decryptable only with `codes[i]`.
    pub bundles: Vec<RecoveryBundle>,
}
|
||||
|
||||
/// Generate a single random recovery code.
|
||||
fn generate_code(rng: &mut impl RngCore) -> String {
|
||||
let mut code = String::with_capacity(CODE_LENGTH);
|
||||
for _ in 0..CODE_LENGTH {
|
||||
let idx = (rng.next_u32() as usize) % CODE_ALPHABET.len();
|
||||
code.push(CODE_ALPHABET[idx] as char);
|
||||
}
|
||||
code
|
||||
}
|
||||
|
||||
/// Derive a 32-byte recovery token from a code (used for server-side lookup).
|
||||
/// The token is `SHA-256("qpc-recovery-token:" || code)`.
|
||||
fn derive_recovery_token(code: &str) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(b"qpc-recovery-token:");
|
||||
hasher.update(code.as_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Derive a 32-byte encryption key from a code and salt via Argon2id.
///
/// Uses the module-level Argon2id cost parameters (`ARGON2_M_COST` /
/// `ARGON2_T_COST` / `ARGON2_P_COST`). The returned key is wrapped in
/// `Zeroizing` so it is wiped on drop.
///
/// # Errors
/// Returns `CoreError::Io` if the Argon2 parameters are rejected or the
/// hashing operation itself fails (e.g. unusable salt length).
fn derive_recovery_key(code: &str, salt: &[u8]) -> Result<Zeroizing<[u8; 32]>, CoreError> {
    let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(32))
        .map_err(|e| CoreError::Io(format!("argon2 params: {e}")))?;
    let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
    let mut key = Zeroizing::new([0u8; 32]);
    argon2
        .hash_password_into(code.as_bytes(), salt, &mut *key)
        .map_err(|e| CoreError::Io(format!("argon2 recovery key derivation: {e}")))?;
    Ok(key)
}
|
||||
|
||||
/// Generate recovery codes and encrypted bundles for an identity.
///
/// Returns a `RecoverySetup` containing:
/// - `codes`: 8 recovery codes to display to the user (once).
/// - `bundles`: 8 encrypted recovery bundles (one per code) to upload to the server.
///
/// Each code independently decrypts its corresponding bundle.
///
/// # Errors
/// Fails if payload serialization, Argon2id key derivation, or AEAD
/// encryption fails.
pub fn generate_recovery_codes(
    identity_seed: &[u8; 32],
    conversation_ids: &[Vec<u8>],
) -> Result<RecoverySetup, CoreError> {
    let mut rng = rand::rngs::OsRng;

    // Every bundle encrypts the same plaintext; only the per-code key, salt
    // and nonce differ.
    let payload = RecoveryPayload {
        identity_seed: *identity_seed,
        conversation_ids: conversation_ids.to_vec(),
    };
    let plaintext = bincode::serialize(&payload)
        .map_err(|e| CoreError::Io(format!("serialize recovery payload: {e}")))?;

    let mut codes = Vec::with_capacity(RECOVERY_CODE_COUNT);
    let mut bundles = Vec::with_capacity(RECOVERY_CODE_COUNT);

    for _ in 0..RECOVERY_CODE_COUNT {
        let code = generate_code(&mut rng);

        // Derive the server-side lookup token.
        // Note the double hash: token = SHA-256(prefix || code), then
        // token_hash = SHA-256(token) — the server only ever sees token_hash.
        let token = derive_recovery_token(&code);
        let token_hash = Sha256::digest(token).to_vec();

        // Derive encryption key from code.
        let mut salt = [0u8; 16];
        rng.fill_bytes(&mut salt);

        let key = derive_recovery_key(&code, &salt)?;
        let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));

        // Fresh random nonce per bundle (each bundle has its own key anyway).
        let mut nonce_bytes = [0u8; 12];
        rng.fill_bytes(&mut nonce_bytes);
        let nonce = Nonce::from_slice(&nonce_bytes);

        let ciphertext = cipher
            .encrypt(nonce, plaintext.as_slice())
            .map_err(|e| CoreError::Io(format!("recovery bundle encryption: {e}")))?;

        bundles.push(RecoveryBundle {
            token_hash,
            salt: salt.to_vec(),
            nonce: nonce_bytes.to_vec(),
            ciphertext,
        });
        codes.push(code);
    }

    Ok(RecoverySetup { codes, bundles })
}
|
||||
|
||||
/// Recover an identity seed from a recovery code and encrypted bundle.
|
||||
///
|
||||
/// Returns the decrypted `RecoveryPayload` on success.
|
||||
pub fn recover_from_bundle(
|
||||
code: &str,
|
||||
bundle: &RecoveryBundle,
|
||||
) -> Result<RecoveryPayload, CoreError> {
|
||||
// Validate bundle structure.
|
||||
if bundle.salt.len() != 16 {
|
||||
return Err(CoreError::Io(format!(
|
||||
"invalid recovery bundle salt length: {}",
|
||||
bundle.salt.len()
|
||||
)));
|
||||
}
|
||||
if bundle.nonce.len() != 12 {
|
||||
return Err(CoreError::Io(format!(
|
||||
"invalid recovery bundle nonce length: {}",
|
||||
bundle.nonce.len()
|
||||
)));
|
||||
}
|
||||
|
||||
// Derive encryption key from code.
|
||||
let key = derive_recovery_key(code, &bundle.salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(&bundle.nonce);
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, bundle.ciphertext.as_slice())
|
||||
.map_err(|_| CoreError::Io("recovery bundle decryption failed (wrong code?)".into()))?;
|
||||
|
||||
let payload: RecoveryPayload = bincode::deserialize(&plaintext)
|
||||
.map_err(|e| CoreError::Io(format!("deserialize recovery payload: {e}")))?;
|
||||
|
||||
Ok(payload)
|
||||
}
|
||||
|
||||
/// Compute the token hash for a recovery code (for server-side lookup).
|
||||
///
|
||||
/// This is `SHA-256(SHA-256("qpc-recovery-token:" || code))`.
|
||||
pub fn recovery_token_hash(code: &str) -> Vec<u8> {
|
||||
let token = derive_recovery_token(code);
|
||||
Sha256::digest(token).to_vec()
|
||||
}
|
||||
|
||||
/// Constant-time comparison of two byte slices.
///
/// Returns `true` if the slices are equal. The byte contents are compared in
/// constant time (a single OR-accumulated XOR pass with no data-dependent
/// branch) to prevent timing side-channels on recovery code validation; only
/// the length comparison may short-circuit.
pub fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    a.len() == b.len()
        && a.iter()
            .zip(b.iter())
            .fold(0u8, |acc, (x, y)| acc | (x ^ y))
            == 0
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Setup must yield exactly `RECOVERY_CODE_COUNT` codes and bundles.
    #[test]
    fn generate_codes_produces_correct_count() {
        let seed = [42u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        assert_eq!(setup.codes.len(), RECOVERY_CODE_COUNT);
        assert_eq!(setup.bundles.len(), RECOVERY_CODE_COUNT);
    }

    /// Every code is `CODE_LENGTH` chars, all drawn from `CODE_ALPHABET`.
    #[test]
    fn codes_are_correct_length_and_alphabet() {
        let seed = [7u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        for code in &setup.codes {
            assert_eq!(code.len(), CODE_LENGTH);
            for ch in code.chars() {
                assert!(
                    CODE_ALPHABET.contains(&(ch as u8)),
                    "invalid char '{ch}' in code"
                );
            }
        }
    }

    /// Codes within one setup must be pairwise distinct.
    #[test]
    fn codes_are_unique() {
        let seed = [1u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        let mut seen = std::collections::HashSet::new();
        for code in &setup.codes {
            assert!(seen.insert(code.clone()), "duplicate code: {code}");
        }
    }

    /// Full round trip: every code decrypts its own bundle back to the payload.
    #[test]
    fn recover_roundtrip() {
        let seed = [99u8; 32];
        let conv_ids = vec![vec![1, 2, 3], vec![4, 5, 6]];
        let setup = generate_recovery_codes(&seed, &conv_ids).unwrap();

        // Each code should decrypt its corresponding bundle.
        for (i, code) in setup.codes.iter().enumerate() {
            let payload = recover_from_bundle(code, &setup.bundles[i]).unwrap();
            assert_eq!(payload.identity_seed, seed);
            assert_eq!(payload.conversation_ids, conv_ids);
        }
    }

    /// A code that was never issued must fail to decrypt any bundle.
    #[test]
    fn wrong_code_fails() {
        let seed = [50u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        let result = recover_from_bundle("WRONG1", &setup.bundles[0]);
        assert!(result.is_err());
    }

    /// Codes and bundles are pairwise bound: no cross-decryption.
    #[test]
    fn code_does_not_decrypt_other_bundle() {
        let seed = [88u8; 32];
        let setup = generate_recovery_codes(&seed, &[]).unwrap();
        // Code 0 should NOT decrypt bundle 1 (different salt/nonce/key).
        let result = recover_from_bundle(&setup.codes[0], &setup.bundles[1]);
        assert!(result.is_err());
    }

    /// Same code must always hash to the same lookup token.
    #[test]
    fn token_hash_is_deterministic() {
        let hash1 = recovery_token_hash("ABC123");
        let hash2 = recovery_token_hash("ABC123");
        assert_eq!(hash1, hash2);
    }

    /// Different codes must produce different lookup tokens.
    #[test]
    fn token_hash_differs_for_different_codes() {
        let hash1 = recovery_token_hash("ABC123");
        let hash2 = recovery_token_hash("XYZ789");
        assert_ne!(hash1, hash2);
    }

    /// Equality, inequality, and length-mismatch cases for the CT comparator.
    #[test]
    fn constant_time_eq_works() {
        assert!(constant_time_eq(b"hello", b"hello"));
        assert!(!constant_time_eq(b"hello", b"world"));
        assert!(!constant_time_eq(b"hello", b"hell"));
        assert!(constant_time_eq(b"", b""));
    }

    /// Bundles with a non-16-byte salt are rejected before key derivation.
    #[test]
    fn invalid_bundle_salt_rejected() {
        let bundle = RecoveryBundle {
            token_hash: vec![0; 32],
            salt: vec![0; 8], // wrong length
            nonce: vec![0; 12],
            ciphertext: vec![0; 32],
        };
        assert!(recover_from_bundle("ABC123", &bundle).is_err());
    }

    /// Bundles with a non-12-byte nonce are rejected before decryption.
    #[test]
    fn invalid_bundle_nonce_rejected() {
        let bundle = RecoveryBundle {
            token_hash: vec![0; 32],
            salt: vec![0; 16],
            nonce: vec![0; 8], // wrong length
            ciphertext: vec![0; 32],
        };
        assert!(recover_from_bundle("ABC123", &bundle).is_err());
    }
}
|
||||
153
crates/quicprochat-core/src/safety_numbers.rs
Normal file
153
crates/quicprochat-core/src/safety_numbers.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
//! Signal-style safety numbers for out-of-band identity key verification.
|
||||
//!
|
||||
//! # Algorithm
|
||||
//!
|
||||
//! Given two 32-byte Ed25519 public keys, safety numbers are computed as:
|
||||
//!
|
||||
//! 1. Sort the keys lexicographically so the result is symmetric.
|
||||
//! 2. Concatenate: `input = key_lo || key_hi` (64 bytes).
|
||||
//! 3. Compute HMAC-SHA256(key=info, data=input) where
|
||||
//! `info = b"quicprochat-safety-number-v1"`.
|
||||
//! 4. Iterate the HMAC 5200 times: `hash = HMAC-SHA256(key=info, data=hash)`.
|
||||
//! 5. Interpret the 32-byte result as 4× 64-bit big-endian integers
|
||||
//! (= 256 bits → 4 groups of 64 bits). Extract 3 decimal groups per
|
||||
//! 64-bit chunk using `% 100_000` three times, giving 12 groups total.
|
||||
//! 6. Format as 12 space-separated 5-digit strings.
|
||||
//!
|
||||
//! The 5200-iteration stretch mirrors Signal's implementation cost.
|
||||
//! The result is the same regardless of argument order.
|
||||
|
||||
use hmac::{Hmac, Mac};
|
||||
use sha2::Sha256;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
/// Fixed info string used as the HMAC key throughout the key-stretching loop.
|
||||
const INFO: &[u8] = b"quicprochat-safety-number-v1";
|
||||
|
||||
/// Compute a 60-digit safety number from two 32-byte Ed25519 public keys.
|
||||
///
|
||||
/// The result is symmetric: `compute_safety_number(a, b) == compute_safety_number(b, a)`.
|
||||
///
|
||||
/// # Format
|
||||
///
|
||||
/// Returns a `String` of 12 space-separated 5-digit groups, e.g.:
|
||||
/// `"12345 67890 12345 67890 12345 67890 12345 67890 12345 67890 12345 67890"`
|
||||
pub fn compute_safety_number(key_a: &[u8; 32], key_b: &[u8; 32]) -> String {
|
||||
// Step 1: Canonical ordering — sort lexicographically for symmetry.
|
||||
let (lo, hi) = if key_a <= key_b {
|
||||
(key_a, key_b)
|
||||
} else {
|
||||
(key_b, key_a)
|
||||
};
|
||||
|
||||
// Step 2: Concatenate the two keys (64 bytes).
|
||||
let mut input = [0u8; 64];
|
||||
input[..32].copy_from_slice(lo);
|
||||
input[32..].copy_from_slice(hi);
|
||||
|
||||
// Step 3: First HMAC iteration.
|
||||
let mut hash: [u8; 32] = {
|
||||
let mut mac = HmacSha256::new_from_slice(INFO).expect("HMAC accepts any key length");
|
||||
mac.update(&input);
|
||||
mac.finalize().into_bytes().into()
|
||||
};
|
||||
|
||||
// Step 4: Iterate 5199 more times (5200 total).
|
||||
for _ in 1..5200 {
|
||||
let mut mac = HmacSha256::new_from_slice(INFO).expect("HMAC accepts any key length");
|
||||
mac.update(&hash);
|
||||
hash = mac.finalize().into_bytes().into();
|
||||
}
|
||||
|
||||
// Step 5: Extract 12 five-digit groups.
|
||||
// We have 32 bytes = 4 × u64 (big-endian). Each u64 yields 3 groups of
|
||||
// `value % 100_000`, consuming the least-significant digits first.
|
||||
let mut groups = [0u32; 12];
|
||||
for chunk_idx in 0..4 {
|
||||
let offset = chunk_idx * 8;
|
||||
let chunk = u64::from_be_bytes(
|
||||
hash[offset..offset + 8]
|
||||
.try_into()
|
||||
.expect("exactly 8 bytes"),
|
||||
);
|
||||
groups[chunk_idx * 3] = (chunk % 100_000) as u32;
|
||||
groups[chunk_idx * 3 + 1] = ((chunk / 100_000) % 100_000) as u32;
|
||||
groups[chunk_idx * 3 + 2] = ((chunk / 10_000_000_000) % 100_000) as u32;
|
||||
}
|
||||
|
||||
// Step 6: Format.
|
||||
groups
|
||||
.iter()
|
||||
.map(|g| format!("{g:05}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ")
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Symmetry: order of arguments must not matter.
    #[test]
    fn symmetric() {
        let key_a = [0x1au8; 32];
        let key_b = [0x2bu8; 32];
        assert_eq!(
            compute_safety_number(&key_a, &key_b),
            compute_safety_number(&key_b, &key_a),
        );
    }

    /// Distinct keys must produce a distinct safety number.
    #[test]
    fn different_keys_different_numbers() {
        let key_a = [0xaau8; 32];
        let key_b = [0xbbu8; 32];
        let key_c = [0xccu8; 32];
        let sn_ab = compute_safety_number(&key_a, &key_b);
        let sn_ac = compute_safety_number(&key_a, &key_c);
        assert_ne!(sn_ab, sn_ac, "different key pairs must yield different safety numbers");
    }

    /// Verify output is formatted as 12 space-separated 5-digit groups (60 digits + 11 spaces).
    #[test]
    fn format_is_correct() {
        let key_a = [0x00u8; 32];
        let key_b = [0xffu8; 32];
        let sn = compute_safety_number(&key_a, &key_b);
        let parts: Vec<&str> = sn.split(' ').collect();
        assert_eq!(parts.len(), 12, "must have 12 groups");
        for part in &parts {
            assert_eq!(part.len(), 5, "each group must be exactly 5 digits");
            assert!(part.chars().all(|c| c.is_ascii_digit()), "groups must be numeric");
        }
    }

    /// Known test vector — ensures algorithm doesn't silently change across refactors.
    ///
    /// Generated by running the function once and pinning the output.
    /// Any change to the algorithm or constants MUST update this vector.
    ///
    /// NOTE(review): despite the name and the comments below, no concrete
    /// expected string is actually pinned — only symmetry and output length
    /// are asserted, so an algorithm change would NOT be caught. Consider
    /// asserting the literal 12-group output here.
    #[test]
    fn known_vector() {
        let key_a = [
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
            0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
            0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
            0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
        ];
        let key_b = [
            0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
            0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
            0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
            0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
        ];
        // The expected value is computed by the algorithm above and pinned here.
        // Re-run `cargo test known_vector -- --nocapture` if you need to update it.
        let result = compute_safety_number(&key_a, &key_b);
        // Symmetry check is also folded in here.
        assert_eq!(result, compute_safety_number(&key_b, &key_a));
        // The result must be 71 characters: 12 × 5 digits + 11 spaces.
        assert_eq!(result.len(), 71, "output length must be 71 chars");
    }
}
|
||||
155
crates/quicprochat-core/src/sealed_sender.rs
Normal file
155
crates/quicprochat-core/src/sealed_sender.rs
Normal file
@@ -0,0 +1,155 @@
|
||||
//! Sealed sender: embed sender identity + Ed25519 signature inside the MLS
|
||||
//! application payload so recipients can verify the sender from decrypted
|
||||
//! content, independent of MLS framing.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! ```text
|
||||
//! [magic: 1 byte (0x53 = 'S')]
|
||||
//! [sender_identity_key: 32 bytes (Ed25519 public key)]
|
||||
//! [signature: 64 bytes (Ed25519)]
|
||||
//! [inner_payload: variable (the original app_message bytes)]
|
||||
//! ```
|
||||
//!
|
||||
//! The signature covers: `magic || sender_identity_key || inner_payload`.
|
||||
//! Total overhead: 1 + 32 + 64 = 97 bytes per message.
|
||||
|
||||
use crate::error::CoreError;
|
||||
use crate::identity::IdentityKeypair;
|
||||
|
||||
/// Magic byte identifying a sealed sender envelope.
|
||||
pub const SEALED_MAGIC: u8 = 0x53; // 'S'
|
||||
|
||||
/// Fixed overhead: magic(1) + sender_key(32) + signature(64).
|
||||
const SEALED_OVERHEAD: usize = 1 + 32 + 64;
|
||||
|
||||
/// Wrap an app_message payload in a sealed sender envelope.
|
||||
///
|
||||
/// Signs `magic || sender_key || payload` with the sender's Ed25519 key.
|
||||
pub fn seal(identity: &IdentityKeypair, app_message_bytes: &[u8]) -> Vec<u8> {
|
||||
let sender_key = identity.public_key_bytes();
|
||||
|
||||
// Build signing input
|
||||
let mut sign_input = Vec::with_capacity(1 + 32 + app_message_bytes.len());
|
||||
sign_input.push(SEALED_MAGIC);
|
||||
sign_input.extend_from_slice(&sender_key);
|
||||
sign_input.extend_from_slice(app_message_bytes);
|
||||
|
||||
let signature = identity.sign_raw(&sign_input);
|
||||
|
||||
let mut out = Vec::with_capacity(SEALED_OVERHEAD + app_message_bytes.len());
|
||||
out.push(SEALED_MAGIC);
|
||||
out.extend_from_slice(&sender_key);
|
||||
out.extend_from_slice(&signature);
|
||||
out.extend_from_slice(app_message_bytes);
|
||||
out
|
||||
}
|
||||
|
||||
/// Unseal: verify the Ed25519 signature, return `(sender_identity_key, inner_app_message_bytes)`.
|
||||
pub fn unseal(bytes: &[u8]) -> Result<([u8; 32], Vec<u8>), CoreError> {
|
||||
if bytes.len() < SEALED_OVERHEAD {
|
||||
return Err(CoreError::AppMessage(
|
||||
"sealed sender envelope too short".into(),
|
||||
));
|
||||
}
|
||||
|
||||
if bytes[0] != SEALED_MAGIC {
|
||||
return Err(CoreError::AppMessage(format!(
|
||||
"sealed sender: expected magic 0x{:02X}, got 0x{:02X}",
|
||||
SEALED_MAGIC, bytes[0]
|
||||
)));
|
||||
}
|
||||
|
||||
let mut sender_key = [0u8; 32];
|
||||
sender_key.copy_from_slice(&bytes[1..33]);
|
||||
|
||||
let mut signature = [0u8; 64];
|
||||
signature.copy_from_slice(&bytes[33..97]);
|
||||
|
||||
let inner_payload = &bytes[97..];
|
||||
|
||||
// Reconstruct signing input: magic || sender_key || inner_payload
|
||||
let mut sign_input = Vec::with_capacity(1 + 32 + inner_payload.len());
|
||||
sign_input.push(SEALED_MAGIC);
|
||||
sign_input.extend_from_slice(&sender_key);
|
||||
sign_input.extend_from_slice(inner_payload);
|
||||
|
||||
IdentityKeypair::verify_raw(&sender_key, &sign_input, &signature)?;
|
||||
|
||||
Ok((sender_key, inner_payload.to_vec()))
|
||||
}
|
||||
|
||||
/// Check if bytes start with the sealed sender magic byte.
pub fn is_sealed(bytes: &[u8]) -> bool {
    matches!(bytes.first(), Some(&SEALED_MAGIC))
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Sealing then unsealing returns the original sender key and payload.
    #[test]
    fn seal_unseal_round_trip() {
        let identity = IdentityKeypair::generate();
        let payload = b"hello sealed sender";
        let sealed = seal(&identity, payload);
        assert!(is_sealed(&sealed));

        let (sender_key, inner) = unseal(&sealed).unwrap();
        assert_eq!(sender_key, identity.public_key_bytes());
        assert_eq!(inner, payload);
    }

    /// Flipping a payload byte must invalidate the signature.
    #[test]
    fn unseal_tampered_payload_fails() {
        let identity = IdentityKeypair::generate();
        let payload = b"hello";
        let mut sealed = seal(&identity, payload);
        // Tamper with the inner payload
        if let Some(last) = sealed.last_mut() {
            *last ^= 0xFF;
        }
        assert!(unseal(&sealed).is_err());
    }

    /// Substituting another identity's public key must fail verification.
    #[test]
    fn unseal_wrong_sender_fails() {
        let alice = IdentityKeypair::generate();
        let bob = IdentityKeypair::generate();
        let payload = b"from alice";
        let mut sealed = seal(&alice, payload);
        // Replace sender key with Bob's
        let bob_key = bob.public_key_bytes();
        sealed[1..33].copy_from_slice(&bob_key);
        assert!(unseal(&sealed).is_err());
    }

    /// Envelopes shorter than the fixed 97-byte overhead are rejected.
    #[test]
    fn unseal_too_short_fails() {
        assert!(unseal(&[SEALED_MAGIC; 10]).is_err());
    }

    /// A wrong leading magic byte is rejected before signature checking.
    #[test]
    fn unseal_wrong_magic_fails() {
        let identity = IdentityKeypair::generate();
        let mut sealed = seal(&identity, b"test");
        sealed[0] = 0x00;
        assert!(unseal(&sealed).is_err());
    }

    /// `is_sealed` keys off the first byte only.
    #[test]
    fn non_sealed_detected() {
        assert!(!is_sealed(b"\x01\x01hello"));
        assert!(is_sealed(&[SEALED_MAGIC, 0, 0]));
    }

    /// An empty payload still round-trips.
    #[test]
    fn empty_payload_round_trip() {
        let identity = IdentityKeypair::generate();
        let sealed = seal(&identity, b"");
        let (sender_key, inner) = unseal(&sealed).unwrap();
        assert_eq!(sender_key, identity.public_key_bytes());
        assert!(inner.is_empty());
    }
}
|
||||
555
crates/quicprochat-core/src/transcript.rs
Normal file
555
crates/quicprochat-core/src/transcript.rs
Normal file
@@ -0,0 +1,555 @@
|
||||
//! Encrypted, tamper-evident message transcript archive.
|
||||
//!
|
||||
//! # File format
|
||||
//!
|
||||
//! A transcript file is a sequence of length-prefixed records, each of the form:
|
||||
//!
|
||||
//! ```text
|
||||
//! [ u32 len (BE) ][ ChaCha20-Poly1305 ciphertext ]
|
||||
//! ```
|
||||
//!
|
||||
//! Each record contains a CBOR-encoded [`RecordPlain`] as the plaintext:
|
||||
//!
|
||||
//! ```text
|
||||
//! {
|
||||
//! "epoch": u64, // monotonically increasing record index (0-based)
|
||||
//! "sender_identity": bytes, // 32-byte Ed25519 public key (or empty)
|
||||
//! "seq": u64, // message sequence number
|
||||
//! "timestamp_ms": u64, // wall-clock timestamp
|
||||
//! "plaintext": text, // UTF-8 message body
|
||||
//! "prev_hash": bytes, // SHA-256 of the previous ciphertext (all zeros for epoch 0)
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! The AEAD nonce is `epoch` encoded as 12 bytes (big-endian u64 + 4 zero bytes).
|
||||
//!
|
||||
//! The AEAD key is derived with Argon2id from a user-supplied password and a
|
||||
//! random 16-byte salt that is stored unencrypted in the file header:
|
||||
//!
|
||||
//! ```text
|
||||
//! [ b"QPQT" (4) ][ version u8 = 1 ][ salt (16) ][ records... ]
|
||||
//! ```
|
||||
//!
|
||||
//! # Tamper evidence
|
||||
//!
|
||||
//! Each record's plaintext contains the SHA-256 hash of the **ciphertext** of
|
||||
//! the previous record, forming a hash chain. The verifier re-reads all
|
||||
//! ciphertext blobs (no decryption needed) and checks that each record's
|
||||
//! stored `prev_hash` matches the SHA-256 of the preceding ciphertext blob.
|
||||
//!
|
||||
//! An attacker who deletes, reorders, or modifies any record breaks the chain.
|
||||
|
||||
use std::io::Write;
|
||||
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit, Payload},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use crate::error::CoreError;
|
||||
|
||||
// ── Constants ────────────────────────────────────────────────────────────────
|
||||
|
||||
// File-format magic bytes at offset 0.
const MAGIC: &[u8; 4] = b"QPQT";
// Current on-disk format version (header byte 4).
const VERSION: u8 = 1;
// Length of the Argon2 salt stored in the clear in the header.
const SALT_LEN: usize = 16;
// ChaCha20-Poly1305 key size.
const KEY_LEN: usize = 32;
// ChaCha20-Poly1305 nonce size.
const NONCE_LEN: usize = 12;

// Argon2id cost parameters used for password stretching (19 MiB memory,
// 2 iterations, 1 lane).
const ARGON2_M_COST: u32 = 19 * 1024;
const ARGON2_T_COST: u32 = 2;
const ARGON2_P_COST: u32 = 1;
|
||||
|
||||
// ── Public types ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// A single message record to be written into the transcript.
///
/// The record's `epoch` and `prev_hash` are managed internally by
/// [`TranscriptWriter`]; callers only supply the fields below.
pub struct TranscriptRecord<'a> {
    /// Application-level epoch/sequence within the conversation.
    pub seq: u64,
    /// 32-byte Ed25519 sender public key (use `[0u8; 32]` if unknown).
    pub sender_identity: &'a [u8],
    /// Wall-clock timestamp in milliseconds since UNIX epoch.
    pub timestamp_ms: u64,
    /// Plaintext message body.
    pub plaintext: &'a str,
}
|
||||
|
||||
/// Writes an encrypted, chained transcript to any [`Write`] sink.
pub struct TranscriptWriter {
    /// AEAD cipher keyed by the Argon2id-stretched password.
    cipher: ChaCha20Poly1305,
    /// Index of the next record to write; also used to derive its nonce.
    epoch: u64,
    /// SHA-256 of the previously written ciphertext (all zeros before the
    /// first record), embedded into each record to form the hash chain.
    prev_hash: [u8; 32],
}
|
||||
|
||||
impl TranscriptWriter {
    /// Create a new transcript, writing the header (magic + version + salt) to `out`.
    ///
    /// `password` is stretched with Argon2id before use; it is never stored.
    ///
    /// # Errors
    ///
    /// Fails on header I/O errors or Argon2 parameter/derivation errors.
    /// NOTE(review): the header may have been partially written if key
    /// derivation fails after the writes — confirm callers discard the sink
    /// on error.
    pub fn new<W: Write>(password: &str, out: &mut W) -> Result<Self, CoreError> {
        // Fresh random salt per transcript; stored in the clear in the header
        // so readers can re-derive the key.
        let mut salt = [0u8; SALT_LEN];
        rand::rngs::OsRng.fill_bytes(&mut salt);

        out.write_all(MAGIC).map_err(io_err)?;
        out.write_all(&[VERSION]).map_err(io_err)?;
        out.write_all(&salt).map_err(io_err)?;

        let key = derive_key(password, &salt)?;
        let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));

        Ok(Self {
            cipher,
            // First record is epoch 0 and chains from an all-zero prev_hash.
            epoch: 0,
            prev_hash: [0u8; 32],
        })
    }

    /// Encrypt and append one record.
    ///
    /// The AEAD nonce is derived from the current `epoch`; the plaintext
    /// embeds `prev_hash` (SHA-256 of the previous ciphertext) to extend the
    /// tamper-evidence chain.
    ///
    /// # Errors
    ///
    /// Fails on CBOR encoding, AEAD encryption, or I/O errors.
    /// NOTE(review): on an I/O error the internal epoch/prev_hash have
    /// already advanced, so the writer should not be reused after a failed
    /// write — confirm callers treat write errors as fatal.
    pub fn write_record<W: Write>(
        &mut self,
        record: &TranscriptRecord<'_>,
        out: &mut W,
    ) -> Result<(), CoreError> {
        // Serialize the record plaintext, binding in the chain hash.
        let plaintext_cbor = encode_record(
            self.epoch,
            record.sender_identity,
            record.seq,
            record.timestamp_ms,
            record.plaintext,
            &self.prev_hash,
        )?;

        // Nonce is deterministic from the record index (epoch), so records
        // cannot be reordered without breaking decryption.
        let nonce = epoch_nonce(self.epoch);
        let ct = self
            .cipher
            .encrypt(
                Nonce::from_slice(&nonce),
                Payload {
                    msg: &plaintext_cbor,
                    aad: b"",
                },
            )
            .map_err(|_| CoreError::Mls("transcript encrypt failed".into()))?;

        // Update chain hash from the ciphertext blob we just produced.
        self.prev_hash = Sha256::digest(&ct).into();
        self.epoch += 1;

        // Write length-prefixed ciphertext.
        let len = ct.len() as u32;
        out.write_all(&len.to_be_bytes()).map_err(io_err)?;
        out.write_all(&ct).map_err(io_err)?;

        Ok(())
    }
}
|
||||
|
||||
/// Decrypt all records from a transcript produced by [`TranscriptWriter`].
///
/// Returns the records in order (oldest first), along with a verification
/// result for the hash chain.
///
/// # Errors
///
/// Fails on a malformed header, a truncated record, a decryption failure
/// (wrong password or corrupted ciphertext), or an undecodable record.
pub fn read_transcript(
    password: &str,
    data: &[u8],
) -> Result<(Vec<DecodedRecord>, ChainVerdict), CoreError> {
    let (salt, mut rest) = parse_header(data)?;
    let key = derive_key(password, salt)?;
    let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));

    let mut records = Vec::new();
    // Records are numbered by position; the nonce for record i is derived
    // from i, so reordering or removal also breaks decryption.
    let mut epoch: u64 = 0;
    // The first record must chain from an all-zero prev_hash.
    let mut expected_prev: [u8; 32] = [0u8; 32];
    let mut chain_ok = true;

    while !rest.is_empty() {
        // Each record: [u32 len (BE)][ciphertext].
        if rest.len() < 4 {
            return Err(CoreError::Mls("transcript: truncated length prefix".into()));
        }
        let len = u32::from_be_bytes(rest[..4].try_into().expect("4 bytes")) as usize;
        rest = &rest[4..];

        if rest.len() < len {
            return Err(CoreError::Mls("transcript: truncated record".into()));
        }
        let ct = &rest[..len];
        rest = &rest[len..];

        let nonce = epoch_nonce(epoch);
        let pt = cipher
            .decrypt(
                Nonce::from_slice(&nonce),
                Payload { msg: ct, aad: b"" },
            )
            .map_err(|_| CoreError::Mls("transcript: decryption failed (wrong password?)".into()))?;

        let rec = decode_record(&pt)?;

        // Verify chain linkage.
        if rec.prev_hash != expected_prev {
            chain_ok = false;
        }

        // Update expected_prev to SHA-256 of this ciphertext.
        expected_prev = Sha256::digest(ct).into();
        epoch += 1;

        records.push(rec);
    }

    let verdict = if chain_ok {
        ChainVerdict::Ok { records: epoch }
    } else {
        ChainVerdict::Broken
    };

    Ok((records, verdict))
}
|
||||
|
||||
/// Validate the structural integrity of a transcript file without decrypting.
|
||||
///
|
||||
/// Checks that the file header is valid and that all length-prefixed
|
||||
/// ciphertext records can be parsed. Does **not** verify the inner
|
||||
/// `prev_hash` chain (which requires the decryption password) — only
|
||||
/// confirms that the file is well-formed and no records have been
|
||||
/// truncated or removed.
|
||||
///
|
||||
/// Returns `Ok(ChainVerdict)` if the file header is valid; parsing errors
|
||||
/// return `Err`.
|
||||
pub fn validate_transcript_structure(data: &[u8]) -> Result<ChainVerdict, CoreError> {
|
||||
let (_, mut rest) = parse_header(data)?;
|
||||
|
||||
let mut expected_prev: [u8; 32] = [0u8; 32];
|
||||
let mut count: u64 = 0;
|
||||
|
||||
// We can't decode the CBOR (it's encrypted) so we only check the outer
|
||||
// hash chain by re-deriving hashes from the raw ciphertext blobs.
|
||||
// The inner `prev_hash` field is checked only during full decryption.
|
||||
//
|
||||
// For the public "verify" subcommand we therefore only confirm that the
|
||||
// file is structurally valid and that the ciphertext blobs haven't been
|
||||
// removed or reordered (which would invalidate sequential nonces).
|
||||
//
|
||||
// A complete chain check (including inner `prev_hash`) requires the password.
|
||||
while !rest.is_empty() {
|
||||
if rest.len() < 4 {
|
||||
return Err(CoreError::Mls("transcript: truncated length prefix".into()));
|
||||
}
|
||||
let len = u32::from_be_bytes(rest[..4].try_into().expect("4 bytes")) as usize;
|
||||
rest = &rest[4..];
|
||||
|
||||
if rest.len() < len {
|
||||
return Err(CoreError::Mls("transcript: truncated record".into()));
|
||||
}
|
||||
let ct = &rest[..len];
|
||||
rest = &rest[len..];
|
||||
|
||||
let _this_hash: [u8; 32] = Sha256::digest(ct).into();
|
||||
// Track: the hash of this CT becomes the expected_prev for the next record.
|
||||
expected_prev = _this_hash;
|
||||
count += 1;
|
||||
}
|
||||
let _ = expected_prev; // suppress unused warning
|
||||
|
||||
Ok(ChainVerdict::Ok { records: count })
|
||||
}
|
||||
|
||||
/// Deprecated alias for [`validate_transcript_structure`].
///
/// Kept for source compatibility; forwards directly with no extra behavior.
#[deprecated(note = "renamed to validate_transcript_structure — this function only checks structure, not hashes")]
pub fn verify_transcript_chain(data: &[u8]) -> Result<ChainVerdict, CoreError> {
    validate_transcript_structure(data)
}
|
||||
|
||||
/// Result of hash-chain verification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ChainVerdict {
    /// All records are present and the chain is intact.
    Ok {
        /// Number of records that were examined.
        records: u64,
    },
    /// At least one hash in the chain did not match.
    Broken,
}
|
||||
|
||||
/// A decrypted and decoded transcript record.
#[derive(Debug, Clone)]
pub struct DecodedRecord {
    /// Monotonically increasing record index (0-based), assigned by the writer.
    pub epoch: u64,
    /// 32-byte Ed25519 sender public key (or empty if it was not recorded).
    pub sender_identity: Vec<u8>,
    /// Application-level message sequence number.
    pub seq: u64,
    /// Wall-clock timestamp in milliseconds since UNIX epoch.
    pub timestamp_ms: u64,
    /// UTF-8 message body.
    pub plaintext: String,
    /// SHA-256 of the previous record's ciphertext (all zeros for epoch 0).
    pub prev_hash: [u8; 32],
}
|
||||
|
||||
// ── Internal helpers ─────────────────────────────────────────────────────────
|
||||
|
||||
fn derive_key(password: &str, salt: &[u8]) -> Result<Zeroizing<[u8; KEY_LEN]>, CoreError> {
|
||||
let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(KEY_LEN))
|
||||
.map_err(|e| CoreError::Mls(format!("argon2 params: {e}")))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = Zeroizing::new([0u8; KEY_LEN]);
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut *key)
|
||||
.map_err(|e| CoreError::Mls(format!("transcript key derivation: {e}")))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Build the per-record AEAD nonce: big-endian `epoch` in the first 8 bytes,
/// zero padding in the remaining 4.
fn epoch_nonce(epoch: u64) -> [u8; NONCE_LEN] {
    let mut nonce = [0u8; NONCE_LEN];
    for (dst, src) in nonce.iter_mut().zip(epoch.to_be_bytes()) {
        *dst = src;
    }
    nonce
}
|
||||
|
||||
/// Wrap a [`std::io::Error`] in the crate error type with a transcript prefix.
fn io_err(e: std::io::Error) -> CoreError {
    CoreError::Mls(format!("transcript I/O: {e}"))
}
|
||||
|
||||
/// Parse and validate the file header; return `(salt, rest_of_data)`.
|
||||
fn parse_header(data: &[u8]) -> Result<(&[u8], &[u8]), CoreError> {
|
||||
let header_len = 4 + 1 + SALT_LEN;
|
||||
if data.len() < header_len {
|
||||
return Err(CoreError::Mls("transcript: file too short".into()));
|
||||
}
|
||||
if &data[..4] != MAGIC {
|
||||
return Err(CoreError::Mls("transcript: invalid magic bytes".into()));
|
||||
}
|
||||
if data[4] != VERSION {
|
||||
return Err(CoreError::Mls(format!(
|
||||
"transcript: unsupported version {}",
|
||||
data[4]
|
||||
)));
|
||||
}
|
||||
let salt = &data[5..5 + SALT_LEN];
|
||||
let rest = &data[5 + SALT_LEN..];
|
||||
Ok((salt, rest))
|
||||
}
|
||||
|
||||
/// Encode one record as CBOR using ciborium.
|
||||
fn encode_record(
|
||||
epoch: u64,
|
||||
sender_identity: &[u8],
|
||||
seq: u64,
|
||||
timestamp_ms: u64,
|
||||
plaintext: &str,
|
||||
prev_hash: &[u8; 32],
|
||||
) -> Result<Vec<u8>, CoreError> {
|
||||
use ciborium::value::Value;
|
||||
|
||||
let map = Value::Map(vec![
|
||||
(Value::Text("epoch".into()), Value::Integer(epoch.into())),
|
||||
(Value::Text("sender_identity".into()), Value::Bytes(sender_identity.to_vec())),
|
||||
(Value::Text("seq".into()), Value::Integer(seq.into())),
|
||||
(Value::Text("timestamp_ms".into()), Value::Integer(timestamp_ms.into())),
|
||||
(Value::Text("plaintext".into()), Value::Text(plaintext.into())),
|
||||
(Value::Text("prev_hash".into()), Value::Bytes(prev_hash.to_vec())),
|
||||
]);
|
||||
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(&map, &mut buf)
|
||||
.map_err(|e| CoreError::Mls(format!("transcript CBOR encode: {e}")))?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
/// Decode a CBOR record.
///
/// Accepts the map produced by [`encode_record`]. Non-text keys and unknown
/// keys are ignored; `sender_identity` defaults to empty if absent, while
/// `epoch`, `seq`, `timestamp_ms`, `plaintext`, and `prev_hash` are required.
fn decode_record(data: &[u8]) -> Result<DecodedRecord, CoreError> {
    use ciborium::value::Value;

    let value: Value = ciborium::from_reader(data)
        .map_err(|e| CoreError::Mls(format!("transcript CBOR decode: {e}")))?;

    let pairs = match value {
        Value::Map(m) => m,
        _ => return Err(CoreError::Mls("transcript: record is not a CBOR map".into())),
    };

    // Accumulate fields as we walk the map; required fields stay None until seen.
    let mut epoch = None::<u64>;
    let mut sender_identity = Vec::new();
    let mut seq = None::<u64>;
    let mut timestamp_ms = None::<u64>;
    let mut plaintext = None::<String>;
    let mut prev_hash_bytes = None::<Vec<u8>>;

    for (k, v) in pairs {
        let key = match k {
            Value::Text(s) => s,
            // Non-text keys are not produced by encode_record; skip them.
            _ => continue,
        };
        match key.as_str() {
            "epoch" => {
                epoch = integer_as_u64(v);
            }
            "sender_identity" => {
                if let Value::Bytes(b) = v { sender_identity = b; }
            }
            "seq" => {
                seq = integer_as_u64(v);
            }
            "timestamp_ms" => {
                timestamp_ms = integer_as_u64(v);
            }
            "plaintext" => {
                if let Value::Text(s) = v { plaintext = Some(s); }
            }
            "prev_hash" => {
                if let Value::Bytes(b) = v { prev_hash_bytes = Some(b); }
            }
            _ => {}
        }
    }

    // Reject records missing any required field (or with a wrong value type,
    // which leaves the accumulator as None).
    let epoch = epoch.ok_or_else(|| CoreError::Mls("transcript: missing epoch".into()))?;
    let seq = seq.ok_or_else(|| CoreError::Mls("transcript: missing seq".into()))?;
    let timestamp_ms = timestamp_ms
        .ok_or_else(|| CoreError::Mls("transcript: missing timestamp_ms".into()))?;
    let plaintext = plaintext
        .ok_or_else(|| CoreError::Mls("transcript: missing plaintext".into()))?;
    let prev_hash_bytes = prev_hash_bytes
        .ok_or_else(|| CoreError::Mls("transcript: missing prev_hash".into()))?;

    // prev_hash must be exactly one SHA-256 digest.
    let mut prev_hash = [0u8; 32];
    if prev_hash_bytes.len() == 32 {
        prev_hash.copy_from_slice(&prev_hash_bytes);
    } else {
        return Err(CoreError::Mls("transcript: prev_hash must be 32 bytes".into()));
    }

    Ok(DecodedRecord {
        epoch,
        sender_identity,
        seq,
        timestamp_ms,
        plaintext,
        prev_hash,
    })
}
|
||||
|
||||
/// Extract a CBOR integer as a `u64`, if it is one and it fits.
///
/// Returns `None` for non-integer values, negative integers, and integers
/// above `u64::MAX`. `u64::try_from` replaces the previous `n as u64` cast,
/// which would have silently truncated an out-of-range `i128` instead of
/// rejecting it.
fn integer_as_u64(v: ciborium::value::Value) -> Option<u64> {
    use ciborium::value::Value;
    match v {
        Value::Integer(i) => u64::try_from(i128::from(i)).ok(),
        _ => None,
    }
}
|
||||
|
||||
// ── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A freshly-created writer emits only the file header; reading it back
    // yields zero records and an Ok chain verdict.
    #[test]
    fn round_trip_empty() {
        let password = "test-password";
        let mut buf = Vec::new();
        let _writer = TranscriptWriter::new(password, &mut buf).expect("new writer");
        let (records, verdict) = read_transcript(password, &buf).expect("read");
        assert!(records.is_empty());
        assert_eq!(verdict, ChainVerdict::Ok { records: 0 });
    }

    // Write three records and confirm plaintext and per-record epoch
    // (epoch increments once per record here) survive the round trip.
    #[test]
    fn round_trip_records() {
        let password = "hunter2";
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new(password, &mut buf).expect("new writer");

        let msgs: &[(&str, u64, &str)] = &[
            ("alice", 1000, "Hello"),
            ("bob", 2000, "Hi there"),
            ("alice", 3000, "How are you?"),
        ];

        for (_sender, ts, body) in msgs {
            // All-zero sender identity; the test only checks plaintext/epoch.
            let sender_key = [0u8; 32];
            writer
                .write_record(
                    &TranscriptRecord {
                        seq: ts / 1000,
                        sender_identity: &sender_key,
                        timestamp_ms: *ts,
                        plaintext: body,
                    },
                    &mut buf,
                )
                .expect("write record");
        }

        let (records, verdict) = read_transcript(password, &buf).expect("read");
        assert_eq!(verdict, ChainVerdict::Ok { records: 3 });
        assert_eq!(records.len(), 3);
        assert_eq!(records[0].plaintext, "Hello");
        assert_eq!(records[1].plaintext, "Hi there");
        assert_eq!(records[2].plaintext, "How are you?");
        assert_eq!(records[0].epoch, 0);
        assert_eq!(records[1].epoch, 1);
        assert_eq!(records[2].epoch, 2);
    }

    // Decrypting with the wrong password must error, not return garbage.
    #[test]
    fn wrong_password_fails() {
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new("correct", &mut buf).expect("new writer");
        writer
            .write_record(
                &TranscriptRecord {
                    seq: 0,
                    sender_identity: &[0u8; 32],
                    timestamp_ms: 0,
                    plaintext: "secret",
                },
                &mut buf,
            )
            .expect("write");

        let result = read_transcript("wrong-password", &buf);
        assert!(result.is_err(), "wrong password should fail decryption");
    }

    // Structural validation (no password needed) accepts an intact file.
    #[test]
    fn chain_verify_valid() {
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new("pw", &mut buf).expect("new writer");
        for i in 0..5u64 {
            writer
                .write_record(
                    &TranscriptRecord {
                        seq: i,
                        sender_identity: &[0u8; 32],
                        timestamp_ms: i * 1000,
                        plaintext: "msg",
                    },
                    &mut buf,
                )
                .expect("write");
        }

        let verdict = validate_transcript_structure(&buf).expect("verify");
        assert_eq!(verdict, ChainVerdict::Ok { records: 5 });
    }

    // A file cut mid-record must be rejected by structural validation.
    #[test]
    fn chain_verify_truncated_record_detected() {
        let mut buf = Vec::new();
        let mut writer = TranscriptWriter::new("pw", &mut buf).expect("new writer");
        writer
            .write_record(
                &TranscriptRecord {
                    seq: 0,
                    sender_identity: &[0u8; 32],
                    timestamp_ms: 0,
                    plaintext: "first",
                },
                &mut buf,
            )
            .expect("write");

        // Truncate the last few bytes — should fail parsing.
        let truncated = &buf[..buf.len() - 5];
        let result = validate_transcript_structure(truncated);
        assert!(result.is_err(), "truncated file must be detected");
    }
}
|
||||
16
crates/quicprochat-kt/Cargo.toml
Normal file
16
crates/quicprochat-kt/Cargo.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "quicprochat-kt"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
description = "Key Transparency: append-only SHA-256 Merkle log for (username, identity_key) bindings."
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
sha2 = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
16
crates/quicprochat-kt/src/error.rs
Normal file
16
crates/quicprochat-kt/src/error.rs
Normal file
@@ -0,0 +1,16 @@
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors returned by the Key Transparency crate.
#[derive(Debug, Error)]
pub enum KtError {
    /// A proof or leaf was requested beyond the current end of the log.
    #[error("leaf index {index} is out of range for tree size {tree_size}")]
    IndexOutOfRange { index: u64, tree_size: u64 },

    /// An inclusion proof failed: either the recomputed root did not match
    /// the claimed root, or the leaf hash did not match the queried pair.
    #[error("inclusion proof verification failed: root mismatch")]
    RootMismatch,

    /// bincode (de)serialisation failed; carries the underlying error text.
    #[error("serialisation error: {0}")]
    Serialisation(String),

    /// Attempted to revoke an identity key that was already revoked.
    #[error("identity key is already revoked")]
    AlreadyRevoked,
}
|
||||
64
crates/quicprochat-kt/src/lib.rs
Normal file
64
crates/quicprochat-kt/src/lib.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
//! Key Transparency: append-only SHA-256 Merkle log for (username, identity_key) bindings.
|
||||
//!
|
||||
//! # Design
|
||||
//!
|
||||
//! A lightweight subset of RFC 9162 (Certificate Transparency v2) adapted for identity keys:
|
||||
//!
|
||||
//! - Leaf nodes hash as: `SHA-256(0x00 || SHA-256(username || 0x00 || identity_key))`
|
||||
//! - Internal nodes hash as: `SHA-256(0x01 || left_hash || right_hash)`
|
||||
//!
|
||||
//! The 0x00/0x01 domain-separation prefixes prevent second-preimage attacks on
|
||||
//! the tree structure (RFC 6962 §2.1).
|
||||
//!
|
||||
//! ## Inclusion proof
|
||||
//!
|
||||
//! An inclusion proof for leaf at index `i` in a tree of `n` leaves is the list of
|
||||
//! sibling hashes from leaf to root. The verifier recomputes the root from the leaf
|
||||
//! hash + siblings and compares it to the known root.
|
||||
//!
|
||||
//! ## Wire format
|
||||
//!
|
||||
//! Inclusion proofs are serialised as `bincode(InclusionProof)` for transport over
|
||||
//! the Cap'n Proto `inclusionProof :Data` field.
|
||||
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
mod error;
|
||||
mod proof;
|
||||
pub mod revocation;
|
||||
mod tree;
|
||||
|
||||
pub use error::KtError;
|
||||
pub use proof::{verify_inclusion, InclusionProof};
|
||||
pub use revocation::{RevocationEntry, RevocationLog, RevocationReason};
|
||||
pub use tree::MerkleLog;
|
||||
|
||||
/// Domain-separation prefix for leaf nodes (RFC 6962 §2.1).
|
||||
const LEAF_PREFIX: u8 = 0x00;
|
||||
/// Domain-separation prefix for internal nodes.
|
||||
const INTERNAL_PREFIX: u8 = 0x01;
|
||||
|
||||
/// SHA-256 of a leaf entry: `H(0x00 || H(username || 0x00 || identity_key))`.
|
||||
pub fn leaf_hash(username: &str, identity_key: &[u8]) -> [u8; 32] {
|
||||
// Inner hash commits to both fields with a 0x00 separator.
|
||||
let mut inner = Sha256::new();
|
||||
inner.update(username.as_bytes());
|
||||
inner.update([0x00]);
|
||||
inner.update(identity_key);
|
||||
let inner_digest: [u8; 32] = inner.finalize().into();
|
||||
|
||||
// Outer hash adds the leaf domain-separation prefix.
|
||||
let mut outer = Sha256::new();
|
||||
outer.update([LEAF_PREFIX]);
|
||||
outer.update(inner_digest);
|
||||
outer.finalize().into()
|
||||
}
|
||||
|
||||
/// SHA-256 of an internal node: `H(0x01 || left || right)`.
|
||||
pub(crate) fn node_hash(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
|
||||
let mut h = Sha256::new();
|
||||
h.update([INTERNAL_PREFIX]);
|
||||
h.update(left);
|
||||
h.update(right);
|
||||
h.finalize().into()
|
||||
}
|
||||
189
crates/quicprochat-kt/src/proof.rs
Normal file
189
crates/quicprochat-kt/src/proof.rs
Normal file
@@ -0,0 +1,189 @@
|
||||
//! Inclusion proof types and verification.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{node_hash, KtError};
|
||||
|
||||
/// A single step in an inclusion proof path.
///
/// `hash` is the sibling hash; `sibling_is_left` is `true` when the sibling
/// is the left child (meaning the node being proved is the right child).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PathStep {
    /// 32-byte SHA-256 hash of the sibling subtree at this level.
    pub hash: [u8; 32],
    /// `true` when the sibling is the LEFT child of the common parent.
    pub sibling_is_left: bool,
}
|
||||
|
||||
/// A Merkle inclusion proof for a single leaf.
///
/// ## Wire format
///
/// Serialised with `bincode` and transported as the `inclusionProof :Data` field
/// in the `resolveUser` Cap'n Proto response. Clients call `verify_inclusion` to
/// authenticate the server's response against a trusted root.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InclusionProof {
    /// 0-based index of this leaf in the log.
    pub leaf_index: u64,
    /// Number of leaves in the tree at the time the proof was generated.
    pub tree_size: u64,
    /// The 32-byte leaf hash (pre-computed from `leaf_hash(username, identity_key)`).
    pub leaf_hash: [u8; 32],
    /// Path steps from leaf level to root level (leaf-to-root order).
    pub path: Vec<PathStep>,
    /// Merkle root at the time the proof was generated.
    pub root: [u8; 32],
}
|
||||
|
||||
impl InclusionProof {
|
||||
/// Serialise to bytes (bincode).
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, KtError> {
|
||||
bincode::serialize(self)
|
||||
.map_err(|e| KtError::Serialisation(e.to_string()))
|
||||
}
|
||||
|
||||
/// Deserialise from bytes (bincode).
|
||||
pub fn from_bytes(bytes: &[u8]) -> Result<Self, KtError> {
|
||||
bincode::deserialize(bytes)
|
||||
.map_err(|e| KtError::Serialisation(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify that `(username, identity_key)` appears at `proof.leaf_index` in a
|
||||
/// Merkle log with root `proof.root` and `proof.tree_size` leaves.
|
||||
///
|
||||
/// Returns `Ok(())` on success, `Err(KtError::RootMismatch)` on failure.
|
||||
///
|
||||
/// The caller should additionally check that `proof.root` matches a root they
|
||||
/// obtained from a trusted source (e.g. a previously-pinned root or one returned
|
||||
/// by a second server for cross-verification).
|
||||
pub fn verify_inclusion(
|
||||
proof: &InclusionProof,
|
||||
username: &str,
|
||||
identity_key: &[u8],
|
||||
) -> Result<(), KtError> {
|
||||
let expected_leaf = crate::leaf_hash(username, identity_key);
|
||||
if expected_leaf != proof.leaf_hash {
|
||||
return Err(KtError::RootMismatch);
|
||||
}
|
||||
|
||||
let computed_root = recompute_root(proof.leaf_hash, &proof.path)?;
|
||||
|
||||
if computed_root != proof.root {
|
||||
return Err(KtError::RootMismatch);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Recompute the Merkle root from a leaf hash + direction-annotated sibling path.
|
||||
///
|
||||
/// Each `PathStep` records the sibling hash and whether that sibling is on the
|
||||
/// left (meaning the current node is on the right). This is leaf-to-root order.
|
||||
fn recompute_root(leaf: [u8; 32], path: &[PathStep]) -> Result<[u8; 32], KtError> {
|
||||
let mut current = leaf;
|
||||
for step in path {
|
||||
current = if step.sibling_is_left {
|
||||
// Sibling is left, current is right.
|
||||
node_hash(&step.hash, ¤t)
|
||||
} else {
|
||||
// Sibling is right, current is left.
|
||||
node_hash(¤t, &step.hash)
|
||||
};
|
||||
}
|
||||
Ok(current)
|
||||
}
|
||||
|
||||
#[cfg(test)]
// unwrap is acceptable in tests — failures surface as test panics.
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use crate::tree::MerkleLog;

    // Build a log from (username, key) pairs in order.
    fn log_with(entries: &[(&str, &[u8])]) -> MerkleLog {
        let mut log = MerkleLog::new();
        for (u, k) in entries {
            log.append(u, k);
        }
        log
    }

    // Generate and verify an inclusion proof for every entry in the log.
    fn verify_all(log: &MerkleLog, entries: &[(&str, &[u8])]) {
        for (i, (u, k)) in entries.iter().enumerate() {
            let proof = log.inclusion_proof(i as u64).unwrap();
            verify_inclusion(&proof, u, k).unwrap_or_else(|e| {
                panic!("proof verification failed for leaf {i}: {e}");
            });
        }
    }

    #[test]
    fn single_leaf_verifies() {
        let log = log_with(&[("alice", b"KEY1")]);
        verify_all(&log, &[("alice", b"KEY1")]);
    }

    #[test]
    fn two_leaves_verify() {
        let log = log_with(&[("alice", b"K1"), ("bob", b"K2")]);
        verify_all(&log, &[("alice", b"K1"), ("bob", b"K2")]);
    }

    // Odd leaf count exercises the unbalanced-tree split (k = 2, right = 1).
    #[test]
    fn three_leaves_verify() {
        let log = log_with(&[("alice", b"K1"), ("bob", b"K2"), ("charlie", b"K3")]);
        verify_all(&log, &[("alice", b"K1"), ("bob", b"K2"), ("charlie", b"K3")]);
    }

    // Perfectly balanced tree (8 = 2^3).
    #[test]
    fn power_of_two_leaves_verify() {
        let entries: Vec<(String, Vec<u8>)> = (0u8..8)
            .map(|i| (format!("user{i}"), vec![i; 32]))
            .collect();
        let refs: Vec<(&str, &[u8])> = entries.iter().map(|(u, k)| (u.as_str(), k.as_slice())).collect();
        let log = log_with(&refs);
        verify_all(&log, &refs);
    }

    // 7 leaves: maximally unbalanced RFC 9162 split (4 + 2 + 1).
    #[test]
    fn seven_leaves_all_verify() {
        let entries: Vec<(String, Vec<u8>)> = (0u8..7)
            .map(|i| (format!("u{i}"), vec![i; 32]))
            .collect();
        let refs: Vec<(&str, &[u8])> = entries.iter().map(|(u, k)| (u.as_str(), k.as_slice())).collect();
        let log = log_with(&refs);
        verify_all(&log, &refs);
    }

    // A proof for the real key must not verify a different key.
    #[test]
    fn wrong_identity_key_fails() {
        let log = log_with(&[("alice", b"REAL_KEY")]);
        let proof = log.inclusion_proof(0).unwrap();
        assert!(matches!(
            verify_inclusion(&proof, "alice", b"WRONG_KEY"),
            Err(KtError::RootMismatch)
        ));
    }

    // Flipping a bit in a sibling hash must break root recomputation.
    #[test]
    fn tampered_sibling_fails() {
        let log = log_with(&[("alice", b"K1"), ("bob", b"K2"), ("charlie", b"K3")]);
        let mut proof = log.inclusion_proof(0).unwrap();
        if !proof.path.is_empty() {
            proof.path[0].hash[0] ^= 0xff;
        }
        assert!(matches!(
            verify_inclusion(&proof, "alice", b"K1"),
            Err(KtError::RootMismatch)
        ));
    }

    // bincode round trip preserves a verifiable proof.
    #[test]
    fn proof_serialise_roundtrip() {
        let log = log_with(&[("alice", b"K1"), ("bob", b"K2")]);
        let proof = log.inclusion_proof(0).unwrap();
        let bytes = proof.to_bytes().unwrap();
        let proof2 = InclusionProof::from_bytes(&bytes).unwrap();
        verify_inclusion(&proof2, "alice", b"K1").unwrap();
    }
}
|
||||
278
crates/quicprochat-kt/src/revocation.rs
Normal file
278
crates/quicprochat-kt/src/revocation.rs
Normal file
@@ -0,0 +1,278 @@
|
||||
//! Key revocation tracking for the Key Transparency log.
|
||||
//!
|
||||
//! Revocation entries are appended to the same Merkle log as regular key
|
||||
//! bindings, using a distinct leaf hash prefix to differentiate them. A
|
||||
//! separate in-memory index tracks which identity keys have been revoked,
|
||||
//! enabling O(1) revocation checks.
|
||||
//!
|
||||
//! ## Revocation leaf hash
|
||||
//!
|
||||
//! ```text
|
||||
//! SHA-256(0x02 || SHA-256(identity_key || 0x00 || reason_bytes))
|
||||
//! ```
|
||||
//!
|
||||
//! The 0x02 prefix domain-separates revocation leaves from binding leaves (0x00)
|
||||
//! and internal nodes (0x01).
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::{KtError, MerkleLog};
|
||||
|
||||
/// Domain-separation prefix for revocation leaves.
|
||||
const REVOCATION_PREFIX: u8 = 0x02;
|
||||
|
||||
/// Reason for key revocation.
///
/// The string tags produced by `as_tag`/`as_bytes` feed the revocation leaf
/// hash, so variant tags are part of the wire/hash format and must not change.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum RevocationReason {
    /// Key material was compromised.
    Compromised,
    /// Key was superseded by a new key.
    Superseded,
    /// User-initiated revocation (e.g. account deletion).
    UserRevoked,
}
|
||||
|
||||
impl RevocationReason {
|
||||
fn as_bytes(&self) -> &[u8] {
|
||||
match self {
|
||||
RevocationReason::Compromised => b"compromised",
|
||||
RevocationReason::Superseded => b"superseded",
|
||||
RevocationReason::UserRevoked => b"user_revoked",
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse from a string tag.
|
||||
pub fn from_tag(tag: &str) -> Option<Self> {
|
||||
match tag {
|
||||
"compromised" => Some(RevocationReason::Compromised),
|
||||
"superseded" => Some(RevocationReason::Superseded),
|
||||
"user_revoked" => Some(RevocationReason::UserRevoked),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the string tag for serialization.
|
||||
pub fn as_tag(&self) -> &str {
|
||||
match self {
|
||||
RevocationReason::Compromised => "compromised",
|
||||
RevocationReason::Superseded => "superseded",
|
||||
RevocationReason::UserRevoked => "user_revoked",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A record of a key revocation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RevocationEntry {
    /// The identity key that was revoked.
    /// NOTE(review): documented as 32 bytes but the `Vec<u8>` type does not
    /// enforce that — confirm whether callers guarantee the length.
    pub identity_key: Vec<u8>,
    /// Reason for revocation.
    pub reason: RevocationReason,
    /// Timestamp (ms since UNIX epoch) when the revocation was recorded.
    pub timestamp_ms: u64,
    /// Index of the revocation leaf in the Merkle log.
    pub leaf_index: u64,
}
|
||||
|
||||
/// Tracks revoked identity keys alongside the Merkle log.
///
/// Revocation entries are appended to the Merkle log (with a distinct prefix)
/// and indexed in-memory by identity key for O(1) lookup.
#[derive(Default, Serialize, Deserialize, Clone)]
pub struct RevocationLog {
    /// Revocation entries in append order.
    entries: Vec<RevocationEntry>,
    /// Index from identity_key bytes to entry index for O(1) lookup.
    /// Skipped by serde — it is derived state; `rebuild_index` reconstructs
    /// it after deserialization (see `from_bytes`).
    #[serde(skip)]
    index: HashMap<Vec<u8>, usize>,
}
|
||||
|
||||
impl RevocationLog {
|
||||
/// Create an empty revocation log.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Rebuild the in-memory index from the entries list.
|
||||
///
|
||||
/// Must be called after deserialization.
|
||||
pub fn rebuild_index(&mut self) {
|
||||
self.index.clear();
|
||||
for (i, entry) in self.entries.iter().enumerate() {
|
||||
self.index.insert(entry.identity_key.clone(), i);
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a key revocation, appending a revocation leaf to the Merkle log.
|
||||
///
|
||||
/// Returns the leaf index in the Merkle log, or an error if the key is
|
||||
/// already revoked.
|
||||
pub fn revoke(
|
||||
&mut self,
|
||||
kt_log: &mut MerkleLog,
|
||||
identity_key: &[u8],
|
||||
reason: RevocationReason,
|
||||
timestamp_ms: u64,
|
||||
) -> Result<u64, KtError> {
|
||||
if self.index.contains_key(identity_key) {
|
||||
return Err(KtError::AlreadyRevoked);
|
||||
}
|
||||
|
||||
// Compute revocation leaf hash and append to the Merkle log.
|
||||
let leaf = revocation_leaf_hash(identity_key, &reason);
|
||||
let leaf_index = kt_log.append_raw(leaf);
|
||||
|
||||
let entry = RevocationEntry {
|
||||
identity_key: identity_key.to_vec(),
|
||||
reason,
|
||||
timestamp_ms,
|
||||
leaf_index,
|
||||
};
|
||||
|
||||
let entry_idx = self.entries.len();
|
||||
self.entries.push(entry);
|
||||
self.index.insert(identity_key.to_vec(), entry_idx);
|
||||
|
||||
Ok(leaf_index)
|
||||
}
|
||||
|
||||
/// Check if an identity key has been revoked.
|
||||
pub fn is_revoked(&self, identity_key: &[u8]) -> bool {
|
||||
self.index.contains_key(identity_key)
|
||||
}
|
||||
|
||||
/// Get the revocation entry for an identity key, if revoked.
|
||||
pub fn get(&self, identity_key: &[u8]) -> Option<&RevocationEntry> {
|
||||
self.index
|
||||
.get(identity_key)
|
||||
.map(|&idx| &self.entries[idx])
|
||||
}
|
||||
|
||||
/// Return all revocation entries in append order.
|
||||
pub fn entries(&self) -> &[RevocationEntry] {
|
||||
&self.entries
|
||||
}
|
||||
|
||||
/// Number of revoked keys.
|
||||
pub fn len(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
/// Return `true` if no keys have been revoked.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.entries.is_empty()
|
||||
}
|
||||
|
||||
/// Serialise the revocation log to bytes (bincode).
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, KtError> {
|
||||
bincode::serialize(self).map_err(|e| KtError::Serialisation(e.to_string()))
|
||||
}
|
||||
|
||||
/// Deserialise from bytes and rebuild the in-memory index.
|
||||
pub fn from_bytes(bytes: &[u8]) -> Result<Self, KtError> {
|
||||
let mut log: Self =
|
||||
bincode::deserialize(bytes).map_err(|e| KtError::Serialisation(e.to_string()))?;
|
||||
log.rebuild_index();
|
||||
Ok(log)
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the leaf hash for a revocation entry.
|
||||
///
|
||||
/// `SHA-256(0x02 || SHA-256(identity_key || 0x00 || reason_bytes))`
|
||||
pub fn revocation_leaf_hash(identity_key: &[u8], reason: &RevocationReason) -> [u8; 32] {
|
||||
let mut inner = Sha256::new();
|
||||
inner.update(identity_key);
|
||||
inner.update([0x00]);
|
||||
inner.update(reason.as_bytes());
|
||||
let inner_digest: [u8; 32] = inner.finalize().into();
|
||||
|
||||
let mut outer = Sha256::new();
|
||||
outer.update([REVOCATION_PREFIX]);
|
||||
outer.update(inner_digest);
|
||||
outer.finalize().into()
|
||||
}
|
||||
|
||||
#[cfg(test)]
// unwrap is acceptable in tests — failures surface as test panics.
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // Basic flow: bind, revoke, and query both the flag and the entry.
    #[test]
    fn revoke_and_check() {
        let mut kt = MerkleLog::new();
        let mut revlog = RevocationLog::new();

        // Append a normal binding first.
        kt.append("alice", &[1u8; 32]);

        // Revoke alice's key.
        let leaf_idx = revlog
            .revoke(&mut kt, &[1u8; 32], RevocationReason::Compromised, 1000)
            .unwrap();
        assert_eq!(leaf_idx, 1); // second leaf in the log

        assert!(revlog.is_revoked(&[1u8; 32]));
        assert!(!revlog.is_revoked(&[2u8; 32]));

        let entry = revlog.get(&[1u8; 32]).unwrap();
        assert_eq!(entry.reason, RevocationReason::Compromised);
        assert_eq!(entry.timestamp_ms, 1000);
    }

    // A second revocation of the same key must be rejected.
    #[test]
    fn double_revoke_fails() {
        let mut kt = MerkleLog::new();
        let mut revlog = RevocationLog::new();

        revlog
            .revoke(&mut kt, &[1u8; 32], RevocationReason::Compromised, 1000)
            .unwrap();
        let result = revlog.revoke(&mut kt, &[1u8; 32], RevocationReason::Superseded, 2000);
        assert!(matches!(result, Err(KtError::AlreadyRevoked)));
    }

    // Domain separation: a revocation leaf never collides with a binding leaf.
    #[test]
    fn revocation_leaf_is_distinct_from_binding_leaf() {
        let binding_hash = crate::leaf_hash("alice", &[1u8; 32]);
        let revocation_hash =
            revocation_leaf_hash(&[1u8; 32], &RevocationReason::Compromised);
        assert_ne!(binding_hash, revocation_hash);
    }

    // to_bytes/from_bytes must preserve entries AND rebuild the skip'd index.
    #[test]
    fn serialization_roundtrip() {
        let mut kt = MerkleLog::new();
        let mut revlog = RevocationLog::new();

        revlog
            .revoke(&mut kt, &[1u8; 32], RevocationReason::Compromised, 1000)
            .unwrap();
        revlog
            .revoke(&mut kt, &[2u8; 32], RevocationReason::Superseded, 2000)
            .unwrap();

        let bytes = revlog.to_bytes().unwrap();
        let restored = RevocationLog::from_bytes(&bytes).unwrap();

        assert_eq!(restored.len(), 2);
        assert!(restored.is_revoked(&[1u8; 32]));
        assert!(restored.is_revoked(&[2u8; 32]));
    }

    // as_tag and from_tag must be exact inverses for every variant.
    #[test]
    fn reason_tag_roundtrip() {
        for reason in &[
            RevocationReason::Compromised,
            RevocationReason::Superseded,
            RevocationReason::UserRevoked,
        ] {
            let tag = reason.as_tag();
            let parsed = RevocationReason::from_tag(tag).unwrap();
            assert_eq!(*reason, parsed);
        }
    }
}
|
||||
290
crates/quicprochat-kt/src/tree.rs
Normal file
290
crates/quicprochat-kt/src/tree.rs
Normal file
@@ -0,0 +1,290 @@
|
||||
//! Append-only Merkle log backed by a flat `Vec` of all leaf hashes.
|
||||
//!
|
||||
//! The tree structure is virtual — roots and paths are computed on-demand from the
|
||||
//! leaf array. This keeps the storage footprint to `32 * n` bytes for `n` leaves.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{leaf_hash, node_hash, KtError};
|
||||
use crate::proof::{InclusionProof, PathStep};
|
||||
|
||||
/// An append-only Merkle log of `(username, identity_key)` leaf entries.
///
/// Internally stores only the 32-byte SHA-256 leaf hashes. Roots and inclusion
/// proofs are recomputed from the flat list on demand.
///
/// Persistence: the caller serialises the whole struct with `bincode` and stores
/// the bytes in the DB (`kt_log` table). The log is load-on-startup, append-on-write.
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct MerkleLog {
    /// All leaf hashes in append order. The tree shape is implied by the
    /// RFC 9162 split rule, so only the leaves need to be stored.
    leaves: Vec<[u8; 32]>,
}
|
||||
|
||||
impl MerkleLog {
|
||||
/// Create an empty log.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Number of leaves in the log.
|
||||
pub fn len(&self) -> u64 {
|
||||
self.leaves.len() as u64
|
||||
}
|
||||
|
||||
/// Return `true` if the log has no leaves.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.leaves.is_empty()
|
||||
}
|
||||
|
||||
/// Append a `(username, identity_key)` binding and return the leaf's index.
|
||||
///
|
||||
/// The leaf hash is computed using the canonical formula:
|
||||
/// `SHA-256(0x00 || SHA-256(username || 0x00 || identity_key))`.
|
||||
pub fn append(&mut self, username: &str, identity_key: &[u8]) -> u64 {
|
||||
let h = leaf_hash(username, identity_key);
|
||||
let idx = self.leaves.len() as u64;
|
||||
self.leaves.push(h);
|
||||
idx
|
||||
}
|
||||
|
||||
/// Return the current Merkle root hash, or `None` if the log is empty.
|
||||
pub fn root(&self) -> Option<[u8; 32]> {
|
||||
if self.leaves.is_empty() {
|
||||
return None;
|
||||
}
|
||||
Some(merkle_root(&self.leaves))
|
||||
}
|
||||
|
||||
/// Generate an inclusion proof for the leaf at `index`.
|
||||
///
|
||||
/// Returns `Err` if `index >= self.len()`.
|
||||
pub fn inclusion_proof(&self, index: u64) -> Result<InclusionProof, KtError> {
|
||||
let n = self.len();
|
||||
if index >= n {
|
||||
return Err(KtError::IndexOutOfRange { index, tree_size: n });
|
||||
}
|
||||
|
||||
let raw_path = compute_path(&self.leaves, index as usize, self.leaves.len());
|
||||
let path: Vec<PathStep> = raw_path
|
||||
.into_iter()
|
||||
.map(|(hash, sibling_is_left)| PathStep { hash, sibling_is_left })
|
||||
.collect();
|
||||
let root = merkle_root(&self.leaves);
|
||||
|
||||
Ok(InclusionProof {
|
||||
leaf_index: index,
|
||||
tree_size: n,
|
||||
leaf_hash: self.leaves[index as usize],
|
||||
path,
|
||||
root,
|
||||
})
|
||||
}
|
||||
|
||||
/// Find the leaf index for a `(username, identity_key)` pair, if present.
|
||||
///
|
||||
/// O(n) scan — suitable for small logs. For large-scale deployments a
|
||||
/// username→index index would be maintained separately.
|
||||
pub fn find(&self, username: &str, identity_key: &[u8]) -> Option<u64> {
|
||||
let target = leaf_hash(username, identity_key);
|
||||
self.leaves
|
||||
.iter()
|
||||
.position(|h| h == &target)
|
||||
.map(|i| i as u64)
|
||||
}
|
||||
|
||||
/// Append a pre-computed leaf hash directly (used by revocation entries).
|
||||
///
|
||||
/// Returns the leaf index.
|
||||
pub fn append_raw(&mut self, hash: [u8; 32]) -> u64 {
|
||||
let idx = self.leaves.len() as u64;
|
||||
self.leaves.push(hash);
|
||||
idx
|
||||
}
|
||||
|
||||
/// Return log entries in the range `[start, end)` as `(index, leaf_hash)` pairs.
|
||||
///
|
||||
/// Used for KT audit — clients download the full log and verify inclusion proofs.
|
||||
/// Returns an empty vec if `start >= self.len()`.
|
||||
pub fn audit_log(&self, start: u64, end: u64) -> Vec<(u64, [u8; 32])> {
|
||||
let n = self.len();
|
||||
let start = start.min(n) as usize;
|
||||
let end = end.min(n) as usize;
|
||||
if start >= end {
|
||||
return Vec::new();
|
||||
}
|
||||
self.leaves[start..end]
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, &h)| ((start + i) as u64, h))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Serialise the log to bytes (bincode).
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, KtError> {
|
||||
bincode::serialize(self)
|
||||
.map_err(|e| KtError::Serialisation(e.to_string()))
|
||||
}
|
||||
|
||||
/// Deserialise a log from bytes (bincode).
|
||||
pub fn from_bytes(bytes: &[u8]) -> Result<Self, KtError> {
|
||||
bincode::deserialize(bytes)
|
||||
.map_err(|e| KtError::Serialisation(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the Merkle root over a non-empty slice of leaf hashes.
|
||||
///
|
||||
/// Uses RFC 9162 §2.1 balanced tree construction: when the number of leaves is
|
||||
/// odd, the rightmost leaf is promoted (not duplicated — that's vulnerable to
|
||||
/// second-preimage attacks). Specifically:
|
||||
///
|
||||
/// - `MTH({d[0]}) = H(0x00 || d[0])` (already computed as `leaf_hash`)
|
||||
/// - `MTH(D[n]) = H(0x01 || MTH(D[0..k]) || MTH(D[k..n]))` where `k` is the
|
||||
/// largest power of two strictly less than `n`.
|
||||
///
|
||||
/// This is a standard SHA-256 Merkle tree — the leaves are already hashed
|
||||
/// so the recursion just applies the internal-node formula.
|
||||
pub(crate) fn merkle_root(leaves: &[[u8; 32]]) -> [u8; 32] {
|
||||
match leaves.len() {
|
||||
0 => unreachable!("merkle_root called on empty slice"),
|
||||
1 => leaves[0],
|
||||
n => {
|
||||
let k = largest_power_of_two_less_than(n);
|
||||
let left = merkle_root(&leaves[..k]);
|
||||
let right = merkle_root(&leaves[k..]);
|
||||
node_hash(&left, &right)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the path (list of `(sibling_hash, sibling_is_on_left)`) from
|
||||
/// `leaf_idx` to the root, in leaf-to-root order.
|
||||
///
|
||||
/// `sibling_is_on_left` is `true` when the sibling is the LEFT child of their
|
||||
/// common parent, i.e., the current node being proved is on the RIGHT.
|
||||
pub(crate) fn compute_path(
|
||||
leaves: &[[u8; 32]],
|
||||
leaf_idx: usize,
|
||||
n: usize,
|
||||
) -> Vec<([u8; 32], bool)> {
|
||||
let mut path = Vec::new();
|
||||
collect_path(&leaves[..n], leaf_idx, &mut path);
|
||||
path
|
||||
}
|
||||
|
||||
/// Recurse into the subtree `leaves` (already sub-sliced to the right window).
|
||||
fn collect_path(
|
||||
leaves: &[[u8; 32]],
|
||||
leaf_idx: usize,
|
||||
path: &mut Vec<([u8; 32], bool)>,
|
||||
) {
|
||||
let n = leaves.len();
|
||||
if n <= 1 {
|
||||
return;
|
||||
}
|
||||
let k = largest_power_of_two_less_than(n);
|
||||
if leaf_idx < k {
|
||||
// Leaf is in the left subtree; sibling is the right subtree.
|
||||
collect_path(&leaves[..k], leaf_idx, path);
|
||||
let right_root = merkle_root(&leaves[k..]);
|
||||
path.push((right_root, false)); // sibling is on the RIGHT
|
||||
} else {
|
||||
// Leaf is in the right subtree; sibling is the left subtree.
|
||||
collect_path(&leaves[k..], leaf_idx - k, path);
|
||||
let left_root = merkle_root(&leaves[..k]);
|
||||
path.push((left_root, true)); // sibling is on the LEFT
|
||||
}
|
||||
}
|
||||
|
||||
/// Largest power of two strictly less than `n`.
///
/// # Panics
/// Panics if `n < 2`.
fn largest_power_of_two_less_than(n: usize) -> usize {
    assert!(n >= 2, "n must be >= 2");
    // Highest set bit of `n - 1`, i.e. floor_pow2(n - 1), which equals the
    // largest power of two strictly below `n`. Unlike the naive
    // `while k * 2 < n` loop, this is O(1) and cannot overflow when
    // `n > usize::MAX / 2` (the loop's `k * 2` would).
    1usize << (usize::BITS - 1 - (n - 1).leading_zeros())
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // A fresh log exposes neither a root nor any leaves.
    #[test]
    fn empty_log_has_no_root() {
        let log = MerkleLog::new();
        assert_eq!(log.root(), None);
        assert_eq!(log.len(), 0);
    }

    // With exactly one leaf, the RFC 9162 root is the leaf hash itself.
    #[test]
    fn single_leaf_root_equals_leaf_hash() {
        let mut log = MerkleLog::new();
        log.append("alice", b"A" as &[u8]);
        let lh = leaf_hash("alice", b"A");
        assert_eq!(log.root(), Some(lh));
    }

    // `append` returns the 0-based index assigned to the new leaf.
    #[test]
    fn append_returns_correct_index() {
        let mut log = MerkleLog::new();
        assert_eq!(log.append("a", b"k1"), 0);
        assert_eq!(log.append("b", b"k2"), 1);
        assert_eq!(log.append("c", b"k3"), 2);
        assert_eq!(log.len(), 3);
    }

    // Every append must change the root (append-only integrity property).
    #[test]
    fn root_changes_on_append() {
        let mut log = MerkleLog::new();
        log.append("alice", b"K1");
        let root1 = log.root();
        log.append("bob", b"K2");
        let root2 = log.root();
        assert_ne!(root1, root2);
    }

    // `find` locates an existing (user, key) pair; a missing pair yields None.
    #[test]
    fn find_returns_correct_index() {
        let mut log = MerkleLog::new();
        log.append("alice", b"K1");
        log.append("bob", b"K2");
        log.append("charlie", b"K3");
        assert_eq!(log.find("bob", b"K2"), Some(1));
        assert_eq!(log.find("missing", b""), None);
    }

    // Proof requests past the end must fail with IndexOutOfRange.
    #[test]
    fn inclusion_proof_out_of_range() {
        let mut log = MerkleLog::new();
        log.append("alice", b"K");
        assert!(matches!(
            log.inclusion_proof(1),
            Err(KtError::IndexOutOfRange { .. })
        ));
    }

    // A bincode round trip preserves both the root and the length.
    #[test]
    fn serialise_roundtrip() {
        let mut log = MerkleLog::new();
        log.append("alice", b"K1");
        log.append("bob", b"K2");
        let bytes = log.to_bytes().unwrap();
        let log2 = MerkleLog::from_bytes(&bytes).unwrap();
        assert_eq!(log2.root(), log.root());
        assert_eq!(log2.len(), log.len());
    }

    // Spot-check the split-point helper used by the tree recursion.
    #[test]
    fn largest_power_of_two_less_than_values() {
        assert_eq!(largest_power_of_two_less_than(2), 1);
        assert_eq!(largest_power_of_two_less_than(3), 2);
        assert_eq!(largest_power_of_two_less_than(4), 2);
        assert_eq!(largest_power_of_two_less_than(5), 4);
        assert_eq!(largest_power_of_two_less_than(8), 4);
        assert_eq!(largest_power_of_two_less_than(9), 8);
    }
}
|
||||
41
crates/quicprochat-p2p/Cargo.toml
Normal file
41
crates/quicprochat-p2p/Cargo.toml
Normal file
@@ -0,0 +1,41 @@
|
||||
[package]
|
||||
name = "quicprochat-p2p"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
description = "P2P transport layer for quicprochat using iroh."
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository.workspace = true
|
||||
|
||||
[features]
|
||||
traffic-resistance = []
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
iroh = "0.96"
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "net", "io-util"] }
|
||||
async-trait = "0.1"
|
||||
tracing = "0.1"
|
||||
anyhow = "1"
|
||||
|
||||
# Mesh identity & store-and-forward
|
||||
quicprochat-core = { path = "../quicprochat-core", default-features = false }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
ciborium = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
|
||||
# Broadcast channels (ChaCha20-Poly1305 symmetric encryption)
|
||||
chacha20poly1305 = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
|
||||
# Lightweight mesh link handshake (X25519 ECDH + HKDF)
|
||||
x25519-dalek = { workspace = true }
|
||||
hkdf = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
96
crates/quicprochat-p2p/examples/mesh_lora_relay_demo.rs
Normal file
96
crates/quicprochat-p2p/examples/mesh_lora_relay_demo.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
//! Simulated mesh leg: **A (LoRa)** → **B (LoRa + TCP relay)** → **C (TCP)** → back via B → **A**.
|
||||
//!
|
||||
//! Uses [`quicprochat_p2p::transport_lora::LoRaMockMedium`] — no radio hardware required.
|
||||
//!
|
||||
//! ```text
|
||||
//! Node A Node B Node C
|
||||
//! LoRa addr 0x01 LoRa 0x02 + TCP listen TCP (WiFi / LAN)
|
||||
//! │ │ │
|
||||
//! └──── LoRa ───────┘ │
|
||||
//! └──────── TCP ──────────────┘
|
||||
//! ```
|
||||
//!
|
||||
//! Run: `cargo run -p quicprochat-p2p --example mesh_lora_relay_demo`
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use quicprochat_p2p::transport::{MeshTransport, TransportAddr};
|
||||
use quicprochat_p2p::transport_lora::{DutyCycleTracker, LoRaConfig, LoRaMockMedium};
|
||||
use quicprochat_p2p::transport_tcp::TcpTransport;
|
||||
|
||||
const ADDR_A: [u8; 4] = [0x01, 0, 0, 0];
|
||||
const ADDR_B: [u8; 4] = [0x02, 0, 0, 0];
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // One shared mock radio medium, with a single duty-cycle budget tracked
    // over a 1-hour (3,600,000 ms) window.
    let medium = LoRaMockMedium::new();
    let duty = Arc::new(DutyCycleTracker::new(3_600_000));

    // Nodes A and B join the same mock LoRa medium under distinct addresses.
    let lora_a = medium
        .connect(ADDR_A, LoRaConfig::default(), Arc::clone(&duty))
        .await?;
    let lora_b = medium
        .connect(ADDR_B, LoRaConfig::default(), Arc::clone(&duty))
        .await?;

    // B and C each bind an ephemeral local TCP port.
    let tcp_b = TcpTransport::bind("127.0.0.1:0").await?;
    let tcp_c = TcpTransport::bind("127.0.0.1:0").await?;

    let c_listen = tcp_c.local_addr();
    let b_listen = tcp_b.local_addr();
    let c_addr = TransportAddr::Socket(c_listen);
    let b_addr = TransportAddr::Socket(b_listen);

    println!(
        "LoRa mock mesh demo: B relays LoRa <-> TCP (B TCP {}, C TCP {})",
        b_listen, c_listen
    );

    // B bridges exactly two packets: one LoRa->TCP (A's request toward C)
    // and one TCP->LoRa (C's ack back toward A), then exits.
    let relay = tokio::spawn(async move {
        for _ in 0..2 {
            tokio::select! {
                p = lora_b.recv() => {
                    let p = p.expect("B LoRa recv");
                    println!("B: LoRa from {} -> TCP ({} bytes)", p.from, p.data.len());
                    tcp_b.send(&c_addr, &p.data).await.expect("B TCP send to C");
                }
                p = tcp_b.recv() => {
                    let p = p.expect("B TCP recv");
                    println!("B: TCP -> LoRa A ({} bytes)", p.data.len());
                    lora_b
                        .send(&TransportAddr::LoRa(ADDR_A), &p.data)
                        .await
                        .expect("B LoRa send to A");
                }
            }
        }
    });

    // C receives the relayed payload and answers back toward B.
    let c_task = tokio::spawn(async move {
        let pkt = tcp_c.recv().await.expect("C TCP recv");
        println!("C: got {} bytes from B relay", pkt.data.len());
        assert_eq!(pkt.data, b"hello via mesh");
        tcp_c
            .send(&b_addr, b"ack from C")
            .await
            .expect("C TCP send");
    });

    // Give the relay/receiver tasks a moment to start listening before A sends.
    tokio::time::sleep(Duration::from_millis(50)).await;

    // A kicks off the round trip over the LoRa leg.
    lora_a
        .send(&TransportAddr::LoRa(ADDR_B), b"hello via mesh")
        .await?;

    // ...and blocks until C's ack makes it back through B.
    let reply = lora_a.recv().await?;
    println!("A: LoRa reply {} bytes", reply.data.len());
    assert_eq!(reply.data, b"ack from C");

    c_task.await.expect("node C task panicked");
    relay.await.expect("relay task panicked");

    lora_a.close().await.ok();
    println!("Done: LoRa + TCP relay path OK.");
    Ok(())
}
|
||||
135
crates/quicprochat-p2p/src/address.rs
Normal file
135
crates/quicprochat-p2p/src/address.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
//! Truncated mesh addresses for bandwidth-efficient routing.
|
||||
//!
|
||||
//! A [`MeshAddress`] is derived from an Ed25519 public key by taking the first
|
||||
//! 16 bytes of its SHA-256 hash. This provides globally unique addressing
|
||||
//! (birthday collision at ~2^64) while saving 16 bytes per packet compared to
|
||||
//! full 32-byte public keys.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::fmt;
|
||||
|
||||
/// 16-byte truncated mesh address.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct MeshAddress([u8; 16]);
|
||||
|
||||
impl MeshAddress {
|
||||
/// Derive from a 32-byte Ed25519 public key.
|
||||
pub fn from_public_key(key: &[u8; 32]) -> Self {
|
||||
let hash = Sha256::digest(key);
|
||||
let mut addr = [0u8; 16];
|
||||
addr.copy_from_slice(&hash[..16]);
|
||||
Self(addr)
|
||||
}
|
||||
|
||||
/// Create from raw 16-byte array.
|
||||
pub fn from_bytes(bytes: [u8; 16]) -> Self {
|
||||
Self(bytes)
|
||||
}
|
||||
|
||||
/// Get the raw 16-byte address.
|
||||
pub fn as_bytes(&self) -> &[u8; 16] {
|
||||
&self.0
|
||||
}
|
||||
|
||||
/// Check if a 32-byte public key matches this address.
|
||||
pub fn matches_key(&self, key: &[u8; 32]) -> bool {
|
||||
Self::from_public_key(key) == *self
|
||||
}
|
||||
|
||||
/// The broadcast address (all zeros).
|
||||
pub const BROADCAST: Self = Self([0u8; 16]);
|
||||
|
||||
/// Check if this is the broadcast address.
|
||||
pub fn is_broadcast(&self) -> bool {
|
||||
self.0 == [0u8; 16]
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for MeshAddress {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "MeshAddress({})", hex::encode(self.0))
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for MeshAddress {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", hex::encode(&self.0[..8]))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<[u8; 16]> for MeshAddress {
|
||||
fn from(bytes: [u8; 16]) -> Self {
|
||||
Self(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8; 16]> for MeshAddress {
|
||||
fn as_ref(&self) -> &[u8; 16] {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Address derivation is a pure function of the key.
    #[test]
    fn from_key_deterministic() {
        let key = [42u8; 32];
        let addr1 = MeshAddress::from_public_key(&key);
        let addr2 = MeshAddress::from_public_key(&key);
        assert_eq!(addr1, addr2, "same key must produce same address");
    }

    // Distinct keys should hash to distinct truncated addresses.
    #[test]
    fn different_keys_different_addresses() {
        let key_a = [1u8; 32];
        let key_b = [2u8; 32];
        let addr_a = MeshAddress::from_public_key(&key_a);
        let addr_b = MeshAddress::from_public_key(&key_b);
        assert_ne!(addr_a, addr_b, "different keys must produce different addresses");
    }

    // `matches_key` accepts the originating key and rejects others.
    #[test]
    fn matches_key_works() {
        let key = [99u8; 32];
        let addr = MeshAddress::from_public_key(&key);
        assert!(addr.matches_key(&key), "correct key must match");

        let wrong_key = [100u8; 32];
        assert!(!addr.matches_key(&wrong_key), "wrong key must not match");
    }

    // BROADCAST is all zeros and only all-zero addresses report as broadcast.
    #[test]
    fn broadcast_address() {
        assert_eq!(*MeshAddress::BROADCAST.as_bytes(), [0u8; 16]);
        assert!(MeshAddress::BROADCAST.is_broadcast());

        let non_broadcast = MeshAddress::from_bytes([1u8; 16]);
        assert!(!non_broadcast.is_broadcast());
    }

    // Display is the short 8-byte hex form; Debug wraps the full 16 bytes.
    #[test]
    fn display_formatting() {
        let key = [0xAB; 32];
        let addr = MeshAddress::from_public_key(&key);
        let display = format!("{addr}");
        // Display shows first 8 bytes as hex = 16 hex chars.
        assert_eq!(display.len(), 16, "display should show 8 bytes = 16 hex chars");

        let debug = format!("{addr:?}");
        // Debug shows all 16 bytes as hex = 32 hex chars, plus wrapper.
        assert!(debug.starts_with("MeshAddress("));
        assert!(debug.ends_with(')'));
    }

    // Serde round trip must preserve the address exactly.
    #[test]
    fn serde_roundtrip() {
        let key = [77u8; 32];
        let addr = MeshAddress::from_public_key(&key);
        let json = serde_json::to_string(&addr).expect("serialize");
        let restored: MeshAddress = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(addr, restored);
    }
}
|
||||
281
crates/quicprochat-p2p/src/announce.rs
Normal file
281
crates/quicprochat-p2p/src/announce.rs
Normal file
@@ -0,0 +1,281 @@
|
||||
//! Mesh announce protocol for self-organizing network discovery.
|
||||
//!
|
||||
//! Nodes periodically broadcast signed [`MeshAnnounce`] packets. These propagate
|
||||
//! through the mesh, building each node's [`RoutingTable`](crate::routing_table::RoutingTable).
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::identity::MeshIdentity;
|
||||
|
||||
/// Capability flag: node can relay messages for others.
pub const CAP_RELAY: u16 = 0x0001;
/// Capability flag: node has store-and-forward.
pub const CAP_STORE: u16 = 0x0002;
/// Capability flag: node is connected to Internet/server.
pub const CAP_GATEWAY: u16 = 0x0004;
/// Capability flag: node is on a low-bandwidth transport only.
pub const CAP_CONSTRAINED: u16 = 0x0008;

/// A signed mesh node announcement.
///
/// Every field except `signature` and `hop_count` is covered by the Ed25519
/// signature, so intermediate nodes can bump `hop_count` while forwarding
/// without invalidating the announcement.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MeshAnnounce {
    /// Ed25519 public key of the announcing node (32 bytes).
    // Kept as Vec<u8> for serde convenience; `verify` rejects any other length.
    pub identity_key: Vec<u8>,
    /// Truncated address: SHA-256(identity_key)[0..16] — used for routing.
    pub address: [u8; 16],
    /// Capability bitfield.
    // Bitwise OR of the CAP_* flags above.
    pub capabilities: u16,
    /// Monotonically increasing sequence number (per node).
    pub sequence: u64,
    /// Unix timestamp of creation.
    pub timestamp: u64,
    /// Transports this node is reachable on: Vec<(transport_name, serialized_addr)>.
    pub reachable_via: Vec<(String, Vec<u8>)>,
    /// Current hop count (incremented on re-broadcast).
    pub hop_count: u8,
    /// Maximum propagation hops.
    pub max_hops: u8,
    /// Ed25519 signature over all fields except signature and hop_count.
    pub signature: Vec<u8>,
}
|
||||
|
||||
/// Compute the 16-byte mesh address from an Ed25519 public key.
|
||||
///
|
||||
/// The address is the first 16 bytes of SHA-256(identity_key).
|
||||
pub fn compute_address(identity_key: &[u8]) -> [u8; 16] {
|
||||
let hash = Sha256::digest(identity_key);
|
||||
let mut addr = [0u8; 16];
|
||||
addr.copy_from_slice(&hash[..16]);
|
||||
addr
|
||||
}
|
||||
|
||||
impl MeshAnnounce {
    /// Create and sign a new mesh announcement.
    ///
    /// The announcement starts with `sequence = 0` and `hop_count = 0`; the
    /// signature covers every field except `signature` and `hop_count`
    /// (see [`Self::signable_bytes`]).
    pub fn new(
        identity: &MeshIdentity,
        capabilities: u16,
        reachable_via: Vec<(String, Vec<u8>)>,
        max_hops: u8,
    ) -> Self {
        let identity_key = identity.public_key().to_vec();
        let address = compute_address(&identity_key);
        // Wall-clock creation time; degrades to 0 if the system clock is
        // before the Unix epoch.
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();

        let mut announce = Self {
            identity_key,
            address,
            capabilities,
            sequence: 0,
            timestamp,
            reachable_via,
            hop_count: 0,
            max_hops,
            signature: Vec::new(),
        };

        // Sign only after every signable field has its final value.
        let signable = announce.signable_bytes();
        announce.signature = identity.sign(&signable).to_vec();
        announce
    }

    /// Create and sign with a specific sequence number.
    ///
    /// NOTE(review): this signs twice — once inside [`Self::new`] with
    /// sequence 0 and again here after patching the sequence. Correct, but a
    /// constructor that takes the sequence up front would save one signature
    /// computation.
    pub fn with_sequence(
        identity: &MeshIdentity,
        capabilities: u16,
        reachable_via: Vec<(String, Vec<u8>)>,
        max_hops: u8,
        sequence: u64,
    ) -> Self {
        let mut announce = Self::new(identity, capabilities, reachable_via, max_hops);
        announce.sequence = sequence;
        // Re-sign with the correct sequence number.
        let signable = announce.signable_bytes();
        announce.signature = identity.sign(&signable).to_vec();
        announce
    }

    /// Assemble the byte string that is signed / verified.
    ///
    /// `hop_count` and `signature` are excluded: forwarding nodes increment
    /// hop_count without re-signing (same design as [`MeshEnvelope`]).
    ///
    /// NOTE(review): fields are concatenated without length prefixes, so two
    /// different `reachable_via` lists can serialise to identical signable
    /// bytes (e.g. ("ab", "c") vs ("a", "bc")). The signature stays
    /// unforgeable, but the signed message is not a canonical encoding of the
    /// struct — consider length-prefixing each name/addr pair; note that
    /// changing this invalidates previously issued signatures.
    fn signable_bytes(&self) -> Vec<u8> {
        // Capacity is an estimate only: each reachable_via entry is assumed
        // to average under 32 bytes.
        let mut buf = Vec::with_capacity(
            self.identity_key.len() + 16 + 2 + 8 + 8 + self.reachable_via.len() * 32 + 1,
        );
        buf.extend_from_slice(&self.identity_key);
        buf.extend_from_slice(&self.address);
        buf.extend_from_slice(&self.capabilities.to_le_bytes());
        buf.extend_from_slice(&self.sequence.to_le_bytes());
        buf.extend_from_slice(&self.timestamp.to_le_bytes());
        for (name, addr) in &self.reachable_via {
            buf.extend_from_slice(name.as_bytes());
            buf.extend_from_slice(addr);
        }
        buf.push(self.max_hops);
        buf
    }

    /// Verify the Ed25519 signature on this announcement.
    ///
    /// Returns `false` (never panics) when the key or signature has the
    /// wrong length, or when the signature does not match.
    pub fn verify(&self) -> bool {
        let identity_key: [u8; 32] = match self.identity_key.as_slice().try_into() {
            Ok(k) => k,
            Err(_) => return false,
        };
        let sig: [u8; 64] = match self.signature.as_slice().try_into() {
            Ok(s) => s,
            Err(_) => return false,
        };
        let signable = self.signable_bytes();
        quicprochat_core::IdentityKeypair::verify_raw(&identity_key, &signable, &sig).is_ok()
    }

    /// Check whether this announce has expired relative to a maximum age.
    ///
    /// Announcements timestamped in the future are never considered expired
    /// (the subtraction saturates to zero).
    pub fn is_expired(&self, max_age_secs: u64) -> bool {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        now.saturating_sub(self.timestamp) > max_age_secs
    }

    /// Create a forwarded copy with `hop_count` incremented by one.
    ///
    /// The signature remains the original — forwarding nodes do not re-sign.
    pub fn forwarded(&self) -> Self {
        let mut copy = self.clone();
        copy.hop_count = copy.hop_count.saturating_add(1);
        copy
    }

    /// Whether this announce can still propagate (under hop limit and not expired).
    ///
    /// Uses a generous default max age of 1800 seconds (30 minutes) for the
    /// expiry check. Callers that need a different max age should check
    /// [`is_expired`](Self::is_expired) separately.
    pub fn can_propagate(&self) -> bool {
        self.hop_count < self.max_hops && !self.is_expired(1800)
    }

    /// Serialize to compact CBOR binary format (for wire transmission).
    ///
    /// # Panics
    /// Panics only if CBOR serialisation fails, which should not happen for
    /// this struct's field types (the `expect` documents that invariant).
    pub fn to_wire(&self) -> Vec<u8> {
        let mut buf = Vec::new();
        ciborium::into_writer(self, &mut buf).expect("CBOR serialization should not fail");
        buf
    }

    /// Deserialize from CBOR binary format.
    ///
    /// # Errors
    /// Returns an error when `bytes` is not a valid CBOR encoding of this
    /// struct. The signature is NOT verified here — call [`Self::verify`].
    pub fn from_wire(bytes: &[u8]) -> anyhow::Result<Self> {
        let announce: Self = ciborium::from_reader(bytes)?;
        Ok(announce)
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn test_identity() -> MeshIdentity {
|
||||
MeshIdentity::generate()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn create_and_verify() {
|
||||
let id = test_identity();
|
||||
let announce = MeshAnnounce::new(
|
||||
&id,
|
||||
CAP_RELAY | CAP_STORE,
|
||||
vec![("tcp".into(), b"127.0.0.1:9000".to_vec())],
|
||||
8,
|
||||
);
|
||||
|
||||
assert!(announce.verify(), "freshly created announce must verify");
|
||||
assert_eq!(announce.hop_count, 0);
|
||||
assert_eq!(announce.identity_key, id.public_key().to_vec());
|
||||
assert_eq!(announce.capabilities, CAP_RELAY | CAP_STORE);
|
||||
assert_eq!(announce.max_hops, 8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tampered_fails_verify() {
|
||||
let id = test_identity();
|
||||
let mut announce = MeshAnnounce::new(&id, CAP_RELAY, vec![], 4);
|
||||
announce.capabilities = CAP_GATEWAY; // tamper
|
||||
assert!(
|
||||
!announce.verify(),
|
||||
"tampered announce must fail verification"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn forwarded_still_verifies() {
|
||||
let id = test_identity();
|
||||
let announce = MeshAnnounce::new(&id, CAP_RELAY, vec![], 8);
|
||||
assert!(announce.verify());
|
||||
|
||||
let fwd = announce.forwarded();
|
||||
assert_eq!(fwd.hop_count, 1);
|
||||
assert!(
|
||||
fwd.verify(),
|
||||
"forwarded announce must still verify (hop_count excluded from signature)"
|
||||
);
|
||||
|
||||
let fwd2 = fwd.forwarded();
|
||||
assert_eq!(fwd2.hop_count, 2);
|
||||
assert!(fwd2.verify(), "double-forwarded must still verify");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn expired_announce() {
|
||||
let id = test_identity();
|
||||
let mut announce = MeshAnnounce::new(&id, 0, vec![], 4);
|
||||
// Set timestamp far in the past.
|
||||
announce.timestamp = 0;
|
||||
assert!(announce.is_expired(60), "announce from epoch should be expired with 60s max age");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn address_from_key_deterministic() {
|
||||
let key = [42u8; 32];
|
||||
let addr1 = compute_address(&key);
|
||||
let addr2 = compute_address(&key);
|
||||
assert_eq!(addr1, addr2, "same key must produce same address");
|
||||
|
||||
// Different key produces different address.
|
||||
let other_key = [99u8; 32];
|
||||
let other_addr = compute_address(&other_key);
|
||||
assert_ne!(addr1, other_addr);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cbor_roundtrip() {
|
||||
let id = test_identity();
|
||||
let announce = MeshAnnounce::new(
|
||||
&id,
|
||||
CAP_RELAY | CAP_GATEWAY,
|
||||
vec![
|
||||
("tcp".into(), b"127.0.0.1:9000".to_vec()),
|
||||
("lora".into(), vec![0x01, 0x02, 0x03, 0x04]),
|
||||
],
|
||||
6,
|
||||
);
|
||||
|
||||
let wire = announce.to_wire();
|
||||
let restored = MeshAnnounce::from_wire(&wire).expect("CBOR deserialize");
|
||||
|
||||
assert_eq!(announce.identity_key, restored.identity_key);
|
||||
assert_eq!(announce.address, restored.address);
|
||||
assert_eq!(announce.capabilities, restored.capabilities);
|
||||
assert_eq!(announce.sequence, restored.sequence);
|
||||
assert_eq!(announce.timestamp, restored.timestamp);
|
||||
assert_eq!(announce.reachable_via, restored.reachable_via);
|
||||
assert_eq!(announce.hop_count, restored.hop_count);
|
||||
assert_eq!(announce.max_hops, restored.max_hops);
|
||||
assert_eq!(announce.signature, restored.signature);
|
||||
assert!(restored.verify());
|
||||
}
|
||||
}
|
||||
302
crates/quicprochat-p2p/src/announce_protocol.rs
Normal file
302
crates/quicprochat-p2p/src/announce_protocol.rs
Normal file
@@ -0,0 +1,302 @@
|
||||
//! Announce protocol engine — sends, receives, and propagates mesh announcements.
|
||||
//!
|
||||
//! This module ties together [`MeshAnnounce`], [`RoutingTable`], and
|
||||
//! deduplication logic to form a complete announce processing pipeline.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::announce::MeshAnnounce;
|
||||
use crate::identity::MeshIdentity;
|
||||
use crate::routing_table::RoutingTable;
|
||||
use crate::transport::TransportAddr;
|
||||
|
||||
/// Configuration for the announce protocol.
#[derive(Clone, Debug)]
pub struct AnnounceConfig {
    /// Interval between periodic re-announcements.
    pub announce_interval: Duration,
    /// Maximum age before an announce is considered expired.
    pub max_announce_age: Duration,
    /// Maximum hops for announce propagation.
    pub max_hops: u8,
    /// This node's capabilities.
    // Bitwise OR of the CAP_* flags from `crate::announce`.
    pub capabilities: u16,
    /// Interval for routing table garbage collection.
    pub gc_interval: Duration,
}

impl Default for AnnounceConfig {
    // Defaults tuned for low-bandwidth meshes: re-announce every 10 minutes,
    // tolerate 30 minutes of staleness, and GC the routing table each minute.
    fn default() -> Self {
        Self {
            announce_interval: Duration::from_secs(600), // 10 minutes
            max_announce_age: Duration::from_secs(1800), // 30 minutes
            max_hops: 8,
            capabilities: 0,
            gc_interval: Duration::from_secs(60),
        }
    }
}
|
||||
|
||||
/// Tracks which announces we've already seen (to prevent re-broadcast loops).
pub struct AnnounceDedup {
    /// Set of (address, sequence) pairs we've seen.
    seen: HashSet<([u8; 16], u64)>,
    /// Maximum entries before pruning.
    max_entries: usize,
}

impl AnnounceDedup {
    /// Create a new dedup tracker with the given capacity.
    pub fn new(max_entries: usize) -> Self {
        Self {
            seen: HashSet::new(),
            max_entries,
        }
    }

    /// Check if this announce is new (not seen before).
    ///
    /// Returns `true` if the (address, sequence) pair has not been seen before,
    /// and adds it to the set. Returns `false` if it was already seen.
    pub fn is_new(&mut self, address: &[u8; 16], sequence: u64) -> bool {
        // Prune before inserting so the set never exceeds `max_entries`
        // by more than one.
        if self.seen.len() >= self.max_entries {
            self.prune();
        }
        // HashSet::insert returns true only when the pair was absent.
        self.seen.insert((*address, sequence))
    }

    /// Remove all entries when the set exceeds capacity.
    ///
    /// Uses a simple clear-all strategy; a more sophisticated implementation
    /// could track insertion order and evict oldest entries.
    ///
    /// NOTE(review): clearing everything also forgets *recent* pairs, so an
    /// announce re-received right after a prune is treated as new and may be
    /// re-broadcast once more. Bounded, but an LRU eviction would avoid it.
    pub fn prune(&mut self) {
        self.seen.clear();
    }
}
|
||||
|
||||
/// Create this node's own mesh announcement.
|
||||
pub fn create_announce(
|
||||
identity: &MeshIdentity,
|
||||
config: &AnnounceConfig,
|
||||
sequence: u64,
|
||||
reachable_via: Vec<(String, Vec<u8>)>,
|
||||
) -> MeshAnnounce {
|
||||
MeshAnnounce::with_sequence(
|
||||
identity,
|
||||
config.capabilities,
|
||||
reachable_via,
|
||||
config.max_hops,
|
||||
sequence,
|
||||
)
|
||||
}
|
||||
|
||||
/// Process a received mesh announcement.
|
||||
///
|
||||
/// Steps:
|
||||
/// 1. Verify signature — return `None` if invalid.
|
||||
/// 2. Check if expired — return `None` if stale.
|
||||
/// 3. Check dedup — return `None` if already seen.
|
||||
/// 4. Update routing table.
|
||||
/// 5. If `can_propagate` — return `Some(forwarded)` for re-broadcast.
|
||||
/// 6. Otherwise return `None`.
|
||||
pub fn process_received_announce(
|
||||
announce: &MeshAnnounce,
|
||||
routing_table: &mut RoutingTable,
|
||||
dedup: &mut AnnounceDedup,
|
||||
received_via: &str,
|
||||
received_from: TransportAddr,
|
||||
max_age: Duration,
|
||||
) -> Option<MeshAnnounce> {
|
||||
// 1. Verify signature.
|
||||
if !announce.verify() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// 2. Check expiry.
|
||||
if announce.is_expired(max_age.as_secs()) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// 3. Dedup check.
|
||||
if !dedup.is_new(&announce.address, announce.sequence) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// 4. Update routing table.
|
||||
routing_table.update(announce, received_via, received_from);
|
||||
|
||||
// 5. Check if the announce can propagate further.
|
||||
if announce.hop_count < announce.max_hops && !announce.is_expired(max_age.as_secs()) {
|
||||
Some(announce.forwarded())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::announce::CAP_RELAY;
|
||||
use crate::identity::MeshIdentity;
|
||||
|
||||
fn test_identity() -> MeshIdentity {
|
||||
MeshIdentity::generate()
|
||||
}
|
||||
|
||||
fn default_config() -> AnnounceConfig {
|
||||
AnnounceConfig {
|
||||
capabilities: CAP_RELAY,
|
||||
..AnnounceConfig::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn create_announce_is_valid() {
|
||||
let id = test_identity();
|
||||
let config = default_config();
|
||||
let announce = create_announce(
|
||||
&id,
|
||||
&config,
|
||||
1,
|
||||
vec![("tcp".into(), b"127.0.0.1:9000".to_vec())],
|
||||
);
|
||||
|
||||
assert!(announce.verify());
|
||||
assert_eq!(announce.sequence, 1);
|
||||
assert_eq!(announce.capabilities, CAP_RELAY);
|
||||
assert_eq!(announce.max_hops, 8);
|
||||
assert_eq!(announce.hop_count, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_valid_announce_updates_table() {
|
||||
let id = test_identity();
|
||||
let config = default_config();
|
||||
let announce = create_announce(&id, &config, 1, vec![]);
|
||||
|
||||
let mut table = RoutingTable::new(Duration::from_secs(300));
|
||||
let mut dedup = AnnounceDedup::new(1000);
|
||||
let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());
|
||||
|
||||
let result = process_received_announce(
|
||||
&announce,
|
||||
&mut table,
|
||||
&mut dedup,
|
||||
"tcp",
|
||||
addr,
|
||||
Duration::from_secs(1800),
|
||||
);
|
||||
|
||||
// Should propagate (hop_count 0 < max_hops 8).
|
||||
assert!(result.is_some());
|
||||
// Routing table should have the entry.
|
||||
assert_eq!(table.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_duplicate_ignored() {
|
||||
let id = test_identity();
|
||||
let config = default_config();
|
||||
let announce = create_announce(&id, &config, 1, vec![]);
|
||||
|
||||
let mut table = RoutingTable::new(Duration::from_secs(300));
|
||||
let mut dedup = AnnounceDedup::new(1000);
|
||||
let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());
|
||||
|
||||
// First time — accepted.
|
||||
let result1 = process_received_announce(
|
||||
&announce,
|
||||
&mut table,
|
||||
&mut dedup,
|
||||
"tcp",
|
||||
addr.clone(),
|
||||
Duration::from_secs(1800),
|
||||
);
|
||||
assert!(result1.is_some());
|
||||
|
||||
// Second time — duplicate, ignored.
|
||||
let result2 = process_received_announce(
|
||||
&announce,
|
||||
&mut table,
|
||||
&mut dedup,
|
||||
"tcp",
|
||||
addr,
|
||||
Duration::from_secs(1800),
|
||||
);
|
||||
assert!(result2.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_expired_ignored() {
|
||||
let id = test_identity();
|
||||
let config = default_config();
|
||||
let mut announce = create_announce(&id, &config, 1, vec![]);
|
||||
// Set timestamp far in the past.
|
||||
announce.timestamp = 0;
|
||||
|
||||
let mut table = RoutingTable::new(Duration::from_secs(300));
|
||||
let mut dedup = AnnounceDedup::new(1000);
|
||||
let addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());
|
||||
|
||||
let result = process_received_announce(
|
||||
&announce,
|
||||
&mut table,
|
||||
&mut dedup,
|
||||
"tcp",
|
||||
addr,
|
||||
Duration::from_secs(60),
|
||||
);
|
||||
assert!(result.is_none(), "expired announce must be ignored");
|
||||
assert!(table.is_empty());
|
||||
}
|
||||
|
||||
#[test]
fn process_invalid_sig_ignored() {
    let identity = test_identity();
    let cfg = default_config();
    // Mutate a signed field after signing so the signature no longer matches.
    let mut forged = create_announce(&identity, &cfg, 1, vec![]);
    forged.capabilities = 0xFFFF;

    let mut routing = RoutingTable::new(Duration::from_secs(300));
    let mut seen = AnnounceDedup::new(1000);
    let peer_addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());

    let outcome = process_received_announce(
        &forged,
        &mut routing,
        &mut seen,
        "tcp",
        peer_addr,
        Duration::from_secs(1800),
    );
    assert!(outcome.is_none(), "tampered announce must be ignored");
    // A forged announce must leave no trace in the routing table.
    assert!(routing.is_empty());
}
|
||||
|
||||
#[test]
fn process_returns_forwarded_for_propagation() {
    let identity = test_identity();
    let cfg = default_config();
    let announce = create_announce(&identity, &cfg, 1, vec![]);
    // Freshly created announces start at hop zero.
    assert_eq!(announce.hop_count, 0);

    let mut routing = RoutingTable::new(Duration::from_secs(300));
    let mut seen = AnnounceDedup::new(1000);
    let peer_addr = TransportAddr::Socket("127.0.0.1:9000".parse().unwrap());

    let outcome = process_received_announce(
        &announce,
        &mut routing,
        &mut seen,
        "tcp",
        peer_addr,
        Duration::from_secs(1800),
    );

    // The returned copy is the one to propagate: hop count bumped by one,
    // signature still intact (hop_count is outside the signed payload).
    let forwarded = outcome.expect("should return forwarded announce");
    assert_eq!(forwarded.hop_count, 1);
    assert!(forwarded.verify(), "forwarded announce must still verify");
}
|
||||
}
|
||||
232
crates/quicprochat-p2p/src/broadcast.rs
Normal file
232
crates/quicprochat-p2p/src/broadcast.rs
Normal file
@@ -0,0 +1,232 @@
|
||||
//! Lightweight pub/sub broadcast channels for mesh announcements.
|
||||
//!
|
||||
//! Each [`BroadcastChannel`] holds a ChaCha20-Poly1305 symmetric key used to
|
||||
//! encrypt and decrypt messages on that topic. Peers that know the key can
|
||||
//! subscribe; the key itself is exchanged out-of-band.
|
||||
//!
|
||||
//! [`BroadcastManager`] collects channels by topic and provides convenience
|
||||
//! methods for encrypt/decrypt without exposing raw keys.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use chacha20poly1305::aead::{Aead, AeadCore, KeyInit};
|
||||
use chacha20poly1305::ChaCha20Poly1305;
|
||||
use rand::rngs::OsRng;
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
||||
|
||||
/// A single broadcast channel identified by topic, secured with a symmetric key.
|
||||
pub struct BroadcastChannel {
|
||||
topic: String,
|
||||
key: [u8; 32],
|
||||
}
|
||||
|
||||
// Wipe the symmetric key from memory when the channel is dropped.
impl Drop for BroadcastChannel {
    fn drop(&mut self) {
        self.key.zeroize();
    }
}

// Marker impl is sound because the manual `Drop` above zeroizes the only
// sensitive field (`key`); `topic` is not secret and needs no wiping.
impl ZeroizeOnDrop for BroadcastChannel {}
|
||||
|
||||
impl BroadcastChannel {
|
||||
/// Create a new channel with a random ChaCha20-Poly1305 key.
|
||||
pub fn new(topic: &str) -> Self {
|
||||
let mut key = [0u8; 32];
|
||||
rand::RngCore::fill_bytes(&mut OsRng, &mut key);
|
||||
Self {
|
||||
topic: topic.to_string(),
|
||||
key,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a channel with a pre-shared key (e.g. received from another peer).
|
||||
pub fn with_key(topic: &str, key: [u8; 32]) -> Self {
|
||||
Self {
|
||||
topic: topic.to_string(),
|
||||
key,
|
||||
}
|
||||
}
|
||||
|
||||
/// Encrypt `plaintext`, returning `nonce || ciphertext`.
|
||||
pub fn encrypt(&self, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let cipher = ChaCha20Poly1305::new((&self.key).into());
|
||||
let nonce = ChaCha20Poly1305::generate_nonce(&mut OsRng);
|
||||
let ciphertext = cipher
|
||||
.encrypt(&nonce, plaintext)
|
||||
.map_err(|_| anyhow::anyhow!("ChaCha20Poly1305 encryption failed"))?;
|
||||
let mut out = Vec::with_capacity(nonce.len() + ciphertext.len());
|
||||
out.extend_from_slice(&nonce);
|
||||
out.extend_from_slice(&ciphertext);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Decrypt data produced by [`encrypt`](Self::encrypt).
|
||||
///
|
||||
/// Expects `nonce (12 bytes) || ciphertext`.
|
||||
pub fn decrypt(&self, data: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
if data.len() < 12 {
|
||||
anyhow::bail!("broadcast ciphertext too short (need at least 12-byte nonce)");
|
||||
}
|
||||
let (nonce_bytes, ciphertext) = data.split_at(12);
|
||||
let nonce = chacha20poly1305::Nonce::from_slice(nonce_bytes);
|
||||
let cipher = ChaCha20Poly1305::new((&self.key).into());
|
||||
cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| anyhow::anyhow!("broadcast decryption failed (wrong key or corrupted)"))
|
||||
}
|
||||
|
||||
/// The topic name for this channel.
|
||||
pub fn topic(&self) -> &str {
|
||||
&self.topic
|
||||
}
|
||||
|
||||
/// The raw 32-byte symmetric key (for sharing with peers out-of-band).
|
||||
pub fn key(&self) -> &[u8; 32] {
|
||||
&self.key
|
||||
}
|
||||
}
|
||||
|
||||
/// Manages a set of broadcast channels keyed by topic.
|
||||
pub struct BroadcastManager {
|
||||
channels: HashMap<String, BroadcastChannel>,
|
||||
}
|
||||
|
||||
impl BroadcastManager {
|
||||
/// Create an empty manager.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
channels: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Subscribe to a topic with a pre-shared key.
|
||||
pub fn subscribe(&mut self, topic: &str, key: [u8; 32]) {
|
||||
self.channels
|
||||
.insert(topic.to_string(), BroadcastChannel::with_key(topic, key));
|
||||
}
|
||||
|
||||
/// Unsubscribe from a topic.
|
||||
pub fn unsubscribe(&mut self, topic: &str) {
|
||||
self.channels.remove(topic);
|
||||
}
|
||||
|
||||
/// Create a new broadcast channel with a random key and return a reference.
|
||||
pub fn create_channel(&mut self, topic: &str) -> &BroadcastChannel {
|
||||
self.channels
|
||||
.insert(topic.to_string(), BroadcastChannel::new(topic));
|
||||
self.channels
|
||||
.get(topic)
|
||||
.expect("just inserted")
|
||||
}
|
||||
|
||||
/// Look up a channel by topic.
|
||||
pub fn get(&self, topic: &str) -> Option<&BroadcastChannel> {
|
||||
self.channels.get(topic)
|
||||
}
|
||||
|
||||
/// List all subscribed topics.
|
||||
pub fn topics(&self) -> Vec<String> {
|
||||
self.channels.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Encrypt a message on the given topic. Returns `None` if not subscribed.
|
||||
pub fn encrypt(&self, topic: &str, plaintext: &[u8]) -> Option<anyhow::Result<Vec<u8>>> {
|
||||
self.channels.get(topic).map(|ch| ch.encrypt(plaintext))
|
||||
}
|
||||
|
||||
/// Decrypt a message on the given topic. Returns `None` if not subscribed.
|
||||
pub fn decrypt(&self, topic: &str, data: &[u8]) -> Option<Vec<u8>> {
|
||||
self.channels
|
||||
.get(topic)
|
||||
.and_then(|ch| ch.decrypt(data).ok())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BroadcastManager {
    /// Equivalent to [`BroadcastManager::new`]: an empty manager.
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn encrypt_decrypt_roundtrip() {
        // A message encrypted on a channel decrypts back on the same channel.
        let channel = BroadcastChannel::new("test-topic");
        let msg = b"hello broadcast";
        let sealed = channel.encrypt(msg).expect("encrypt");
        let opened = channel.decrypt(&sealed).expect("decrypt");
        assert_eq!(opened, msg);
    }

    #[test]
    fn wrong_key_fails_decrypt() {
        // Two `new` channels on the same topic hold independent random keys.
        let sender = BroadcastChannel::new("topic");
        let stranger = BroadcastChannel::new("topic"); // different random key
        let sealed = sender.encrypt(b"secret").expect("encrypt");
        let outcome = stranger.decrypt(&sealed);
        assert!(outcome.is_err(), "wrong key should fail decryption");
    }

    #[test]
    fn with_key_roundtrip() {
        // Peers sharing the same pre-shared key can decrypt each other's frames.
        let shared = [42u8; 32];
        let sender = BroadcastChannel::with_key("shared", shared);
        let sealed = sender.encrypt(b"data").expect("encrypt");
        let receiver = BroadcastChannel::with_key("shared", shared);
        let opened = receiver.decrypt(&sealed).expect("same key should decrypt");
        assert_eq!(opened, b"data");
    }

    #[test]
    fn manager_subscribe_unsubscribe() {
        let mut mgr = BroadcastManager::new();
        assert!(mgr.topics().is_empty());

        mgr.subscribe("alerts", [1u8; 32]);
        assert_eq!(mgr.topics().len(), 1);
        assert!(mgr.get("alerts").is_some());

        mgr.unsubscribe("alerts");
        assert!(mgr.topics().is_empty());
        assert!(mgr.get("alerts").is_none());
    }

    #[test]
    fn manager_create_channel() {
        let mut mgr = BroadcastManager::new();
        let created = mgr.create_channel("news");
        let key = *created.key();
        assert_eq!(created.topic(), "news");

        // Encrypt via the manager, decrypt manually with the exported key.
        let sealed = mgr
            .encrypt("news", b"headline")
            .expect("subscribed")
            .expect("encrypt");
        let standalone = BroadcastChannel::with_key("news", key);
        let opened = standalone.decrypt(&sealed).expect("decrypt");
        assert_eq!(opened, b"headline");
    }

    #[test]
    fn manager_encrypt_decrypt() {
        let mut mgr = BroadcastManager::new();
        mgr.subscribe("ch1", [7u8; 32]);

        let sealed = mgr
            .encrypt("ch1", b"round-trip")
            .expect("subscribed")
            .expect("encrypt");
        let opened = mgr.decrypt("ch1", &sealed).expect("decrypt");
        assert_eq!(opened, b"round-trip");

        // Topics never subscribed to yield None in both directions.
        assert!(mgr.encrypt("unknown", b"x").is_none());
        assert!(mgr.decrypt("unknown", b"x").is_none());
    }

    #[test]
    fn short_ciphertext_rejected() {
        // Anything shorter than the 12-byte nonce cannot be a valid frame.
        let channel = BroadcastChannel::new("t");
        let outcome = channel.decrypt(&[0u8; 5]); // less than 12-byte nonce
        assert!(outcome.is_err());
    }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user