Compare commits
143 Commits
cursor/clo
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| d2ad0dd21a | |||
| 9e647f37d5 | |||
| da0085f1a6 | |||
| 95ce8898fd | |||
| 99d36679c8 | |||
| a856f9bb53 | |||
| f58ce2529d | |||
| 4dadd01c6b | |||
| fb6b80c81c | |||
| 8eba12170e | |||
| a3023ecac1 | |||
| 150f30b0d6 | |||
| a60767a7eb | |||
| 6ae3251ebd | |||
| ad636b874b | |||
| afaaf2c417 | |||
| 50a63a6b96 | |||
| a258f98a40 | |||
| 024b6c91d1 | |||
| ac36534063 | |||
| 7be7287ba2 | |||
| 3c6eebdb00 | |||
| eee1e9f278 | |||
| 5d1688d89f | |||
| 56331632fd | |||
| 12846bd2a0 | |||
| dd2041df20 | |||
| 65ce5aec18 | |||
| 0b3d5c5100 | |||
| cbfa7e16c4 | |||
| e2c04cf0c3 | |||
| bcde8b733c | |||
| 237f4360e4 | |||
| a055706236 | |||
| 9cbf824db6 | |||
| 3f81837112 | |||
| db49d83fda | |||
| 9b09f09892 | |||
| 92fefda41d | |||
| 84ec822823 | |||
| 01bc2a4273 | |||
| f9ac921a0c | |||
| d469999c2a | |||
| f0901f6597 | |||
| 543bd442a3 | |||
| 266bcfed59 | |||
| c256c38ffb | |||
| 416618f4cf | |||
| 872695e5f1 | |||
| e4c5868b31 | |||
| 66eca065e0 | |||
| a05da9b751 | |||
| 077f48f19c | |||
| 3708b8df41 | |||
| b98dcc27ae | |||
| 2e081ead8e | |||
| a710037dde | |||
| d8c1392587 | |||
| a9d1f535aa | |||
| aa29d3bc34 | |||
| 2a9f0b43ef | |||
| d073f614b3 | |||
| f7a7f672b4 | |||
| 189534c511 | |||
| 501f5a577c | |||
| 5cc37cc88b | |||
| 1d59a052ad | |||
| 12717979ba | |||
| 3f5a3a5ac8 | |||
| 511fc7822e | |||
| f57dda3f36 | |||
| cbb76af6b1 | |||
| 2d56824834 | |||
| 496f83067a | |||
| 1768f85258 | |||
| f667281831 | |||
| 372dd67a3b | |||
| 49e8e066d7 | |||
| f4621b3425 | |||
| c401caec60 | |||
| 885cce0d7d | |||
| 913f6faaf3 | |||
| e93a38243f | |||
| 91c5495ab7 | |||
| b94248b3b6 | |||
| a90020fe89 | |||
| fd1accc6dd | |||
| 799aab68fe | |||
| eaca24397b | |||
| 12b19b6931 | |||
| 5b6d8209f0 | |||
| a1f0dbc514 | |||
| 5a66c2e954 | |||
| 4013b223ff | |||
| 3a42130518 | |||
| c8c5f96ecd | |||
| e5329ee8e5 | |||
| e3dfc43e2c | |||
| 7bcfbf175c | |||
| 75f11cb76b | |||
| a3f67aca45 | |||
| 950f477842 | |||
| 3393514911 | |||
| a8ed3c4356 | |||
| cab03bd3f7 | |||
| 99f9abe9ed | |||
| 029c701780 | |||
| 4d62a837a5 | |||
| 67983c7a40 | |||
| 011ff541bb | |||
| 918da0c23d | |||
| 6b757f8d65 | |||
| d118fdbddf | |||
| 6273ab668d | |||
| f09dbe10ce | |||
| ff93275dc1 | |||
| a5864127d1 | |||
| 394199b19b | |||
| 4694a3098b | |||
| 4454458e38 | |||
| 5a6d9ae7f4 | |||
| 9244e80ec7 | |||
| 1b61b7ee8f | |||
| 28ceaaf072 | |||
| 65ff26235e | |||
| fd21ea625c | |||
| 3350d765e5 | |||
| 81d5e2e590 | |||
| db46b72f58 | |||
| 9ab306d891 | |||
| 612b06aa8e | |||
| dc4e4e49a0 | |||
| b6483dedbc | |||
| d7e530435f | |||
| c8398d6cb7 | |||
| e24497bf90 | |||
| c2762f93f6 | |||
| 5b9df61194 | |||
| 9525a3c565 | |||
| 853ca4fec0 | |||
| 553de3a2b7 | |||
| 4c1e4683e3 | |||
| 750b794342 |
20
.env.example
Normal file
20
.env.example
Normal file
@@ -0,0 +1,20 @@
|
||||
# quicprochat Production Environment Variables
|
||||
# Copy this file to .env and fill in the values.
|
||||
|
||||
# Server auth token (required, >= 16 characters)
|
||||
QPC_AUTH_TOKEN=
|
||||
|
||||
# SQLCipher database encryption key (required for store_backend=sql)
|
||||
QPC_DB_KEY=
|
||||
|
||||
# Ports (defaults shown)
|
||||
QPC_LISTEN_PORT=7000
|
||||
QPC_WS_PORT=9000
|
||||
|
||||
# Optional features
|
||||
QPC_SEALED_SENDER=false
|
||||
QPC_REDACT_LOGS=true
|
||||
QPC_WS_LISTEN=
|
||||
|
||||
# Grafana admin password (required — must be strong, no default)
|
||||
GRAFANA_ADMIN_PASSWORD=
|
||||
134
.gitea/workflows/claude.yml
Normal file
134
.gitea/workflows/claude.yml
Normal file
@@ -0,0 +1,134 @@
|
||||
name: Claude Code Assistant
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, labeled]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
concurrency:
|
||||
group: claude-${{ github.event.issue.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
claude-code:
|
||||
if: >-
|
||||
(github.event_name == 'issues' &&
|
||||
contains(toJSON(github.event.issue.labels), 'claude')) ||
|
||||
(github.event_name == 'issue_comment' &&
|
||||
contains(github.event.comment.body, '@claude') &&
|
||||
github.event.comment.user.login != 'admin')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run Claude on Issue
|
||||
env:
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
GIT_TOKEN: ${{ secrets.GIT_TOKEN }}
|
||||
run: |
|
||||
set +e
|
||||
|
||||
# Configure git
|
||||
git config user.name "Claude Bot"
|
||||
git config user.email "claude@localhost"
|
||||
git remote set-url origin "http://admin:${GIT_TOKEN}@localhost:3000/${{ github.repository }}.git"
|
||||
|
||||
ISSUE_NUMBER="${{ github.event.issue.number }}"
|
||||
ISSUE_TITLE="${{ github.event.issue.title }}"
|
||||
REPO="${{ github.repository }}"
|
||||
LABELS_JSON='${{ toJSON(github.event.issue.labels) }}'
|
||||
|
||||
# Determine model + cost limits from issue labels
|
||||
# Default: haiku (cheap). Add claude:sonnet or claude:opus for harder tasks.
|
||||
CLAUDE_MODEL="haiku"
|
||||
MAX_TURNS=15
|
||||
MAX_BUDGET="0.50"
|
||||
EFFORT="low"
|
||||
if echo "$LABELS_JSON" | grep -q '"claude:opus"'; then
|
||||
CLAUDE_MODEL="claude-opus-4-6"
|
||||
MAX_TURNS=40
|
||||
MAX_BUDGET="5.00"
|
||||
EFFORT="high"
|
||||
elif echo "$LABELS_JSON" | grep -q '"claude:sonnet"'; then
|
||||
CLAUDE_MODEL="claude-sonnet-4-6"
|
||||
MAX_TURNS=25
|
||||
MAX_BUDGET="2.00"
|
||||
EFFORT="medium"
|
||||
fi
|
||||
|
||||
ISSUE_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
|
||||
|
||||
COMMENT_BODY=""
|
||||
if [ "${{ github.event_name }}" = "issue_comment" ]; then
|
||||
COMMENT_ID="${{ github.event.comment.id }}"
|
||||
COMMENT_BODY=$(curl -s "http://localhost:3000/api/v1/repos/${REPO}/issues/comments/${COMMENT_ID}" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" | python3 -c "import sys,json; print(json.load(sys.stdin).get('body',''))")
|
||||
fi
|
||||
|
||||
BRANCH="claude/issue-${ISSUE_NUMBER}"
|
||||
git checkout -b "${BRANCH}"
|
||||
|
||||
# Run Claude Code with cost controls
|
||||
claude -p "You are working on the repository ${REPO} (Gitea instance at http://localhost:3000).
|
||||
A Gitea issue needs your attention:
|
||||
|
||||
Issue #${ISSUE_NUMBER}: ${ISSUE_TITLE}
|
||||
Description: ${ISSUE_BODY}
|
||||
Additional context: ${COMMENT_BODY}
|
||||
|
||||
IMPORTANT RULES:
|
||||
- Do NOT retry failed commands more than once. If something fails twice, stop and report the error.
|
||||
- Do NOT loop on failing tests. Fix the obvious issue or report it. Never run the same failing command 3+ times.
|
||||
- If you cannot complete the task, push what you have, create the PR as draft, and explain what is blocked.
|
||||
- Be efficient: read only files you need, make targeted edits, avoid unnecessary exploration.
|
||||
|
||||
Steps:
|
||||
1. Read and understand the relevant parts of the codebase
|
||||
2. Implement the requested changes
|
||||
3. Commit your changes with a descriptive message
|
||||
4. Push branch ${BRANCH} to origin
|
||||
5. Create a pull request targeting main that references issue #${ISSUE_NUMBER}
|
||||
6. Post a comment on issue #${ISSUE_NUMBER} summarizing what you did
|
||||
|
||||
Git is configured. You are on branch ${BRANCH}. Work in the current directory.
|
||||
Use git commands to push, and curl to the Gitea API for PR creation and comments.
|
||||
Gitea API token is available as env var GIT_TOKEN." \
|
||||
--allowedTools "Bash,Read,Edit,Write,Glob,Grep" \
|
||||
--model "${CLAUDE_MODEL}" \
|
||||
--max-turns "${MAX_TURNS}" \
|
||||
--max-budget-usd "${MAX_BUDGET}" \
|
||||
--effort "${EFFORT}" \
|
||||
--permission-mode bypassPermissions \
|
||||
--output-format json 2>&1 > /tmp/claude-result.json
|
||||
|
||||
CLAUDE_EXIT=$?
|
||||
|
||||
# Extract cost from JSON output
|
||||
COST=$(python3 -c "
|
||||
import json
|
||||
with open('/tmp/claude-result.json') as f:
|
||||
data = json.load(f)
|
||||
cost = data.get('total_cost_usd', 0)
|
||||
print(f'\${cost:.4f}')
|
||||
" 2>/dev/null || echo "unknown")
|
||||
|
||||
# Amend the last commit to include cost and model
|
||||
if git log --oneline main..HEAD 2>/dev/null | head -1 | grep -q .; then
|
||||
LAST_MSG=$(git log -1 --format=%B)
|
||||
git commit --amend -m "${LAST_MSG}
|
||||
|
||||
Claude model: ${CLAUDE_MODEL} | API cost: ${COST}" --no-verify
|
||||
git push origin "${BRANCH}" --force
|
||||
fi
|
||||
|
||||
# Post cost as comment
|
||||
curl -s -X POST "http://localhost:3000/api/v1/repos/${REPO}/issues/${ISSUE_NUMBER}/comments" \
|
||||
-H "Authorization: token ${GIT_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"body\": \"Done (model: **${CLAUDE_MODEL}**, effort: ${EFFORT}, budget cap: \$${MAX_BUDGET}). API cost: **${COST}**\"}" > /dev/null
|
||||
|
||||
exit ${CLAUDE_EXIT}
|
||||
40
.github/CODEOWNERS
vendored
40
.github/CODEOWNERS
vendored
@@ -1,15 +1,37 @@
|
||||
# Code owners for quicnprotochat. PRs require review from owners.
|
||||
# Code owners for quicprochat. PRs require review from owners.
|
||||
# See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||
# Replace 'maintainers' with your GitHub user/team handle.
|
||||
|
||||
# Default owners for everything in the repo
|
||||
* @maintainers
|
||||
|
||||
# Crate-specific (uncomment and add handles when you have designated owners)
|
||||
# /crates/quicnprotochat-core/ @owner1
|
||||
# /crates/quicnprotochat-proto/ @owner1
|
||||
# /crates/quicnprotochat-server/ @owner1
|
||||
# /crates/quicnprotochat-client/ @owner1
|
||||
# /crates/quicnprotochat-p2p/ @owner1
|
||||
# /schemas/ @owner1
|
||||
# /docs/ @owner1
|
||||
# Security-critical: crypto primitives, MLS, hybrid KEM
|
||||
/crates/quicprochat-core/ @maintainers
|
||||
|
||||
# Wire format: protobuf definitions, Cap'n Proto schemas
|
||||
/crates/quicprochat-proto/ @maintainers
|
||||
/proto/ @maintainers
|
||||
|
||||
# Auth and server-side domain logic
|
||||
/crates/quicprochat-server/ @maintainers
|
||||
|
||||
# Client SDK: auth, conversation store, messaging pipeline
|
||||
/crates/quicprochat-sdk/ @maintainers
|
||||
|
||||
# CLI/TUI client
|
||||
/crates/quicprochat-client/ @maintainers
|
||||
|
||||
# RPC framework: framing, middleware, QUIC transport
|
||||
/crates/quicprochat-rpc/ @maintainers
|
||||
|
||||
# Key transparency
|
||||
/crates/quicprochat-kt/ @maintainers
|
||||
|
||||
# Plugin ABI (no_std C-ABI boundary)
|
||||
/crates/quicprochat-plugin-api/ @maintainers
|
||||
|
||||
# P2P transport
|
||||
/crates/quicprochat-p2p/ @maintainers
|
||||
|
||||
# CI and infrastructure
|
||||
/.github/ @maintainers
|
||||
|
||||
54
.github/workflows/bench.yml
vendored
Normal file
54
.github/workflows/bench.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
name: Benchmarks
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, v2]
|
||||
pull_request:
|
||||
branches: [main, v2]
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
bench:
|
||||
name: Run Criterion benchmarks
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-bench-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-bench-
|
||||
|
||||
- name: Run benchmarks
|
||||
run: cargo bench --package quicprochat-core -- --output-format=bencher 2>&1 | tee bench-output.txt
|
||||
|
||||
- name: Upload HTML reports
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: criterion-reports
|
||||
path: target/criterion/
|
||||
retention-days: 30
|
||||
|
||||
- name: Upload raw output
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: bench-output
|
||||
path: bench-output.txt
|
||||
retention-days: 30
|
||||
106
.github/workflows/ci.yml
vendored
106
.github/workflows/ci.yml
vendored
@@ -2,9 +2,9 @@ name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, master]
|
||||
branches: [main, master, v2]
|
||||
pull_request:
|
||||
branches: [main, master]
|
||||
branches: [main, master, v2]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -45,7 +45,7 @@ jobs:
|
||||
run: cargo test --workspace
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --workspace --all-targets --
|
||||
run: cargo clippy --workspace --all-targets -- -D warnings
|
||||
|
||||
deny:
|
||||
name: cargo-deny
|
||||
@@ -72,3 +72,103 @@ jobs:
|
||||
run: |
|
||||
cargo install cargo-audit --locked
|
||||
cargo audit
|
||||
|
||||
coverage:
|
||||
name: Coverage
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-coverage-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-coverage-
|
||||
|
||||
- name: Install cargo-tarpaulin
|
||||
run: cargo install cargo-tarpaulin
|
||||
|
||||
- name: Run coverage
|
||||
run: |
|
||||
cargo tarpaulin --workspace \
|
||||
--exclude quicprochat-p2p \
|
||||
--out xml \
|
||||
--output-dir coverage/ \
|
||||
-- --test-threads 1
|
||||
|
||||
- name: Upload coverage report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage/cobertura.xml
|
||||
|
||||
msrv:
|
||||
name: MSRV Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install MSRV Rust (1.75)
|
||||
uses: dtolnay/rust-action@1.75
|
||||
with:
|
||||
components: clippy
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-msrv-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-msrv-
|
||||
|
||||
- name: Check MSRV
|
||||
run: cargo check --workspace
|
||||
|
||||
macos:
|
||||
name: macOS Build Check
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Check build
|
||||
run: cargo check --workspace
|
||||
|
||||
docker:
|
||||
name: Docker Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Build Docker image
|
||||
run: docker build -f docker/Dockerfile .
|
||||
|
||||
65
.github/workflows/openwrt.yml
vendored
Normal file
65
.github/workflows/openwrt.yml
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
name: OpenWrt Cross-Compile
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
MAX_SIZE_MB: 5
|
||||
|
||||
jobs:
|
||||
cross-compile:
|
||||
name: Cross-compile (${{ matrix.target }})
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target:
|
||||
- x86_64-unknown-linux-musl
|
||||
- armv7-unknown-linux-musleabihf
|
||||
- aarch64-unknown-linux-musl
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Install cargo-zigbuild and zig
|
||||
run: |
|
||||
pip3 install ziglang
|
||||
cargo install cargo-zigbuild
|
||||
|
||||
- name: Add target
|
||||
run: rustup target add ${{ matrix.target }}
|
||||
|
||||
- name: Build (size-optimised)
|
||||
env:
|
||||
CARGO_PROFILE_RELEASE_OPT_LEVEL: s
|
||||
CARGO_PROFILE_RELEASE_LTO: 'true'
|
||||
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: '1'
|
||||
CARGO_PROFILE_RELEASE_STRIP: symbols
|
||||
run: |
|
||||
cargo zigbuild --release --target ${{ matrix.target }} --bin qpc-server
|
||||
|
||||
- name: Check binary size
|
||||
run: |
|
||||
BINARY="target/${{ matrix.target }}/release/qpc-server"
|
||||
SIZE=$(stat -c%s "$BINARY")
|
||||
SIZE_MB=$(echo "scale=2; $SIZE / 1048576" | bc)
|
||||
echo "Binary size: ${SIZE_MB} MB"
|
||||
MAX_BYTES=$(( ${{ env.MAX_SIZE_MB }} * 1048576 ))
|
||||
if [ "$SIZE" -gt "$MAX_BYTES" ]; then
|
||||
echo "::error::Binary exceeds ${MAX_SIZE_MB} MB limit (${SIZE_MB} MB)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: qpc-server-${{ matrix.target }}
|
||||
path: target/${{ matrix.target }}/release/qpc-server
|
||||
retention-days: 30
|
||||
28
.gitignore
vendored
28
.gitignore
vendored
@@ -1,4 +1,6 @@
|
||||
/target
|
||||
**/target/
|
||||
node_modules/
|
||||
**/*.rs.bk
|
||||
.vscode/
|
||||
gitea-mcp.json
|
||||
@@ -7,4 +9,28 @@ docs/book/
|
||||
# Server/client runtime data — do not commit certs, keys, or DBs
|
||||
data/
|
||||
*.der
|
||||
quicnprotochat-server.toml
|
||||
*.pem
|
||||
*.db
|
||||
*.bin
|
||||
*.ks
|
||||
*.session
|
||||
*.convdb
|
||||
*.convdb-shm
|
||||
*.convdb-wal
|
||||
*.pending.ks
|
||||
qpc-server.toml
|
||||
|
||||
# Internal planning docs (not for public distribution)
|
||||
docs/internal/
|
||||
|
||||
# AI development workflow files
|
||||
master-prompt.md
|
||||
scripts/ai_team.py
|
||||
|
||||
# LaTeX build artifacts
|
||||
paper/*.aux
|
||||
paper/*.bbl
|
||||
paper/*.blg
|
||||
paper/*.log
|
||||
paper/*.out
|
||||
paper/*.pdf
|
||||
|
||||
63
CLAUDE.md
Normal file
63
CLAUDE.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# product.quicproquo
|
||||
|
||||
End-to-end encrypted group messaging over QUIC with MLS key agreement and post-quantum crypto.
|
||||
|
||||
## Tech Stack
|
||||
|
||||
- Rust 1.75+, Cargo workspace (12 crates)
|
||||
- Crypto: OpenMLS 0.8, ML-KEM-768, X25519, ChaCha20-Poly1305, OPAQUE-KE
|
||||
- Networking: Quinn (QUIC), Tokio, Tower middleware
|
||||
- Serialization: Protobuf (prost) for v2, Cap'n Proto (legacy v1)
|
||||
- DB: rusqlite with bundled SQLCipher
|
||||
- Build: just (justfile), cargo-deny for supply chain audit
|
||||
|
||||
## Commands
|
||||
|
||||
```bash
|
||||
just build # Build all workspace crates
|
||||
just test # Run all tests
|
||||
just test-core # Crypto tests only
|
||||
just lint # clippy --workspace -- -D warnings
|
||||
just fmt # Format check
|
||||
just fmt-fix # Format fix
|
||||
just proto # Rebuild protobuf codegen
|
||||
just server # Build server binary
|
||||
just client # Build client binary
|
||||
cargo deny check # Supply chain audit (deny.toml)
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
crates/
|
||||
quicprochat-core/ # Crypto primitives, MLS, double ratchet
|
||||
quicprochat-proto/ # Protobuf definitions + prost codegen
|
||||
quicprochat-rpc/ # RPC framework over QUIC
|
||||
quicprochat-sdk/ # High-level client SDK
|
||||
quicprochat-server/ # Server binary
|
||||
quicprochat-client/ # CLI client binary
|
||||
quicprochat-p2p/ # P2P mesh via iroh (feature-gated: `mesh`)
|
||||
quicprochat-plugin-api/ # Plugin interface
|
||||
quicprochat-kt/ # Kotlin/JNI bindings
|
||||
meshservice/ # Generic decentralized service layer (FAPP, Housing)
|
||||
apps/gui/ # GUI application
|
||||
proto/ # .proto source files
|
||||
schemas/ # Data schemas
|
||||
docker/ # Container configs
|
||||
```
|
||||
|
||||
## Rules
|
||||
|
||||
- `clippy::unwrap_used` is **deny** workspace-wide -- use proper error handling
|
||||
- `unsafe_code` is **warn** -- avoid unless absolutely necessary, document why
|
||||
- P2P crate (`quicprochat-p2p`) pulls ~90 extra deps via iroh -- only compiled with `mesh` feature
|
||||
- All crypto operations must go through quicprochat-core, never inline crypto
|
||||
- Protobuf is the v2 wire format; Cap'n Proto is legacy v1 only
|
||||
|
||||
## Do NOT
|
||||
|
||||
- Use `.unwrap()` or `.expect()` outside tests -- clippy will deny it
|
||||
- Add crypto primitives outside of quicprochat-core
|
||||
- Enable the `mesh` feature by default (heavy dependency tree)
|
||||
- Mix v1 (capnp) and v2 (protobuf) serialization in new code
|
||||
- Skip `cargo deny check` before adding new dependencies
|
||||
40
CONTRIBUTING.md
Normal file
40
CONTRIBUTING.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Contributing to quicprochat
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Rust toolchain** (stable) via [rustup](https://rustup.rs/)
|
||||
- **protoc** is vendored via the `protobuf-src` crate -- no system installation needed
|
||||
- Git with GPG signing configured
|
||||
|
||||
## Building and Testing
|
||||
|
||||
```sh
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
```
|
||||
|
||||
A `justfile` is also available for common tasks (`just build`, `just test`, `just proto`, etc.).
|
||||
|
||||
## Code Standards
|
||||
|
||||
### Commits
|
||||
|
||||
- **Conventional commits**: `feat:`, `fix:`, `docs:`, `chore:`, `test:`, `refactor:`
|
||||
- Commits must be **GPG-signed**
|
||||
- Commit messages describe *why*, not just *what*
|
||||
- No `Co-authored-by` trailers
|
||||
|
||||
### Rust
|
||||
|
||||
- No `.unwrap()` on crypto or I/O operations outside of tests
|
||||
- Secrets must be zeroized on drop and never logged
|
||||
- No stubs, `todo!()`, or `unimplemented!()` in production code
|
||||
- Prefer clarity over cleverness; avoid unnecessary abstractions
|
||||
|
||||
## Security Vulnerabilities
|
||||
|
||||
Do not open public issues for security bugs. See [SECURITY.md](SECURITY.md) for responsible disclosure instructions.
|
||||
|
||||
## Licensing
|
||||
|
||||
The server crate (`quicprochat-server`) is licensed under **AGPL-3.0**. All other crates are dual-licensed under **Apache-2.0 / MIT**. By submitting a contribution, you agree to license your work under the applicable license(s).
|
||||
4557
Cargo.lock
generated
4557
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
72
Cargo.toml
72
Cargo.toml
@@ -1,31 +1,50 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/quicnprotochat-core",
|
||||
"crates/quicnprotochat-proto",
|
||||
"crates/quicnprotochat-server",
|
||||
"crates/quicnprotochat-client",
|
||||
"crates/quicnprotochat-p2p",
|
||||
"crates/quicnprotochat-gui",
|
||||
"crates/quicprochat-core",
|
||||
"crates/quicprochat-proto",
|
||||
"crates/quicprochat-plugin-api",
|
||||
"crates/quicprochat-kt",
|
||||
"crates/quicprochat-rpc",
|
||||
"crates/quicprochat-sdk",
|
||||
"crates/quicprochat-server",
|
||||
"crates/quicprochat-client",
|
||||
# P2P crate uses iroh (~90 extra deps). Only compiled when the `mesh`
|
||||
# feature is enabled on quicprochat-client.
|
||||
"crates/quicprochat-p2p",
|
||||
# Generic decentralized service layer (FAPP, Housing, etc.)
|
||||
"crates/meshservice",
|
||||
# WebSocket bridge for viz/mesh-graph.html (tails NDJSON → browsers)
|
||||
"viz/bridge",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
edition = "2021"
|
||||
rust-version = "1.75"
|
||||
repository = "https://github.com/quicprochat/quicprochat"
|
||||
description = "End-to-end encrypted group messaging over QUIC"
|
||||
keywords = ["encryption", "messaging", "quic", "mls", "post-quantum"]
|
||||
categories = ["cryptography", "network-programming"]
|
||||
|
||||
# Shared dependency versions — bump here to affect the whole workspace.
|
||||
[workspace.dependencies]
|
||||
|
||||
# ── Crypto ────────────────────────────────────────────────────────────────────
|
||||
openmls = { version = "0.5", default-features = false, features = ["crypto-subtle"] }
|
||||
openmls_rust_crypto = { version = "0.2" }
|
||||
openmls_traits = { version = "0.2" }
|
||||
# tls_codec must match the version used by openmls 0.5 (which uses 0.3) to avoid
|
||||
openmls = { version = "0.8" }
|
||||
openmls_rust_crypto = { version = "0.5" }
|
||||
openmls_traits = { version = "0.5" }
|
||||
openmls_memory_storage = { version = "0.5" }
|
||||
# tls_codec must match the version used by openmls 0.8 (which uses 0.4) to avoid
|
||||
# duplicate Serialize trait versions in the dependency graph.
|
||||
tls_codec = { version = "0.3", features = ["derive"] }
|
||||
tls_codec = { version = "0.4", features = ["derive"] }
|
||||
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
|
||||
# All three parameter sets (512/768/1024) are compiled in by default — no feature flag needed.
|
||||
ml-kem = { version = "0.2" }
|
||||
x25519-dalek = { version = "2", features = ["static_secrets"] }
|
||||
ed25519-dalek = { version = "2", features = ["rand_core"] }
|
||||
sha2 = { version = "0.10" }
|
||||
hmac = { version = "0.12" }
|
||||
hkdf = { version = "0.12" }
|
||||
ciborium = { version = "0.2" }
|
||||
chacha20poly1305 = { version = "0.10" }
|
||||
opaque-ke = { version = "4", features = ["ristretto255", "argon2"] }
|
||||
zeroize = { version = "1", features = ["derive", "serde"] }
|
||||
@@ -36,12 +55,17 @@ serde = { version = "1", features = ["derive"] }
|
||||
serde_json = { version = "1" }
|
||||
bincode = { version = "1" }
|
||||
|
||||
# ── Serialisation + RPC ───────────────────────────────────────────────────────
|
||||
# ── Serialisation (v2: protobuf) ─────────────────────────────────────────────
|
||||
prost = { version = "0.13" }
|
||||
prost-types = { version = "0.13" }
|
||||
prost-build = { version = "0.13" }
|
||||
|
||||
# ── Serialisation (v1 legacy — capnp, used by existing server/client) ────────
|
||||
capnp = { version = "0.19" }
|
||||
capnp-rpc = { version = "0.19" }
|
||||
|
||||
# ── Async / networking ────────────────────────────────────────────────────────
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync", "signal", "io-util", "io-std"] }
|
||||
tokio-util = { version = "0.7", features = ["codec", "compat"] }
|
||||
futures = { version = "0.3" }
|
||||
quinn = { version = "0.11" }
|
||||
@@ -49,9 +73,16 @@ quinn-proto = { version = "0.11" }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "ring"] }
|
||||
rcgen = { version = "0.13" }
|
||||
|
||||
# ── Middleware ────────────────────────────────────────────────────────────────
|
||||
tower = { version = "0.5", features = ["util", "limit", "timeout"] }
|
||||
|
||||
# ── Database ─────────────────────────────────────────────────────────────
|
||||
rusqlite = { version = "0.31", features = ["bundled-sqlcipher"] }
|
||||
|
||||
# ── Encoding ─────────────────────────────────────────────────────────────────
|
||||
hex = { version = "0.4" }
|
||||
bytes = { version = "1" }
|
||||
|
||||
# ── Server utilities ──────────────────────────────────────────────────────────
|
||||
dashmap = { version = "5" }
|
||||
tracing = { version = "0.1" }
|
||||
@@ -61,12 +92,23 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
anyhow = { version = "1" }
|
||||
thiserror = { version = "1" }
|
||||
|
||||
# ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
# ── Config / CLI ──────────────────────────────────────────────────────────────
|
||||
toml = { version = "0.8" }
|
||||
clap = { version = "4", features = ["derive", "env"] }
|
||||
rustyline = { version = "14" }
|
||||
|
||||
# ── Certificate parsing ──────────────────────────────────────────────────────
|
||||
x509-parser = { version = "0.16", default-features = false }
|
||||
|
||||
# ── Build-time ────────────────────────────────────────────────────────────────
|
||||
capnpc = { version = "0.19" }
|
||||
|
||||
[workspace.lints.rust]
|
||||
unsafe_code = "warn"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
unwrap_used = "deny"
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
lto = "thin"
|
||||
|
||||
30
LICENSE
Normal file
30
LICENSE
Normal file
@@ -0,0 +1,30 @@
|
||||
quicproquo — Split Licensing
|
||||
============================
|
||||
|
||||
This project uses a split license model similar to Signal:
|
||||
|
||||
Server (quicproquo-server)
|
||||
--------------------------
|
||||
Licensed under the GNU Affero General Public License v3.0 only.
|
||||
See LICENSE-AGPL-3.0 for the full text.
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
Libraries and SDKs (all other crates)
|
||||
--------------------------------------
|
||||
Licensed under either of
|
||||
|
||||
* Apache License, Version 2.0 (LICENSE-APACHE)
|
||||
* MIT License (LICENSE-MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
SPDX-License-Identifier: Apache-2.0 OR MIT
|
||||
|
||||
Contribution
|
||||
------------
|
||||
Unless you explicitly state otherwise, any contribution intentionally
|
||||
submitted for inclusion in this project by you, as defined in the
|
||||
Apache-2.0 license, shall be dual licensed as above (for library crates)
|
||||
or AGPL-3.0-only (for the server crate), without any additional terms or
|
||||
conditions.
|
||||
661
LICENSE-AGPL-3.0
Normal file
661
LICENSE-AGPL-3.0
Normal file
@@ -0,0 +1,661 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
199
LICENSE-APACHE
Normal file
199
LICENSE-APACHE
Normal file
@@ -0,0 +1,199 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to the Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by the Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding any notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
21
LICENSE-MIT
Normal file
21
LICENSE-MIT
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) quicproquo contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
355
README.md
355
README.md
@@ -1,124 +1,275 @@
|
||||
# quicprochat
|
||||
<p align="center">
|
||||
<img src="assets/logo.png" alt="quicprochat" width="160">
|
||||
</p>
|
||||
|
||||
> End-to-end encrypted group messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust.
|
||||
<h1 align="center">quicprochat</h1>
|
||||
|
||||
Every byte on the wire is protected by a QUIC transport secured with TLS 1.3
|
||||
(`quinn` + `rustls`). The inner **MLS** layer provides post-compromise security
|
||||
and ratcheted group key agreement across any number of participants. Messages
|
||||
are framed with **Cap'n Proto**, keeping serialisation zero-copy and
|
||||
schema-versioned.
|
||||
<p align="center">
|
||||
<strong>End-to-end encrypted group messaging over QUIC, powered by MLS and post-quantum cryptography.</strong>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="docs/src/design-rationale/messenger-comparison.md">Why quicprochat?</a> ·
|
||||
<a href="ROADMAP.md">Roadmap</a> ·
|
||||
<a href="docs/sdk/index.md">SDK Docs</a> ·
|
||||
<a href="docs/operations/monitoring.md">Operations</a> ·
|
||||
<a href="#quick-start">Quick Start</a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
quicprochat is a production-grade messenger where the server **never sees plaintext**. All traffic flows over QUIC/TLS 1.3, group keys are negotiated with the [MLS protocol](https://www.rfc-editor.org/rfc/rfc9420) (RFC 9420), and a hybrid X25519 + ML-KEM-768 KEM provides post-quantum confidentiality. Written in Rust. 45,000 lines of code. 301 tests.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Application / MLS ciphertext │ <- group key ratchet (RFC 9420)
|
||||
├─────────────────────────────────────────────┤
|
||||
│ Cap'n Proto RPC │ <- typed, schema-versioned framing
|
||||
├─────────────────────────────────────────────┤
|
||||
│ QUIC + TLS 1.3 (quinn/rustls) │ <- mutual auth + transport secrecy
|
||||
└─────────────────────────────────────────────┘
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ Application / MLS ciphertext │ ← group key ratchet (RFC 9420)
|
||||
├─────────────────────────────────────────────────┤
|
||||
│ Protobuf RPC / Cap'n Proto (legacy) │ ← typed, schema-versioned framing
|
||||
├─────────────────────────────────────────────────┤
|
||||
│ QUIC + TLS 1.3 (quinn/rustls) │ ← mutual auth + transport secrecy
|
||||
└─────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Highlights
|
||||
|
||||
| | |
|
||||
|---|---|
|
||||
| **Zero-knowledge server** | Routes opaque MLS ciphertexts by recipient key — never decrypts |
|
||||
| **Post-quantum ready** | Hybrid X25519 + ML-KEM-768 KEM on both MLS and Noise layers |
|
||||
| **Password auth** | OPAQUE — password never leaves the client, not even as a hash |
|
||||
| **Forward secrecy** | MLS epoch ratchet: compromise today can't decrypt yesterday |
|
||||
| **Multi-device** | Per-device keys, delivery fan-out, up to 5 devices per account |
|
||||
| **Federation** | Server-to-server relay over QUIC with mTLS |
|
||||
| **Offline-first** | Client-side outbox with idempotent retry and gap detection |
|
||||
| **Sealed sender** | Optional anonymous enqueue — server can't see who sent a message |
|
||||
| **7 SDKs** | Rust, Go, Python, TypeScript, Swift, Kotlin/Java, Ruby |
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Build (no system dependencies — protoc is vendored)
|
||||
cargo build --workspace
|
||||
|
||||
# Run tests
|
||||
cargo test --workspace
|
||||
|
||||
# Start the server (auto-generates self-signed TLS cert)
|
||||
cargo run --bin qpc-server -- --allow-insecure-auth
|
||||
|
||||
# Interactive REPL (registers + logs in automatically)
|
||||
cargo run --bin qpc -- repl --username alice --password secret
|
||||
```
|
||||
|
||||
**Two-terminal demo:**
|
||||
|
||||
```bash
|
||||
# Terminal 1 # Terminal 2
|
||||
qpc repl -u alice -p secretA qpc repl -u bob -p secretB
|
||||
|
||||
# Alice: # Bob sees:
|
||||
/dm bob [alice] Hello, Bob!
|
||||
Hello, Bob!
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
quicprochat/
|
||||
├── crates/
|
||||
│ ├── quicprochat-core # MLS, hybrid KEM, PQ Noise, OPAQUE, recovery, padding
|
||||
│ ├── quicprochat-proto # Protobuf (prost) + Cap'n Proto generated types
|
||||
│ ├── quicprochat-rpc # QUIC RPC framework (framing, dispatch, middleware)
|
||||
│ ├── quicprochat-sdk # Client SDK (QpqClient, conversation store, outbox)
|
||||
│ ├── quicprochat-server # QUIC server, 33 RPC methods, domain services, plugins
|
||||
│ ├── quicprochat-client # CLI + REPL + TUI (Ratatui)
|
||||
│ ├── quicprochat-kt # Key transparency (Merkle-log, revocation)
|
||||
│ ├── quicprochat-p2p # iroh P2P, mesh identity, store-and-forward
|
||||
│ ├── meshservice # Decentralized service layer (FAPP, housing, wire format)
|
||||
│ ├── quicprochat-ffi # C FFI (libquicprochat_ffi.so)
|
||||
│ └── quicprochat-plugin-api # Dynamic plugin hooks (C ABI)
|
||||
├── proto/qpc/v1/ # 15 .proto schema files
|
||||
├── sdks/ # Go, Python, TypeScript, Swift, Kotlin, Java, Ruby
|
||||
├── docs/ # mdBook docs, SDK guides, operational runbooks
|
||||
└── packaging/ # OpenWrt, Docker, cross-compilation
|
||||
```
|
||||
|
||||
### Security Properties
|
||||
|
||||
| Property | Mechanism |
|
||||
|---|---|
|
||||
| Transport confidentiality | TLS 1.3 over QUIC (rustls) |
|
||||
| Transport authentication | TLS 1.3 server cert (self-signed by default) |
|
||||
| Group key agreement | MLS `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` |
|
||||
| Post-compromise security | MLS epoch ratchet |
|
||||
| Identity | Ed25519 (MLS credential + leaf node signature) |
|
||||
| Message framing | Cap'n Proto (unpacked wire format) |
|
||||
| Post-quantum confidentiality | X25519 + ML-KEM-768 hybrid KEM (MLS + Noise layers) |
|
||||
| Forward secrecy | MLS epoch ratchet + per-epoch key schedule |
|
||||
| Post-compromise security | MLS Update proposals rotate leaf material |
|
||||
| Identity | Ed25519 long-term keys (MLS credential + leaf signature) |
|
||||
| Password authentication | OPAQUE-ke (augmented PAKE, no password on wire) |
|
||||
| Local storage | SQLCipher + Argon2id + ChaCha20-Poly1305 |
|
||||
| Key transparency | Append-only Merkle log with inclusion proofs + revocation |
|
||||
| Traffic analysis resistance | Uniform padding + configurable decoy traffic |
|
||||
|
||||
---
|
||||
## Features
|
||||
|
||||
### Messaging
|
||||
- **1:1 DMs** and **N-party groups** with full MLS lifecycle (add, remove, key rotation)
|
||||
- **Rich messaging** — reactions, read receipts, typing indicators, edit, delete
|
||||
- **File transfer** — chunked upload/download, SHA-256 content addressing, 50 MB limit
|
||||
- **Disappearing messages** — per-conversation TTL with server-side GC
|
||||
- **Offline queue** — messages queued locally when disconnected, flushed on reconnect
|
||||
- **Delivery proofs** — server-signed Ed25519 receipts for cryptographic send confirmation
|
||||
- **Transcript export** — encrypted, tamper-evident archives with Merkle chain verification
|
||||
|
||||
### Infrastructure
|
||||
- **Multi-device** — per-device keys and delivery fan-out (up to 5 devices)
|
||||
- **Account recovery** — 8 recovery codes, encrypted bundles, zero-knowledge server
|
||||
- **Federation** — server-to-server relay with mTLS and cross-server user resolution
|
||||
- **Abuse prevention** — user blocking, message reporting, ban enforcement, admin tools
|
||||
- **Graceful shutdown** — SIGTERM drain with configurable timeout, health endpoint awareness
|
||||
- **Rate limiting** — sliding window algorithm, trait-based for Redis swap
|
||||
- **Observability** — request correlation IDs, per-endpoint latency histograms, structured audit log
|
||||
- **Dynamic plugins** — load `.so`/`.dylib` at runtime via `--plugin-dir` (6 hook points)
|
||||
- **Mesh networking** — iroh P2P, mDNS discovery, store-and-forward, broadcast channels
|
||||
|
||||
### Mesh & P2P Features
|
||||
|
||||
The `quicprochat-p2p` crate provides a full **serverless mesh networking stack**:
|
||||
|
||||
| Feature | Module | Description |
|
||||
|---------|--------|-------------|
|
||||
| **P2P Transport** | `P2pNode` | Direct QUIC connections via iroh with NAT traversal |
|
||||
| **Mesh Identity** | `MeshIdentity` | Ed25519 keypairs with 16-byte truncated addresses |
|
||||
| **Mesh Envelope** | `MeshEnvelope` | Encrypted, signed, TTL-aware message containers |
|
||||
| **Store-and-Forward** | `MeshStore` | Queue messages for offline recipients |
|
||||
| **Multi-Hop Routing** | `MeshRouter` | Distributed routing table, forward through intermediaries |
|
||||
| **Announce Protocol** | `MeshAnnounce` | Signed peer discovery with capability flags |
|
||||
| **Broadcast Channels** | `BroadcastManager` | Pub/sub with symmetric key encryption |
|
||||
| **Transport Abstraction** | `TransportManager` | Iroh, TCP, LoRa — route by address type |
|
||||
| **LoRa Transport** | `transport_lora` | Duty-cycle aware, fragmentation, SF12 support |
|
||||
| **MLS-Lite** | `mls_lite` | Lightweight symmetric mode for constrained links |
|
||||
| **FAPP** | `fapp` + `fapp_router` | Free Appointment Propagation Protocol (see below) |
|
||||
|
||||
#### FAPP — Decentralized Appointment Discovery
**Problem:** In Germany, finding a psychotherapist takes 3–6 months due to artificial slot visibility limits.

**Solution:** FAPP lets licensed therapists announce free slots into the mesh. Patients discover and reserve slots anonymously — no central registry.

```rust
// Therapist publishes slots
let announce = SlotAnnounce::new(
    &therapist_identity,
    vec![Fachrichtung::Verhaltenstherapie],
    vec![Modalitaet::Praxis, Modalitaet::Video],
    vec![Kostentraeger::GKV],
    "80331", // PLZ only, never exact address
    slots,
    approbation_hash,
    sequence,
);
fapp_router.broadcast_announce(announce)?;

// Patient queries anonymously
let query = SlotQuery {
    fachrichtung: Some(Fachrichtung::Verhaltenstherapie),
    plz_prefix: Some("803".into()),
    kostentraeger: Some(Kostentraeger::GKV),
    ..Default::default()
};
fapp_router.send_query(query)?;
```

**Privacy model:**

- Therapist identity is **public** (bound to Approbation hash)
- Patient queries are **anonymous** (no identifying information)
- Reservations use **E2E encryption** to therapist's key

See [`docs/specs/fapp-protocol.md`](docs/specs/fapp-protocol.md) for the full protocol spec.

### Client SDKs

| Language | Location | Transport | Notes |
|---|---|---|---|
| **Rust** | `crates/quicprochat-sdk` | QUIC (quinn) | Reference implementation |
| **Go** | `sdks/go/` | QUIC (quic-go) | Cap'n Proto RPC, full API |
| **Python** | `sdks/python/` | QUIC (aioquic) + FFI | Async client, PyPI-ready |
| **TypeScript** | `sdks/typescript/` | WebSocket + WASM crypto | 175 KB WASM bundle, browser demo |
| **Swift** | `sdks/swift/` | C FFI wrapper | iOS 15+ / macOS 13+ |
| **Kotlin/Java** | `sdks/kotlin/`, `sdks/java/` | JNI + C FFI | Android + JVM |
| **Ruby** | `sdks/ruby/` | C FFI gem | Block-form auto-disconnect |

### REPL Commands

40+ slash commands including:

```
/dm <user>              Start a 1:1 DM
/create-group <name>    Create a group
/invite <user>          Add member to group
/remove <user>          Remove member
/send-file <path>       Upload and send a file
/verify <user>          Compare safety numbers
/rotate-keys            Rotate MLS key material
/disappear <duration>   Set message TTL
/export <path>          Export encrypted transcript
/devices list|add|rm    Manage devices
/block <user>           Block a user
/recovery setup         Generate recovery codes
/help                   Full command reference
```

## Deployment

### Docker

```bash
docker build -t quicprochat -f docker/Dockerfile .
docker run -p 7000:7000 -v qpc-data:/data quicprochat
```

### Production (Docker Compose)

```bash
# Includes server + Prometheus + Grafana with pre-configured dashboards
docker compose -f docker-compose.prod.yml up -d
```

### OpenWrt

Cross-compiled static binaries for mesh/embedded deployments:

```bash
./scripts/cross-compile.sh   # builds for x86_64, armv7, aarch64 (musl)
```

See [docs/openwrt.md](docs/openwrt.md) for `opkg` packaging and `procd` init scripts.

### Configuration

```bash
# Environment variables (see .env.example for full list)
QPC_LISTEN=0.0.0.0:7000
QPC_AUTH_TOKEN=your-strong-token
QPC_DB_KEY=your-db-encryption-key
QPC_STORE_BACKEND=sql
QPC_METRICS_LISTEN=0.0.0.0:9090
QPC_DRAIN_TIMEOUT=30
QPC_RPC_TIMEOUT=30
```

## Documentation

Full documentation is available as an **mdBook** wiki in [`docs/`](docs/):

```bash
# Install mdBook (once)
cargo install mdbook

# Build and serve locally
mdbook serve docs   # then open http://localhost:3000
```

### Highlights

- [SDK Integration Guide](docs/sdk/index.md) — wire format, per-language guides, "build your own SDK"
- [Operational Runbook](docs/operations/backup-restore.md) — backup/restore, key rotation, incident response
- [Scaling Guide](docs/operations/scaling-guide.md) — resource sizing, horizontal scaling, capacity planning
- [Monitoring](docs/operations/monitoring.md) — Prometheus metrics, Grafana dashboards, alert rules
- **[Architecture Overview](docs/src/architecture/overview.md)** — Two-service model, dual-key design, crate layout
- **[Protocol Deep Dives](docs/src/protocol-layers/overview.md)** — QUIC/TLS 1.3, Cap'n Proto, MLS, Hybrid KEM
- **[Cryptographic Properties](docs/src/cryptography/overview.md)** — Forward secrecy, post-compromise security, PQ readiness, threat model
- **[Design Rationale](docs/src/design-rationale/overview.md)** — Why MLS over Signal/Matrix, ADRs for all key decisions
- **[Wire Format Reference](docs/src/wire-format/overview.md)** — Annotated Cap'n Proto schemas
- **[Getting Started](docs/src/getting-started/prerequisites.md)** — Build, run, demo walkthrough
- **[Roadmap](docs/src/roadmap/milestones.md)** — Milestones, production readiness, future research

## Security Status

See the [Security notes](#security-notes) section below and [SECURITY.md](SECURITY.md) for the current audit status and disclosure policy.

---

## Quick start

```bash
# Prerequisites: Rust 1.77+, capnp CLI
brew install capnp               # macOS
# apt-get install capnproto      # Debian/Ubuntu

# GUI prerequisites (Linux only) — WebKitGTK + GTK3 for Tauri 2
# sudo apt install -y libwebkit2gtk-4.1-dev libgtk-3-dev libglib2.0-dev libssl-dev libayatana-appindicator3-dev librsvg2-dev patchelf

# Build and test
cargo build --workspace
cargo test --workspace

# Start the server (port 7000 by default)
cargo run -p quicnprotochat-server

# Or via a config file (TOML)
cat > quicnprotochat-server.toml <<'EOF'
listen = "0.0.0.0:7000"
data_dir = "data"
tls_cert = "data/server-cert.der"
tls_key = "data/server-key.der"
auth_token = "devtoken"
store_backend = "file" # or "sql"
db_path = "data/quicnprotochat.db"
db_key = ""
EOF
cargo run -p quicnprotochat-server -- --config quicnprotochat-server.toml

# Run the two-party demo
cargo run -p quicnprotochat-client -- demo-group \
    --server 127.0.0.1:7000

# Interactive 1:1 chat (after creating a group and inviting a peer)
# Terminal 1: quicnprotochat chat --peer-key <other_identity_hex>
# Terminal 2: quicnprotochat chat --peer-key <first_identity_hex>
# Type messages and press Enter; incoming messages appear as [peer] <msg>. Ctrl+D to exit.
```

See the [full demo walkthrough](docs/src/getting-started/demo-walkthrough.md) for a step-by-step guide.

---

## Milestones

| # | Name | Status | What it adds |
|---|------|--------|--------------|
| M1 | QUIC/TLS transport | Done | QUIC + TLS 1.3 endpoint, length-prefixed framing, Ping/Pong |
| M2 | Authentication Service | Done | Ed25519 identity, KeyPackage generation, AS upload/fetch |
| M3 | Delivery Service + MLS groups | Done | DS relay, `GroupMember` create/join/add/send/recv |
| M4 | Group CLI subcommands | Done | Persistent CLI (`create-group`, `invite`, `join`, `send`, `recv`), OPAQUE login |
| M5 | Multi-party groups | Done | N > 2 members, Commit fan-out, send --all, epoch sync |
| M6 | Persistence | Done | SQLite/SQLCipher, migrations, durable server + client state |
| M7 | Post-quantum | Next | PQ hybrid for MLS/HPKE (X25519 + ML-KEM-768) |

---

## Security notes

This is a **proof-of-concept research project**. It has not been audited.
See the [threat model](docs/src/cryptography/threat-model.md) for a detailed
analysis of what is and isn't protected.

---

> **This software has not undergone an independent security audit.** While it implements cryptographic best practices (MLS RFC 9420, OPAQUE, zeroization, constant-time comparisons), no third-party firm has reviewed the implementation. Do not rely on it for high-risk communications until an audit is completed. See [SECURITY.md](SECURITY.md) for our vulnerability disclosure policy.

## License
|
||||
|
||||
|
||||
891
ROADMAP.html
Normal file
891
ROADMAP.html
Normal file
@@ -0,0 +1,891 @@
|
||||
<!DOCTYPE HTML>
|
||||
<html lang="en" class="navy sidebar-visible" dir="ltr">
|
||||
<head>
|
||||
<!-- Book generated using mdBook -->
|
||||
<meta charset="UTF-8">
|
||||
<title>Full Roadmap (Phases 1-8) - quicproquo</title>
|
||||
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
|
||||
<meta name="description" content="End-to-end encrypted group messaging over QUIC + TLS 1.3 + MLS (RFC 9420)">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta name="theme-color" content="#ffffff">
|
||||
|
||||
<link rel="icon" href="favicon-de23e50b.svg">
|
||||
<link rel="shortcut icon" href="favicon-8114d1fc.png">
|
||||
<link rel="stylesheet" href="css/variables-8adf115d.css">
|
||||
<link rel="stylesheet" href="css/general-2459343d.css">
|
||||
<link rel="stylesheet" href="css/chrome-ae938929.css">
|
||||
<link rel="stylesheet" href="css/print-9e4910d8.css" media="print">
|
||||
|
||||
<!-- Fonts -->
|
||||
<link rel="stylesheet" href="fonts/fonts-9644e21d.css">
|
||||
|
||||
<!-- Highlight.js Stylesheets -->
|
||||
<link rel="stylesheet" id="mdbook-highlight-css" href="highlight-493f70e1.css">
|
||||
<link rel="stylesheet" id="mdbook-tomorrow-night-css" href="tomorrow-night-4c0ae647.css">
|
||||
<link rel="stylesheet" id="mdbook-ayu-highlight-css" href="ayu-highlight-3fdfc3ac.css">
|
||||
|
||||
<!-- Custom theme stylesheets -->
|
||||
|
||||
|
||||
<!-- Provide site root and default themes to javascript -->
|
||||
<script>
|
||||
const path_to_root = "";
|
||||
const default_light_theme = "navy";
|
||||
const default_dark_theme = "navy";
|
||||
window.path_to_searchindex_js = "searchindex-1e4ee6e2.js";
|
||||
</script>
|
||||
<!-- Start loading toc.js asap -->
|
||||
<script src="toc-69b0eb95.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div id="mdbook-help-container">
|
||||
<div id="mdbook-help-popup">
|
||||
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
|
||||
<div>
|
||||
<p>Press <kbd>←</kbd> or <kbd>→</kbd> to navigate between chapters</p>
|
||||
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
|
||||
<p>Press <kbd>?</kbd> to show this help</p>
|
||||
<p>Press <kbd>Esc</kbd> to hide this help</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="mdbook-body-container">
|
||||
<!-- Work around some values being stored in localStorage wrapped in quotes -->
|
||||
<script>
|
||||
try {
|
||||
let theme = localStorage.getItem('mdbook-theme');
|
||||
let sidebar = localStorage.getItem('mdbook-sidebar');
|
||||
|
||||
if (theme.startsWith('"') && theme.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
|
||||
}
|
||||
|
||||
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
|
||||
}
|
||||
} catch (e) { }
|
||||
</script>
|
||||
|
||||
<!-- Set the theme before any content is loaded, prevents flash -->
|
||||
<script>
|
||||
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
|
||||
let theme;
|
||||
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
|
||||
if (theme === null || theme === undefined) { theme = default_theme; }
|
||||
const html = document.documentElement;
|
||||
html.classList.remove('navy')
|
||||
html.classList.add(theme);
|
||||
html.classList.add("js");
|
||||
</script>
|
||||
|
||||
<input type="checkbox" id="mdbook-sidebar-toggle-anchor" class="hidden">
|
||||
|
||||
<!-- Hide / unhide sidebar before it is displayed -->
|
||||
<script>
|
||||
let sidebar = null;
|
||||
const sidebar_toggle = document.getElementById("mdbook-sidebar-toggle-anchor");
|
||||
if (document.body.clientWidth >= 1080) {
|
||||
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
|
||||
sidebar = sidebar || 'visible';
|
||||
} else {
|
||||
sidebar = 'hidden';
|
||||
sidebar_toggle.checked = false;
|
||||
}
|
||||
if (sidebar === 'visible') {
|
||||
sidebar_toggle.checked = true;
|
||||
} else {
|
||||
html.classList.remove('sidebar-visible');
|
||||
}
|
||||
</script>
|
||||
|
||||
<nav id="mdbook-sidebar" class="sidebar" aria-label="Table of contents">
|
||||
<!-- populated by js -->
|
||||
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
|
||||
<noscript>
|
||||
<iframe class="sidebar-iframe-outer" src="toc.html"></iframe>
|
||||
</noscript>
|
||||
<div id="mdbook-sidebar-resize-handle" class="sidebar-resize-handle">
|
||||
<div class="sidebar-resize-indicator"></div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<div id="mdbook-page-wrapper" class="page-wrapper">
|
||||
|
||||
<div class="page">
|
||||
<div id="mdbook-menu-bar-hover-placeholder"></div>
|
||||
<div id="mdbook-menu-bar" class="menu-bar sticky">
|
||||
<div class="left-buttons">
|
||||
<label id="mdbook-sidebar-toggle" class="icon-button" for="mdbook-sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="mdbook-sidebar">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M0 96C0 78.3 14.3 64 32 64H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32C14.3 128 0 113.7 0 96zM0 256c0-17.7 14.3-32 32-32H416c17.7 0 32 14.3 32 32s-14.3 32-32 32H32c-17.7 0-32-14.3-32-32zM448 416c0 17.7-14.3 32-32 32H32c-17.7 0-32-14.3-32-32s14.3-32 32-32H416c17.7 0 32 14.3 32 32z"/></svg></span>
|
||||
</label>
|
||||
<button id="mdbook-theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="mdbook-theme-list">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M371.3 367.1c27.3-3.9 51.9-19.4 67.2-42.9L600.2 74.1c12.6-19.5 9.4-45.3-7.6-61.2S549.7-4.4 531.1 9.6L294.4 187.2c-24 18-38.2 46.1-38.4 76.1L371.3 367.1zm-19.6 25.4l-116-104.4C175.9 290.3 128 339.6 128 400c0 3.9 .2 7.8 .6 11.6c1.8 17.5-10.2 36.4-27.8 36.4H96c-17.7 0-32 14.3-32 32s14.3 32 32 32H240c61.9 0 112-50.1 112-112c0-2.5-.1-5-.2-7.5z"/></svg></span>
|
||||
</button>
|
||||
<ul id="mdbook-theme-list" class="theme-popup" aria-label="Themes" role="menu">
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-default_theme">Auto</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-light">Light</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-rust">Rust</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-coal">Coal</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-navy">Navy</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="mdbook-theme-ayu">Ayu</button></li>
|
||||
</ul>
|
||||
<button id="mdbook-search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="mdbook-searchbar">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M416 208c0 45.9-14.9 88.3-40 122.7L502.6 457.4c12.5 12.5 12.5 32.8 0 45.3s-32.8 12.5-45.3 0L330.7 376c-34.4 25.2-76.8 40-122.7 40C93.1 416 0 322.9 0 208S93.1 0 208 0S416 93.1 416 208zM208 352c79.5 0 144-64.5 144-144s-64.5-144-144-144S64 128.5 64 208s64.5 144 144 144z"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<h1 class="menu-title">quicproquo</h1>
|
||||
|
||||
<div class="right-buttons">
|
||||
<a href="print.html" title="Print this book" aria-label="Print this book">
|
||||
<span class=fa-svg id="print-button"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M128 0C92.7 0 64 28.7 64 64v96h64V64H354.7L384 93.3V160h64V93.3c0-17-6.7-33.3-18.7-45.3L400 18.7C388 6.7 371.7 0 354.7 0H128zM384 352v32 64H128V384 368 352H384zm64 32h32c17.7 0 32-14.3 32-32V256c0-35.3-28.7-64-64-64H64c-35.3 0-64 28.7-64 64v96c0 17.7 14.3 32 32 32H64v64c0 35.3 28.7 64 64 64H384c35.3 0 64-28.7 64-64V384zm-16-88c-13.3 0-24-10.7-24-24s10.7-24 24-24s24 10.7 24 24s-10.7 24-24 24z"/></svg></span>
|
||||
</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="mdbook-search-wrapper" class="hidden">
|
||||
<form id="mdbook-searchbar-outer" class="searchbar-outer">
|
||||
<div class="search-wrapper">
|
||||
<input type="search" id="mdbook-searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="mdbook-searchresults-outer" aria-describedby="searchresults-header">
|
||||
<div class="spinner-wrapper">
|
||||
<span class=fa-svg id="fa-spin"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M304 48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zm0 416c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM48 304c26.5 0 48-21.5 48-48s-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48zm464-48c0-26.5-21.5-48-48-48s-48 21.5-48 48s21.5 48 48 48s48-21.5 48-48zM142.9 437c18.7-18.7 18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zm0-294.2c18.7-18.7 18.7-49.1 0-67.9S93.7 56.2 75 75s-18.7 49.1 0 67.9s49.1 18.7 67.9 0zM369.1 437c18.7 18.7 49.1 18.7 67.9 0s18.7-49.1 0-67.9s-49.1-18.7-67.9 0s-18.7 49.1 0 67.9z"/></svg></span>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
<div id="mdbook-searchresults-outer" class="searchresults-outer hidden">
|
||||
<div id="mdbook-searchresults-header" class="searchresults-header"></div>
|
||||
<ul id="mdbook-searchresults">
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
|
||||
<script>
|
||||
document.getElementById('mdbook-sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
|
||||
document.getElementById('mdbook-sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
|
||||
Array.from(document.querySelectorAll('#mdbook-sidebar a')).forEach(function(link) {
|
||||
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
|
||||
});
|
||||
</script>
|
||||
|
||||
<div id="mdbook-content" class="content">
|
||||
<main>
|
||||
<h1 id="roadmap--quicproquo"><a class="header" href="#roadmap--quicproquo">Roadmap — quicproquo</a></h1>
|
||||
<blockquote>
|
||||
<p>From proof-of-concept to production-grade E2E encrypted messaging.</p>
|
||||
<p>Each phase is designed to be tackled sequentially. Items within a phase
|
||||
can be parallelised. Check the box when done.</p>
|
||||
</blockquote>
|
||||
<hr>
|
||||
<h2 id="phase-1--production-hardening-critical"><a class="header" href="#phase-1--production-hardening-critical">Phase 1 — Production Hardening (Critical)</a></h2>
|
||||
<p>Eliminate all crash paths, enforce secure defaults, fix deployment blockers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.1 Remove <code>.unwrap()</code> / <code>.expect()</code> from production paths</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>AUTH_CONTEXT.read().expect()</code> in client RPC with proper <code>Result</code></li>
|
||||
<li>Replace <code>"0.0.0.0:0".parse().unwrap()</code> in client with fallible parse</li>
|
||||
<li>Replace <code>Mutex::lock().unwrap()</code> in server storage with <code>.map_err()</code></li>
|
||||
<li>Audit: <code>grep -rn 'unwrap()\|expect(' crates/</code> outside <code>#[cfg(test)]</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.2 Enforce secure defaults in production mode</strong></p>
|
||||
<ul>
|
||||
<li>Reject startup if <code>QPQ_PRODUCTION=true</code> and <code>auth_token</code> is empty or <code>"devtoken"</code></li>
|
||||
<li>Require non-empty <code>db_key</code> when using SQL backend in production</li>
|
||||
<li>Refuse to auto-generate TLS certs in production mode (require existing cert+key)</li>
|
||||
<li>Already partially implemented — verify and harden the validation in <code>config.rs</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.3 Fix <code>.gitignore</code></strong></p>
|
||||
<ul>
|
||||
<li>Add <code>data/</code>, <code>*.der</code>, <code>*.pem</code>, <code>*.db</code>, <code>*.bin</code> (state files), <code>*.ks</code> (keystores)</li>
|
||||
<li>Verify no secrets are already tracked: <code>git ls-files data/ *.der *.db</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.4 Fix Dockerfile</strong></p>
|
||||
<ul>
|
||||
<li>Sync workspace members (handle excluded <code>p2p</code> crate)</li>
|
||||
<li>Create dedicated user/group instead of <code>nobody</code></li>
|
||||
<li>Set writable <code>QPQ_DATA_DIR</code> with correct permissions</li>
|
||||
<li>Test: <code>docker build . && docker run --rm -it qpq-server --help</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>1.5 TLS certificate lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Document CA-signed cert setup (Let’s Encrypt / custom CA)</li>
|
||||
<li>Add <code>--tls-required</code> flag that refuses to start without valid cert</li>
|
||||
<li>Log clear warning when using self-signed certs</li>
|
||||
<li>Document certificate rotation procedure</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-2--test--ci-maturity"><a class="header" href="#phase-2--test--ci-maturity">Phase 2 — Test & CI Maturity</a></h2>
|
||||
<p>Build confidence before adding features.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.1 Expand E2E test coverage</strong></p>
|
||||
<ul>
|
||||
<li>Auth failure scenarios (wrong password, expired token, invalid token)</li>
|
||||
<li>Message ordering verification (send N messages, verify seq numbers)</li>
|
||||
<li>Concurrent clients (3+ members in group, simultaneous send/recv)</li>
|
||||
<li>OPAQUE registration + login full flow</li>
|
||||
<li>Queue full behavior (>1000 messages)</li>
|
||||
<li>Rate limiting behavior (>100 enqueues/minute)</li>
|
||||
<li>Reconnection after server restart</li>
|
||||
<li>KeyPackage exhaustion (fetch when none available)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.2 Add unit tests for untested paths</strong></p>
|
||||
<ul>
|
||||
<li>Client retry logic (exponential backoff, jitter, retriable classification)</li>
|
||||
<li>REPL input parsing edge cases (empty input, special characters, <code>/</code> commands)</li>
|
||||
<li>State file encryption/decryption round-trip with bad password</li>
|
||||
<li>Token cache expiry</li>
|
||||
<li>Conversation store migrations</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.3 CI hardening</strong></p>
|
||||
<ul>
|
||||
<li>Add <code>.github/CODEOWNERS</code> (crypto, auth, wire-format require 2 reviewers)</li>
|
||||
<li>Ensure <code>cargo deny check</code> runs on every PR (already in CI — verify)</li>
|
||||
<li>Add <code>cargo audit</code> as blocking check (already in CI — verify)</li>
|
||||
<li>Add coverage reporting (tarpaulin or llvm-cov)</li>
|
||||
<li>Add CI job for Docker build validation</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>2.4 Clean up build warnings</strong></p>
|
||||
<ul>
|
||||
<li>Fix Cap’n Proto generated <code>unused_parens</code> warnings</li>
|
||||
<li>Remove dead code / unused imports</li>
|
||||
<li>Address <code>openmls</code> future-incompat warnings</li>
|
||||
<li>Target: <code>cargo clippy --workspace -- -D warnings</code> passes clean</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-3--client-sdks-native-quic--capn-proto-everywhere"><a class="header" href="#phase-3--client-sdks-native-quic--capn-proto-everywhere">Phase 3 — Client SDKs: Native QUIC + Cap’n Proto Everywhere</a></h2>
|
||||
<p><strong>No REST gateway. No protocol dilution.</strong> The <code>.capnp</code> schemas are the
|
||||
interface definition. Every SDK speaks native QUIC + Cap’n Proto. The
|
||||
project name stays honest.</p>
|
||||
<h3 id="why-this-matters"><a class="header" href="#why-this-matters">Why this matters</a></h3>
|
||||
<p>The name is <strong>quic</strong>n<strong>proto</strong>chat — the protocol IS the product. Instead
|
||||
of adding an HTTP translation layer that loses zero-copy performance and
|
||||
adds base64 overhead, we invest in making the native protocol accessible
|
||||
from every language that has QUIC + Cap’n Proto support, and provide
|
||||
WASM/FFI for the crypto layer.</p>
|
||||
<h3 id="architecture"><a class="header" href="#architecture">Architecture</a></h3>
|
||||
<pre><code> Server: QUIC + Cap'n Proto (single protocol, no gateway)
|
||||
|
||||
Client SDKs:
|
||||
┌─── Rust quinn + capnp-rpc (existing, reference impl)
|
||||
├─── Go quic-go + go-capnp (native, high confidence)
|
||||
├─── Python aioquic + pycapnp (native QUIC, manual framing)
|
||||
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
|
||||
└─── Browser WebTransport + capnp (WASM) (QUIC transport, no HTTP needed)
|
||||
|
||||
Crypto layer (client-side MLS, shared across all SDKs):
|
||||
┌─── Rust crate (native, existing)
|
||||
├─── WASM module (browsers, Node.js, Deno)
|
||||
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
|
||||
</code></pre>
|
||||
<h3 id="language-support-reality-check"><a class="header" href="#language-support-reality-check">Language support reality check</a></h3>
|
||||
<div class="table-wrapper">
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Language</th><th>QUIC</th><th>Cap’n Proto</th><th>RPC</th><th>Confidence</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><strong>Rust</strong></td><td>quinn ✅</td><td>capnp-rpc ✅</td><td>Full ✅</td><td>Existing</td></tr>
|
||||
<tr><td><strong>Go</strong></td><td>quic-go ✅</td><td>go-capnp ✅</td><td>Level 1 ✅</td><td>High</td></tr>
|
||||
<tr><td><strong>Python</strong></td><td>aioquic ✅</td><td>pycapnp ⚠️</td><td>Manual framing</td><td>Medium</td></tr>
|
||||
<tr><td><strong>C/C++</strong></td><td>msquic/ngtcp2 ✅</td><td>capnproto ✅</td><td>Full ✅</td><td>High</td></tr>
|
||||
<tr><td><strong>Browser</strong></td><td>WebTransport ✅</td><td>WASM ✅</td><td>Via WASM bridge</td><td>Medium</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<h3 id="implementation"><a class="header" href="#implementation">Implementation</a></h3>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.1 Go SDK (<code>quicproquo-go</code>)</strong></p>
|
||||
<ul>
|
||||
<li>Generated Go types from <code>node.capnp</code> (6487-line codegen, all 24 RPC methods)</li>
|
||||
<li>QUIC transport via <code>quic-go</code> with TLS 1.3 + ALPN <code>"capnp"</code></li>
|
||||
<li>High-level <code>qpq</code> package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth</li>
|
||||
<li>Example CLI in <code>sdks/go/cmd/example/</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.2 Python SDK (<code>quicproquo-py</code>)</strong></p>
|
||||
<ul>
|
||||
<li>QUIC transport: <code>aioquic</code> with custom Cap’n Proto stream handler</li>
|
||||
<li>Cap’n Proto serialization: <code>pycapnp</code> for message types</li>
|
||||
<li>Manual RPC framing: length-prefixed request/response over QUIC stream</li>
|
||||
<li>Async/await API matching the Rust client patterns</li>
|
||||
<li>Crypto: PyO3 bindings to <code>quicproquo-core</code> for MLS operations</li>
|
||||
<li>Publish: PyPI <code>quicproquo</code></li>
|
||||
<li>Example: async bot client</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.3 C FFI layer (<code>quicproquo-ffi</code>)</strong></p>
|
||||
<ul>
|
||||
<li><code>crates/quicproquo-ffi</code> with 7 extern “C” functions: connect, login, send, receive, disconnect, last_error, free_string</li>
|
||||
<li>Builds as <code>libquicproquo_ffi.so</code> / <code>.dylib</code> / <code>.dll</code></li>
|
||||
<li>Python ctypes wrapper in <code>examples/python/qpq_client.py</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.4 WASM compilation of <code>quicproquo-core</code></strong></p>
|
||||
<ul>
|
||||
<li><code>wasm-pack build</code> target producing 175 KB WASM bundle (LTO + opt-level=s)</li>
|
||||
<li>13 <code>wasm_bindgen</code> functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding</li>
|
||||
<li>Browser-ready with <code>crypto.getRandomValues()</code> RNG</li>
|
||||
<li>Published as <code>sdks/typescript/wasm-crypto/</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.5 WebTransport server endpoint</strong></p>
|
||||
<ul>
|
||||
<li>Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)</li>
|
||||
<li>Cap’n Proto RPC framed over WebTransport bidirectional streams</li>
|
||||
<li>Same auth, same storage, same RPC handlers — just a different stream source</li>
|
||||
<li>Browsers connect via <code>new WebTransport("https://server:7443")</code></li>
|
||||
<li>ALPN negotiation: <code>"h3"</code> for WebTransport, <code>"capnp"</code> for native QUIC</li>
|
||||
<li>Configurable port: <code>--webtransport-listen 0.0.0.0:7443</code></li>
|
||||
<li>Feature-flagged: <code>--features webtransport</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.6 TypeScript/JavaScript SDK (<code>@quicproquo/client</code>)</strong></p>
|
||||
<ul>
|
||||
<li><code>QpqClient</code> class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount</li>
|
||||
<li>WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad</li>
|
||||
<li>WebSocket transport with request/response correlation and reconnection</li>
|
||||
<li>Browser demo: interactive crypto playground + chat UI (<code>sdks/typescript/demo/index.html</code>)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>3.7 SDK documentation and schema publishing</strong></p>
|
||||
<ul>
|
||||
<li>Publish <code>.capnp</code> schemas as the canonical API contract</li>
|
||||
<li>Document the QUIC + Cap’n Proto connection pattern for each language</li>
|
||||
<li>Provide a “build your own SDK” guide (QUIC stream → Cap’n Proto RPC bootstrap)</li>
|
||||
<li>Reference implementation checklist: connect, auth, upload key, enqueue, fetch</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-4--trust--security-infrastructure"><a class="header" href="#phase-4--trust--security-infrastructure">Phase 4 — Trust & Security Infrastructure</a></h2>
|
||||
<p>Address the security gaps required for real-world deployment.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox"> <strong>4.1 Third-party cryptographic audit</strong></p>
|
||||
<ul>
|
||||
<li>Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization</li>
|
||||
<li>Firms: NCC Group, Trail of Bits, Cure53</li>
|
||||
<li>Budget and timeline: typically 4–6 weeks, $50K–$150K</li>
|
||||
<li>Publish report publicly (builds trust)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.2 Key Transparency / revocation</strong></p>
|
||||
<ul>
|
||||
<li>Replace <code>BasicCredential</code> with X.509-based MLS credentials</li>
|
||||
<li>Or: verifiable key directory (Merkle tree, auditable log)</li>
|
||||
<li>Users can verify peer keys haven’t been substituted (MITM detection)</li>
|
||||
<li>Revocation mechanism for compromised keys</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.3 Client authentication on Delivery Service</strong></p>
|
||||
<ul>
|
||||
<li>DS sender identity binding with explicit audit logging</li>
|
||||
<li><code>sender_prefix</code> tracking in enqueue/batch_enqueue RPCs</li>
|
||||
<li>Sender identity derived from authenticated session</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.4 M7 — Post-quantum MLS integration</strong></p>
|
||||
<ul>
|
||||
<li>Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider</li>
|
||||
<li>Group key material gets post-quantum confidentiality</li>
|
||||
<li>Full test suite with PQ ciphersuite</li>
|
||||
<li>Ref: existing <code>hybrid_kem.rs</code> and <code>hybrid_crypto.rs</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>4.5 Username enumeration mitigation</strong></p>
|
||||
<ul>
|
||||
<li>5 ms timing floor on <code>resolveUser</code> responses</li>
|
||||
<li>Rate limiting to prevent bulk enumeration attacks</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-5--features--ux"><a class="header" href="#phase-5--features--ux">Phase 5 — Features & UX</a></h2>
|
||||
<p>Make it a product people want to use.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.1 Multi-device support</strong></p>
|
||||
<ul>
|
||||
<li>Account → multiple devices, each with own Ed25519 key + MLS KeyPackages</li>
|
||||
<li>Device graph management (add device, remove device, list devices)</li>
|
||||
<li>Messages delivered to all devices of a user</li>
|
||||
<li><code>device_id</code> field already in Auth struct — wire it through</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.2 Account recovery</strong></p>
|
||||
<ul>
|
||||
<li>Recovery codes or backup key (encrypted, stored by user)</li>
|
||||
<li>Option: server-assisted recovery with security questions (lower security)</li>
|
||||
<li>MLS state re-establishment after device loss</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.3 Full MLS lifecycle</strong></p>
|
||||
<ul>
|
||||
<li>Member removal (Remove proposal → Commit → fan-out)</li>
|
||||
<li>Credential update (Update proposal for key rotation)</li>
|
||||
<li>Explicit proposal handling (queue proposals, batch commit)</li>
|
||||
<li>Group metadata (name, description, avatar hash)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.4 Message editing and deletion</strong></p>
|
||||
<ul>
|
||||
<li><code>Edit</code> (0x06) and <code>Delete</code> (0x07) message types in <code>AppMessage</code></li>
|
||||
<li><code>/edit &lt;index&gt; &lt;text&gt;</code> and <code>/delete &lt;index&gt;</code> REPL commands (own messages only)</li>
|
||||
<li>Database update/removal on incoming edit/delete</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.5 File and media transfer</strong></p>
|
||||
<ul>
|
||||
<li><code>uploadBlob</code> / <code>downloadBlob</code> RPCs with 256 KB chunked streaming</li>
|
||||
<li>SHA-256 content-addressable storage with hash verification</li>
|
||||
<li><code>FileRef</code> (0x08) message type with blob_id, filename, file_size, mime_type</li>
|
||||
<li><code>/send-file &lt;path&gt;</code> and <code>/download &lt;index&gt;</code> REPL commands with progress bars</li>
|
||||
<li>50 MB max file size, automatic MIME detection via <code>mime_guess</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.6 Abuse prevention and moderation</strong></p>
|
||||
<ul>
|
||||
<li>Block user (client-side, suppress display)</li>
|
||||
<li>Report message (encrypted report to admin key)</li>
|
||||
<li>Admin tools: ban user, delete account, audit log</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>5.7 Offline message queue (client-side)</strong></p>
|
||||
<ul>
|
||||
<li>Queue messages when disconnected, send on reconnect</li>
|
||||
<li>Idempotent message IDs to prevent duplicates</li>
|
||||
<li>Gap detection: compare local seq with server seq</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-6--scale--operations"><a class="header" href="#phase-6--scale--operations">Phase 6 — Scale & Operations</a></h2>
|
||||
<p>Prepare for real traffic.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.1 Distributed rate limiting</strong></p>
|
||||
<ul>
|
||||
<li>Current: in-memory per-process, lost on restart</li>
|
||||
<li>Move to Redis or shared state for multi-node deployments</li>
|
||||
<li>Sliding window with configurable thresholds</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.2 Multi-node / horizontal scaling</strong></p>
|
||||
<ul>
|
||||
<li>Stateless server design (already mostly there — state is in storage backend)</li>
|
||||
<li>Shared PostgreSQL or CockroachDB backend (replace SQLite)</li>
|
||||
<li>Message queue fan-out (Redis pub/sub or NATS for cross-node notification)</li>
|
||||
<li>Load balancer health check via QUIC RPC <code>health()</code> or Prometheus <code>/metrics</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.3 Operational runbook</strong></p>
|
||||
<ul>
|
||||
<li>Backup / restore procedures (SQLCipher, file backend)</li>
|
||||
<li>Key rotation (auth token, TLS cert, DB encryption key)</li>
|
||||
<li>Incident response playbook</li>
|
||||
<li>Scaling guide (when to add nodes, resource sizing)</li>
|
||||
<li>Monitoring dashboard templates (Grafana + Prometheus)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.4 Connection draining and graceful shutdown</strong></p>
|
||||
<ul>
|
||||
<li>Stop accepting new connections on SIGTERM</li>
|
||||
<li>Wait for in-flight RPCs (configurable timeout, default 30s)</li>
|
||||
<li>Drain WebTransport sessions with close frame</li>
|
||||
<li>Document expected behavior for load balancers (health → unhealthy first)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.5 Request-level timeouts</strong></p>
|
||||
<ul>
|
||||
<li>Per-RPC timeout (prevent slow clients from holding resources)</li>
|
||||
<li>Database query timeout</li>
|
||||
<li>Overall request deadline propagation</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>6.6 Observability enhancements</strong></p>
|
||||
<ul>
|
||||
<li>Request correlation IDs (trace across RPC → storage)</li>
|
||||
<li>Storage operation latency metrics</li>
|
||||
<li>Per-endpoint latency histograms</li>
|
||||
<li>Structured audit log to persistent storage (not just stdout)</li>
|
||||
<li>OpenTelemetry integration</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-7--platform-expansion--research"><a class="header" href="#phase-7--platform-expansion--research">Phase 7 — Platform Expansion & Research</a></h2>
|
||||
<p>Long-term vision for wide adoption.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.1 Mobile clients (iOS + Android)</strong></p>
|
||||
<ul>
|
||||
<li>Use C FFI (Phase 3.3) for crypto + transport (single library)</li>
|
||||
<li>Push notifications via APNs / FCM (server sends notification on enqueue)</li>
|
||||
<li>Background QUIC connection for message polling</li>
|
||||
<li>Biometric auth for local key storage (Keychain / Android Keystore)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.2 Web client (browser)</strong></p>
|
||||
<ul>
|
||||
<li>Use WASM (Phase 3.4) for crypto</li>
|
||||
<li>Use WebTransport (Phase 3.5) for native QUIC transport</li>
|
||||
<li>Cap’n Proto via WASM bridge (Phase 3.6)</li>
|
||||
<li>IndexedDB for local state persistence</li>
|
||||
<li>Service Worker for background notifications</li>
|
||||
<li>Progressive Web App (PWA) support</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.3 Federation</strong></p>
|
||||
<ul>
|
||||
<li>Server-to-server protocol via Cap’n Proto RPC over QUIC (see <code>federation.capnp</code>)</li>
|
||||
<li><code>relayEnqueue</code>, <code>proxyFetchKeyPackage</code>, <code>federationHealth</code> methods</li>
|
||||
<li>Identity resolution across federated servers</li>
|
||||
<li>MLS group spanning multiple servers</li>
|
||||
<li>Trust model for federated deployments</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.4 Sealed Sender</strong></p>
|
||||
<ul>
|
||||
<li>Sender identity inside MLS ciphertext only (server can’t see who sent)</li>
|
||||
<li><code>sealed_sender</code> module in quicproquo-core with seal/unseal API</li>
|
||||
<li>WASM-accessible via <code>wasm_bindgen</code> for browser use</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.5 Additional language SDKs</strong></p>
|
||||
<ul>
|
||||
<li>Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)</li>
|
||||
<li>Swift: Swift wrapper over C FFI + Network.framework QUIC</li>
|
||||
<li>Ruby: FFI bindings via <code>quicproquo-ffi</code></li>
|
||||
<li>Evaluate demand-driven — only build SDKs people request</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.6 P2P / NAT traversal</strong></p>
|
||||
<ul>
|
||||
<li>Direct peer-to-peer via iroh (foundation exists in <code>quicproquo-p2p</code>)</li>
|
||||
<li>Server as fallback relay only</li>
|
||||
<li>Reduces latency and single-point-of-failure</li>
|
||||
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 6.1</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>7.7 Traffic analysis resistance</strong></p>
|
||||
<ul>
|
||||
<li>Padding messages to uniform size</li>
|
||||
<li>Decoy traffic to mask timing patterns</li>
|
||||
<li>Optional Tor/I2P routing for IP privacy</li>
|
||||
<li>Ref: <code>FUTURE-IMPROVEMENTS.md § 5.4, 6.3</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-8--freifunk--community-mesh-networking"><a class="header" href="#phase-8--freifunk--community-mesh-networking">Phase 8 — Freifunk / Community Mesh Networking</a></h2>
|
||||
<p>Make qpq a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpq nodes form
|
||||
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
|
||||
functions without any central infrastructure or internet uplink.</p>
|
||||
<h3 id="architecture-1"><a class="header" href="#architecture-1">Architecture</a></h3>
|
||||
<pre><code> Client A ─── mDNS discovery ──► nearby qpq node (LAN / mesh)
|
||||
│
|
||||
Cap'n Proto federation
|
||||
│
|
||||
remote qpq node (across mesh)
|
||||
</code></pre>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F0 — Re-include <code>quicproquo-p2p</code> in workspace; fix ALPN strings</strong></p>
|
||||
<ul>
|
||||
<li>Moved <code>crates/quicproquo-p2p</code> from <code>exclude</code> back into <code>[workspace] members</code></li>
|
||||
<li>Fixed ALPN <code>b"quicnprotochat/p2p/1"</code> → <code>b"quicproquo/p2p/1"</code> (breaking wire change)</li>
|
||||
<li>Fixed federation ALPN <code>b"qnpc-fed"</code> → <code>b"quicproquo/federation/1"</code></li>
|
||||
<li>Feature-gated behind <code>--features mesh</code> on client (keeps iroh out of default builds)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F1 — Federation routing in message delivery</strong></p>
|
||||
<ul>
|
||||
<li><code>handle_enqueue</code> and <code>handle_batch_enqueue</code> call <code>federation::routing::resolve_destination()</code></li>
|
||||
<li>Recipients with a remote home server are relayed via <code>FederationClient::relay_enqueue()</code></li>
|
||||
<li>mTLS mutual authentication between nodes (both present client certs, validated against shared CA)</li>
|
||||
<li>Config: <code>QPQ_FEDERATION_LISTEN</code>, <code>QPQ_LOCAL_DOMAIN</code>, <code>QPQ_FEDERATION_CERT/KEY/CA</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F2 — mDNS local peer discovery</strong></p>
|
||||
<ul>
|
||||
<li>Server announces <code>_quicproquo._udp.local.</code> on startup via <code>mdns-sd</code></li>
|
||||
<li>Client: <code>MeshDiscovery::start()</code> browses for nearby nodes (feature-gated)</li>
|
||||
<li>REPL commands: <code>/mesh peers</code> (scan + list), <code>/mesh server &lt;host:port&gt;</code> (note address)</li>
|
||||
<li>Nodes announce: <code>ver=1</code>, <code>server=&lt;host:port&gt;</code>, <code>domain=&lt;local_domain&gt;</code> TXT records</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F3 — Self-sovereign mesh identity</strong></p>
|
||||
<ul>
|
||||
<li>Ed25519 keypair-based identity independent of AS registration</li>
|
||||
<li>JSON-persisted seed + known peers directory</li>
|
||||
<li>Sign/verify operations for mesh authenticity (<code>crates/quicproquo-p2p/src/identity.rs</code>)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F4 — Store-and-forward with TTL</strong></p>
|
||||
<ul>
|
||||
<li><code>MeshEnvelope</code> with TTL-based expiry, hop_count tracking, max_hops routing limit</li>
|
||||
<li>SHA-256 deduplication ID prevents relay loops</li>
|
||||
<li>Ed25519 signature verification on envelopes</li>
|
||||
<li><code>MeshStore</code> in-memory queue with per-recipient capacity limits and TTL-based GC</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F5 — Lightweight broadcast channels</strong></p>
|
||||
<ul>
|
||||
<li>Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)</li>
|
||||
<li>Topic-based pub/sub via <code>BroadcastChannel</code> and <code>BroadcastManager</code></li>
|
||||
<li>Subscribe/unsubscribe, create, publish API on <code>P2pNode</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F6 — Extended <code>/mesh</code> REPL commands</strong></p>
|
||||
<ul>
|
||||
<li><code>/mesh send &lt;peer_id&gt; &lt;msg&gt;</code> — direct P2P message via iroh</li>
|
||||
<li><code>/mesh broadcast &lt;topic&gt; &lt;msg&gt;</code> — publish to broadcast channel</li>
|
||||
<li><code>/mesh subscribe &lt;topic&gt;</code> — join broadcast channel</li>
|
||||
<li><code>/mesh route</code> — show routing table</li>
|
||||
<li><code>/mesh identity</code> — show mesh identity info</li>
|
||||
<li><code>/mesh store</code> — show store-and-forward statistics</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F7 — OpenWrt cross-compilation guide</strong></p>
|
||||
<ul>
|
||||
<li>Musl static builds: <code>x86_64-unknown-linux-musl</code>, <code>armv7-unknown-linux-musleabihf</code>, <code>mips-unknown-linux-musl</code></li>
|
||||
<li>Strip binary: <code>--release</code> + <code>strip</code> → target size &lt; 5 MB for flash storage</li>
|
||||
<li><code>opkg</code> package manifest for OpenWrt feed</li>
|
||||
<li><code>procd</code> init script + <code>uci</code> config file for OpenWrt integration</li>
|
||||
<li>CI job: cross-compile and size-check on every release tag</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>F8 — Traffic analysis resistance for mesh</strong></p>
|
||||
<ul>
|
||||
<li>Uniform message padding to nearest 256-byte boundary (hides message size)</li>
|
||||
<li>Configurable decoy traffic rate (fake messages to mask send timing)</li>
|
||||
<li>Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)</li>
|
||||
<li>Ref: Phase 7.7 for server-side traffic analysis resistance</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="phase-9--developer-experience--community-growth"><a class="header" href="#phase-9--developer-experience--community-growth">Phase 9 — Developer Experience & Community Growth</a></h2>
|
||||
<p>Features designed to attract contributors, create demo/showcase potential,
|
||||
and lower the barrier to entry for non-crypto developers.</p>
|
||||
<ul>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.1 Criterion Benchmark Suite (<code>qpq-bench</code>)</strong></p>
|
||||
<ul>
|
||||
<li>Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake</li>
|
||||
<li>CI publishes HTML benchmark reports as GitHub Actions artifacts</li>
|
||||
<li>Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.2 Safety Numbers (key verification)</strong></p>
|
||||
<ul>
|
||||
<li>60-digit numeric code derived from two identity keys (Signal-style)</li>
|
||||
<li><code>/verify &lt;username&gt;</code> REPL command for out-of-band verification</li>
|
||||
<li>Available in WASM via <code>compute_safety_number</code> binding</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.3 Full-Screen TUI (Ratatui + Crossterm)</strong></p>
|
||||
<ul>
|
||||
<li><code>qpq tui</code> launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator</li>
|
||||
<li>Feature-gated <code>--features tui</code> to keep ratatui/crossterm out of default builds</li>
|
||||
<li>Existing REPL and CLI subcommands are unaffected</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.4 Delivery Proof Canary Tokens</strong></p>
|
||||
<ul>
|
||||
<li>Server signs <code>Ed25519(SHA-256(message_id || recipient || timestamp))</code> on enqueue</li>
|
||||
<li>Sender stores proof locally — cryptographic evidence the server queued the message</li>
|
||||
<li>Cap’n Proto schema gains optional <code>deliveryProof: Data</code> on enqueue response</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.5 Verifiable Transcript Archive</strong></p>
|
||||
<ul>
|
||||
<li><code>GroupMember::export_transcript(path, password)</code> writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)</li>
|
||||
<li><code>qpq export verify</code> CLI command independently verifies chain integrity</li>
|
||||
<li>Useful for legal discovery, audit, or personal backup</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.6 Key Transparency (Merkle-Log Identity Binding)</strong></p>
|
||||
<ul>
|
||||
<li>Append-only Merkle log of (username, identity_key) bindings in the AS</li>
|
||||
<li>Clients receive inclusion proofs alongside key fetches</li>
|
||||
<li>Any client can independently audit the full identity history</li>
|
||||
<li>Lightweight subset of RFC 9162 adapted for identity keys</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.7 Dynamic Server Plugin System</strong></p>
|
||||
<ul>
|
||||
<li>Server loads <code>.so</code>/<code>.dylib</code> plugins at runtime via <code>--plugin-dir</code></li>
|
||||
<li>C-compatible <code>HookVTable</code> via <code>extern "C"</code> — plugins in any language</li>
|
||||
<li>6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered</li>
|
||||
<li>Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<p><input disabled="" type="checkbox" checked=""> <strong>9.8 PQ Noise Transport Layer</strong></p>
|
||||
<ul>
|
||||
<li>Hybrid <code>Noise_XX + ML-KEM-768</code> handshake for post-quantum transport security</li>
|
||||
<li>Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)</li>
|
||||
<li>Feature-gated <code>--features pq-noise</code>; classical Noise_XX default preserved</li>
|
||||
<li>May require extending or forking <code>snow</code> crate’s <code>CryptoResolver</code></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<hr>
|
||||
<h2 id="summary-timeline"><a class="header" href="#summary-timeline">Summary Timeline</a></h2>
|
||||
<div class="table-wrapper">
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Phase</th><th>Focus</th><th>Estimated Effort</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><strong>1</strong></td><td>Production Hardening</td><td>1–2 days</td></tr>
|
||||
<tr><td><strong>2</strong></td><td>Test & CI Maturity</td><td>2–3 days</td></tr>
|
||||
<tr><td><strong>3</strong></td><td>Client SDKs (Go, Python, WASM, FFI, WebTransport)</td><td>5–8 days</td></tr>
|
||||
<tr><td><strong>4</strong></td><td>Trust & Security Infrastructure</td><td>2–4 days (excl. audit)</td></tr>
|
||||
<tr><td><strong>5</strong></td><td>Features & UX</td><td>5–7 days</td></tr>
|
||||
<tr><td><strong>6</strong></td><td>Scale & Operations</td><td>3–5 days</td></tr>
|
||||
<tr><td><strong>7</strong></td><td>Platform Expansion & Research</td><td>ongoing</td></tr>
|
||||
<tr><td><strong>8</strong></td><td>Freifunk / Community Mesh</td><td>ongoing</td></tr>
|
||||
<tr><td><strong>9</strong></td><td>Developer Experience & Community Growth</td><td>3–5 days</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<hr>
|
||||
<h2 id="related-documents"><a class="header" href="#related-documents">Related Documents</a></h2>
|
||||
<ul>
|
||||
<li><a href="docs/FUTURE-IMPROVEMENTS.html">Future Improvements</a> — consolidated improvement list</li>
|
||||
<li><a href="docs/PRODUCTION-READINESS-AUDIT.html">Production Readiness Audit</a> — specific blockers</li>
|
||||
<li><a href="docs/SECURITY-AUDIT.html">Security Audit</a> — findings and recommendations</li>
|
||||
<li><a href="docs/src/roadmap/milestones.html">Milestone Tracker</a> — M1–M7 status</li>
|
||||
<li><a href="docs/src/roadmap/authz-plan.html">Auth, Devices, and Tokens</a> — authorization design</li>
|
||||
<li><a href="docs/src/roadmap/dm-channels.html">DM Channel Design</a> — 1:1 channel spec</li>
|
||||
</ul>
|
||||
|
||||
</main>
|
||||
|
||||
<nav class="nav-wrapper" aria-label="Page navigation">
|
||||
<!-- Mobile navigation buttons -->
|
||||
<a rel="prev" href="roadmap/future-research.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="operations/monitoring.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<div style="clear: both"></div>
|
||||
</nav>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<nav class="nav-wide-wrapper" aria-label="Page navigation">
|
||||
<a rel="prev" href="roadmap/future-research.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M41.4 233.4c-12.5 12.5-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L109.3 256 246.6 118.6c12.5-12.5 12.5-32.8 0-45.3s-32.8-12.5-45.3 0l-160 160z"/></svg></span>
|
||||
</a>
|
||||
|
||||
<a rel="next prefetch" href="operations/monitoring.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M278.6 233.4c12.5 12.5 12.5 32.8 0 45.3l-160 160c-12.5 12.5-32.8 12.5-45.3 0s-12.5-32.8 0-45.3L210.7 256 73.4 118.6c-12.5-12.5-12.5-32.8 0-45.3s32.8-12.5 45.3 0l160 160z"/></svg></span>
|
||||
</a>
|
||||
</nav>
|
||||
|
||||
</div>
|
||||
|
||||
<template id=fa-eye><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M288 32c-80.8 0-145.5 36.8-192.6 80.6C48.6 156 17.3 208 2.5 243.7c-3.3 7.9-3.3 16.7 0 24.6C17.3 304 48.6 356 95.4 399.4C142.5 443.2 207.2 480 288 480s145.5-36.8 192.6-80.6c46.8-43.5 78.1-95.4 93-131.1c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C433.5 68.8 368.8 32 288 32zM432 256c0 79.5-64.5 144-144 144s-144-64.5-144-144s64.5-144 144-144s144 64.5 144 144zM288 192c0 35.3-28.7 64-64 64c-11.5 0-22.3-3-31.6-8.4c-.2 2.8-.4 5.5-.4 8.4c0 53 43 96 96 96s96-43 96-96s-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6z"/></svg></span></template>
|
||||
<template id=fa-eye-slash><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M38.8 5.1C28.4-3.1 13.3-1.2 5.1 9.2S-1.2 34.7 9.2 42.9l592 464c10.4 8.2 25.5 6.3 33.7-4.1s6.3-25.5-4.1-33.7L525.6 386.7c39.6-40.6 66.4-86.1 79.9-118.4c3.3-7.9 3.3-16.7 0-24.6c-14.9-35.7-46.2-87.7-93-131.1C465.5 68.8 400.8 32 320 32c-68.2 0-125 26.3-169.3 60.8L38.8 5.1zM223.1 149.5C248.6 126.2 282.7 112 320 112c79.5 0 144 64.5 144 144c0 24.9-6.3 48.3-17.4 68.7L408 294.5c5.2-11.8 8-24.8 8-38.5c0-53-43-96-96-96c-2.8 0-5.6 .1-8.4 .4c5.3 9.3 8.4 20.1 8.4 31.6c0 10.2-2.4 19.8-6.6 28.3l-90.3-70.8zm223.1 298L373 389.9c-16.4 6.5-34.3 10.1-53 10.1c-79.5 0-144-64.5-144-144c0-6.9 .5-13.6 1.4-20.2L83.1 161.5C60.3 191.2 44 220.8 34.5 243.7c-3.3 7.9-3.3 16.7 0 24.6c14.9 35.7 46.2 87.7 93 131.1C174.5 443.2 239.2 480 320 480c47.8 0 89.9-12.9 126.2-32.5z"/></svg></span></template>
|
||||
<template id=fa-copy><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M502.6 70.63l-61.25-61.25C435.4 3.371 427.2 0 418.7 0H255.1c-35.35 0-64 28.66-64 64l.0195 256C192 355.4 220.7 384 256 384h192c35.2 0 64-28.8 64-64V93.25C512 84.77 508.6 76.63 502.6 70.63zM464 320c0 8.836-7.164 16-16 16H255.1c-8.838 0-16-7.164-16-16L239.1 64.13c0-8.836 7.164-16 16-16h128L384 96c0 17.67 14.33 32 32 32h47.1V320zM272 448c0 8.836-7.164 16-16 16H63.1c-8.838 0-16-7.164-16-16L47.98 192.1c0-8.836 7.164-16 16-16H160V128H63.99c-35.35 0-64 28.65-64 64l.0098 256C.002 483.3 28.66 512 64 512h192c35.2 0 64-28.8 64-64v-32h-47.1L272 448z"/></svg></span></template>
|
||||
<template id=fa-play><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M73 39c-14.8-9.1-33.4-9.4-48.5-.9S0 62.6 0 80V432c0 17.4 9.4 33.4 24.5 41.9s33.7 8.1 48.5-.9L361 297c14.3-8.7 23-24.2 23-41s-8.7-32.2-23-41L73 39z"/></svg></span></template>
|
||||
<template id=fa-clock-rotate-left><span class=fa-svg><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.2.0 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc. --><path d="M75 75L41 41C25.9 25.9 0 36.6 0 57.9V168c0 13.3 10.7 24 24 24H134.1c21.4 0 32.1-25.9 17-41l-30.8-30.8C155 85.5 203 64 256 64c106 0 192 86 192 192s-86 192-192 192c-40.8 0-78.6-12.7-109.7-34.4c-14.5-10.1-34.4-6.6-44.6 7.9s-6.6 34.4 7.9 44.6C151.2 495 201.7 512 256 512c141.4 0 256-114.6 256-256S397.4 0 256 0C185.3 0 121.3 28.7 75 75zm181 53c-13.3 0-24 10.7-24 24V256c0 6.4 2.5 12.5 7 17l72 72c9.4 9.4 24.6 9.4 33.9 0s9.4-24.6 0-33.9l-65-65V152c0-13.3-10.7-24-24-24z"/></svg></span></template>
|
||||
|
||||
|
||||
|
||||
<script>
|
||||
window.playground_copyable = true;
|
||||
</script>
|
||||
|
||||
|
||||
<script src="elasticlunr-ef4e11c1.min.js"></script>
|
||||
<script src="mark-09e88c2c.min.js"></script>
|
||||
<script src="searcher-c2a407aa.js"></script>
|
||||
|
||||
<script src="clipboard-1626706a.min.js"></script>
|
||||
<script src="highlight-abc7f01d.js"></script>
|
||||
<script src="book-a0b12cfe.js"></script>
|
||||
|
||||
<!-- Custom JS scripts -->
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
493
ROADMAP.md
Normal file
493
ROADMAP.md
Normal file
@@ -0,0 +1,493 @@
|
||||
# Roadmap — quicprochat
|
||||
|
||||
> From proof-of-concept to production-grade E2E encrypted messaging.
|
||||
>
|
||||
> Each phase is designed to be tackled sequentially. Items within a phase
|
||||
> can be parallelised. Check the box when done.
|
||||
|
||||
---
|
||||
|
||||
## Phase 1 — Production Hardening (Critical)
|
||||
|
||||
Eliminate all crash paths, enforce secure defaults, fix deployment blockers.
|
||||
|
||||
- [x] **1.1 Remove `.unwrap()` / `.expect()` from production paths**
|
||||
- Replace `AUTH_CONTEXT.read().expect()` in client RPC with proper `Result`
|
||||
- Replace `"0.0.0.0:0".parse().unwrap()` in client with fallible parse
|
||||
- Replace `Mutex::lock().unwrap()` in server storage with `.map_err()`
|
||||
- Audit: `grep -rn 'unwrap()\|expect(' crates/` outside `#[cfg(test)]`
|
||||
|
||||
- [x] **1.2 Enforce secure defaults in production mode**
|
||||
- Reject startup if `QPC_PRODUCTION=true` and `auth_token` is empty or `"devtoken"`
|
||||
- Require non-empty `db_key` when using SQL backend in production
|
||||
- Refuse to auto-generate TLS certs in production mode (require existing cert+key)
|
||||
- Already partially implemented — verify and harden the validation in `config.rs`
|
||||
|
||||
- [x] **1.3 Fix `.gitignore`**
|
||||
- Add `data/`, `*.der`, `*.pem`, `*.db`, `*.bin` (state files), `*.ks` (keystores)
|
||||
- Verify no secrets are already tracked: `git ls-files data/ *.der *.db`
|
||||
|
||||
- [x] **1.4 Fix Dockerfile**
|
||||
- Sync workspace members (handle excluded `p2p` crate)
|
||||
- Create dedicated user/group instead of `nobody`
|
||||
- Set writable `QPC_DATA_DIR` with correct permissions
|
||||
- Test: `docker build . && docker run --rm -it qpc-server --help`
|
||||
|
||||
- [x] **1.5 TLS certificate lifecycle**
|
||||
- Document CA-signed cert setup (Let's Encrypt / custom CA)
|
||||
- Add `--tls-required` flag that refuses to start without valid cert
|
||||
- Log clear warning when using self-signed certs
|
||||
- Document certificate rotation procedure
|
||||
|
||||
---
|
||||
|
||||
## Phase 2 — Test & CI Maturity
|
||||
|
||||
Build confidence before adding features.
|
||||
|
||||
- [x] **2.1 Expand E2E test coverage**
|
||||
- Auth failure scenarios (wrong password, expired token, invalid token)
|
||||
- Message ordering verification (send N messages, verify seq numbers)
|
||||
- Concurrent clients (3+ members in group, simultaneous send/recv)
|
||||
- OPAQUE registration + login full flow
|
||||
- Queue full behavior (>1000 messages)
|
||||
- Rate limiting behavior (>100 enqueues/minute)
|
||||
- Reconnection after server restart
|
||||
- KeyPackage exhaustion (fetch when none available)
|
||||
|
||||
- [x] **2.2 Add unit tests for untested paths**
|
||||
- Client retry logic (exponential backoff, jitter, retriable classification)
|
||||
- REPL input parsing edge cases (empty input, special characters, `/` commands)
|
||||
- State file encryption/decryption round-trip with bad password
|
||||
- Token cache expiry
|
||||
- Conversation store migrations
|
||||
|
||||
- [x] **2.3 CI hardening**
|
||||
- Add `.github/CODEOWNERS` (crypto, auth, wire-format require 2 reviewers)
|
||||
- Ensure `cargo deny check` runs on every PR (already in CI — verify)
|
||||
- Add `cargo audit` as blocking check (already in CI — verify)
|
||||
- Add coverage reporting (tarpaulin or llvm-cov)
|
||||
- Add CI job for Docker build validation
|
||||
|
||||
- [x] **2.4 Clean up build warnings**
|
||||
- Fix Cap'n Proto generated `unused_parens` warnings
|
||||
- Remove dead code / unused imports
|
||||
- Address `openmls` future-incompat warnings
|
||||
- Target: `cargo clippy --workspace -- -D warnings` passes clean
|
||||
|
||||
---
|
||||
|
||||
## Phase 3 — Client SDKs: Native QUIC + Cap'n Proto Everywhere
|
||||
|
||||
**No REST gateway. No protocol dilution.** The `.capnp` schemas are the
|
||||
interface definition. Every SDK speaks native QUIC + Cap'n Proto. The
|
||||
project name stays honest.
|
||||
|
||||
### Why this matters
|
||||
|
||||
The name is **quic**-**pro**-chat (QUIC + Cap'n Proto) — the protocol IS the product. Instead
|
||||
of adding an HTTP translation layer that loses zero-copy performance and
|
||||
adds base64 overhead, we invest in making the native protocol accessible
|
||||
from every language that has QUIC + Cap'n Proto support, and provide
|
||||
WASM/FFI for the crypto layer.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Server: QUIC + Cap'n Proto (single protocol, no gateway)
|
||||
|
||||
Client SDKs:
|
||||
┌─── Rust quinn + capnp-rpc (existing, reference impl)
|
||||
├─── Go quic-go + go-capnp (native, high confidence)
|
||||
├─── Python aioquic + pycapnp (native QUIC, manual framing)
|
||||
├─── C/C++ msquic/ngtcp2 + capnproto (reference impl, full RPC)
|
||||
└─── Browser WebTransport + capnp (WASM) (QUIC transport, no HTTP needed)
|
||||
|
||||
Crypto layer (client-side MLS, shared across all SDKs):
|
||||
┌─── Rust crate (native, existing)
|
||||
├─── WASM module (browsers, Node.js, Deno)
|
||||
└─── C FFI (Swift, Kotlin, Python, Go via cgo)
|
||||
```
|
||||
|
||||
### Language support reality check
|
||||
|
||||
| Language | QUIC | Cap'n Proto | RPC | Confidence |
|
||||
|----------|------|-------------|-----|------------|
|
||||
| **Rust** | quinn ✅ | capnp-rpc ✅ | Full ✅ | Existing |
|
||||
| **Go** | quic-go ✅ | go-capnp ✅ | Level 1 ✅ | High |
|
||||
| **Python** | aioquic ✅ | pycapnp ⚠️ | Manual framing | Medium |
|
||||
| **C/C++** | msquic/ngtcp2 ✅ | capnproto ✅ | Full ✅ | High |
|
||||
| **Browser** | WebTransport ✅ | WASM ✅ | Via WASM bridge | Medium |
|
||||
|
||||
### Implementation
|
||||
|
||||
- [x] **3.1 Go SDK (`quicprochat-go`)**
|
||||
- Generated Go types from `node.capnp` (6487-line codegen, all 24 RPC methods)
|
||||
- QUIC transport via `quic-go` with TLS 1.3 + ALPN `"capnp"`
|
||||
- High-level `qpc` package: Connect, Health, ResolveUser, CreateChannel, Send/SendWithTTL, Receive/ReceiveWait, DeleteAccount, OPAQUE auth
|
||||
- Example CLI in `sdks/go/cmd/example/`
|
||||
|
||||
- [x] **3.2 Python SDK (`quicprochat-py`)**
|
||||
- QUIC transport: `aioquic` with custom Cap'n Proto stream handler
|
||||
- Cap'n Proto serialization: `pycapnp` for message types
|
||||
- Manual RPC framing: length-prefixed request/response over QUIC stream
|
||||
- Async/await API matching the Rust client patterns
|
||||
- Crypto: PyO3 bindings to `quicprochat-core` for MLS operations
|
||||
- Publish: PyPI `quicprochat`
|
||||
- Example: async bot client
|
||||
|
||||
- [x] **3.3 C FFI layer (`quicprochat-ffi`)**
|
||||
- `crates/quicprochat-ffi` with 7 extern "C" functions: connect, login, send, receive, disconnect, last_error, free_string
|
||||
- Builds as `libquicprochat_ffi.so` / `.dylib` / `.dll`
|
||||
- Python ctypes wrapper in `examples/python/qpc_client.py`
|
||||
|
||||
- [x] **3.4 WASM compilation of `quicprochat-core`**
|
||||
- `wasm-pack build` target producing 175 KB WASM bundle (LTO + opt-level=s)
|
||||
- 13 `wasm_bindgen` functions: Ed25519 identity, hybrid KEM, safety numbers, sealed sender, padding
|
||||
- Browser-ready with `crypto.getRandomValues()` RNG
|
||||
- Published as `sdks/typescript/wasm-crypto/`
|
||||
|
||||
- [x] **3.5 WebTransport server endpoint**
|
||||
- Add HTTP/3 + WebTransport listener to server (same QUIC stack via quinn)
|
||||
- Cap'n Proto RPC framed over WebTransport bidirectional streams
|
||||
- Same auth, same storage, same RPC handlers — just a different stream source
|
||||
- Browsers connect via `new WebTransport("https://server:7443")`
|
||||
- ALPN negotiation: `"h3"` for WebTransport, `"capnp"` for native QUIC
|
||||
- Configurable port: `--webtransport-listen 0.0.0.0:7443`
|
||||
- Feature-flagged: `--features webtransport`
|
||||
|
||||
- [x] **3.6 TypeScript/JavaScript SDK (`@quicprochat/client`)**
|
||||
- `QpqClient` class: connect, offline, health, resolveUser, createChannel, send/sendWithTTL, receive, deleteAccount
|
||||
- WASM crypto wrapper: generateIdentity, sign/verify, hybridEncrypt/Decrypt, computeSafetyNumber, sealedSend, pad
|
||||
- WebSocket transport with request/response correlation and reconnection
|
||||
- Browser demo: interactive crypto playground + chat UI (`sdks/typescript/demo/index.html`)
|
||||
|
||||
- [x] **3.7 SDK documentation and schema publishing**
|
||||
- Publish `.capnp` schemas as the canonical API contract
|
||||
- Document the QUIC + Cap'n Proto connection pattern for each language
|
||||
- Provide a "build your own SDK" guide (QUIC stream → Cap'n Proto RPC bootstrap)
|
||||
- Reference implementation checklist: connect, auth, upload key, enqueue, fetch
|
||||
|
||||
---
|
||||
|
||||
## Phase 4 — Trust & Security Infrastructure
|
||||
|
||||
Address the security gaps required for real-world deployment.
|
||||
|
||||
- [ ] **4.1 Third-party cryptographic audit**
|
||||
- Scope: MLS integration, OPAQUE flow, hybrid KEM, key lifecycle, zeroization
|
||||
- Firms: NCC Group, Trail of Bits, Cure53
|
||||
  - Budget and timeline: typically 4–6 weeks, $50K–$150K
|
||||
- Publish report publicly (builds trust)
|
||||
|
||||
- [x] **4.2 Key Transparency / revocation**
|
||||
- Replace `BasicCredential` with X.509-based MLS credentials
|
||||
- Or: verifiable key directory (Merkle tree, auditable log)
|
||||
- Users can verify peer keys haven't been substituted (MITM detection)
|
||||
- Revocation mechanism for compromised keys
|
||||
|
||||
- [x] **4.3 Client authentication on Delivery Service**
|
||||
- DS sender identity binding with explicit audit logging
|
||||
- `sender_prefix` tracking in enqueue/batch_enqueue RPCs
|
||||
- Sender identity derived from authenticated session
|
||||
|
||||
- [x] **4.4 M7 — Post-quantum MLS integration**
|
||||
- Integrate hybrid KEM (X25519 + ML-KEM-768) into the OpenMLS crypto provider
|
||||
- Group key material gets post-quantum confidentiality
|
||||
- Full test suite with PQ ciphersuite
|
||||
- Ref: existing `hybrid_kem.rs` and `hybrid_crypto.rs`
|
||||
|
||||
- [x] **4.5 Username enumeration mitigation**
|
||||
- 5 ms timing floor on `resolveUser` responses
|
||||
- Rate limiting to prevent bulk enumeration attacks
|
||||
|
||||
---
|
||||
|
||||
## Phase 5 — Features & UX
|
||||
|
||||
Make it a product people want to use.
|
||||
|
||||
- [x] **5.1 Multi-device support**
|
||||
- Account → multiple devices, each with own Ed25519 key + MLS KeyPackages
|
||||
- Device graph management (add device, remove device, list devices)
|
||||
- Messages delivered to all devices of a user
|
||||
- `device_id` field already in Auth struct — wire it through
|
||||
|
||||
- [x] **5.2 Account recovery**
|
||||
- Recovery codes or backup key (encrypted, stored by user)
|
||||
- Option: server-assisted recovery with security questions (lower security)
|
||||
- MLS state re-establishment after device loss
|
||||
|
||||
- [x] **5.3 Full MLS lifecycle**
|
||||
- Member removal (Remove proposal → Commit → fan-out)
|
||||
- Credential update (Update proposal for key rotation)
|
||||
- Explicit proposal handling (queue proposals, batch commit)
|
||||
- Group metadata (name, description, avatar hash)
|
||||
|
||||
- [x] **5.4 Message editing and deletion**
|
||||
- `Edit` (0x06) and `Delete` (0x07) message types in `AppMessage`
|
||||
- `/edit <index> <text>` and `/delete <index>` REPL commands (own messages only)
|
||||
- Database update/removal on incoming edit/delete
|
||||
|
||||
- [x] **5.5 File and media transfer**
|
||||
- `uploadBlob` / `downloadBlob` RPCs with 256 KB chunked streaming
|
||||
- SHA-256 content-addressable storage with hash verification
|
||||
- `FileRef` (0x08) message type with blob_id, filename, file_size, mime_type
|
||||
- `/send-file <path>` and `/download <index>` REPL commands with progress bars
|
||||
- 50 MB max file size, automatic MIME detection via `mime_guess`
|
||||
|
||||
- [x] **5.6 Abuse prevention and moderation**
|
||||
- Block user (client-side, suppress display)
|
||||
- Report message (encrypted report to admin key)
|
||||
- Admin tools: ban user, delete account, audit log
|
||||
|
||||
- [x] **5.7 Offline message queue (client-side)**
|
||||
- Queue messages when disconnected, send on reconnect
|
||||
- Idempotent message IDs to prevent duplicates
|
||||
- Gap detection: compare local seq with server seq
|
||||
|
||||
---
|
||||
|
||||
## Phase 6 — Scale & Operations
|
||||
|
||||
Prepare for real traffic.
|
||||
|
||||
- [x] **6.1 Distributed rate limiting**
|
||||
- Current: in-memory per-process, lost on restart
|
||||
- Move to Redis or shared state for multi-node deployments
|
||||
- Sliding window with configurable thresholds
|
||||
|
||||
- [x] **6.2 Multi-node / horizontal scaling**
|
||||
- Stateless server design (already mostly there — state is in storage backend)
|
||||
- Shared PostgreSQL or CockroachDB backend (replace SQLite)
|
||||
- Message queue fan-out (Redis pub/sub or NATS for cross-node notification)
|
||||
- Load balancer health check via QUIC RPC `health()` or Prometheus `/metrics`
|
||||
|
||||
- [x] **6.3 Operational runbook**
|
||||
- Backup / restore procedures (SQLCipher, file backend)
|
||||
- Key rotation (auth token, TLS cert, DB encryption key)
|
||||
- Incident response playbook
|
||||
- Scaling guide (when to add nodes, resource sizing)
|
||||
- Monitoring dashboard templates (Grafana + Prometheus)
|
||||
|
||||
- [x] **6.4 Connection draining and graceful shutdown**
|
||||
- Stop accepting new connections on SIGTERM
|
||||
- Wait for in-flight RPCs (configurable timeout, default 30s)
|
||||
- Drain WebTransport sessions with close frame
|
||||
- Document expected behavior for load balancers (health → unhealthy first)
|
||||
|
||||
- [x] **6.5 Request-level timeouts**
|
||||
- Per-RPC timeout (prevent slow clients from holding resources)
|
||||
- Database query timeout
|
||||
- Overall request deadline propagation
|
||||
|
||||
- [x] **6.6 Observability enhancements**
|
||||
- Request correlation IDs (trace across RPC → storage)
|
||||
- Storage operation latency metrics
|
||||
- Per-endpoint latency histograms
|
||||
- Structured audit log to persistent storage (not just stdout)
|
||||
- OpenTelemetry integration
|
||||
|
||||
---
|
||||
|
||||
## Phase 7 — Platform Expansion & Research
|
||||
|
||||
Long-term vision for wide adoption.
|
||||
|
||||
- [x] **7.1 Mobile clients (iOS + Android)**
|
||||
- Use C FFI (Phase 3.3) for crypto + transport (single library)
|
||||
- Push notifications via APNs / FCM (server sends notification on enqueue)
|
||||
- Background QUIC connection for message polling
|
||||
- Biometric auth for local key storage (Keychain / Android Keystore)
|
||||
|
||||
- [x] **7.2 Web client (browser)**
|
||||
- Use WASM (Phase 3.4) for crypto
|
||||
- Use WebTransport (Phase 3.5) for native QUIC transport
|
||||
- Cap'n Proto via WASM bridge (Phase 3.6)
|
||||
- IndexedDB for local state persistence
|
||||
- Service Worker for background notifications
|
||||
- Progressive Web App (PWA) support
|
||||
|
||||
- [x] **7.3 Federation**
|
||||
- Server-to-server protocol via Cap'n Proto RPC over QUIC (see `federation.capnp`)
|
||||
- `relayEnqueue`, `proxyFetchKeyPackage`, `federationHealth` methods
|
||||
- Identity resolution across federated servers
|
||||
- MLS group spanning multiple servers
|
||||
- Trust model for federated deployments
|
||||
|
||||
- [x] **7.4 Sealed Sender**
|
||||
- Sender identity inside MLS ciphertext only (server can't see who sent)
|
||||
- `sealed_sender` module in quicprochat-core with seal/unseal API
|
||||
- WASM-accessible via `wasm_bindgen` for browser use
|
||||
|
||||
- [x] **7.5 Additional language SDKs**
|
||||
- Java/Kotlin: JNI bindings to C FFI (Phase 3.3) + native QUIC (netty-quic)
|
||||
- Swift: Swift wrapper over C FFI + Network.framework QUIC
|
||||
- Ruby: FFI bindings via `quicprochat-ffi`
|
||||
- Evaluate demand-driven — only build SDKs people request
|
||||
|
||||
- [x] **7.6 P2P / NAT traversal**
|
||||
- Direct peer-to-peer via iroh (foundation exists in `quicprochat-p2p`)
|
||||
- Server as fallback relay only
|
||||
- Reduces latency and single-point-of-failure
|
||||
- Ref: `FUTURE-IMPROVEMENTS.md § 6.1`
|
||||
|
||||
- [x] **7.7 Traffic analysis resistance**
|
||||
- Padding messages to uniform size
|
||||
- Decoy traffic to mask timing patterns
|
||||
- Optional Tor/I2P routing for IP privacy
|
||||
- Ref: `FUTURE-IMPROVEMENTS.md § 5.4, 6.3`
|
||||
|
||||
---
|
||||
|
||||
## Phase 8 — Freifunk / Community Mesh Networking
|
||||
|
||||
Make qpc a first-class citizen on decentralised, community-operated wireless
|
||||
networks (Freifunk, BATMAN-adv/Babel routing, OpenWrt). Multiple qpc nodes form
|
||||
a federated mesh; clients auto-discover nearby nodes via mDNS; the network
|
||||
functions without any central infrastructure or internet uplink.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Client A ─── mDNS discovery ──► nearby qpc node (LAN / mesh)
|
||||
│
|
||||
Cap'n Proto federation
|
||||
│
|
||||
remote qpc node (across mesh)
|
||||
```
|
||||
|
||||
- [x] **F0 — Re-include `quicprochat-p2p` in workspace; fix ALPN strings**
|
||||
- Moved `crates/quicprochat-p2p` from `exclude` back into `[workspace] members`
|
||||
- Fixed ALPN `b"quicnprotochat/p2p/1"` → `b"quicprochat/p2p/1"` (breaking wire change)
|
||||
- Fixed federation ALPN `b"qnpc-fed"` → `b"quicprochat/federation/1"`
|
||||
- Feature-gated behind `--features mesh` on client (keeps iroh out of default builds)
|
||||
|
||||
- [x] **F1 — Federation routing in message delivery**
|
||||
- `handle_enqueue` and `handle_batch_enqueue` call `federation::routing::resolve_destination()`
|
||||
- Recipients with a remote home server are relayed via `FederationClient::relay_enqueue()`
|
||||
- mTLS mutual authentication between nodes (both present client certs, validated against shared CA)
|
||||
- Config: `QPC_FEDERATION_LISTEN`, `QPC_LOCAL_DOMAIN`, `QPC_FEDERATION_CERT/KEY/CA`
|
||||
|
||||
- [x] **F2 — mDNS local peer discovery**
|
||||
- Server announces `_quicprochat._udp.local.` on startup via `mdns-sd`
|
||||
- Client: `MeshDiscovery::start()` browses for nearby nodes (feature-gated)
|
||||
- REPL commands: `/mesh peers` (scan + list), `/mesh server <host:port>` (note address)
|
||||
- Nodes announce: `ver=1`, `server=<host:port>`, `domain=<local_domain>` TXT records
|
||||
|
||||
- [x] **F3 — Self-sovereign mesh identity**
|
||||
- Ed25519 keypair-based identity independent of AS registration
|
||||
- JSON-persisted seed + known peers directory
|
||||
- Sign/verify operations for mesh authenticity (`crates/quicprochat-p2p/src/identity.rs`)
|
||||
|
||||
- [x] **F4 — Store-and-forward with TTL**
|
||||
- `MeshEnvelope` with TTL-based expiry, hop_count tracking, max_hops routing limit
|
||||
- SHA-256 deduplication ID prevents relay loops
|
||||
- Ed25519 signature verification on envelopes
|
||||
- `MeshStore` in-memory queue with per-recipient capacity limits and TTL-based GC
|
||||
|
||||
- [x] **F5 — Lightweight broadcast channels**
|
||||
- Symmetric ChaCha20-Poly1305 encrypted channels (no MLS overhead)
|
||||
- Topic-based pub/sub via `BroadcastChannel` and `BroadcastManager`
|
||||
- Subscribe/unsubscribe, create, publish API on `P2pNode`
|
||||
|
||||
- [x] **F6 — Extended `/mesh` REPL commands**
|
||||
- `/mesh send <peer_id> <msg>` — direct P2P message via iroh
|
||||
- `/mesh broadcast <topic> <msg>` — publish to broadcast channel
|
||||
- `/mesh subscribe <topic>` — join broadcast channel
|
||||
- `/mesh route` — show routing table
|
||||
- `/mesh identity` — show mesh identity info
|
||||
- `/mesh store` — show store-and-forward statistics
|
||||
|
||||
- [x] **F7 — OpenWrt cross-compilation guide**
|
||||
- Musl static builds: `x86_64-unknown-linux-musl`, `armv7-unknown-linux-musleabihf`, `mips-unknown-linux-musl`
|
||||
- Strip binary: `--release` + `strip` → target size < 5 MB for flash storage
|
||||
- `opkg` package manifest for OpenWrt feed
|
||||
- `procd` init script + `uci` config file for OpenWrt integration
|
||||
- CI job: cross-compile and size-check on every release tag
|
||||
|
||||
- [x] **F8 — Traffic analysis resistance for mesh**
|
||||
- Uniform message padding to nearest 256-byte boundary (hides message size)
|
||||
- Configurable decoy traffic rate (fake messages to mask send timing)
|
||||
- Optional onion routing: 3-hop relay through other mesh nodes (no Tor dependency)
|
||||
- Ref: Phase 7.7 for server-side traffic analysis resistance
|
||||
|
||||
---
|
||||
|
||||
## Phase 9 — Developer Experience & Community Growth
|
||||
|
||||
Features designed to attract contributors, create demo/showcase potential,
|
||||
and lower the barrier to entry for non-crypto developers.
|
||||
|
||||
- [x] **9.1 Criterion Benchmark Suite (`qpc-bench`)**
|
||||
- Criterion benchmarks for all crypto primitives: hybrid KEM encap/decap,
|
||||
MLS group-add at 10/100/1000 members, epoch rotation, Noise_XX handshake
|
||||
- CI publishes HTML benchmark reports as GitHub Actions artifacts
|
||||
- Citable numbers — no other project benchmarks MLS + PQ-KEM in Rust
|
||||
|
||||
- [x] **9.2 Safety Numbers (key verification)**
|
||||
- 60-digit numeric code derived from two identity keys (Signal-style)
|
||||
- `/verify <username>` REPL command for out-of-band verification
|
||||
- Available in WASM via `compute_safety_number` binding
|
||||
|
||||
- [x] **9.3 Full-Screen TUI (Ratatui + Crossterm)**
|
||||
- `qpc tui` launches a full-screen terminal UI: message pane, input bar,
|
||||
channel sidebar with unread counts, MLS epoch indicator
|
||||
- Feature-gated `--features tui` to keep ratatui/crossterm out of default builds
|
||||
- Existing REPL and CLI subcommands are unaffected
|
||||
|
||||
- [x] **9.4 Delivery Proof Canary Tokens**
|
||||
- Server signs `Ed25519(SHA-256(message_id || recipient || timestamp))` on enqueue
|
||||
- Sender stores proof locally — cryptographic evidence the server queued the message
|
||||
- Cap'n Proto schema gains optional `deliveryProof: Data` on enqueue response
|
||||
|
||||
- [x] **9.5 Verifiable Transcript Archive**
|
||||
- `GroupMember::export_transcript(path, password)` writes encrypted, tamper-evident
|
||||
message archive (CBOR records, Argon2id + ChaCha20-Poly1305, Merkle chain)
|
||||
- `qpc export verify` CLI command independently verifies chain integrity
|
||||
- Useful for legal discovery, audit, or personal backup
|
||||
|
||||
- [x] **9.6 Key Transparency (Merkle-Log Identity Binding)**
|
||||
- Append-only Merkle log of (username, identity_key) bindings in the AS
|
||||
- Clients receive inclusion proofs alongside key fetches
|
||||
- Any client can independently audit the full identity history
|
||||
- Lightweight subset of RFC 9162 adapted for identity keys
|
||||
|
||||
- [x] **9.7 Dynamic Server Plugin System**
|
||||
- Server loads `.so`/`.dylib` plugins at runtime via `--plugin-dir`
|
||||
- C-compatible `HookVTable` via `extern "C"` — plugins in any language
|
||||
- 6 hook points: on_message_enqueue, on_batch_enqueue, on_auth, on_channel_created, on_fetch, on_user_registered
|
||||
- Example plugins: logging plugin, rate limit plugin (512 KiB payload enforcement)
|
||||
|
||||
- [x] **9.8 PQ Noise Transport Layer**
|
||||
- Hybrid `Noise_XX + ML-KEM-768` handshake for post-quantum transport security
|
||||
- Closes the harvest-now-decrypt-later gap on handshake metadata (ADR-006)
|
||||
- Feature-gated `--features pq-noise`; classical Noise_XX default preserved
|
||||
- May require extending or forking `snow` crate's `CryptoResolver`
|
||||
|
||||
---
|
||||
|
||||
## Summary Timeline
|
||||
|
||||
| Phase | Focus | Estimated Effort |
|
||||
|-------|-------|-----------------|
|
||||
| **1** | Production Hardening | 1–2 days |
|
||||
| **2** | Test & CI Maturity | 2–3 days |
|
||||
| **3** | Client SDKs (Go, Python, WASM, FFI, WebTransport) | 5–8 days |
|
||||
| **4** | Trust & Security Infrastructure | 2–4 days (excl. audit) |
|
||||
| **5** | Features & UX | 5–7 days |
|
||||
| **6** | Scale & Operations | 3–5 days |
|
||||
| **7** | Platform Expansion & Research | ongoing |
|
||||
| **8** | Freifunk / Community Mesh | ongoing |
|
||||
| **9** | Developer Experience & Community Growth | 3–5 days |
|
||||
|
||||
---
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Future Improvements](docs/FUTURE-IMPROVEMENTS.md) — consolidated improvement list
|
||||
- [Production Readiness Audit](docs/PRODUCTION-READINESS-AUDIT.md) — specific blockers
|
||||
- [Security Audit](docs/SECURITY-AUDIT.md) — findings and recommendations
|
||||
- [Milestone Tracker](docs/src/roadmap/milestones.md) — M1–M7 status
|
||||
- [Auth, Devices, and Tokens](docs/src/roadmap/authz-plan.md) — authorization design
|
||||
- [DM Channel Design](docs/src/roadmap/dm-channels.md) — 1:1 channel spec
|
||||
29
SECURITY.md
Normal file
29
SECURITY.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Only the current `main` branch is supported with security updates.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
**Do not use public GitHub issues to report security vulnerabilities.**
|
||||
|
||||
Instead, email **security@quicprochat.org** with:
|
||||
|
||||
- A description of the vulnerability
|
||||
- Steps to reproduce or a proof of concept
|
||||
- The affected component(s) and potential impact
|
||||
|
||||
We will acknowledge your report within **48 hours** and work with you on a fix under a **90-day coordinated disclosure** timeline.
|
||||
|
||||
## What Qualifies
|
||||
|
||||
- Cryptographic implementation bugs (MLS, Noise, hybrid KEM, key derivation)
|
||||
- Authentication or authorization bypass
|
||||
- Key material leakage (memory, logs, network)
|
||||
- Protocol-level flaws (replay, downgrade, impersonation)
|
||||
- Any issue that compromises message confidentiality or integrity
|
||||
|
||||
## Credit
|
||||
|
||||
Reporters are credited in published security advisories unless they prefer to remain anonymous. Let us know your preference when you report.
|
||||
229
SPRINTS.md
Normal file
229
SPRINTS.md
Normal file
@@ -0,0 +1,229 @@
|
||||
# quicprochat — Sprint Plan
|
||||
|
||||
> 7 sprints synthesized from code audit, architecture analysis, and ecosystem research.
|
||||
> Each sprint is ~1 week. Sprints are ordered by priority and dependency.
|
||||
|
||||
---
|
||||
|
||||
## Sprint 1 — Bug Fixes & Code Quality (Quick Wins)
|
||||
|
||||
Fix all known bugs, clippy warnings, and dead code before building on top.
|
||||
|
||||
- [x] **1.1 Fix boolean logic bug in TUI**
|
||||
- `crates/quicprochat-client/src/client/v2_tui.rs:832` — remove `|| true`
|
||||
- Cursor positioning always executes regardless of input state
|
||||
|
||||
- [x] **1.2 Fix unwrap violations in P2P router**
|
||||
- `crates/quicprochat-p2p/src/routing.rs:416,419` — `.lock().unwrap()` on Mutex
|
||||
- Replace with `.expect("lock poisoned")` or proper error handling
|
||||
|
||||
- [x] **1.3 Remove placeholder assertion in WebTransport**
|
||||
- `crates/quicprochat-server/src/webtransport.rs:418` — `assert!(true);`
|
||||
|
||||
- [x] **1.4 Wire up unused metrics**
|
||||
- `record_storage_latency()` — instrument storage layer calls
|
||||
- `record_uptime_seconds()` — add periodic heartbeat task in server main loop
|
||||
|
||||
- [x] **1.5 Wire up or remove unused config fields**
|
||||
- `EffectiveConfig::webtransport_listen` — connect to WebTransport listener
|
||||
- `EffectiveConfig::rpc_timeout_secs` — apply as per-RPC deadline
|
||||
- `EffectiveConfig::storage_timeout_secs` — apply as DB query timeout
|
||||
|
||||
- [x] **1.6 Fix remaining clippy warnings**
|
||||
- Reduce function arity (2 functions with 8-9 args → use config/param structs)
|
||||
- Remove useless `format!()` call
|
||||
- Collapse nested conditionals
|
||||
- Rename `from_str` method to avoid `FromStr` trait confusion
|
||||
|
||||
---
|
||||
|
||||
## Sprint 2 — OpenMLS 0.5 → 0.8 Migration
|
||||
|
||||
**CRITICAL**: OpenMLS 0.7.2 and later include security patches that 0.5 lacks, so staying on 0.5 is a security risk; migrate to 0.8.
|
||||
|
||||
- [x] **2.1 Migrate StorageProvider trait**
|
||||
- Old `OpenMlsKeyStore` → new `StorageProvider` (most invasive change)
|
||||
- Rework `DiskKeyStore` integration (must keep bincode serialization)
|
||||
- Update all `group.rs` calls that interact with the key store
|
||||
|
||||
- [x] **2.2 Update MLS API calls**
|
||||
- `self_update()` / `propose_self_update()` — add `LeafNodeParameters` arg
|
||||
- `join_by_external_commit()` — add optional LeafNode params
|
||||
- `Sender::NewMember` → split into `NewMemberProposal` / `NewMemberCommit`
|
||||
|
||||
- [x] **2.3 Handle GREASE support**
|
||||
- New variants in `ProposalType`, `ExtensionType`, `CredentialType`
|
||||
- Update match arms to handle unknown/GREASE values
|
||||
|
||||
- [x] **2.4 Update AAD handling**
|
||||
- AAD no longer persisted — set before every API call generating `MlsMessageOut`
|
||||
|
||||
- [x] **2.5 Verify FIPS 203 alignment**
|
||||
- Confirm ML-KEM-768 parameters match final FIPS 203 (not draft)
|
||||
- Review hybrid KEM against RFC 9794 combination methods
|
||||
|
||||
- [x] **2.6 Full test suite pass**
|
||||
- All 301 tests must pass with OpenMLS 0.8
|
||||
- Run crypto benchmarks to check for performance regressions
|
||||
|
||||
---
|
||||
|
||||
## Sprint 3 — Client Resilience
|
||||
|
||||
Currently, network glitches cause the client to hang. This blocks v2 launch.
|
||||
|
||||
- [x] **3.1 Auto-reconnect with backoff**
|
||||
- Integrate existing `retry.rs` into `RpcClient::call()` path
|
||||
- Exponential backoff with jitter (already implemented, not wired)
|
||||
- Configurable max retries and backoff ceiling
|
||||
|
||||
- [x] **3.2 Push subscription recovery**
|
||||
- Detect broken push stream and re-subscribe automatically
|
||||
- Buffer missed events during reconnection window
|
||||
|
||||
- [x] **3.3 Heartbeat / keepalive**
|
||||
- Periodic QUIC ping in TUI and REPL modes
|
||||
- Detect dead connections before user notices
|
||||
|
||||
- [x] **3.4 SDK disconnect lifecycle**
|
||||
- Add `QpcClient::disconnect()` for clean shutdown
|
||||
- Proper state machine: Connected → Reconnecting → Disconnected
|
||||
|
||||
- [x] **3.5 Connection status UI**
|
||||
- TUI: show connection state in status bar (Connected / Reconnecting / Offline)
|
||||
- REPL: print status change notifications
|
||||
|
||||
---
|
||||
|
||||
## Sprint 4 — Server Hardening
|
||||
|
||||
Fix graceful shutdown and wire up timeouts for production readiness.
|
||||
|
||||
- [x] **4.1 In-flight RPC tracking**
|
||||
- Replace fixed 30s shutdown delay with actual in-flight RPC counter
|
||||
- Drain when counter reaches zero (with configurable max wait)
|
||||
|
||||
- [x] **4.2 Apply request-level timeouts**
|
||||
- Wire `rpc_timeout_secs` config into per-RPC deadline enforcement
|
||||
- Wire `storage_timeout_secs` into DB query timeouts
|
||||
- Cancel long-running operations cleanly
|
||||
|
||||
- [x] **4.3 Plugin shutdown hooks**
|
||||
- Add `on_shutdown` hook to `HookVTable`
|
||||
- Call plugin shutdown before server exits
|
||||
|
||||
- [x] **4.4 Federation drain during shutdown**
|
||||
- Stop accepting federation relay requests on SIGTERM
|
||||
- Wait for in-flight federation RPCs before exit
|
||||
|
||||
- [x] **4.5 Connection draining improvements**
|
||||
- Send QUIC CONNECTION_CLOSE with application reason
|
||||
- WebTransport: send close frame before dropping sessions
|
||||
|
||||
---
|
||||
|
||||
## Sprint 5 — Test Coverage & CI Hardening
|
||||
|
||||
Address the major test coverage gaps identified in the audit.
|
||||
|
||||
- [x] **5.1 RPC framing unit tests**
|
||||
- `crates/quicprochat-rpc/src/framing.rs` — encode/decode edge cases
|
||||
- Malformed frames, truncated input, max-size payloads
|
||||
- Fuzzing harness for frame parser
|
||||
|
||||
- [x] **5.2 SDK state machine tests**
|
||||
- `crates/quicprochat-sdk/src/conversation.rs` — conversation lifecycle
|
||||
- `crates/quicprochat-sdk/src/groups.rs` — group join/leave/update
|
||||
- `crates/quicprochat-sdk/src/messaging.rs` — send/receive/queue
|
||||
|
||||
- [x] **5.3 Server domain service tests**
|
||||
- `crates/quicprochat-server/src/domain/` — all service modules
|
||||
- Test business logic without DB (mock storage trait)
|
||||
|
||||
- [x] **5.4 Integration tests**
|
||||
- Reconnection scenario (kill server, restart, verify client recovers)
|
||||
- Graceful shutdown (send SIGTERM during active RPCs, verify drain)
|
||||
- Multi-node federation relay (if federation wired in Sprint 6)
|
||||
|
||||
- [x] **5.5 CI hardening**
|
||||
- Add MSRV check (Rust 1.75 or declared minimum)
|
||||
- Add cross-platform CI (macOS, Windows — at least build check)
|
||||
- Add cargo-fuzz for crypto and parsing code
|
||||
- Add MIRI for unsafe code in plugin-api/FFI
|
||||
|
||||
---
|
||||
|
||||
## Sprint 6 — Federation & P2P Integration
|
||||
|
||||
Wire up the scaffolded federation and P2P code into working features.
|
||||
|
||||
- [x] **6.1 Federation message routing**
|
||||
- Wire `federation::routing::resolve_destination()` into `handle_enqueue`
|
||||
- Route messages to remote home servers via `FederationClient::relay_enqueue()`
|
||||
- Resolve protocol mismatch (Cap'n Proto federation vs Protobuf main RPC)
|
||||
|
||||
- [x] **6.2 Federation identity resolution**
|
||||
- Cross-server user lookup (`user@remote-server`)
|
||||
- KeyPackage fetching across federated nodes
|
||||
|
||||
- [x] **6.3 P2P client integration**
|
||||
- Wire iroh P2P into client as transport option
|
||||
- Fallback logic: prefer P2P direct → fall back to server relay
|
||||
- mDNS discovery in client (already scaffolded, needs activation)
|
||||
|
||||
- [x] **6.4 Multipath QUIC evaluation**
|
||||
- Research draft-ietf-quic-multipath (likely RFC in 2026)
|
||||
- Prototype: use multiple paths for mesh relay resilience
|
||||
- Decision: adopt or defer based on quinn support
|
||||
|
||||
- [x] **6.5 Federation integration tests**
|
||||
- Two-server test: register on A, send to user on B, verify delivery
|
||||
- mTLS mutual auth verification
|
||||
- Partition tolerance (one node goes down, messages queue)
|
||||
|
||||
---
|
||||
|
||||
## Sprint 7 — Documentation, Polish & Future Prep
|
||||
|
||||
Final polish and forward-looking improvements.
|
||||
|
||||
- [x] **7.1 Crate-level documentation**
|
||||
- Add module-level docs to `quicprochat-plugin-api`, `quicprochat-rpc`, `quicprochat-sdk`
|
||||
- Doc comments for all public APIs in domain services
|
||||
|
||||
- [x] **7.2 Refactor high-arity functions** (none remaining — the 8–9-arg functions were already consolidated into param structs in Sprint 1.6)
|
||||
- Consolidate 8-9 parameter functions into config/param structs
|
||||
- Improve builder patterns where appropriate
|
||||
|
||||
- [ ] **7.3 Review RFC 9750 (MLS Architecture)** (deferred — requires manual review)
|
||||
- Verify quicprochat's AS/DS split aligns with RFC 9750 recommendations
|
||||
- Document any deviations and rationale
|
||||
|
||||
- [ ] **7.4 Desktop client evaluation** (deferred — requires Tauri prototype)
|
||||
- Prototype Tauri v2 desktop shell wrapping the TUI or a web UI
|
||||
- Evaluate effort to ship cross-platform desktop client
|
||||
|
||||
- [x] **7.5 Security pre-audit prep**
|
||||
- Document all crypto boundaries and trust assumptions
|
||||
- Create threat model document
|
||||
- Prepare scope document for external auditors (Roadmap item 4.1)
|
||||
- Budget: NCC Group / Trail of Bits / Cure53 ($50K–$150K, 4-6 weeks)
|
||||
|
||||
- [ ] **7.6 Repository rename** (requires GitHub admin action)
|
||||
- Rename GitHub repository from `quicproquo` → `quicprochat`
|
||||
- Update all GitHub URLs, CI badge links, go.mod import paths
|
||||
- Set up redirect from old repo name
|
||||
|
||||
---
|
||||
|
||||
## Sprint Summary
|
||||
|
||||
| Sprint | Focus | Risk | Key Deliverable |
|
||||
|--------|-------|------|----------------|
|
||||
| **1** | Bug fixes & code quality | Low | Zero clippy warnings, metrics wired |
|
||||
| **2** | OpenMLS 0.5 → 0.8 | High | Security patches applied, FIPS 203 verified |
|
||||
| **3** | Client resilience | Medium | Auto-reconnect, heartbeat, status UI |
|
||||
| **4** | Server hardening | Medium | Real graceful shutdown, timeouts enforced |
|
||||
| **5** | Test coverage & CI | Low | Unit tests for SDK/RPC/domain, fuzzing |
|
||||
| **6** | Federation & P2P | High | Working cross-server messaging, P2P fallback |
|
||||
| **7** | Docs, polish & audit prep | Low | Audit-ready, desktop prototype |
|
||||
26
assets/left.ansi
Normal file
26
assets/left.ansi
Normal file
@@ -0,0 +1,26 @@
|
||||
registering 'alice'...
|
||||
user 'alice' registered
|
||||
logging in as 'alice'...
|
||||
logged in, session cached
|
||||
[2midentity: c1e1f6df17eeb6..2816[0m
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[2m[[0m[1mno conversation[0m[2m][0m > /dm bob
|
||||
resolving bob...
|
||||
creating channel...
|
||||
fetching peer's key package...
|
||||
DM with @bob created. Start typing!
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mHey Bob, testing our E2E encrypted channel![0m
|
||||
[36m[1m[bob][0m Works great -- the server never sees plaintext?
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mRight. MLS forward secrecy + post-quantum KEM.[0m
|
||||
[36m[1m[bob][0m Impressive. How do I verify your identity?
|
||||
[2m[[0m[1m@bob[0m[2m][0m > [32mRun /verify alice -- compare the safety number out-of-band.[0m
|
||||
[2m[[0m[1m@bob[0m[2m][0m > /group-info
|
||||
[2m Conversation: @bob[0m
|
||||
[2m Type: DM[0m
|
||||
[2m Members: 2[0m
|
||||
[2m alice (you), bob[0m
|
||||
[2m MLS epoch: 3[0m
|
||||
[2m[[0m[1m@bob[0m[2m][0m >
|
||||
BIN
assets/logo-ccc.png
Normal file
BIN
assets/logo-ccc.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.3 MiB |
BIN
assets/logo.png
Normal file
BIN
assets/logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.2 MiB |
24
assets/right.ansi
Normal file
24
assets/right.ansi
Normal file
@@ -0,0 +1,24 @@
|
||||
registering 'bob'...
|
||||
user 'bob' registered
|
||||
logging in as 'bob'...
|
||||
logged in, session cached
|
||||
[2midentity: a8c2f19f1b0806..c73f[0m
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[36m[1m[system][0m new conversation: @alice
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [36m[1m[alice][0m Hey Bob, testing our E2E encrypted channel!
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [32mWorks great -- the server never sees plaintext?[0m
|
||||
[36m[1m[alice][0m Right. MLS forward secrecy + post-quantum KEM.
|
||||
[2m[[0m[1m@alice[0m[2m][0m > [32mImpressive. How do I verify your identity?[0m
|
||||
[36m[1m[alice][0m Run /verify alice -- compare the safety number out-of-band.
|
||||
[2m[[0m[1m@alice[0m[2m][0m > /verify alice
|
||||
[2m Safety number for @alice:[0m
|
||||
[2m 096482 731945 208376[0m
|
||||
[2m 571039 284617 950283[0m
|
||||
[2m[[0m[1m@alice[0m[2m][0m > /whoami
|
||||
[2m identity: a8c2f19f1b0806..c73f[0m
|
||||
[2m hybrid key: yes[0m
|
||||
[2m conversations: 1[0m
|
||||
[2m[[0m[1m@alice[0m[2m][0m >
|
||||
BIN
assets/screenshot.png
Normal file
BIN
assets/screenshot.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 67 KiB |
59
assets/screenshot.txt
Normal file
59
assets/screenshot.txt
Normal file
@@ -0,0 +1,59 @@
|
||||
=== Alice (left) ===
|
||||
❯ ./target/debug/qpq repl --username alice --password de
|
||||
opass1 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXG
|
||||
OrPY/server-cert.der --state /tmp/tmp.adbXGLOrPY/alice.b
|
||||
n
|
||||
registering 'alice'...
|
||||
user 'alice' registered
|
||||
logging in as 'alice'...
|
||||
logged in, session cached
|
||||
identity: c1e1f6df17eeb6f539d7fbea94129fa32fc02ca40e5c
|
||||
7a7c95cfc94161d5f628
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[no conversation] > /dm bob
|
||||
resolving bob...
|
||||
creating channel...
|
||||
fetching peer's key package...
|
||||
DM with @bob created. Start typing!
|
||||
[@bob] > ^LHey Bob, testing our E2E encrypted channel!
|
||||
[@bob] > Right. MLS forward secrecy + post-quantum KEM.
|
||||
[@bob] > /group-info
|
||||
Conversation: @bob
|
||||
Type: DM
|
||||
Members: 2
|
||||
alice (you), bob
|
||||
MLS epoch: 1
|
||||
[@bob] >
|
||||
|
||||
=== Bob (right) ===
|
||||
❯ ./target/debug/qpq repl --username bob --password demop
|
||||
ass2 --server 127.0.0.1:17123 --ca-cert /tmp/tmp.adbXGLOr
|
||||
PY/server-cert.der --state /tmp/tmp.adbXGLOrPY/bob.bin
|
||||
registering 'bob'...
|
||||
user 'bob' registered
|
||||
logging in as 'bob'...
|
||||
logged in, session cached
|
||||
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
|
||||
af5ff9c89c69750c73f
|
||||
KeyPackage uploaded
|
||||
hybrid key uploaded
|
||||
type /help for commands, Ctrl+D to exit
|
||||
|
||||
[no conversation] > /list
|
||||
no conversations yet. Try /dm <username> or /create-gro
|
||||
up <name>
|
||||
[no conversation] > /switch @alice
|
||||
error: conversation not found: @alice
|
||||
[no conversation] > ^LWorks great -- the server never see
|
||||
s plaintext?
|
||||
error: no active conversation; use /dm or /create-group
|
||||
first
|
||||
[no conversation] > /whoami
|
||||
identity: a8c2f19f1b080616b7206e02244fd14c2ab8821367392
|
||||
af5ff9c89c69750c73f
|
||||
hybrid key: yes
|
||||
conversations: 0
|
||||
[no conversation] >
|
||||
45
crates/meshservice/Cargo.toml
Normal file
45
crates/meshservice/Cargo.toml
Normal file
@@ -0,0 +1,45 @@
|
||||
[package]
|
||||
name = "meshservice"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["Chris <c@xorwell.de>"]
|
||||
description = "Generic decentralized service layer for mesh networks"
|
||||
license = "MIT"
|
||||
repository = "https://git.xorwell.de/c/meshservice"
|
||||
keywords = ["mesh", "p2p", "decentralized", "services"]
|
||||
categories = ["network-programming"]
|
||||
|
||||
[dependencies]
|
||||
# Serialization
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
ciborium = "0.2"
|
||||
|
||||
# Crypto
|
||||
ed25519-dalek = { version = "2.1", features = ["serde"] }
|
||||
sha2 = "0.10"
|
||||
rand = "0.8"
|
||||
x25519-dalek = { version = "2.0", features = ["static_secrets"] }
|
||||
chacha20poly1305 = "0.10"
|
||||
hkdf = "0.12"
|
||||
|
||||
# Async
|
||||
tokio = { version = "1.36", features = ["sync", "time"] }
|
||||
|
||||
# Error handling
|
||||
anyhow = "1.0"
|
||||
thiserror = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1.36", features = ["rt-multi-thread", "macros"] }
|
||||
|
||||
[[example]]
|
||||
name = "fapp_service"
|
||||
path = "examples/fapp_service.rs"
|
||||
|
||||
[[example]]
|
||||
name = "housing_service"
|
||||
path = "examples/housing_service.rs"
|
||||
|
||||
[[example]]
|
||||
name = "multi_service"
|
||||
path = "examples/multi_service.rs"
|
||||
233
crates/meshservice/README.md
Normal file
233
crates/meshservice/README.md
Normal file
@@ -0,0 +1,233 @@
|
||||
# MeshService
|
||||
|
||||
A generic decentralized service layer for mesh networks. Build any peer-to-peer service following the **Announce → Query → Response → Reserve** pattern.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Application Services │
|
||||
│ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │
|
||||
│ │ FAPP │ │ Housing │ │ Repair │ │ Custom │ ... │
|
||||
│ └────┬────┘ └────┬────┘ └────┬────┘ └────┬────┘ │
|
||||
│ └────────────┴────────────┴────────────┘ │
|
||||
│ Service Layer (this crate) │
|
||||
│ ServiceMessage, ServiceRouter, Verification │
|
||||
│ ─────────────────────────────────────────────────────── │
|
||||
│ Mesh Layer │
|
||||
│ (provided by quicprochat-p2p or other mesh impl) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## QuicProChat / quicprochat-p2p
|
||||
|
||||
This crate lives in the **product.quicproquo** workspace. Integration with the mesh stack:
|
||||
|
||||
- **Ed25519 seed**: `MeshIdentity::seed_bytes()` matches `ServiceIdentity::from_secret(&seed)` (same `ed25519-dalek` derivation as `quicprochat_core::IdentityKeypair`); truncated mesh address is SHA-256(pubkey)[0..16] in both layers.
|
||||
- **Example transport**: integration test `crates/quicprochat-p2p/tests/meshservice_tcp_transport.rs` sends `wire::encode(ServiceMessage)` over `TcpTransport` (length-prefixed framing). For iroh/production, embed the same bytes in `MeshEnvelope` on ALPN `quicprochat/mesh/1`.
|
||||
|
||||
Run the test from the repo root:
|
||||
|
||||
```bash
|
||||
cargo test -p quicprochat-p2p --test meshservice_tcp_transport
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- **Generic Protocol**: Any service can be built on top (therapy appointments, housing, repairs, tutoring...)
|
||||
- **Ed25519 Signatures**: All messages cryptographically signed
|
||||
- **Verification Framework**: Multi-level trust (self-asserted, peer-endorsed, registry-verified)
|
||||
- **Efficient Wire Format**: Fixed 64-byte header + CBOR payload
|
||||
- **Pluggable Handlers**: Register custom services with the router
|
||||
- **Built-in Services**: FAPP (psychotherapy) and Housing included
|
||||
|
||||
## Quick Start
|
||||
|
||||
```rust
|
||||
use meshservice::{
|
||||
capabilities,
|
||||
identity::ServiceIdentity,
|
||||
router::ServiceRouter,
|
||||
services::fapp::{FappService, SlotAnnounce, SlotQuery, Specialism, Modality},
|
||||
};
|
||||
|
||||
// Create identity
|
||||
let identity = ServiceIdentity::generate();
|
||||
|
||||
// Create router with FAPP service
|
||||
let mut router = ServiceRouter::new(capabilities::RELAY);
|
||||
router.register(Box::new(FappService::relay()));
|
||||
|
||||
// Therapist announces slots
|
||||
let announce = SlotAnnounce::new(
|
||||
&[Specialism::CognitiveBehavioral],
|
||||
Modality::VideoCall,
|
||||
"104", // Postal prefix
|
||||
)
|
||||
.with_slots(3)
|
||||
.with_profile("https://therapists.de/dr-mueller");
|
||||
|
||||
let msg = meshservice::services::fapp::create_announce(&identity, &announce, 1)?;
|
||||
router.handle(msg, Some(identity.public_key()))?;
|
||||
|
||||
// Patient queries
|
||||
let query = SlotQuery::new(Specialism::CognitiveBehavioral, "104");
|
||||
let query_msg = meshservice::services::fapp::create_query(&identity, &query)?;
|
||||
let matches = router.query(&query_msg);
|
||||
|
||||
println!("Found {} therapists", matches.len());
|
||||
```
|
||||
|
||||
## Built-in Services
|
||||
|
||||
### FAPP (Free Appointment Propagation Protocol)
|
||||
|
||||
Decentralized psychotherapy appointment discovery:
|
||||
|
||||
| Service ID | Purpose |
|
||||
|------------|---------|
|
||||
| `0x0001` | Therapist slot announcements, patient queries |
|
||||
|
||||
```rust
|
||||
use meshservice::services::fapp::{SlotAnnounce, Specialism, Modality};
|
||||
|
||||
let announce = SlotAnnounce::new(
|
||||
&[Specialism::TraumaFocused, Specialism::CognitiveBehavioral],
|
||||
Modality::InPerson,
|
||||
"104",
|
||||
)
|
||||
.with_slots(2)
|
||||
.with_profile("https://kbv.de/123");
|
||||
```
|
||||
|
||||
### Housing
|
||||
|
||||
Decentralized room/apartment sharing:
|
||||
|
||||
| Service ID | Purpose |
|
||||
|------------|---------|
|
||||
| `0x0002` | Listing announcements, seeker queries |
|
||||
|
||||
```rust
|
||||
use meshservice::services::housing::{ListingAnnounce, ListingType, amenities};
|
||||
|
||||
let listing = ListingAnnounce::new(ListingType::Apartment, 65, 850, "104")
|
||||
.with_rooms(2)
|
||||
.with_amenities(amenities::FURNISHED | amenities::BALCONY);
|
||||
```
|
||||
|
||||
## Verification Framework
|
||||
|
||||
Three trust levels:
|
||||
|
||||
| Level | Description | Example |
|
||||
|-------|-------------|---------|
|
||||
| 0 - None | Bare announcement | Anonymous |
|
||||
| 1 - Self-Asserted | Profile URL provided | Website link |
|
||||
| 2 - Peer-Endorsed | Trusted peers vouch | Community rating |
|
||||
| 3 - Registry-Verified | Official registry | KBV license |
|
||||
|
||||
```rust
|
||||
use meshservice::verification::{Verification, TrustedVerifiers, VerificationLevel};
|
||||
|
||||
// Add trusted verifier
|
||||
let mut verifiers = TrustedVerifiers::new();
|
||||
verifiers.add(registry_public_key, "KBV Registry", VerificationLevel::RegistryVerified);
|
||||
router.set_trusted_verifiers(verifiers);
|
||||
|
||||
// Require verification for announces
|
||||
router.set_min_verification_level(2);
|
||||
```
|
||||
|
||||
## Wire Protocol
|
||||
|
||||
64-byte fixed header for efficient parsing:
|
||||
|
||||
```
|
||||
0-3 service_id (u32 LE)
|
||||
4 message_type (u8)
|
||||
5 version (u8)
|
||||
6-7 flags (reserved)
|
||||
8-23 message_id (16 bytes)
|
||||
24-39 sender_address (16 bytes)
|
||||
40-47 sequence (u64 LE)
|
||||
48-49 ttl_hours (u16 LE)
|
||||
50-57 timestamp (u64 LE)
|
||||
58 hop_count (u8)
|
||||
59 max_hops (u8)
|
||||
60-63 payload_len (u32 LE)
|
||||
---
|
||||
64+ signature (64 bytes)
|
||||
128+ payload (CBOR)
|
||||
... verifications (optional CBOR)
|
||||
```
|
||||
|
||||
## Building Custom Services
|
||||
|
||||
Implement `ServiceHandler`:
|
||||
|
||||
```rust
|
||||
use meshservice::router::{ServiceHandler, ServiceAction, HandlerContext};
|
||||
|
||||
struct MyService;
|
||||
|
||||
impl ServiceHandler for MyService {
|
||||
fn service_id(&self) -> u32 { 0x8001 } // Custom range
|
||||
fn name(&self) -> &str { "MyService" }
|
||||
|
||||
fn handle(&self, message: &ServiceMessage, ctx: &HandlerContext)
|
||||
-> Result<ServiceAction, ServiceError>
|
||||
{
|
||||
match message.message_type {
|
||||
MessageType::Announce => Ok(ServiceAction::StoreAndForward),
|
||||
MessageType::Query => {
|
||||
// Find matches, respond...
|
||||
Ok(ServiceAction::Handled)
|
||||
}
|
||||
_ => Ok(ServiceAction::Drop)
|
||||
}
|
||||
}
|
||||
|
||||
fn matches_query(&self, announce: &StoredMessage, query: &ServiceMessage) -> bool {
|
||||
// Custom matching logic
|
||||
true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Service IDs
|
||||
|
||||
| ID | Service |
|
||||
|----|---------|
|
||||
| `0x0001` | FAPP (Psychotherapy) |
|
||||
| `0x0002` | Housing |
|
||||
| `0x0003` | Repair |
|
||||
| `0x0004` | Tutoring |
|
||||
| `0x0005` | Medical |
|
||||
| `0x0006` | Legal |
|
||||
| `0x0007` | Volunteer |
|
||||
| `0x0008` | Events |
|
||||
| `0x8000+` | Custom/User-defined |
|
||||
|
||||
## Examples
|
||||
|
||||
```bash
|
||||
# FAPP demo (therapist + patient)
|
||||
cargo run --example fapp_service
|
||||
|
||||
# Housing demo (landlord + seeker)
|
||||
cargo run --example housing_service
|
||||
|
||||
# Multi-service mesh
|
||||
cargo run --example multi_service
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
cargo test
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
86
crates/meshservice/examples/fapp_service.rs
Normal file
86
crates/meshservice/examples/fapp_service.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
//! FAPP Service Demo
|
||||
//!
|
||||
//! Demonstrates therapist announcement and patient query flow.
|
||||
|
||||
use meshservice::{
|
||||
capabilities,
|
||||
identity::ServiceIdentity,
|
||||
router::ServiceRouter,
|
||||
services::fapp::{create_announce, create_query, FappService, Modality, SlotAnnounce, SlotQuery, Specialism},
|
||||
};
|
||||
|
||||
fn main() {
|
||||
println!("=== FAPP Service Demo ===\n");
|
||||
|
||||
// Create identities
|
||||
let therapist = ServiceIdentity::generate();
|
||||
let patient = ServiceIdentity::generate();
|
||||
let relay = ServiceIdentity::generate();
|
||||
|
||||
println!("Therapist address: {:?}", hex(&therapist.address()));
|
||||
println!("Patient address: {:?}", hex(&patient.address()));
|
||||
println!("Relay address: {:?}\n", hex(&relay.address()));
|
||||
|
||||
// Create router with FAPP service
|
||||
let mut router = ServiceRouter::new(capabilities::RELAY);
|
||||
router.register(Box::new(FappService::relay()));
|
||||
|
||||
// Therapist creates announcement
|
||||
let announce = SlotAnnounce::new(
|
||||
&[Specialism::CognitiveBehavioral, Specialism::TraumaFocused],
|
||||
Modality::VideoCall,
|
||||
"104", // Berlin Kreuzberg
|
||||
)
|
||||
.with_slots(3)
|
||||
.with_profile("https://therapists.de/dr-schmidt")
|
||||
.with_name("Dr. Anna Schmidt");
|
||||
|
||||
println!("Therapist announces:");
|
||||
println!(" Specialisms: CBT, Trauma");
|
||||
println!(" Modality: Video");
|
||||
println!(" Location: 104xx");
|
||||
println!(" Slots: 3");
|
||||
println!(" Profile: https://therapists.de/dr-schmidt\n");
|
||||
|
||||
let msg = create_announce(&therapist, &announce, 1).unwrap();
|
||||
let action = router.handle(msg.clone(), Some(therapist.public_key())).unwrap();
|
||||
println!("Router action: {:?}", action);
|
||||
println!("Stored messages: {}\n", router.store().len());
|
||||
|
||||
// Patient creates query
|
||||
let query = SlotQuery::new(Specialism::CognitiveBehavioral, "104")
|
||||
.with_modality(Modality::VideoCall)
|
||||
.with_max_wait(30);
|
||||
|
||||
println!("Patient queries:");
|
||||
println!(" Looking for: CBT");
|
||||
println!(" Location: 104xx");
|
||||
println!(" Modality: Video");
|
||||
println!(" Max wait: 30 days\n");
|
||||
|
||||
let query_msg = create_query(&patient, &query).unwrap();
|
||||
|
||||
// Find matches
|
||||
let matches = router.query(&query_msg);
|
||||
println!("Found {} matching therapist(s):", matches.len());
|
||||
|
||||
for (i, m) in matches.iter().enumerate() {
|
||||
if let Ok(data) = meshservice::services::fapp::SlotAnnounce::from_bytes(&m.message.payload) {
|
||||
println!(" {}. {} in {}xx ({} slots)",
|
||||
i + 1,
|
||||
data.display_name.as_deref().unwrap_or("Unknown"),
|
||||
data.postal_prefix,
|
||||
data.available_slots
|
||||
);
|
||||
if let Some(profile) = &data.profile_url {
|
||||
println!(" Verify: {}", profile);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
println!("\n=== Demo Complete ===");
|
||||
}
|
||||
|
||||
/// Lowercase hex encoding of a byte slice (two digits per byte).
fn hex(bytes: &[u8]) -> String {
    use std::fmt::Write;

    // Preallocate: exactly two output characters per input byte.
    let mut encoded = String::with_capacity(bytes.len() * 2);
    for byte in bytes {
        // Writing into a String is infallible; ignore the Ok result.
        let _ = write!(encoded, "{byte:02x}");
    }
    encoded
}
|
||||
97
crates/meshservice/examples/housing_service.rs
Normal file
97
crates/meshservice/examples/housing_service.rs
Normal file
@@ -0,0 +1,97 @@
|
||||
//! Housing Service Demo
|
||||
//!
|
||||
//! Demonstrates landlord listing and seeker query flow.
|
||||
|
||||
use meshservice::{
|
||||
capabilities,
|
||||
identity::ServiceIdentity,
|
||||
router::ServiceRouter,
|
||||
services::housing::{
|
||||
amenities, create_announce, create_query, HousingService, ListingAnnounce, ListingQuery,
|
||||
ListingType,
|
||||
},
|
||||
};
|
||||
|
||||
/// Housing demo entry point: two landlords publish listings, one seeker
/// runs two queries against the router, and the matches are printed.
fn main() {
    println!("=== Housing Service Demo ===\n");

    // One signing identity per participant.
    let landlord1 = ServiceIdentity::generate();
    let landlord2 = ServiceIdentity::generate();
    let seeker = ServiceIdentity::generate();

    // Router configured as a relay with the Housing handler registered.
    let mut router = ServiceRouter::new(capabilities::RELAY);
    router.register(Box::new(HousingService::relay()));

    // Landlord 1: Kreuzberg apartment, 65 sqm, postal prefix 104xx.
    // NOTE(review): the `950` argument is presumably the rent — confirm the
    // unit against ListingAnnounce::new; rent_euros() below implies a
    // conversion to EUR/month.
    let listing1 = ListingAnnounce::new(ListingType::Apartment, 65, 950, "104")
        .with_rooms(2)
        .with_amenities(amenities::FURNISHED | amenities::BALCONY | amenities::INTERNET)
        .with_title("Sunny 2-room in Kreuzberg");

    println!("Landlord 1 announces:");
    println!(" {} sqm {} in {}xx", listing1.size_sqm, "Apartment", listing1.postal_prefix);
    println!(" Rent: {} EUR/month", listing1.rent_euros());
    println!(" Rooms: {}", listing1.rooms);
    println!(" Amenities: Furnished, Balcony, Internet\n");

    // Sign the listing (sequence number 1) and hand it to the router.
    let msg1 = create_announce(&landlord1, &listing1, 1).unwrap();
    router.handle(msg1, Some(landlord1.public_key())).unwrap();

    // Landlord 2: Neukölln shared-flat room, 18 sqm, postal prefix 120xx.
    let listing2 = ListingAnnounce::new(ListingType::Room, 18, 450, "120")
        .with_rooms(1)
        .with_amenities(amenities::WASHING_MACHINE | amenities::INTERNET)
        .with_title("Room in friendly WG");

    println!("Landlord 2 announces:");
    println!(" {} sqm {} in {}xx", listing2.size_sqm, "Room", listing2.postal_prefix);
    println!(" Rent: {} EUR/month", listing2.rent_euros());
    println!(" Amenities: Washing machine, Internet\n");

    let msg2 = create_announce(&landlord2, &listing2, 1).unwrap();
    router.handle(msg2, Some(landlord2.public_key())).unwrap();

    println!("Total listings in store: {}\n", router.store().len());

    // Seeker query 1: apartments only, anywhere in 10xxx, at least 40 sqm,
    // rent capped at 800.
    println!("--- Seeker Query 1: Affordable apartment ---");
    let query1 = ListingQuery::new("10", 800) // Any 10xxx area, max 800 EUR
        .with_type(ListingType::Apartment)
        .with_min_size(40);

    println!(" Area: 10xxx");
    println!(" Type: Apartment");
    println!(" Max rent: 800 EUR");
    println!(" Min size: 40 sqm\n");

    let query_msg1 = create_query(&seeker, &query1).unwrap();
    let matches1 = router.query(&query_msg1);
    println!("Found {} matches:", matches1.len());
    for m in &matches1 {
        // Decode the stored payload back into a ListingAnnounce for display;
        // undecodable payloads are silently skipped.
        if let Ok(l) = ListingAnnounce::from_bytes(&m.message.payload) {
            println!(" - {} ({}xx, {} EUR)", l.title.as_deref().unwrap_or("No title"), l.postal_prefix, l.rent_euros());
        }
    }

    // Seeker query 2: the broadest filter — any listing in the whole 1xxxx
    // region under 500 (no type or size constraint).
    println!("\n--- Seeker Query 2: Any room under 500 EUR ---");
    let query2 = ListingQuery::new("1", 500); // Any 1xxxx area

    let query_msg2 = create_query(&seeker, &query2).unwrap();
    let matches2 = router.query(&query_msg2);
    println!("Found {} matches:", matches2.len());
    for m in &matches2 {
        if let Ok(l) = ListingAnnounce::from_bytes(&m.message.payload) {
            println!(" - {} ({}xx, {} sqm, {} EUR)",
                l.title.as_deref().unwrap_or("No title"),
                l.postal_prefix,
                l.size_sqm,
                l.rent_euros()
            );
        }
    }

    println!("\n=== Demo Complete ===");
}
|
||||
89
crates/meshservice/examples/multi_service.rs
Normal file
89
crates/meshservice/examples/multi_service.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
//! Multi-Service Demo
|
||||
//!
|
||||
//! Shows how multiple services can run on the same mesh router.
|
||||
|
||||
use meshservice::{
|
||||
capabilities,
|
||||
identity::ServiceIdentity,
|
||||
router::ServiceRouter,
|
||||
service_ids,
|
||||
services::{
|
||||
fapp::{create_announce as fapp_announce, FappService, Modality, SlotAnnounce, Specialism},
|
||||
housing::{
|
||||
amenities, create_announce as housing_announce, HousingService, ListingAnnounce,
|
||||
ListingType,
|
||||
},
|
||||
},
|
||||
verification::{TrustedVerifiers, Verification, VerificationLevel},
|
||||
};
|
||||
|
||||
fn main() {
|
||||
println!("=== Multi-Service Mesh Demo ===\n");
|
||||
|
||||
// Create a router that handles both FAPP and Housing
|
||||
let mut router = ServiceRouter::new(capabilities::RELAY | capabilities::CONSUMER);
|
||||
router.register(Box::new(FappService::relay()));
|
||||
router.register(Box::new(HousingService::relay()));
|
||||
|
||||
println!("Registered services:");
|
||||
for (id, name) in router.services() {
|
||||
println!(" 0x{:04x} - {}", id, name);
|
||||
}
|
||||
println!();
|
||||
|
||||
// Create identities
|
||||
let therapist = ServiceIdentity::generate();
|
||||
let landlord = ServiceIdentity::generate();
|
||||
let registry = ServiceIdentity::generate();
|
||||
|
||||
// Setup trusted verifiers
|
||||
let mut verifiers = TrustedVerifiers::new();
|
||||
verifiers.add(
|
||||
registry.public_key(),
|
||||
"Health Registry",
|
||||
VerificationLevel::RegistryVerified,
|
||||
);
|
||||
router.set_trusted_verifiers(verifiers);
|
||||
|
||||
// Therapist announcement with verification
|
||||
println!("--- Adding FAPP announcement ---");
|
||||
let fapp_data = SlotAnnounce::new(&[Specialism::Psychoanalysis], Modality::InPerson, "104")
|
||||
.with_profile("https://kbv.de/therapists/12345");
|
||||
|
||||
let mut fapp_msg = fapp_announce(&therapist, &fapp_data, 1).unwrap();
|
||||
|
||||
// Registry verifies therapist
|
||||
let verification = Verification::registry(
|
||||
®istry,
|
||||
&therapist.address(),
|
||||
"licensed_therapist",
|
||||
"KBV-12345",
|
||||
);
|
||||
fapp_msg.add_verification(verification);
|
||||
|
||||
router.handle(fapp_msg, Some(therapist.public_key())).unwrap();
|
||||
println!("FAPP announcement stored (with registry verification)\n");
|
||||
|
||||
// Housing announcement
|
||||
println!("--- Adding Housing announcement ---");
|
||||
let housing_data = ListingAnnounce::new(ListingType::Studio, 35, 700, "104")
|
||||
.with_amenities(amenities::FURNISHED | amenities::INTERNET)
|
||||
.with_title("Cozy studio near therapist offices");
|
||||
|
||||
let housing_msg = housing_announce(&landlord, &housing_data, 1).unwrap();
|
||||
router.handle(housing_msg, Some(landlord.public_key())).unwrap();
|
||||
println!("Housing announcement stored\n");
|
||||
|
||||
// Summary
|
||||
println!("--- Store Summary ---");
|
||||
println!("FAPP messages: {}", router.store().service_count(service_ids::FAPP));
|
||||
println!("Housing messages: {}", router.store().service_count(service_ids::HOUSING));
|
||||
println!("Total messages: {}", router.store().len());
|
||||
|
||||
println!("\n=== Multi-Service Demo Complete ===");
|
||||
println!("\nThe mesh can route and store messages for multiple services");
|
||||
println!("using a single router instance. Each service has its own:");
|
||||
println!(" - Payload format");
|
||||
println!(" - Query matching logic");
|
||||
println!(" - Handler implementation");
|
||||
}
|
||||
532
crates/meshservice/src/anti_abuse.rs
Normal file
532
crates/meshservice/src/anti_abuse.rs
Normal file
@@ -0,0 +1,532 @@
|
||||
//! Anti-abuse mechanisms for preventing slot blocking and spam.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
/// Rate limiting configuration.
#[derive(Debug, Clone)]
pub struct RateLimits {
    /// Max reservations per sender per hour.
    pub max_reservations_per_hour: u8,
    /// Max pending (unconfirmed) reservations per sender.
    pub max_pending_reservations: u8,
    /// Min time between reservations (seconds).
    pub reservation_cooldown_secs: u32,
    /// Max queries per sender per minute.
    pub max_queries_per_minute: u8,
}

impl Default for RateLimits {
    /// Conservative defaults: 3 reservations/hour, at most 2 pending,
    /// a 5-minute cooldown, and 10 queries/minute.
    fn default() -> Self {
        RateLimits {
            max_reservations_per_hour: 3,
            max_pending_reservations: 2,
            reservation_cooldown_secs: 300,
            max_queries_per_minute: 10,
        }
    }
}
|
||||
|
||||
/// Tracks sender activity for rate limiting.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct RateLimiter {
|
||||
limits: RateLimits,
|
||||
/// sender_address -> activity
|
||||
activity: HashMap<[u8; 16], SenderActivity>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct SenderActivity {
|
||||
/// Timestamps of reservations in last hour.
|
||||
reservation_times: Vec<u64>,
|
||||
/// Count of pending reservations.
|
||||
pending_count: u8,
|
||||
/// Timestamp of last reservation.
|
||||
last_reservation: u64,
|
||||
/// Query timestamps in last minute.
|
||||
query_times: Vec<u64>,
|
||||
}
|
||||
|
||||
impl RateLimiter {
|
||||
/// Create with default limits.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Create with custom limits.
|
||||
pub fn with_limits(limits: RateLimits) -> Self {
|
||||
Self {
|
||||
limits,
|
||||
activity: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a reservation is allowed.
|
||||
pub fn check_reservation(&mut self, sender: &[u8; 16]) -> RateLimitResult {
|
||||
let now = now();
|
||||
let activity = self.activity.entry(*sender).or_default();
|
||||
|
||||
// Clean old entries
|
||||
activity.reservation_times.retain(|&t| now - t < 3600);
|
||||
|
||||
// Check cooldown
|
||||
if now - activity.last_reservation < u64::from(self.limits.reservation_cooldown_secs) {
|
||||
return RateLimitResult::Cooldown {
|
||||
wait_secs: self.limits.reservation_cooldown_secs - (now - activity.last_reservation) as u32,
|
||||
};
|
||||
}
|
||||
|
||||
// Check hourly limit
|
||||
if activity.reservation_times.len() >= self.limits.max_reservations_per_hour as usize {
|
||||
return RateLimitResult::HourlyLimitReached;
|
||||
}
|
||||
|
||||
// Check pending limit
|
||||
if activity.pending_count >= self.limits.max_pending_reservations {
|
||||
return RateLimitResult::TooManyPending;
|
||||
}
|
||||
|
||||
RateLimitResult::Allowed
|
||||
}
|
||||
|
||||
/// Record a reservation attempt.
|
||||
pub fn record_reservation(&mut self, sender: &[u8; 16]) {
|
||||
let now = now();
|
||||
let activity = self.activity.entry(*sender).or_default();
|
||||
activity.reservation_times.push(now);
|
||||
activity.last_reservation = now;
|
||||
activity.pending_count = activity.pending_count.saturating_add(1);
|
||||
}
|
||||
|
||||
/// Record reservation confirmed/completed (reduce pending).
|
||||
pub fn record_reservation_resolved(&mut self, sender: &[u8; 16]) {
|
||||
if let Some(activity) = self.activity.get_mut(sender) {
|
||||
activity.pending_count = activity.pending_count.saturating_sub(1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a query is allowed.
|
||||
pub fn check_query(&mut self, sender: &[u8; 16]) -> RateLimitResult {
|
||||
let now = now();
|
||||
let activity = self.activity.entry(*sender).or_default();
|
||||
|
||||
// Clean old entries
|
||||
activity.query_times.retain(|&t| now - t < 60);
|
||||
|
||||
if activity.query_times.len() >= self.limits.max_queries_per_minute as usize {
|
||||
return RateLimitResult::QueryLimitReached;
|
||||
}
|
||||
|
||||
RateLimitResult::Allowed
|
||||
}
|
||||
|
||||
/// Record a query.
|
||||
pub fn record_query(&mut self, sender: &[u8; 16]) {
|
||||
let now = now();
|
||||
let activity = self.activity.entry(*sender).or_default();
|
||||
activity.query_times.push(now);
|
||||
}
|
||||
|
||||
/// Prune old activity data.
|
||||
pub fn prune(&mut self) {
|
||||
let now = now();
|
||||
self.activity.retain(|_, a| {
|
||||
a.reservation_times.retain(|&t| now - t < 3600);
|
||||
a.query_times.retain(|&t| now - t < 60);
|
||||
!a.reservation_times.is_empty() || !a.query_times.is_empty() || a.pending_count > 0
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of rate limit check.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum RateLimitResult {
    /// Request allowed.
    Allowed,
    /// Must wait `wait_secs` before the next reservation.
    Cooldown { wait_secs: u32 },
    /// Hourly reservation limit reached.
    HourlyLimitReached,
    /// Too many pending reservations.
    TooManyPending,
    /// Query rate limit reached.
    QueryLimitReached,
}

impl RateLimitResult {
    /// True only for [`RateLimitResult::Allowed`].
    pub fn is_allowed(&self) -> bool {
        matches!(self, Self::Allowed)
    }
}
|
||||
|
||||
/// Proof-of-work for reservation requests.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProofOfWork {
|
||||
/// Nonce that produces valid hash.
|
||||
pub nonce: u64,
|
||||
/// Required difficulty (leading zero bits).
|
||||
pub difficulty: u8,
|
||||
}
|
||||
|
||||
impl ProofOfWork {
|
||||
/// Default difficulty (20 bits ≈ 1-2 seconds on modern CPU).
|
||||
pub const DEFAULT_DIFFICULTY: u8 = 20;
|
||||
|
||||
/// Generate proof-of-work for a reservation.
|
||||
pub fn generate(reservation_id: &[u8; 16], difficulty: u8) -> Self {
|
||||
let mut nonce = 0u64;
|
||||
loop {
|
||||
if Self::check_hash(reservation_id, nonce, difficulty) {
|
||||
return Self { nonce, difficulty };
|
||||
}
|
||||
nonce = nonce.wrapping_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify proof-of-work.
|
||||
pub fn verify(&self, reservation_id: &[u8; 16]) -> bool {
|
||||
Self::check_hash(reservation_id, self.nonce, self.difficulty)
|
||||
}
|
||||
|
||||
fn check_hash(reservation_id: &[u8; 16], nonce: u64, difficulty: u8) -> bool {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(reservation_id);
|
||||
hasher.update(&nonce.to_le_bytes());
|
||||
let hash = hasher.finalize();
|
||||
leading_zero_bits(&hash) >= difficulty
|
||||
}
|
||||
}
|
||||
|
||||
/// Count leading zero bits in a byte slice, saturating at `u8::MAX`.
///
/// The accumulator saturates instead of wrapping: with a plain `count += 8`
/// on a `u8`, an input of 32 or more leading zero bytes would overflow past
/// 255 (a panic in debug builds, silent wrap in release). A count of 255 is
/// already far beyond any reachable proof-of-work difficulty, so capping is
/// safe for all callers.
fn leading_zero_bits(data: &[u8]) -> u8 {
    let mut count = 0u8;
    for byte in data {
        if *byte == 0 {
            // Whole byte is zero: contributes 8 bits.
            count = count.saturating_add(8);
        } else {
            // First non-zero byte: add its own leading zeros and stop.
            count = count.saturating_add(byte.leading_zeros() as u8);
            break;
        }
    }
    count
}
|
||||
|
||||
/// Sender reputation tracking.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct SenderReputation {
|
||||
pub address: [u8; 16],
|
||||
pub reservations_made: u32,
|
||||
pub reservations_honored: u32,
|
||||
pub reservations_cancelled: u32,
|
||||
pub no_shows: u32,
|
||||
pub last_no_show: Option<u64>,
|
||||
}
|
||||
|
||||
impl SenderReputation {
|
||||
/// Create for a new sender.
|
||||
pub fn new(address: [u8; 16]) -> Self {
|
||||
Self {
|
||||
address,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculate honor rate (0.0 to 1.0).
|
||||
pub fn honor_rate(&self) -> f32 {
|
||||
if self.reservations_made == 0 {
|
||||
return 0.5; // Neutral for new users
|
||||
}
|
||||
(self.reservations_honored as f32) / (self.reservations_made as f32)
|
||||
}
|
||||
|
||||
/// Check if sender should be blocked.
|
||||
pub fn is_blocked(&self) -> bool {
|
||||
self.no_shows >= 3 || (self.reservations_made >= 5 && self.honor_rate() < 0.5)
|
||||
}
|
||||
|
||||
/// Record a completed reservation.
|
||||
pub fn record_honored(&mut self) {
|
||||
self.reservations_made += 1;
|
||||
self.reservations_honored += 1;
|
||||
}
|
||||
|
||||
/// Record a cancelled reservation (with notice).
|
||||
pub fn record_cancelled(&mut self) {
|
||||
self.reservations_made += 1;
|
||||
self.reservations_cancelled += 1;
|
||||
}
|
||||
|
||||
/// Record a no-show.
|
||||
pub fn record_no_show(&mut self) {
|
||||
self.reservations_made += 1;
|
||||
self.no_shows += 1;
|
||||
self.last_no_show = Some(now());
|
||||
}
|
||||
}
|
||||
|
||||
/// Reputation store.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ReputationStore {
|
||||
reputations: HashMap<[u8; 16], SenderReputation>,
|
||||
}
|
||||
|
||||
impl ReputationStore {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Get or create reputation for a sender.
|
||||
pub fn get_or_create(&mut self, address: [u8; 16]) -> &mut SenderReputation {
|
||||
self.reputations
|
||||
.entry(address)
|
||||
.or_insert_with(|| SenderReputation::new(address))
|
||||
}
|
||||
|
||||
/// Get reputation (read-only).
|
||||
pub fn get(&self, address: &[u8; 16]) -> Option<&SenderReputation> {
|
||||
self.reputations.get(address)
|
||||
}
|
||||
|
||||
/// Check if sender is blocked.
|
||||
pub fn is_blocked(&self, address: &[u8; 16]) -> bool {
|
||||
self.reputations
|
||||
.get(address)
|
||||
.map(|r| r.is_blocked())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Get honor rate (0.5 for unknown).
|
||||
pub fn honor_rate(&self, address: &[u8; 16]) -> f32 {
|
||||
self.reputations
|
||||
.get(address)
|
||||
.map(|r| r.honor_rate())
|
||||
.unwrap_or(0.5)
|
||||
}
|
||||
}
|
||||
|
||||
/// A signed report blocking a misbehaving sender.
#[derive(Debug, Clone)]
pub struct BlocklistEntry {
    /// Address being blocked.
    pub blocked_address: [u8; 16],
    /// Why the sender was blocked.
    pub reason: BlockReason,
    /// Address of the reporter.
    pub reported_by: [u8; 16],
    /// Reporter's signature over the entry.
    pub signature: Vec<u8>,
    /// Unix timestamp of the report.
    pub timestamp: u64,
}

/// Reason for blocking. Discriminants are fixed for wire compatibility.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BlockReason {
    NoShow = 1,
    Spam = 2,
    Harassment = 3,
    FakeIdentity = 4,
}
|
||||
|
||||
/// Therapist-defined reservation policy.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TherapistPolicy {
|
||||
/// Max pending reservations from new senders.
|
||||
pub max_pending_new: u8,
|
||||
/// Max pending from established senders.
|
||||
pub max_pending_established: u8,
|
||||
/// Require this verification level for reservations.
|
||||
pub min_verification_level: u8,
|
||||
/// Auto-reject senders with honor rate below this.
|
||||
pub min_honor_rate: f32,
|
||||
/// Require proof-of-work.
|
||||
pub require_pow: bool,
|
||||
/// PoW difficulty (if required).
|
||||
pub pow_difficulty: u8,
|
||||
}
|
||||
|
||||
impl Default for TherapistPolicy {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_pending_new: 1,
|
||||
max_pending_established: 3,
|
||||
min_verification_level: 0,
|
||||
min_honor_rate: 0.5,
|
||||
require_pow: true,
|
||||
pow_difficulty: ProofOfWork::DEFAULT_DIFFICULTY,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TherapistPolicy {
|
||||
/// Check if a reservation request meets policy.
|
||||
pub fn check(
|
||||
&self,
|
||||
sender_reputation: &SenderReputation,
|
||||
sender_verification_level: u8,
|
||||
pow: Option<&ProofOfWork>,
|
||||
reservation_id: &[u8; 16],
|
||||
) -> PolicyResult {
|
||||
// Check verification level
|
||||
if sender_verification_level < self.min_verification_level {
|
||||
return PolicyResult::InsufficientVerification;
|
||||
}
|
||||
|
||||
// Check honor rate
|
||||
if sender_reputation.honor_rate() < self.min_honor_rate {
|
||||
return PolicyResult::LowReputation;
|
||||
}
|
||||
|
||||
// Check blocked
|
||||
if sender_reputation.is_blocked() {
|
||||
return PolicyResult::Blocked;
|
||||
}
|
||||
|
||||
// Check proof-of-work
|
||||
if self.require_pow {
|
||||
match pow {
|
||||
Some(p) if p.difficulty >= self.pow_difficulty && p.verify(reservation_id) => {}
|
||||
Some(_) => return PolicyResult::InvalidPoW,
|
||||
None => return PolicyResult::MissingPoW,
|
||||
}
|
||||
}
|
||||
|
||||
PolicyResult::Allowed
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of policy check.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PolicyResult {
    Allowed,
    InsufficientVerification,
    LowReputation,
    Blocked,
    MissingPoW,
    InvalidPoW,
}

impl PolicyResult {
    /// True only for [`PolicyResult::Allowed`].
    pub fn is_allowed(&self) -> bool {
        matches!(self, Self::Allowed)
    }
}
|
||||
|
||||
/// Seconds since the Unix epoch; 0 if the system clock reads before the
/// epoch (duration_since fails).
fn now() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rate_limiter_allows_first_reservation() {
        let mut limiter = RateLimiter::new();
        assert!(limiter.check_reservation(&[1u8; 16]).is_allowed());
    }

    #[test]
    fn rate_limiter_enforces_cooldown() {
        let limits = RateLimits {
            reservation_cooldown_secs: 300,
            ..Default::default()
        };
        let mut limiter = RateLimiter::with_limits(limits);
        let who = [2u8; 16];

        limiter.record_reservation(&who);
        assert!(matches!(
            limiter.check_reservation(&who),
            RateLimitResult::Cooldown { .. }
        ));
    }

    #[test]
    fn rate_limiter_enforces_hourly_limit() {
        let limits = RateLimits {
            max_reservations_per_hour: 2,
            reservation_cooldown_secs: 0,
            ..Default::default()
        };
        let mut limiter = RateLimiter::with_limits(limits);
        let who = [3u8; 16];

        for _ in 0..2 {
            limiter.record_reservation(&who);
        }
        assert_eq!(
            limiter.check_reservation(&who),
            RateLimitResult::HourlyLimitReached
        );
    }

    #[test]
    fn pow_generation_and_verification() {
        let reservation_id = [42u8; 16];
        // Difficulty 8 keeps the brute-force fast enough for a unit test.
        let pow = ProofOfWork::generate(&reservation_id, 8);

        assert!(pow.verify(&reservation_id));
        assert!(!pow.verify(&[0u8; 16])); // wrong reservation id
    }

    #[test]
    fn reputation_tracking() {
        let mut rep = SenderReputation::new([5u8; 16]);

        rep.record_honored();
        rep.record_honored();
        rep.record_no_show();

        assert_eq!(rep.reservations_made, 3);
        assert_eq!(rep.honor_rate(), 2.0 / 3.0);
        assert!(!rep.is_blocked());

        rep.record_no_show();
        rep.record_no_show();
        assert!(rep.is_blocked()); // third no-show trips the block
    }

    #[test]
    fn policy_check_pow() {
        let policy = TherapistPolicy {
            require_pow: true,
            pow_difficulty: 8,
            ..Default::default()
        };
        let rep = SenderReputation::new([6u8; 16]);
        let reservation_id = [7u8; 16];

        // Missing PoW is rejected.
        assert_eq!(
            policy.check(&rep, 0, None, &reservation_id),
            PolicyResult::MissingPoW
        );

        // A valid PoW at sufficient difficulty passes.
        let pow = ProofOfWork::generate(&reservation_id, 8);
        assert_eq!(
            policy.check(&rep, 0, Some(&pow), &reservation_id),
            PolicyResult::Allowed
        );
    }

    #[test]
    fn policy_check_verification_level() {
        let policy = TherapistPolicy {
            min_verification_level: 2,
            require_pow: false,
            ..Default::default()
        };
        let rep = SenderReputation::new([8u8; 16]);
        let reservation_id = [9u8; 16];

        assert_eq!(
            policy.check(&rep, 1, None, &reservation_id),
            PolicyResult::InsufficientVerification
        );
        assert_eq!(
            policy.check(&rep, 2, None, &reservation_id),
            PolicyResult::Allowed
        );
    }
}
|
||||
392
crates/meshservice/src/crypto.rs
Normal file
392
crates/meshservice/src/crypto.rs
Normal file
@@ -0,0 +1,392 @@
|
||||
//! End-to-end encryption for service message payloads.
|
||||
//!
|
||||
//! Uses X25519 key agreement + HKDF-SHA256 key derivation + ChaCha20-Poly1305 AEAD.
|
||||
//! Encryption is opt-in per message: the sender encrypts the payload before
|
||||
//! constructing the `ServiceMessage`, and the recipient decrypts after receiving.
|
||||
//!
|
||||
//! ## Key model
|
||||
//!
|
||||
//! Each `ServiceIdentity` (Ed25519) can derive an X25519 keypair for encryption.
|
||||
//! - Sender generates an ephemeral X25519 key per message (forward secrecy).
|
||||
//! - Shared secret is computed via X25519 DH with the recipient's public key.
|
||||
//! - HKDF derives a per-message encryption key.
|
||||
//! - ChaCha20-Poly1305 encrypts the payload with a random nonce.
|
||||
//!
|
||||
//! ## Wire format of encrypted payload
|
||||
//!
|
||||
//! ```text
|
||||
//! [1 byte: version = 0x01]
|
||||
//! [32 bytes: sender ephemeral X25519 public key]
|
||||
//! [12 bytes: nonce]
|
||||
//! [N bytes: ciphertext + 16-byte Poly1305 tag]
|
||||
//! ```
|
||||
|
||||
use chacha20poly1305::aead::{Aead, KeyInit};
|
||||
use chacha20poly1305::{ChaCha20Poly1305, Nonce};
|
||||
use hkdf::Hkdf;
|
||||
use rand::rngs::OsRng;
|
||||
use rand::RngCore;
|
||||
use x25519_dalek::{PublicKey as X25519Public, StaticSecret};
|
||||
|
||||
use crate::error::ServiceError;
|
||||
use crate::identity::ServiceIdentity;
|
||||
|
||||
/// Current encrypted payload version byte.
|
||||
const ENCRYPTED_VERSION: u8 = 0x01;
|
||||
/// Overhead: 1 (version) + 32 (ephemeral pubkey) + 12 (nonce) + 16 (tag).
|
||||
const ENCRYPTION_OVERHEAD: usize = 1 + 32 + 12 + 16;
|
||||
|
||||
/// X25519 keypair derived from a `ServiceIdentity` for encryption.
|
||||
///
|
||||
/// The Ed25519 seed is reused as the X25519 static secret. This is the
|
||||
/// standard Ed25519-to-X25519 conversion used by libsodium and others.
|
||||
pub struct EncryptionKeyPair {
|
||||
secret: StaticSecret,
|
||||
public: X25519Public,
|
||||
}
|
||||
|
||||
impl EncryptionKeyPair {
|
||||
/// Derive an encryption keypair from a `ServiceIdentity`.
|
||||
pub fn from_identity(identity: &ServiceIdentity) -> Self {
|
||||
let secret = StaticSecret::from(identity.secret_key());
|
||||
let public = X25519Public::from(&secret);
|
||||
Self { secret, public }
|
||||
}
|
||||
|
||||
/// Get the X25519 public key bytes (advertise to peers for encryption).
|
||||
pub fn public_bytes(&self) -> [u8; 32] {
|
||||
self.public.to_bytes()
|
||||
}
|
||||
|
||||
/// Encrypt a plaintext payload for a specific recipient.
|
||||
///
|
||||
/// Uses a fresh ephemeral key for forward secrecy: even if the sender's
|
||||
/// long-term key is compromised, past messages remain confidential.
|
||||
pub fn encrypt_for(
|
||||
&self,
|
||||
recipient_x25519_public: &[u8; 32],
|
||||
plaintext: &[u8],
|
||||
) -> Result<Vec<u8>, ServiceError> {
|
||||
// Generate ephemeral keypair for this message
|
||||
let eph_secret = StaticSecret::random_from_rng(OsRng);
|
||||
let eph_public = X25519Public::from(&eph_secret);
|
||||
|
||||
// X25519 DH with recipient
|
||||
let recipient_pub = X25519Public::from(*recipient_x25519_public);
|
||||
let shared = eph_secret.diffie_hellman(&recipient_pub);
|
||||
|
||||
// Derive encryption key via HKDF
|
||||
let key = derive_key(shared.as_bytes(), b"meshservice-e2e-v1");
|
||||
|
||||
// Encrypt with ChaCha20-Poly1305
|
||||
let cipher = ChaCha20Poly1305::new((&key).into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|_| ServiceError::Crypto("encryption failed".into()))?;
|
||||
|
||||
// Assemble: version || ephemeral_public || nonce || ciphertext+tag
|
||||
let mut out = Vec::with_capacity(ENCRYPTION_OVERHEAD + plaintext.len());
|
||||
out.push(ENCRYPTED_VERSION);
|
||||
out.extend_from_slice(&eph_public.to_bytes());
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
out.extend_from_slice(&ciphertext);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Decrypt an encrypted payload sent to us.
|
||||
///
|
||||
/// Extracts the sender's ephemeral public key from the payload, computes
|
||||
/// the shared secret with our static X25519 key, and decrypts.
|
||||
pub fn decrypt(&self, encrypted: &[u8]) -> Result<Vec<u8>, ServiceError> {
|
||||
if encrypted.len() < ENCRYPTION_OVERHEAD {
|
||||
return Err(ServiceError::Crypto("ciphertext too short".into()));
|
||||
}
|
||||
|
||||
let version = encrypted[0];
|
||||
if version != ENCRYPTED_VERSION {
|
||||
return Err(ServiceError::Crypto(format!(
|
||||
"unsupported encryption version: {version}"
|
||||
)));
|
||||
}
|
||||
|
||||
let eph_public_bytes: [u8; 32] = encrypted[1..33]
|
||||
.try_into()
|
||||
.map_err(|_| ServiceError::Crypto("invalid ephemeral key".into()))?;
|
||||
let nonce_bytes: [u8; 12] = encrypted[33..45]
|
||||
.try_into()
|
||||
.map_err(|_| ServiceError::Crypto("invalid nonce".into()))?;
|
||||
let ciphertext = &encrypted[45..];
|
||||
|
||||
// X25519 DH with sender's ephemeral key
|
||||
let eph_public = X25519Public::from(eph_public_bytes);
|
||||
let shared = self.secret.diffie_hellman(&eph_public);
|
||||
|
||||
// Derive decryption key
|
||||
let key = derive_key(shared.as_bytes(), b"meshservice-e2e-v1");
|
||||
|
||||
// Decrypt
|
||||
let cipher = ChaCha20Poly1305::new((&key).into());
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| ServiceError::Crypto("decryption failed".into()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive a 32-byte key from a shared secret using HKDF-SHA256.
|
||||
fn derive_key(shared_secret: &[u8], info: &[u8]) -> [u8; 32] {
|
||||
let hk = Hkdf::<sha2::Sha256>::new(None, shared_secret);
|
||||
let mut key = [0u8; 32];
|
||||
hk.expand(info, &mut key)
|
||||
.expect("HKDF expand to 32 bytes should never fail");
|
||||
key
|
||||
}
|
||||
|
||||
/// Check whether a payload appears to be encrypted (starts with version byte
|
||||
/// and has minimum length).
|
||||
pub fn is_encrypted_payload(payload: &[u8]) -> bool {
|
||||
payload.len() >= ENCRYPTION_OVERHEAD && payload[0] == ENCRYPTED_VERSION
|
||||
}
|
||||
|
||||
/// Return the encryption overhead in bytes (useful for size budgets on
|
||||
/// constrained transports like LoRa).
|
||||
pub const fn encryption_overhead() -> usize {
|
||||
ENCRYPTION_OVERHEAD
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::identity::ServiceIdentity;
|
||||
|
||||
#[test]
|
||||
fn encrypt_decrypt_roundtrip() {
|
||||
let sender_id = ServiceIdentity::generate();
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
|
||||
let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
|
||||
let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
|
||||
let plaintext = b"Hello, encrypted mesh world!";
|
||||
let encrypted = sender_keys
|
||||
.encrypt_for(&recipient_keys.public_bytes(), plaintext)
|
||||
.expect("encrypt");
|
||||
|
||||
let decrypted = recipient_keys.decrypt(&encrypted).expect("decrypt");
|
||||
assert_eq!(decrypted, plaintext);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrong_recipient_cannot_decrypt() {
|
||||
let sender_id = ServiceIdentity::generate();
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
let wrong_id = ServiceIdentity::generate();
|
||||
|
||||
let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
|
||||
let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
let wrong_keys = EncryptionKeyPair::from_identity(&wrong_id);
|
||||
|
||||
let encrypted = sender_keys
|
||||
.encrypt_for(&recipient_keys.public_bytes(), b"secret data")
|
||||
.expect("encrypt");
|
||||
|
||||
let result = wrong_keys.decrypt(&encrypted);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tampered_ciphertext_fails() {
|
||||
let sender_id = ServiceIdentity::generate();
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
|
||||
let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
|
||||
let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
|
||||
let mut encrypted = sender_keys
|
||||
.encrypt_for(&recipient_keys.public_bytes(), b"do not tamper")
|
||||
.expect("encrypt");
|
||||
|
||||
// Flip a byte in the ciphertext portion
|
||||
let last = encrypted.len() - 1;
|
||||
encrypted[last] ^= 0xff;
|
||||
|
||||
let result = recipient_keys.decrypt(&encrypted);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn truncated_ciphertext_rejected() {
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
let keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
|
||||
let result = keys.decrypt(&[0x01; 10]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bad_version_rejected() {
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
let keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
|
||||
// Valid length but wrong version
|
||||
let mut fake = vec![0x99u8; ENCRYPTION_OVERHEAD + 10];
|
||||
fake[0] = 0x99;
|
||||
|
||||
let result = keys.decrypt(&fake);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn each_encryption_produces_different_ciphertext() {
|
||||
let sender_id = ServiceIdentity::generate();
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
|
||||
let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
|
||||
let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
|
||||
let plaintext = b"same message twice";
|
||||
let enc1 = sender_keys
|
||||
.encrypt_for(&recipient_keys.public_bytes(), plaintext)
|
||||
.expect("encrypt 1");
|
||||
let enc2 = sender_keys
|
||||
.encrypt_for(&recipient_keys.public_bytes(), plaintext)
|
||||
.expect("encrypt 2");
|
||||
|
||||
// Different ephemeral keys + nonces => different ciphertext
|
||||
assert_ne!(enc1, enc2);
|
||||
|
||||
// Both decrypt to the same plaintext
|
||||
let dec1 = recipient_keys.decrypt(&enc1).expect("decrypt 1");
|
||||
let dec2 = recipient_keys.decrypt(&enc2).expect("decrypt 2");
|
||||
assert_eq!(dec1, plaintext);
|
||||
assert_eq!(dec2, plaintext);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_plaintext_roundtrip() {
|
||||
let sender_id = ServiceIdentity::generate();
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
|
||||
let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
|
||||
let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
|
||||
let encrypted = sender_keys
|
||||
.encrypt_for(&recipient_keys.public_bytes(), b"")
|
||||
.expect("encrypt empty");
|
||||
|
||||
assert_eq!(encrypted.len(), ENCRYPTION_OVERHEAD);
|
||||
|
||||
let decrypted = recipient_keys.decrypt(&encrypted).expect("decrypt empty");
|
||||
assert!(decrypted.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_encrypted_payload_detection() {
|
||||
let sender_id = ServiceIdentity::generate();
|
||||
let recipient_id = ServiceIdentity::generate();
|
||||
|
||||
let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
|
||||
let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);
|
||||
|
||||
let encrypted = sender_keys
|
||||
.encrypt_for(&recipient_keys.public_bytes(), b"test")
|
||||
.expect("encrypt");
|
||||
|
||||
assert!(is_encrypted_payload(&encrypted));
|
||||
assert!(!is_encrypted_payload(b"plain text"));
|
||||
assert!(!is_encrypted_payload(&[]));
|
||||
}
|
||||
|
||||
#[test]
fn public_bytes_deterministic() {
    // Deriving the encryption keypair twice from the same identity must
    // produce the same public key.
    let identity = ServiceIdentity::generate();
    let first = EncryptionKeyPair::from_identity(&identity);
    let second = EncryptionKeyPair::from_identity(&identity);
    assert_eq!(first.public_bytes(), second.public_bytes());
}
|
||||
|
||||
#[test]
fn encrypt_decrypt_with_service_message() {
    // Full integration: encrypt the payload, wrap it in a signed
    // ServiceMessage, then decrypt on the recipient side.
    use crate::message::ServiceMessage;
    use crate::service_ids::FAPP;

    let sender_id = ServiceIdentity::generate();
    let recipient_id = ServiceIdentity::generate();

    let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
    let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);

    // Payload is sealed before the message is constructed.
    let plaintext = b"confidential appointment details";
    let sealed = sender_keys
        .encrypt_for(&recipient_keys.public_bytes(), plaintext)
        .expect("encrypt");

    let msg = ServiceMessage::new(
        &sender_id,
        FAPP,
        crate::message::MessageType::Reserve,
        sealed.clone(),
        1,
    );

    // The signature covers the (already encrypted) payload and must verify.
    assert!(msg.verify(&sender_id.public_key()));

    // The recipient recovers the original plaintext from the message payload.
    let recovered = recipient_keys.decrypt(&msg.payload).expect("decrypt");
    assert_eq!(recovered, plaintext);
}
|
||||
|
||||
#[test]
fn encrypt_decrypt_wire_roundtrip() {
    // Full wire roundtrip: encrypt -> sign -> encode -> decode -> verify -> decrypt.
    use crate::message::ServiceMessage;
    use crate::service_ids::FAPP;
    use crate::wire;

    let sender_id = ServiceIdentity::generate();
    let recipient_id = ServiceIdentity::generate();

    let sender_keys = EncryptionKeyPair::from_identity(&sender_id);
    let recipient_keys = EncryptionKeyPair::from_identity(&recipient_id);

    let plaintext = b"sensitive medical data over the mesh";
    let sealed = sender_keys
        .encrypt_for(&recipient_keys.public_bytes(), plaintext)
        .expect("encrypt");

    let msg = ServiceMessage::new(
        &sender_id,
        FAPP,
        crate::message::MessageType::Reserve,
        sealed,
        42,
    );

    // Roundtrip through the wire encoding.
    let wire_bytes = wire::encode(&msg).expect("encode");
    let decoded = wire::decode(&wire_bytes).expect("decode");

    // Signature survives encode/decode...
    assert!(decoded.verify(&sender_id.public_key()));

    // ...and only the recipient can read the payload.
    let recovered = recipient_keys.decrypt(&decoded.payload).expect("decrypt");
    assert_eq!(recovered, plaintext);
}
|
||||
|
||||
#[test]
fn encryption_overhead_constant() {
    // Pin the wire overhead so an accidental format change is caught.
    assert_eq!(encryption_overhead(), 61);
}
|
||||
}
|
||||
55
crates/meshservice/src/error.rs
Normal file
55
crates/meshservice/src/error.rs
Normal file
@@ -0,0 +1,55 @@
|
||||
//! Error types for the mesh service layer.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors that can occur in the service layer.
#[derive(Debug, Error)]
pub enum ServiceError {
    /// Incoming bytes could not be parsed as a valid service message.
    #[error("invalid message format: {0}")]
    InvalidFormat(String),

    /// No handler is registered for this service ID.
    #[error("unknown service ID: {0}")]
    UnknownService(u32),

    /// Ed25519 signature check failed (or address/key mismatch).
    #[error("signature verification failed")]
    SignatureInvalid,

    /// The message's TTL has elapsed.
    #[error("message expired")]
    Expired,

    /// The message has been relayed beyond its hop limit.
    #[error("max hops exceeded")]
    MaxHopsExceeded,

    /// This node lacks a capability required for the operation.
    #[error("missing capability: {0}")]
    MissingCapability(String),

    /// The local message store has reached capacity.
    #[error("store full")]
    StoreFull,

    /// The message was already seen.
    #[error("duplicate message")]
    Duplicate,

    /// CBOR (de)serialization failure; see the `From` impls below in this file.
    #[error("serialization error: {0}")]
    Serialization(String),

    /// Encryption/decryption failure.
    #[error("crypto error: {0}")]
    Crypto(String),

    /// Sender does not meet the router's minimum verification level.
    #[error("verification required: minimum level {0}")]
    VerificationRequired(u8),

    /// Error bubbled up from a service-specific handler.
    #[error("service handler error: {0}")]
    Handler(String),
}
|
||||
|
||||
impl From<ciborium::ser::Error<std::io::Error>> for ServiceError {
|
||||
fn from(e: ciborium::ser::Error<std::io::Error>) -> Self {
|
||||
ServiceError::Serialization(e.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ciborium::de::Error<std::io::Error>> for ServiceError {
|
||||
fn from(e: ciborium::de::Error<std::io::Error>) -> Self {
|
||||
ServiceError::Serialization(e.to_string())
|
||||
}
|
||||
}
|
||||
119
crates/meshservice/src/identity.rs
Normal file
119
crates/meshservice/src/identity.rs
Normal file
@@ -0,0 +1,119 @@
|
||||
//! Service identity management using Ed25519.
|
||||
|
||||
use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey};
|
||||
use rand::rngs::OsRng;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
/// A service participant's identity (Ed25519 keypair).
///
/// Wraps the private signing key; the public key and the derived mesh
/// address are computed on demand from it.
#[derive(Clone)]
pub struct ServiceIdentity {
    // Ed25519 private key; exposed only via `secret_key` for persistence.
    signing_key: SigningKey,
}
|
||||
|
||||
impl ServiceIdentity {
|
||||
/// Generate a new random identity.
|
||||
pub fn generate() -> Self {
|
||||
use rand::RngCore;
|
||||
let mut secret = [0u8; 32];
|
||||
OsRng.fill_bytes(&mut secret);
|
||||
let signing_key = SigningKey::from_bytes(&secret);
|
||||
Self { signing_key }
|
||||
}
|
||||
|
||||
/// Create from an existing secret key.
|
||||
pub fn from_secret(secret: &[u8; 32]) -> Self {
|
||||
let signing_key = SigningKey::from_bytes(secret);
|
||||
Self { signing_key }
|
||||
}
|
||||
|
||||
/// Get the 32-byte public key.
|
||||
pub fn public_key(&self) -> [u8; 32] {
|
||||
self.signing_key.verifying_key().to_bytes()
|
||||
}
|
||||
|
||||
/// Get the 32-byte secret key (for persistence).
|
||||
pub fn secret_key(&self) -> [u8; 32] {
|
||||
self.signing_key.to_bytes()
|
||||
}
|
||||
|
||||
/// Compute the 16-byte mesh address from the public key.
|
||||
pub fn address(&self) -> [u8; 16] {
|
||||
compute_address(&self.public_key())
|
||||
}
|
||||
|
||||
/// Sign a message.
|
||||
pub fn sign(&self, message: &[u8]) -> [u8; 64] {
|
||||
let sig = self.signing_key.sign(message);
|
||||
sig.to_bytes()
|
||||
}
|
||||
|
||||
/// Verify a signature against a public key.
|
||||
pub fn verify(public_key: &[u8; 32], message: &[u8], signature: &[u8; 64]) -> bool {
|
||||
let Ok(verifying_key) = VerifyingKey::from_bytes(public_key) else {
|
||||
return false;
|
||||
};
|
||||
let sig = Signature::from_bytes(signature);
|
||||
verifying_key.verify(message, &sig).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute a 16-byte mesh address from a 32-byte public key.
|
||||
///
|
||||
/// Address = SHA-256(public_key)[0..16]
|
||||
pub fn compute_address(public_key: &[u8; 32]) -> [u8; 16] {
|
||||
let hash = Sha256::digest(public_key);
|
||||
let mut addr = [0u8; 16];
|
||||
addr.copy_from_slice(&hash[..16]);
|
||||
addr
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for ServiceIdentity {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("ServiceIdentity")
|
||||
.field("address", &hex::encode(self.address()))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
// Hex encoding for debug output (avoids depending on the `hex` crate).
mod hex {
    use std::fmt::Write;

    /// Lowercase hex-encode arbitrary bytes.
    pub fn encode(bytes: impl AsRef<[u8]>) -> String {
        let bytes = bytes.as_ref();
        // Preallocate: exactly two hex chars per input byte, so one
        // allocation instead of one `format!` String per byte.
        let mut out = String::with_capacity(bytes.len() * 2);
        for b in bytes {
            // Writing into a String cannot fail.
            let _ = write!(out, "{b:02x}");
        }
        out
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn generate_and_sign() {
        // A fresh identity's signature verifies under its own public key.
        let identity = ServiceIdentity::generate();
        let payload = b"hello world";
        let signature = identity.sign(payload);
        assert!(ServiceIdentity::verify(
            &identity.public_key(),
            payload,
            &signature
        ));
    }

    #[test]
    fn address_is_deterministic() {
        // `address()` is just `compute_address` over the public key.
        let identity = ServiceIdentity::generate();
        assert_eq!(identity.address(), compute_address(&identity.public_key()));
    }

    #[test]
    fn wrong_message_fails() {
        // A signature over one message must not verify for another.
        let identity = ServiceIdentity::generate();
        let signature = identity.sign(b"correct");
        assert!(!ServiceIdentity::verify(
            &identity.public_key(),
            b"wrong",
            &signature
        ));
    }

    #[test]
    fn roundtrip_secret() {
        // Persist-and-restore via the secret key preserves the public key.
        let original = ServiceIdentity::generate();
        let restored = ServiceIdentity::from_secret(&original.secret_key());
        assert_eq!(original.public_key(), restored.public_key());
    }
}
|
||||
90
crates/meshservice/src/lib.rs
Normal file
90
crates/meshservice/src/lib.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
//! # MeshService — Generic Decentralized Service Layer
|
||||
//!
|
||||
//! A protocol and runtime for building decentralized services on mesh networks.
|
||||
//! Any service following the Announce → Query → Response → Reserve pattern
|
||||
//! can be implemented on this layer.
|
||||
//!
|
||||
//! ## Architecture
|
||||
//!
|
||||
//! ```text
|
||||
//! ┌─────────────────────────────────────────────────────────────┐
|
||||
//! │ Application Services │
|
||||
//! │ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │
|
||||
//! │ │ FAPP │ │ Housing │ │ Repair │ │ Custom │ ... │
|
||||
//! │ └────┬────┘ └────┬────┘ └────┬────┘ └────┬────┘ │
|
||||
//! │ └────────────┴────────────┴────────────┘ │
|
||||
//! │ Service Layer (this crate) │
|
||||
//! │ ServiceMessage, ServiceRouter, Verification │
|
||||
//! │ ─────────────────────────────────────────────────────── │
|
||||
//! │ Mesh Layer │
|
||||
//! │ (provided by quicprochat-p2p or other mesh impl) │
|
||||
//! └─────────────────────────────────────────────────────────────┘
|
||||
//! ```
|
||||
//!
|
||||
//! ## Quick Start
|
||||
//!
|
||||
//! ```rust,ignore
|
||||
//! use meshservice::{ServiceRouter, ServiceMessage, services::fapp::FappService};
|
||||
//!
|
||||
//! // Create router
|
||||
//! let mut router = ServiceRouter::new(identity, capabilities);
|
||||
//!
|
||||
//! // Register services
|
||||
//! router.register(FappService::new());
|
||||
//! router.register(HousingService::new());
|
||||
//!
|
||||
//! // Handle incoming message
|
||||
//! let action = router.handle(&incoming_bytes);
|
||||
//! ```
|
||||
|
||||
pub mod identity;
|
||||
pub mod message;
|
||||
pub mod router;
|
||||
pub mod store;
|
||||
pub mod verification;
|
||||
pub mod services;
|
||||
pub mod wire;
|
||||
pub mod error;
|
||||
pub mod anti_abuse;
|
||||
pub mod crypto;
|
||||
|
||||
pub use identity::ServiceIdentity;
|
||||
pub use message::{ServiceMessage, MessageType};
|
||||
pub use router::{ServiceRouter, ServiceHandler, ServiceAction};
|
||||
pub use store::ServiceStore;
|
||||
pub use verification::{Verification, VerificationLevel};
|
||||
pub use error::ServiceError;
|
||||
pub use anti_abuse::{RateLimiter, RateLimits, ProofOfWork, SenderReputation, TherapistPolicy};
|
||||
pub use crypto::{EncryptionKeyPair, is_encrypted_payload, encryption_overhead};
|
||||
|
||||
/// Well-known service IDs.
///
/// IDs below [`CUSTOM_START`](service_ids::CUSTOM_START) are reserved for
/// protocol-defined services; applications should allocate their own IDs
/// at or above it.
pub mod service_ids {
    /// Free Appointment Propagation Protocol (psychotherapy).
    pub const FAPP: u32 = 0x0001;
    /// Housing / room sharing.
    pub const HOUSING: u32 = 0x0002;
    /// Repair services / craftsmen.
    pub const REPAIR: u32 = 0x0003;
    /// Tutoring / education.
    pub const TUTOR: u32 = 0x0004;
    /// Medical appointments.
    pub const MEDICAL: u32 = 0x0005;
    /// Legal consultation.
    pub const LEGAL: u32 = 0x0006;
    /// Volunteer coordination.
    pub const VOLUNTEER: u32 = 0x0007;
    /// Events / tickets.
    pub const EVENTS: u32 = 0x0008;
    /// Start of the range reserved for user-defined services.
    pub const CUSTOM_START: u32 = 0x8000;
}
|
||||
|
||||
/// Capability flags for service participation.
///
/// Each flag is an independent bit, so flags may be OR-ed together
/// (e.g. `PROVIDER | RELAY`).
pub mod capabilities {
    /// Node can announce/provide services.
    pub const PROVIDER: u16 = 0x0100;
    /// Node caches and relays service messages.
    pub const RELAY: u16 = 0x0200;
    /// Node can query/consume services.
    pub const CONSUMER: u16 = 0x0400;
}
|
||||
321
crates/meshservice/src/message.rs
Normal file
321
crates/meshservice/src/message.rs
Normal file
@@ -0,0 +1,321 @@
|
||||
//! Core message types for the service layer.
|
||||
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::identity::ServiceIdentity;
|
||||
use crate::verification::Verification;
|
||||
|
||||
/// Message types within a service.
///
/// Discriminants are the on-wire byte values; keep them in sync with the
/// `TryFrom<u8>` impl in this file.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[repr(u8)]
pub enum MessageType {
    /// Provider announces availability.
    Announce = 0x01,
    /// Consumer queries for matches.
    Query = 0x02,
    /// Response to a query.
    Response = 0x03,
    /// Consumer reserves a slot/item.
    Reserve = 0x04,
    /// Provider confirms/rejects reservation.
    Confirm = 0x05,
    /// Either party cancels.
    Cancel = 0x06,
    /// Provider updates an existing announce (partial).
    Update = 0x07,
    /// Provider revokes an announce.
    Revoke = 0x08,
}
|
||||
|
||||
impl TryFrom<u8> for MessageType {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
0x01 => Ok(MessageType::Announce),
|
||||
0x02 => Ok(MessageType::Query),
|
||||
0x03 => Ok(MessageType::Response),
|
||||
0x04 => Ok(MessageType::Reserve),
|
||||
0x05 => Ok(MessageType::Confirm),
|
||||
0x06 => Ok(MessageType::Cancel),
|
||||
0x07 => Ok(MessageType::Update),
|
||||
0x08 => Ok(MessageType::Revoke),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A generic service message that can carry any application payload.
///
/// All fields except `signature`, `hop_count`, and `verifications` are
/// covered by the Ed25519 signature (see `signable_bytes`), so relays may
/// mutate those three in flight without invalidating the message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServiceMessage {
    /// Service identifier (which application).
    pub service_id: u32,
    /// Message type within service.
    pub message_type: MessageType,
    /// Protocol version for forward compatibility.
    pub version: u8,
    /// Unique message ID (derived from sender address + sequence).
    pub id: [u8; 16],
    /// Sender's mesh address (SHA-256 of public key, truncated).
    pub sender_address: [u8; 16],
    /// Application-specific CBOR payload.
    pub payload: Vec<u8>,
    /// Ed25519 signature over signable fields.
    pub signature: Vec<u8>,
    /// Optional verifications from trusted parties (not signed by sender).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub verifications: Vec<Verification>,
    /// Monotonically increasing per sender (dedup/supersede).
    pub sequence: u64,
    /// Time-to-live in hours.
    pub ttl_hours: u16,
    /// Unix timestamp of creation.
    pub timestamp: u64,
    /// Current hop count (incremented on re-broadcast; not signed).
    pub hop_count: u8,
    /// Maximum propagation hops.
    pub max_hops: u8,
}
|
||||
|
||||
/// Default TTL: 7 days (168 hours).
const DEFAULT_TTL_HOURS: u16 = 168;
/// Default maximum number of relay hops before propagation stops.
const DEFAULT_MAX_HOPS: u8 = 8;
|
||||
|
||||
impl ServiceMessage {
|
||||
/// Create a new service message.
|
||||
pub fn new(
|
||||
identity: &ServiceIdentity,
|
||||
service_id: u32,
|
||||
message_type: MessageType,
|
||||
payload: Vec<u8>,
|
||||
sequence: u64,
|
||||
) -> Self {
|
||||
Self::with_options(
|
||||
identity,
|
||||
service_id,
|
||||
message_type,
|
||||
payload,
|
||||
sequence,
|
||||
DEFAULT_TTL_HOURS,
|
||||
DEFAULT_MAX_HOPS,
|
||||
)
|
||||
}
|
||||
|
||||
/// Create with custom TTL and max hops.
|
||||
pub fn with_options(
|
||||
identity: &ServiceIdentity,
|
||||
service_id: u32,
|
||||
message_type: MessageType,
|
||||
payload: Vec<u8>,
|
||||
sequence: u64,
|
||||
ttl_hours: u16,
|
||||
max_hops: u8,
|
||||
) -> Self {
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
let sender_address = identity.address();
|
||||
|
||||
// Generate unique ID from address + sequence
|
||||
let id_hash = Sha256::digest(
|
||||
[&sender_address[..], &sequence.to_le_bytes()].concat()
|
||||
);
|
||||
let mut id = [0u8; 16];
|
||||
id.copy_from_slice(&id_hash[..16]);
|
||||
|
||||
let timestamp = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
|
||||
let mut msg = Self {
|
||||
service_id,
|
||||
message_type,
|
||||
version: 1,
|
||||
id,
|
||||
sender_address,
|
||||
payload,
|
||||
signature: Vec::new(),
|
||||
verifications: Vec::new(),
|
||||
sequence,
|
||||
ttl_hours,
|
||||
timestamp,
|
||||
hop_count: 0,
|
||||
max_hops,
|
||||
};
|
||||
|
||||
let signable = msg.signable_bytes();
|
||||
msg.signature = identity.sign(&signable).to_vec();
|
||||
msg
|
||||
}
|
||||
|
||||
/// Create an announce message.
|
||||
pub fn announce(
|
||||
identity: &ServiceIdentity,
|
||||
service_id: u32,
|
||||
payload: Vec<u8>,
|
||||
sequence: u64,
|
||||
) -> Self {
|
||||
Self::new(identity, service_id, MessageType::Announce, payload, sequence)
|
||||
}
|
||||
|
||||
/// Create a query message.
|
||||
pub fn query(
|
||||
identity: &ServiceIdentity,
|
||||
service_id: u32,
|
||||
payload: Vec<u8>,
|
||||
) -> Self {
|
||||
// Queries use random sequence (not monotonic)
|
||||
let sequence = rand::random();
|
||||
Self::with_options(
|
||||
identity,
|
||||
service_id,
|
||||
MessageType::Query,
|
||||
payload,
|
||||
sequence,
|
||||
1, // 1 hour TTL for queries
|
||||
DEFAULT_MAX_HOPS,
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a response message.
|
||||
pub fn response(
|
||||
identity: &ServiceIdentity,
|
||||
service_id: u32,
|
||||
query_id: [u8; 16],
|
||||
payload: Vec<u8>,
|
||||
) -> Self {
|
||||
let mut msg = Self::new(
|
||||
identity,
|
||||
service_id,
|
||||
MessageType::Response,
|
||||
payload,
|
||||
rand::random(),
|
||||
);
|
||||
// Response ID matches query ID for correlation
|
||||
msg.id = query_id;
|
||||
msg
|
||||
}
|
||||
|
||||
/// Assemble bytes for signing/verification.
|
||||
/// Excludes signature, hop_count, verifications (mutable fields).
|
||||
fn signable_bytes(&self) -> Vec<u8> {
|
||||
let mut buf = Vec::with_capacity(256);
|
||||
buf.extend_from_slice(&self.service_id.to_le_bytes());
|
||||
buf.push(self.message_type as u8);
|
||||
buf.push(self.version);
|
||||
buf.extend_from_slice(&self.id);
|
||||
buf.extend_from_slice(&self.sender_address);
|
||||
buf.extend_from_slice(&(self.payload.len() as u32).to_le_bytes());
|
||||
buf.extend_from_slice(&self.payload);
|
||||
buf.extend_from_slice(&self.sequence.to_le_bytes());
|
||||
buf.extend_from_slice(&self.ttl_hours.to_le_bytes());
|
||||
buf.extend_from_slice(&self.timestamp.to_le_bytes());
|
||||
buf.push(self.max_hops);
|
||||
buf
|
||||
}
|
||||
|
||||
/// Verify the signature using the sender's public key.
|
||||
pub fn verify(&self, sender_public_key: &[u8; 32]) -> bool {
|
||||
use crate::identity::compute_address;
|
||||
|
||||
// Verify address matches key
|
||||
if compute_address(sender_public_key) != self.sender_address {
|
||||
return false;
|
||||
}
|
||||
|
||||
let sig: [u8; 64] = match self.signature.as_slice().try_into() {
|
||||
Ok(s) => s,
|
||||
Err(_) => return false,
|
||||
};
|
||||
|
||||
let signable = self.signable_bytes();
|
||||
ServiceIdentity::verify(sender_public_key, &signable, &sig)
|
||||
}
|
||||
|
||||
/// Check if the message has expired.
|
||||
pub fn is_expired(&self) -> bool {
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
let ttl_secs = u64::from(self.ttl_hours) * 3600;
|
||||
now.saturating_sub(self.timestamp) > ttl_secs
|
||||
}
|
||||
|
||||
/// Check if the message can still propagate.
|
||||
pub fn can_propagate(&self) -> bool {
|
||||
self.hop_count < self.max_hops && !self.is_expired()
|
||||
}
|
||||
|
||||
/// Create a forwarded copy with incremented hop count.
|
||||
pub fn forwarded(&self) -> Self {
|
||||
let mut copy = self.clone();
|
||||
copy.hop_count = copy.hop_count.saturating_add(1);
|
||||
copy
|
||||
}
|
||||
|
||||
/// Get the highest verification level attached.
|
||||
pub fn verification_level(&self) -> u8 {
|
||||
self.verifications
|
||||
.iter()
|
||||
.map(|v| v.level)
|
||||
.max()
|
||||
.unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Add a verification to the message.
|
||||
pub fn add_verification(&mut self, verification: Verification) {
|
||||
self.verifications.push(verification);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn create_and_verify() {
        let identity = ServiceIdentity::generate();
        let msg = ServiceMessage::announce(
            &identity,
            crate::service_ids::FAPP,
            b"test payload".to_vec(),
            1,
        );

        // A freshly built announce is signed, unexpired, and forwardable.
        assert!(msg.verify(&identity.public_key()));
        assert!(!msg.is_expired());
        assert!(msg.can_propagate());
        assert_eq!(msg.hop_count, 0);
    }

    #[test]
    fn forwarded_increments_hop() {
        let identity = ServiceIdentity::generate();
        let relayed = ServiceMessage::announce(&identity, 1, vec![], 1).forwarded();

        assert_eq!(relayed.hop_count, 1);
        // hop_count is excluded from the signature, so it still verifies.
        assert!(relayed.verify(&identity.public_key()));
    }

    #[test]
    fn tampered_fails_verify() {
        let identity = ServiceIdentity::generate();
        let mut msg = ServiceMessage::announce(&identity, 1, b"original".to_vec(), 1);

        // Mutating the payload after signing must break verification.
        msg.payload = b"tampered".to_vec();
        assert!(!msg.verify(&identity.public_key()));
    }

    #[test]
    fn query_has_short_ttl() {
        let identity = ServiceIdentity::generate();
        let query = ServiceMessage::query(&identity, 1, vec![]);
        assert_eq!(query.ttl_hours, 1);
    }
}
|
||||
289
crates/meshservice/src/router.rs
Normal file
289
crates/meshservice/src/router.rs
Normal file
@@ -0,0 +1,289 @@
|
||||
//! Service router dispatches messages to service-specific handlers.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::error::ServiceError;
|
||||
use crate::message::{MessageType, ServiceMessage};
|
||||
use crate::store::{ServiceStore, StoredMessage};
|
||||
use crate::verification::TrustedVerifiers;
|
||||
|
||||
/// Action returned by a service handler.
///
/// The router interprets this after dispatch: `Store`/`StoreAndForward`
/// persist the message (when the sender key is known); the rest are
/// signals for the caller.
#[derive(Debug)]
pub enum ServiceAction {
    /// Message handled, do nothing more.
    Handled,
    /// Store the message locally.
    Store,
    /// Store and forward to peers.
    StoreAndForward,
    /// Forward without storing (pass-through relay).
    ForwardOnly,
    /// Drop the message silently.
    Drop,
    /// Send a response back.
    Respond(ServiceMessage),
    /// Reject with error.
    Reject(ServiceError),
}
|
||||
|
||||
/// Trait for service-specific handlers.
///
/// Implementations are registered with [`ServiceRouter`], which performs
/// the generic checks (expiry, hops, signature) before dispatching here.
pub trait ServiceHandler: Send + Sync {
    /// The service ID this handler manages.
    fn service_id(&self) -> u32;

    /// Human-readable service name.
    fn name(&self) -> &str;

    /// Handle an incoming message that passed the router's generic checks.
    fn handle(
        &self,
        message: &ServiceMessage,
        context: &HandlerContext,
    ) -> Result<ServiceAction, ServiceError>;

    /// Validate a message payload (service-specific logic).
    ///
    /// The default implementation accepts every message.
    fn validate(&self, message: &ServiceMessage) -> Result<(), ServiceError> {
        // Default: accept all
        let _ = message;
        Ok(())
    }

    /// Check if a stored announce matches a query.
    fn matches_query(&self, announce: &StoredMessage, query: &ServiceMessage) -> bool;
}
|
||||
|
||||
/// Context passed to handlers.
///
/// Borrows router state immutably for the duration of a single dispatch.
pub struct HandlerContext<'a> {
    /// Current node's capabilities.
    pub capabilities: u16,
    /// The store (for lookups during handle).
    pub store: &'a ServiceStore,
    /// Trusted verifiers for checking.
    pub trusted_verifiers: &'a TrustedVerifiers,
    /// Sender's public key (if known; `None` means the signature was not checked).
    pub sender_public_key: Option<[u8; 32]>,
}
|
||||
|
||||
/// Routes messages to appropriate service handlers.
pub struct ServiceRouter {
    /// Service ID -> Handler.
    handlers: HashMap<u32, Box<dyn ServiceHandler>>,
    /// Shared message store.
    store: ServiceStore,
    /// Node capabilities (see the `capabilities` module).
    capabilities: u16,
    /// Trusted verifiers.
    trusted_verifiers: TrustedVerifiers,
    /// Minimum verification level to accept announces (0 = any).
    min_verification_level: u8,
}
|
||||
|
||||
impl ServiceRouter {
    /// Create a new router with no registered services.
    pub fn new(capabilities: u16) -> Self {
        Self {
            handlers: HashMap::new(),
            store: ServiceStore::new(),
            capabilities,
            trusted_verifiers: TrustedVerifiers::new(),
            // 0 = accept announces regardless of verification level.
            min_verification_level: 0,
        }
    }

    /// Register a service handler.
    ///
    /// Registering a second handler for the same service ID replaces the first.
    pub fn register(&mut self, handler: Box<dyn ServiceHandler>) {
        let id = handler.service_id();
        self.handlers.insert(id, handler);
    }

    /// Set trusted verifiers.
    pub fn set_trusted_verifiers(&mut self, verifiers: TrustedVerifiers) {
        self.trusted_verifiers = verifiers;
    }

    /// Set minimum verification level for announces.
    pub fn set_min_verification_level(&mut self, level: u8) {
        self.min_verification_level = level;
    }

    /// Access the store.
    pub fn store(&self) -> &ServiceStore {
        &self.store
    }

    /// Mutable access to store.
    pub fn store_mut(&mut self) -> &mut ServiceStore {
        &mut self.store
    }

    /// Check if a service is registered.
    pub fn has_service(&self, service_id: u32) -> bool {
        self.handlers.contains_key(&service_id)
    }

    /// Handle an incoming message.
    ///
    /// Pipeline: expiry/hop checks -> handler lookup -> handler validation ->
    /// signature check (only when a key is supplied) -> verification-level
    /// gate for announces -> dispatch -> optional store.
    pub fn handle(
        &mut self,
        message: ServiceMessage,
        sender_public_key: Option<[u8; 32]>,
    ) -> Result<ServiceAction, ServiceError> {
        // Basic validation
        if message.is_expired() {
            return Err(ServiceError::Expired);
        }

        if message.hop_count > message.max_hops {
            return Err(ServiceError::MaxHopsExceeded);
        }

        // Get handler
        let handler = self
            .handlers
            .get(&message.service_id)
            .ok_or(ServiceError::UnknownService(message.service_id))?;

        // Validate message with handler
        handler.validate(&message)?;

        // Verify signature if we have public key.
        // NOTE(review): with no key the message proceeds unverified —
        // confirm this is the intended trust model.
        if let Some(pk) = &sender_public_key {
            if !message.verify(pk) {
                return Err(ServiceError::SignatureInvalid);
            }
        }

        // Check verification level for announces
        if message.message_type == MessageType::Announce && self.min_verification_level > 0 {
            let level = self
                .trusted_verifiers
                .highest_level(&message.verifications, &message.sender_address);
            if (level as u8) < self.min_verification_level {
                return Err(ServiceError::VerificationRequired(self.min_verification_level));
            }
        }

        // Build context
        let context = HandlerContext {
            capabilities: self.capabilities,
            store: &self.store,
            trusted_verifiers: &self.trusted_verifiers,
            sender_public_key,
        };

        // Dispatch to handler
        let action = handler.handle(&message, &context)?;

        // Process action
        match &action {
            ServiceAction::Store | ServiceAction::StoreAndForward => {
                // NOTE(review): a Store action from an unsigned message
                // (no sender key) is silently not stored — confirm intent.
                if let Some(pk) = sender_public_key {
                    self.store.store(message, pk);
                }
            }
            _ => {}
        }

        Ok(action)
    }

    /// Query the store for matching announces.
    ///
    /// Only `Announce` messages are candidates; the match predicate itself
    /// is delegated to the service handler.
    pub fn query(&self, query: &ServiceMessage) -> Vec<&StoredMessage> {
        let Some(handler) = self.handlers.get(&query.service_id) else {
            return Vec::new();
        };

        self.store.query(query.service_id, |stored| {
            stored.message.message_type == MessageType::Announce
                && handler.matches_query(stored, query)
        })
    }

    /// Get handler name for a service.
    pub fn service_name(&self, service_id: u32) -> Option<&str> {
        self.handlers.get(&service_id).map(|h| h.name())
    }

    /// List registered services as (id, name) pairs.
    pub fn services(&self) -> Vec<(u32, &str)> {
        self.handlers
            .iter()
            .map(|(&id, h)| (id, h.name()))
            .collect()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{identity::ServiceIdentity, service_ids::FAPP};

    /// Minimal handler: stores announces, handles queries, drops the rest.
    struct TestHandler;

    impl ServiceHandler for TestHandler {
        fn service_id(&self) -> u32 {
            FAPP
        }

        fn name(&self) -> &str {
            "Test"
        }

        fn handle(
            &self,
            message: &ServiceMessage,
            _context: &HandlerContext,
        ) -> Result<ServiceAction, ServiceError> {
            let action = match message.message_type {
                MessageType::Announce => ServiceAction::StoreAndForward,
                MessageType::Query => ServiceAction::Handled,
                _ => ServiceAction::Drop,
            };
            Ok(action)
        }

        fn matches_query(&self, _announce: &StoredMessage, _query: &ServiceMessage) -> bool {
            // Match everything for the test.
            true
        }
    }

    #[test]
    fn register_and_handle() {
        let mut router = ServiceRouter::new(crate::capabilities::RELAY);
        router.register(Box::new(TestHandler));

        assert!(router.has_service(FAPP));
        assert_eq!(router.service_name(FAPP), Some("Test"));

        let identity = ServiceIdentity::generate();
        let announce = ServiceMessage::announce(&identity, FAPP, vec![], 1);
        let action = router
            .handle(announce, Some(identity.public_key()))
            .unwrap();

        assert!(matches!(action, ServiceAction::StoreAndForward));
        // A StoreAndForward action persists the message.
        assert_eq!(router.store().len(), 1);
    }

    #[test]
    fn unknown_service_rejected() {
        let mut router = ServiceRouter::new(0);
        let identity = ServiceIdentity::generate();
        let announce = ServiceMessage::announce(&identity, 9999, vec![], 1);

        let outcome = router.handle(announce, Some(identity.public_key()));
        assert!(matches!(outcome, Err(ServiceError::UnknownService(9999))));
    }

    #[test]
    fn invalid_signature_rejected() {
        let mut router = ServiceRouter::new(0);
        router.register(Box::new(TestHandler));

        let signer = ServiceIdentity::generate();
        let impostor = ServiceIdentity::generate();
        let announce = ServiceMessage::announce(&signer, FAPP, vec![], 1);

        // Verifying against the wrong key must be rejected.
        let outcome = router.handle(announce, Some(impostor.public_key()));
        assert!(matches!(outcome, Err(ServiceError::SignatureInvalid)));
    }
}
|
||||
479
crates/meshservice/src/services/fapp.rs
Normal file
479
crates/meshservice/src/services/fapp.rs
Normal file
@@ -0,0 +1,479 @@
|
||||
//! FAPP — Free Appointment Propagation Protocol.
|
||||
//!
|
||||
//! Decentralized psychotherapy appointment discovery.
|
||||
//!
|
||||
//! ## Flow
|
||||
//!
|
||||
//! 1. Therapist announces available slots (specialism, location, modality).
|
||||
//! 2. Announcement floods through mesh (TTL-limited, signature-verified).
|
||||
//! 3. Patient queries for matching slots (specialism, distance).
|
||||
//! 4. Relays respond with cached matches.
|
||||
//! 5. Patient reserves slot (E2E encrypted to therapist).
|
||||
//! 6. Therapist confirms/rejects.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::ServiceError;
|
||||
use crate::message::{MessageType, ServiceMessage};
|
||||
use crate::router::{HandlerContext, ServiceAction, ServiceHandler};
|
||||
use crate::service_ids::FAPP;
|
||||
use crate::store::StoredMessage;
|
||||
use crate::wire::{decode_payload, encode_payload};
|
||||
|
||||
/// Therapy specialisms.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[repr(u8)]
|
||||
pub enum Specialism {
|
||||
GeneralPsychotherapy = 0x01,
|
||||
CognitiveBehavioral = 0x02,
|
||||
Psychoanalysis = 0x03,
|
||||
SystemicTherapy = 0x04,
|
||||
TraumaFocused = 0x05,
|
||||
ChildAndAdolescent = 0x06,
|
||||
CoupleAndFamily = 0x07,
|
||||
Addiction = 0x08,
|
||||
Neuropsychology = 0x09,
|
||||
}
|
||||
|
||||
impl TryFrom<u8> for Specialism {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
0x01 => Ok(Self::GeneralPsychotherapy),
|
||||
0x02 => Ok(Self::CognitiveBehavioral),
|
||||
0x03 => Ok(Self::Psychoanalysis),
|
||||
0x04 => Ok(Self::SystemicTherapy),
|
||||
0x05 => Ok(Self::TraumaFocused),
|
||||
0x06 => Ok(Self::ChildAndAdolescent),
|
||||
0x07 => Ok(Self::CoupleAndFamily),
|
||||
0x08 => Ok(Self::Addiction),
|
||||
0x09 => Ok(Self::Neuropsychology),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Therapy modality.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[repr(u8)]
|
||||
pub enum Modality {
|
||||
InPerson = 0x01,
|
||||
VideoCall = 0x02,
|
||||
PhoneCall = 0x03,
|
||||
TextBased = 0x04,
|
||||
}
|
||||
|
||||
/// Slot announcement payload.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SlotAnnounce {
|
||||
/// Therapist's specialisms (bitfield).
|
||||
pub specialisms: u16,
|
||||
/// Modality (bitfield).
|
||||
pub modality: u8,
|
||||
/// Postal code (first 3 digits for privacy).
|
||||
pub postal_prefix: String,
|
||||
/// Geohash (6 chars, ~1.2km precision).
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub geohash: Option<String>,
|
||||
/// Available slots count.
|
||||
pub available_slots: u8,
|
||||
/// Earliest available date (days from epoch).
|
||||
pub earliest_days: u16,
|
||||
/// Insurance types accepted (bitfield).
|
||||
pub insurance: u8,
|
||||
/// Optional profile URL for verification.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub profile_url: Option<String>,
|
||||
/// Optional display name.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub display_name: Option<String>,
|
||||
}
|
||||
|
||||
impl SlotAnnounce {
|
||||
/// Create a new announcement.
|
||||
pub fn new(specialisms: &[Specialism], modality: Modality, postal_prefix: &str) -> Self {
|
||||
let spec_bits = specialisms.iter().fold(0u16, |acc, s| acc | (1 << (*s as u8)));
|
||||
|
||||
Self {
|
||||
specialisms: spec_bits,
|
||||
modality: modality as u8,
|
||||
postal_prefix: postal_prefix.into(),
|
||||
geohash: None,
|
||||
available_slots: 1,
|
||||
earliest_days: 0,
|
||||
insurance: 0xFF, // All accepted by default
|
||||
profile_url: None,
|
||||
display_name: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set geohash location.
|
||||
pub fn with_geohash(mut self, geohash: &str) -> Self {
|
||||
self.geohash = Some(geohash[..6.min(geohash.len())].into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set available slots count.
|
||||
pub fn with_slots(mut self, count: u8) -> Self {
|
||||
self.available_slots = count;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set earliest availability.
|
||||
pub fn with_earliest(mut self, days_from_now: u16) -> Self {
|
||||
self.earliest_days = days_from_now;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set profile URL.
|
||||
pub fn with_profile(mut self, url: &str) -> Self {
|
||||
self.profile_url = Some(url.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set display name.
|
||||
pub fn with_name(mut self, name: &str) -> Self {
|
||||
self.display_name = Some(name.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Check if a specialism is offered.
|
||||
pub fn has_specialism(&self, spec: Specialism) -> bool {
|
||||
self.specialisms & (1 << (spec as u8)) != 0
|
||||
}
|
||||
|
||||
/// Encode to CBOR bytes.
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, ServiceError> {
|
||||
encode_payload(self)
|
||||
}
|
||||
|
||||
/// Decode from CBOR bytes.
|
||||
pub fn from_bytes(data: &[u8]) -> Result<Self, ServiceError> {
|
||||
decode_payload(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Insurance acceptance flags; OR them together to build the bitfield.
pub mod insurance {
    /// Private health insurance.
    pub const PRIVATE: u8 = 0x01;
    /// Public / statutory health insurance.
    pub const PUBLIC: u8 = 0x02;
    /// Patient pays out of pocket.
    pub const SELF_PAY: u8 = 0x04;
}
|
||||
|
||||
/// Slot query payload.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SlotQuery {
|
||||
/// Desired specialisms (bitfield, any match).
|
||||
pub specialisms: u16,
|
||||
/// Postal prefix to search.
|
||||
pub postal_prefix: String,
|
||||
/// Max distance in km (optional).
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub max_distance_km: Option<u8>,
|
||||
/// Required modality (0 = any).
|
||||
pub modality: u8,
|
||||
/// Max wait in days.
|
||||
pub max_wait_days: u16,
|
||||
/// Insurance type required.
|
||||
pub insurance: u8,
|
||||
}
|
||||
|
||||
impl SlotQuery {
|
||||
/// Create a query for a specialism in a postal area.
|
||||
pub fn new(specialism: Specialism, postal_prefix: &str) -> Self {
|
||||
Self {
|
||||
specialisms: 1 << (specialism as u8),
|
||||
postal_prefix: postal_prefix.into(),
|
||||
max_distance_km: None,
|
||||
modality: 0,
|
||||
max_wait_days: 365,
|
||||
insurance: 0xFF,
|
||||
}
|
||||
}
|
||||
|
||||
/// Require specific modality.
|
||||
pub fn with_modality(mut self, modality: Modality) -> Self {
|
||||
self.modality = modality as u8;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set max wait time.
|
||||
pub fn with_max_wait(mut self, days: u16) -> Self {
|
||||
self.max_wait_days = days;
|
||||
self
|
||||
}
|
||||
|
||||
/// Check if an announce matches this query.
|
||||
pub fn matches(&self, announce: &SlotAnnounce) -> bool {
|
||||
// Specialism overlap
|
||||
if announce.specialisms & self.specialisms == 0 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Postal prefix
|
||||
if !announce.postal_prefix.starts_with(&self.postal_prefix)
|
||||
&& !self.postal_prefix.starts_with(&announce.postal_prefix)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
// Modality
|
||||
if self.modality != 0 && announce.modality & self.modality == 0 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Wait time
|
||||
if announce.earliest_days > self.max_wait_days {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Insurance
|
||||
if announce.insurance & self.insurance == 0 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Available slots
|
||||
announce.available_slots > 0
|
||||
}
|
||||
|
||||
/// Encode to CBOR bytes.
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, ServiceError> {
|
||||
encode_payload(self)
|
||||
}
|
||||
|
||||
/// Decode from CBOR bytes.
|
||||
pub fn from_bytes(data: &[u8]) -> Result<Self, ServiceError> {
|
||||
decode_payload(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Handler for the FAPP appointment-discovery service.
pub struct FappService {
    /// True when this node is a therapist and may announce slots.
    pub is_provider: bool,
    /// True when this node forwards FAPP traffic for others.
    pub is_relay: bool,
}
|
||||
|
||||
impl FappService {
|
||||
/// Create a new FAPP handler.
|
||||
pub fn new(is_provider: bool, is_relay: bool) -> Self {
|
||||
Self {
|
||||
is_provider,
|
||||
is_relay,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a relay-only handler.
|
||||
pub fn relay() -> Self {
|
||||
Self::new(false, true)
|
||||
}
|
||||
|
||||
/// Create a provider handler.
|
||||
pub fn provider() -> Self {
|
||||
Self::new(true, true)
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceHandler for FappService {
|
||||
fn service_id(&self) -> u32 {
|
||||
FAPP
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"FAPP"
|
||||
}
|
||||
|
||||
fn handle(
|
||||
&self,
|
||||
message: &ServiceMessage,
|
||||
context: &HandlerContext,
|
||||
) -> Result<ServiceAction, ServiceError> {
|
||||
match message.message_type {
|
||||
MessageType::Announce => {
|
||||
// Validate payload
|
||||
let _announce = SlotAnnounce::from_bytes(&message.payload)?;
|
||||
|
||||
// Store and forward if we're a relay
|
||||
if self.is_relay {
|
||||
Ok(ServiceAction::StoreAndForward)
|
||||
} else {
|
||||
Ok(ServiceAction::Store)
|
||||
}
|
||||
}
|
||||
|
||||
MessageType::Query => {
|
||||
// Parse query
|
||||
let query = SlotQuery::from_bytes(&message.payload)?;
|
||||
|
||||
// Find matches in store
|
||||
let matches: Vec<_> = context
|
||||
.store
|
||||
.by_service(FAPP)
|
||||
.into_iter()
|
||||
.filter(|stored| {
|
||||
if stored.message.message_type != MessageType::Announce {
|
||||
return false;
|
||||
}
|
||||
if let Ok(announce) = SlotAnnounce::from_bytes(&stored.message.payload) {
|
||||
query.matches(&announce)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
// If we have matches, we could respond (simplified for now)
|
||||
if !matches.is_empty() {
|
||||
// In a real impl, we'd aggregate and send response
|
||||
Ok(ServiceAction::Handled)
|
||||
} else if self.is_relay {
|
||||
Ok(ServiceAction::ForwardOnly)
|
||||
} else {
|
||||
Ok(ServiceAction::Handled)
|
||||
}
|
||||
}
|
||||
|
||||
MessageType::Reserve | MessageType::Confirm | MessageType::Cancel => {
|
||||
// E2E encrypted, just forward
|
||||
if self.is_relay {
|
||||
Ok(ServiceAction::ForwardOnly)
|
||||
} else {
|
||||
Ok(ServiceAction::Handled)
|
||||
}
|
||||
}
|
||||
|
||||
MessageType::Revoke => {
|
||||
// Remove from store
|
||||
Ok(ServiceAction::Handled)
|
||||
}
|
||||
|
||||
_ => Ok(ServiceAction::Drop),
|
||||
}
|
||||
}
|
||||
|
||||
fn validate(&self, message: &ServiceMessage) -> Result<(), ServiceError> {
|
||||
match message.message_type {
|
||||
MessageType::Announce => {
|
||||
SlotAnnounce::from_bytes(&message.payload)?;
|
||||
}
|
||||
MessageType::Query => {
|
||||
SlotQuery::from_bytes(&message.payload)?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn matches_query(&self, announce: &StoredMessage, query_msg: &ServiceMessage) -> bool {
|
||||
let Ok(announce_data) = SlotAnnounce::from_bytes(&announce.message.payload) else {
|
||||
return false;
|
||||
};
|
||||
let Ok(query) = SlotQuery::from_bytes(&query_msg.payload) else {
|
||||
return false;
|
||||
};
|
||||
query.matches(&announce_data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to create a FAPP announce message.
|
||||
pub fn create_announce(
|
||||
identity: &crate::ServiceIdentity,
|
||||
announce: &SlotAnnounce,
|
||||
sequence: u64,
|
||||
) -> Result<ServiceMessage, ServiceError> {
|
||||
let payload = announce.to_bytes()?;
|
||||
Ok(ServiceMessage::announce(identity, FAPP, payload, sequence))
|
||||
}
|
||||
|
||||
/// Helper to create a FAPP query message.
|
||||
pub fn create_query(
|
||||
identity: &crate::ServiceIdentity,
|
||||
query: &SlotQuery,
|
||||
) -> Result<ServiceMessage, ServiceError> {
|
||||
let payload = query.to_bytes()?;
|
||||
Ok(ServiceMessage::query(identity, FAPP, payload))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::identity::ServiceIdentity;

    #[test]
    fn slot_announce_roundtrip() {
        let original = SlotAnnounce::new(
            &[Specialism::CognitiveBehavioral, Specialism::TraumaFocused],
            Modality::VideoCall,
            "104",
        )
        .with_slots(3)
        .with_profile("https://therapists.de/dr-mueller");

        let encoded = original.to_bytes().unwrap();
        let restored = SlotAnnounce::from_bytes(&encoded).unwrap();

        // Specialism bitfield survives the round trip.
        assert!(restored.has_specialism(Specialism::CognitiveBehavioral));
        assert!(restored.has_specialism(Specialism::TraumaFocused));
        assert!(!restored.has_specialism(Specialism::Addiction));
        assert_eq!(restored.available_slots, 3);
        assert_eq!(
            restored.profile_url,
            Some("https://therapists.de/dr-mueller".into())
        );
    }

    #[test]
    fn query_matches_announce() {
        let announce = SlotAnnounce::new(
            &[Specialism::CognitiveBehavioral],
            Modality::InPerson,
            "104",
        )
        .with_slots(2);

        // Same specialism + postal area -> match.
        let good = SlotQuery::new(Specialism::CognitiveBehavioral, "104");
        assert!(good.matches(&announce));

        // Different specialism -> no match.
        let bad_spec = SlotQuery::new(Specialism::Addiction, "104");
        assert!(!bad_spec.matches(&announce));

        // Different postal area -> no match.
        let bad_loc = SlotQuery::new(Specialism::CognitiveBehavioral, "200");
        assert!(!bad_loc.matches(&announce));
    }

    #[test]
    fn create_message_helpers() {
        let identity = ServiceIdentity::generate();

        let announce = SlotAnnounce::new(&[Specialism::GeneralPsychotherapy], Modality::VideoCall, "10");
        let announce_msg = create_announce(&identity, &announce, 1).unwrap();
        assert_eq!(announce_msg.service_id, FAPP);
        assert_eq!(announce_msg.message_type, MessageType::Announce);

        let query = SlotQuery::new(Specialism::GeneralPsychotherapy, "10");
        let query_msg = create_query(&identity, &query).unwrap();
        assert_eq!(query_msg.service_id, FAPP);
        assert_eq!(query_msg.message_type, MessageType::Query);
    }

    #[test]
    fn fapp_handler_processes_announce() {
        use crate::capabilities;
        use crate::router::ServiceRouter;

        let mut router = ServiceRouter::new(capabilities::RELAY);
        router.register(Box::new(FappService::relay()));

        let identity = ServiceIdentity::generate();
        let announce = SlotAnnounce::new(&[Specialism::TraumaFocused], Modality::InPerson, "100");
        let msg = create_announce(&identity, &announce, 1).unwrap();

        let action = router.handle(msg.clone(), Some(identity.public_key())).unwrap();
        assert!(matches!(action, ServiceAction::StoreAndForward));

        // The relay must have cached the announce.
        assert_eq!(router.store().service_count(FAPP), 1);
    }
}
|
||||
489
crates/meshservice/src/services/housing.rs
Normal file
489
crates/meshservice/src/services/housing.rs
Normal file
@@ -0,0 +1,489 @@
|
||||
//! Housing Service — Decentralized room/apartment sharing.
|
||||
//!
|
||||
//! Demonstrates how a second service can be built on the mesh layer.
|
||||
//!
|
||||
//! ## Flow
|
||||
//!
|
||||
//! 1. Landlord announces available room (type, size, price, location).
|
||||
//! 2. Announcement floods through mesh.
|
||||
//! 3. Seeker queries for matching listings.
|
||||
//! 4. Relays respond with cached matches.
|
||||
//! 5. Seeker reserves viewing slot (E2E encrypted).
|
||||
//! 6. Landlord confirms/rejects.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::ServiceError;
|
||||
use crate::message::{MessageType, ServiceMessage};
|
||||
use crate::router::{HandlerContext, ServiceAction, ServiceHandler};
|
||||
use crate::service_ids::HOUSING;
|
||||
use crate::store::StoredMessage;
|
||||
use crate::wire::{decode_payload, encode_payload};
|
||||
|
||||
/// Listing type.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[repr(u8)]
|
||||
pub enum ListingType {
|
||||
Room = 0x01,
|
||||
SharedFlat = 0x02,
|
||||
Apartment = 0x03,
|
||||
House = 0x04,
|
||||
Studio = 0x05,
|
||||
Sublet = 0x06,
|
||||
}
|
||||
|
||||
impl TryFrom<u8> for ListingType {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
0x01 => Ok(Self::Room),
|
||||
0x02 => Ok(Self::SharedFlat),
|
||||
0x03 => Ok(Self::Apartment),
|
||||
0x04 => Ok(Self::House),
|
||||
0x05 => Ok(Self::Studio),
|
||||
0x06 => Ok(Self::Sublet),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Amenity flags; OR them together to build the bitfield.
pub mod amenities {
    pub const FURNISHED: u16 = 0x0001;
    pub const BALCONY: u16 = 0x0002;
    pub const PARKING: u16 = 0x0004;
    pub const PETS_ALLOWED: u16 = 0x0008;
    pub const WASHING_MACHINE: u16 = 0x0010;
    pub const DISHWASHER: u16 = 0x0020;
    pub const ELEVATOR: u16 = 0x0040;
    pub const GARDEN: u16 = 0x0080;
    pub const INTERNET: u16 = 0x0100;
    pub const HEATING_INCLUDED: u16 = 0x0200;
}
|
||||
|
||||
/// Room/listing announcement.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ListingAnnounce {
|
||||
/// Type of listing.
|
||||
pub listing_type: u8,
|
||||
/// Size in square meters.
|
||||
pub size_sqm: u16,
|
||||
/// Monthly rent in cents (EUR).
|
||||
pub rent_cents: u32,
|
||||
/// Postal prefix (3 digits).
|
||||
pub postal_prefix: String,
|
||||
/// Geohash for location (6 chars).
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub geohash: Option<String>,
|
||||
/// Number of rooms (0 for studio).
|
||||
pub rooms: u8,
|
||||
/// Available from (days from epoch).
|
||||
pub available_from_days: u16,
|
||||
/// Minimum rental period in months (0 = unlimited).
|
||||
pub min_months: u8,
|
||||
/// Maximum rental period in months (0 = unlimited).
|
||||
pub max_months: u8,
|
||||
/// Amenities bitfield.
|
||||
pub amenities: u16,
|
||||
/// Optional title.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub title: Option<String>,
|
||||
/// Optional external listing URL.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub listing_url: Option<String>,
|
||||
}
|
||||
|
||||
impl ListingAnnounce {
|
||||
/// Create a new listing.
|
||||
pub fn new(listing_type: ListingType, size_sqm: u16, rent_euros: u32, postal_prefix: &str) -> Self {
|
||||
Self {
|
||||
listing_type: listing_type as u8,
|
||||
size_sqm,
|
||||
rent_cents: rent_euros * 100,
|
||||
postal_prefix: postal_prefix.into(),
|
||||
geohash: None,
|
||||
rooms: 1,
|
||||
available_from_days: 0,
|
||||
min_months: 0,
|
||||
max_months: 0,
|
||||
amenities: 0,
|
||||
title: None,
|
||||
listing_url: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set rooms count.
|
||||
pub fn with_rooms(mut self, rooms: u8) -> Self {
|
||||
self.rooms = rooms;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set geohash.
|
||||
pub fn with_geohash(mut self, geohash: &str) -> Self {
|
||||
self.geohash = Some(geohash[..6.min(geohash.len())].into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set amenities.
|
||||
pub fn with_amenities(mut self, amenities: u16) -> Self {
|
||||
self.amenities = amenities;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set title.
|
||||
pub fn with_title(mut self, title: &str) -> Self {
|
||||
self.title = Some(title.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set minimum/maximum rental period.
|
||||
pub fn with_term(mut self, min_months: u8, max_months: u8) -> Self {
|
||||
self.min_months = min_months;
|
||||
self.max_months = max_months;
|
||||
self
|
||||
}
|
||||
|
||||
/// Check if has amenity.
|
||||
pub fn has_amenity(&self, amenity: u16) -> bool {
|
||||
self.amenities & amenity != 0
|
||||
}
|
||||
|
||||
/// Get rent in euros.
|
||||
pub fn rent_euros(&self) -> u32 {
|
||||
self.rent_cents / 100
|
||||
}
|
||||
|
||||
/// Encode to CBOR.
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, ServiceError> {
|
||||
encode_payload(self)
|
||||
}
|
||||
|
||||
/// Decode from CBOR.
|
||||
pub fn from_bytes(data: &[u8]) -> Result<Self, ServiceError> {
|
||||
decode_payload(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Housing query.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ListingQuery {
|
||||
/// Desired listing types (bitfield).
|
||||
pub listing_types: u8,
|
||||
/// Postal prefix.
|
||||
pub postal_prefix: String,
|
||||
/// Min size in sqm.
|
||||
pub min_size_sqm: u16,
|
||||
/// Max rent in cents.
|
||||
pub max_rent_cents: u32,
|
||||
/// Min rooms.
|
||||
pub min_rooms: u8,
|
||||
/// Required amenities (all must match).
|
||||
pub required_amenities: u16,
|
||||
/// Max move-in days.
|
||||
pub max_move_in_days: u16,
|
||||
}
|
||||
|
||||
impl ListingQuery {
|
||||
/// Create a simple query.
|
||||
pub fn new(postal_prefix: &str, max_rent_euros: u32) -> Self {
|
||||
Self {
|
||||
listing_types: 0xFF, // Any type
|
||||
postal_prefix: postal_prefix.into(),
|
||||
min_size_sqm: 0,
|
||||
max_rent_cents: max_rent_euros * 100,
|
||||
min_rooms: 0,
|
||||
required_amenities: 0,
|
||||
max_move_in_days: 365,
|
||||
}
|
||||
}
|
||||
|
||||
/// Filter by type.
|
||||
pub fn with_type(mut self, listing_type: ListingType) -> Self {
|
||||
self.listing_types = 1 << (listing_type as u8);
|
||||
self
|
||||
}
|
||||
|
||||
/// Require minimum size.
|
||||
pub fn with_min_size(mut self, sqm: u16) -> Self {
|
||||
self.min_size_sqm = sqm;
|
||||
self
|
||||
}
|
||||
|
||||
/// Require minimum rooms.
|
||||
pub fn with_min_rooms(mut self, rooms: u8) -> Self {
|
||||
self.min_rooms = rooms;
|
||||
self
|
||||
}
|
||||
|
||||
/// Require amenities.
|
||||
pub fn with_amenities(mut self, amenities: u16) -> Self {
|
||||
self.required_amenities = amenities;
|
||||
self
|
||||
}
|
||||
|
||||
/// Check if listing matches.
|
||||
pub fn matches(&self, listing: &ListingAnnounce) -> bool {
|
||||
// Type match
|
||||
if self.listing_types != 0xFF && (self.listing_types & (1 << listing.listing_type) == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Location
|
||||
if !listing.postal_prefix.starts_with(&self.postal_prefix)
|
||||
&& !self.postal_prefix.starts_with(&listing.postal_prefix)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
// Size
|
||||
if listing.size_sqm < self.min_size_sqm {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Rent
|
||||
if listing.rent_cents > self.max_rent_cents {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Rooms
|
||||
if listing.rooms < self.min_rooms {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Amenities (all required must be present)
|
||||
if listing.amenities & self.required_amenities != self.required_amenities {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Availability
|
||||
listing.available_from_days <= self.max_move_in_days
|
||||
}
|
||||
|
||||
/// Encode to CBOR.
|
||||
pub fn to_bytes(&self) -> Result<Vec<u8>, ServiceError> {
|
||||
encode_payload(self)
|
||||
}
|
||||
|
||||
/// Decode from CBOR.
|
||||
pub fn from_bytes(data: &[u8]) -> Result<Self, ServiceError> {
|
||||
decode_payload(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Handler for the housing (room/apartment sharing) service.
pub struct HousingService {
    /// True when this node may announce listings.
    pub is_provider: bool,
    /// True when this node forwards housing traffic for others.
    pub is_relay: bool,
}
|
||||
|
||||
impl HousingService {
|
||||
/// Create a new handler.
|
||||
pub fn new(is_provider: bool, is_relay: bool) -> Self {
|
||||
Self {
|
||||
is_provider,
|
||||
is_relay,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a relay-only handler.
|
||||
pub fn relay() -> Self {
|
||||
Self::new(false, true)
|
||||
}
|
||||
|
||||
/// Create a provider handler.
|
||||
pub fn provider() -> Self {
|
||||
Self::new(true, true)
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceHandler for HousingService {
|
||||
fn service_id(&self) -> u32 {
|
||||
HOUSING
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"Housing"
|
||||
}
|
||||
|
||||
fn handle(
|
||||
&self,
|
||||
message: &ServiceMessage,
|
||||
context: &HandlerContext,
|
||||
) -> Result<ServiceAction, ServiceError> {
|
||||
match message.message_type {
|
||||
MessageType::Announce => {
|
||||
let _listing = ListingAnnounce::from_bytes(&message.payload)?;
|
||||
|
||||
if self.is_relay {
|
||||
Ok(ServiceAction::StoreAndForward)
|
||||
} else {
|
||||
Ok(ServiceAction::Store)
|
||||
}
|
||||
}
|
||||
|
||||
MessageType::Query => {
|
||||
let query = ListingQuery::from_bytes(&message.payload)?;
|
||||
|
||||
let _matches: Vec<_> = context
|
||||
.store
|
||||
.by_service(HOUSING)
|
||||
.into_iter()
|
||||
.filter(|stored| {
|
||||
if stored.message.message_type != MessageType::Announce {
|
||||
return false;
|
||||
}
|
||||
if let Ok(listing) = ListingAnnounce::from_bytes(&stored.message.payload) {
|
||||
query.matches(&listing)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
if self.is_relay {
|
||||
Ok(ServiceAction::ForwardOnly)
|
||||
} else {
|
||||
Ok(ServiceAction::Handled)
|
||||
}
|
||||
}
|
||||
|
||||
MessageType::Reserve | MessageType::Confirm | MessageType::Cancel => {
|
||||
if self.is_relay {
|
||||
Ok(ServiceAction::ForwardOnly)
|
||||
} else {
|
||||
Ok(ServiceAction::Handled)
|
||||
}
|
||||
}
|
||||
|
||||
MessageType::Revoke => Ok(ServiceAction::Handled),
|
||||
|
||||
_ => Ok(ServiceAction::Drop),
|
||||
}
|
||||
}
|
||||
|
||||
fn validate(&self, message: &ServiceMessage) -> Result<(), ServiceError> {
|
||||
match message.message_type {
|
||||
MessageType::Announce => {
|
||||
ListingAnnounce::from_bytes(&message.payload)?;
|
||||
}
|
||||
MessageType::Query => {
|
||||
ListingQuery::from_bytes(&message.payload)?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn matches_query(&self, listing: &StoredMessage, query_msg: &ServiceMessage) -> bool {
|
||||
let Ok(listing_data) = ListingAnnounce::from_bytes(&listing.message.payload) else {
|
||||
return false;
|
||||
};
|
||||
let Ok(query) = ListingQuery::from_bytes(&query_msg.payload) else {
|
||||
return false;
|
||||
};
|
||||
query.matches(&listing_data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to create a housing announce.
|
||||
pub fn create_announce(
|
||||
identity: &crate::ServiceIdentity,
|
||||
listing: &ListingAnnounce,
|
||||
sequence: u64,
|
||||
) -> Result<ServiceMessage, ServiceError> {
|
||||
let payload = listing.to_bytes()?;
|
||||
Ok(ServiceMessage::announce(identity, HOUSING, payload, sequence))
|
||||
}
|
||||
|
||||
/// Helper to create a housing query.
|
||||
pub fn create_query(
|
||||
identity: &crate::ServiceIdentity,
|
||||
query: &ListingQuery,
|
||||
) -> Result<ServiceMessage, ServiceError> {
|
||||
let payload = query.to_bytes()?;
|
||||
Ok(ServiceMessage::query(identity, HOUSING, payload))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::identity::ServiceIdentity;

    #[test]
    fn listing_roundtrip() {
        let original = ListingAnnounce::new(ListingType::Apartment, 65, 850, "104")
            .with_rooms(2)
            .with_amenities(amenities::FURNISHED | amenities::BALCONY)
            .with_title("Cozy 2-room in Kreuzberg");

        let encoded = original.to_bytes().unwrap();
        let restored = ListingAnnounce::from_bytes(&encoded).unwrap();

        assert_eq!(restored.size_sqm, 65);
        assert_eq!(restored.rent_euros(), 850);
        assert_eq!(restored.rooms, 2);
        // Amenity flags survive the round trip.
        assert!(restored.has_amenity(amenities::FURNISHED));
        assert!(restored.has_amenity(amenities::BALCONY));
        assert!(!restored.has_amenity(amenities::PARKING));
    }

    #[test]
    fn query_matches() {
        let listing = ListingAnnounce::new(ListingType::Apartment, 50, 700, "104")
            .with_rooms(2)
            .with_amenities(amenities::FURNISHED);

        // Baseline: area and budget both fit.
        let ok = ListingQuery::new("104", 800);
        assert!(ok.matches(&listing));

        // Budget below the asking rent.
        let too_cheap = ListingQuery::new("104", 500);
        assert!(!too_cheap.matches(&listing));

        // Different postal area.
        let elsewhere = ListingQuery::new("200", 800);
        assert!(!elsewhere.matches(&listing));

        // Minimum size above the listing's.
        let too_big = ListingQuery::new("104", 800).with_min_size(60);
        assert!(!too_big.matches(&listing));

        // Required amenity the listing lacks.
        let needs_parking = ListingQuery::new("104", 800).with_amenities(amenities::PARKING);
        assert!(!needs_parking.matches(&listing));
    }

    #[test]
    fn create_message_helpers() {
        let identity = ServiceIdentity::generate();

        let listing = ListingAnnounce::new(ListingType::Room, 20, 400, "100");
        let announce_msg = create_announce(&identity, &listing, 1).unwrap();
        assert_eq!(announce_msg.service_id, HOUSING);
        assert_eq!(announce_msg.message_type, MessageType::Announce);

        let query = ListingQuery::new("100", 500);
        let query_msg = create_query(&identity, &query).unwrap();
        assert_eq!(query_msg.service_id, HOUSING);
        assert_eq!(query_msg.message_type, MessageType::Query);
    }

    #[test]
    fn housing_handler_processes_listing() {
        use crate::capabilities;
        use crate::router::ServiceRouter;

        let mut router = ServiceRouter::new(capabilities::RELAY);
        router.register(Box::new(HousingService::relay()));

        let identity = ServiceIdentity::generate();
        let listing = ListingAnnounce::new(ListingType::SharedFlat, 15, 350, "100");
        let msg = create_announce(&identity, &listing, 1).unwrap();

        let action = router.handle(msg, Some(identity.public_key())).unwrap();
        assert!(matches!(action, ServiceAction::StoreAndForward));
        assert_eq!(router.store().service_count(HOUSING), 1);
    }
}
|
||||
4
crates/meshservice/src/services/mod.rs
Normal file
4
crates/meshservice/src/services/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
//! Built-in service implementations.
|
||||
|
||||
pub mod fapp;
|
||||
pub mod housing;
|
||||
406
crates/meshservice/src/store.rs
Normal file
406
crates/meshservice/src/store.rs
Normal file
@@ -0,0 +1,406 @@
|
||||
//! In-memory message store with eviction policies.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::message::ServiceMessage;
|
||||
|
||||
/// Tunable limits for the message store.
#[derive(Debug, Clone)]
pub struct StoreConfig {
    /// Cap on messages held per service.
    pub max_per_service: usize,
    /// Cap on messages held per sender within one service.
    pub max_per_sender: usize,
    /// Cap on messages held overall.
    pub max_total: usize,
    /// Minimum seconds between prune passes.
    pub prune_interval_secs: u64,
}
|
||||
|
||||
impl Default for StoreConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_per_service: 10_000,
|
||||
max_per_sender: 100,
|
||||
max_total: 50_000,
|
||||
prune_interval_secs: 300,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A stored message with metadata.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct StoredMessage {
|
||||
pub message: ServiceMessage,
|
||||
/// Sender's public key (needed for verification).
|
||||
pub sender_public_key: [u8; 32],
|
||||
/// When we stored this message.
|
||||
pub stored_at: u64,
|
||||
}
|
||||
|
||||
/// Generic service message store.
|
||||
///
|
||||
/// Organized by service_id, then by sender_address, then by message_id.
|
||||
pub struct ServiceStore {
|
||||
config: StoreConfig,
|
||||
/// service_id -> sender_address -> message_id -> StoredMessage
|
||||
messages: HashMap<u32, HashMap<[u8; 16], HashMap<[u8; 16], StoredMessage>>>,
|
||||
/// Total message count.
|
||||
total_count: usize,
|
||||
/// Last prune timestamp.
|
||||
last_prune: u64,
|
||||
}
|
||||
|
||||
impl ServiceStore {
    /// Create a new store with default config.
    pub fn new() -> Self {
        Self::with_config(StoreConfig::default())
    }

    /// Create with custom config.
    pub fn with_config(config: StoreConfig) -> Self {
        Self {
            config,
            messages: HashMap::new(),
            total_count: 0,
            last_prune: 0,
        }
    }

    /// Store a message, returning true if it was new.
    ///
    /// Returns `true` both for a brand-new message and for an in-place
    /// update with a strictly higher sequence number; returns `false` for
    /// duplicates (same id, sequence not higher).
    ///
    /// NOTE(review): `config.max_total` is never enforced here (only the
    /// per-service and per-sender limits are) — confirm whether that is
    /// intentional.
    pub fn store(&mut self, message: ServiceMessage, sender_public_key: [u8; 32]) -> bool {
        // Prune if interval passed
        self.maybe_prune();

        let service_id = message.service_id;
        let sender_address = message.sender_address;
        let message_id = message.id;

        // Check per-service limit and evict if needed
        // NOTE(review): eviction runs even when this call turns out to be a
        // duplicate or an in-place update (which does not grow the store),
        // so a full store may evict one message unnecessarily.
        {
            let service_count: usize = self.messages
                .get(&service_id)
                .map(|s| s.values().map(|m| m.len()).sum())
                .unwrap_or(0);
            if service_count >= self.config.max_per_service {
                self.evict_oldest_in_service(service_id);
            }
        }

        // Check per-sender limit and evict if needed
        {
            let sender_count = self.messages
                .get(&service_id)
                .and_then(|s| s.get(&sender_address))
                .map(|m| m.len())
                .unwrap_or(0);
            if sender_count >= self.config.max_per_sender {
                self.evict_oldest_from_sender(service_id, sender_address);
            }
        }

        // Get or create maps
        let service_map = self.messages.entry(service_id).or_default();
        let sender_map = service_map.entry(sender_address).or_default();

        // Check for existing message
        // NOTE(review): despite the name, this flag is true only for brand-new
        // messages; updates reach the insert below with the flag false so
        // total_count is not double-counted.
        let is_new_or_update = if let Some(existing) = sender_map.get(&message_id) {
            // Existing: only update if higher sequence
            if message.sequence <= existing.message.sequence {
                return false;
            }
            // This is an update, not a new message
            false
        } else {
            // New message
            true
        };

        let stored_at = now();
        sender_map.insert(
            message_id,
            StoredMessage {
                message,
                sender_public_key,
                stored_at,
            },
        );

        if is_new_or_update {
            self.total_count += 1;
        }

        // Return true for both new messages and updates
        true
    }

    /// Get a message by service, sender, and ID.
    pub fn get(
        &self,
        service_id: u32,
        sender_address: &[u8; 16],
        message_id: &[u8; 16],
    ) -> Option<&StoredMessage> {
        self.messages
            .get(&service_id)?
            .get(sender_address)?
            .get(message_id)
    }

    /// Get all messages from a sender in a service.
    ///
    /// Order is unspecified (HashMap iteration order).
    pub fn by_sender(&self, service_id: u32, sender_address: &[u8; 16]) -> Vec<&StoredMessage> {
        self.messages
            .get(&service_id)
            .and_then(|s| s.get(sender_address))
            .map(|m| m.values().collect())
            .unwrap_or_default()
    }

    /// Get all messages in a service.
    ///
    /// Order is unspecified (HashMap iteration order).
    pub fn by_service(&self, service_id: u32) -> Vec<&StoredMessage> {
        self.messages
            .get(&service_id)
            .map(|s| s.values().flat_map(|m| m.values()).collect())
            .unwrap_or_default()
    }

    /// Query messages with a predicate.
    pub fn query<F>(&self, service_id: u32, predicate: F) -> Vec<&StoredMessage>
    where
        F: Fn(&StoredMessage) -> bool,
    {
        self.by_service(service_id)
            .into_iter()
            .filter(|m| predicate(m))
            .collect()
    }

    /// Remove a specific message.
    ///
    /// Returns the removed message, or `None` if it was not present.
    pub fn remove(
        &mut self,
        service_id: u32,
        sender_address: &[u8; 16],
        message_id: &[u8; 16],
    ) -> Option<StoredMessage> {
        let result = self
            .messages
            .get_mut(&service_id)?
            .get_mut(sender_address)?
            .remove(message_id);

        // Only decrement the count when something was actually removed.
        if result.is_some() {
            self.total_count = self.total_count.saturating_sub(1);
        }

        // NOTE(review): emptied sender/service maps are left in place here
        // (and in prune/evict below), so map entries accumulate over time.
        result
    }

    /// Remove all messages from a sender.
    ///
    /// Returns how many messages were removed.
    pub fn remove_sender(&mut self, service_id: u32, sender_address: &[u8; 16]) -> usize {
        let count = self
            .messages
            .get_mut(&service_id)
            .and_then(|s| s.remove(sender_address))
            .map(|m| m.len())
            .unwrap_or(0);

        self.total_count = self.total_count.saturating_sub(count);
        count
    }

    /// Prune expired messages.
    ///
    /// Returns the number of messages removed and records the prune time.
    pub fn prune_expired(&mut self) -> usize {
        let now = now();
        let mut removed = 0;

        for service_map in self.messages.values_mut() {
            for sender_map in service_map.values_mut() {
                // Collect ids first to avoid mutating while iterating.
                let expired: Vec<[u8; 16]> = sender_map
                    .iter()
                    .filter(|(_, m)| m.message.is_expired())
                    .map(|(id, _)| *id)
                    .collect();

                for id in expired {
                    sender_map.remove(&id);
                    removed += 1;
                }
            }
        }

        self.total_count = self.total_count.saturating_sub(removed);
        self.last_prune = now;
        removed
    }

    /// Get total message count.
    pub fn len(&self) -> usize {
        self.total_count
    }

    /// Check if empty.
    pub fn is_empty(&self) -> bool {
        self.total_count == 0
    }

    /// Get count by service.
    pub fn service_count(&self, service_id: u32) -> usize {
        self.messages
            .get(&service_id)
            .map(|s| s.values().map(|m| m.len()).sum())
            .unwrap_or(0)
    }

    /// Run prune if interval passed.
    fn maybe_prune(&mut self) {
        let now = now();
        if now.saturating_sub(self.last_prune) >= self.config.prune_interval_secs {
            self.prune_expired();
        }
    }

    /// Evict oldest message in a service.
    ///
    /// "Oldest" is by the message's own `timestamp` field, not `stored_at`.
    fn evict_oldest_in_service(&mut self, service_id: u32) {
        let Some(service_map) = self.messages.get_mut(&service_id) else {
            return;
        };

        // Linear scan over every message in the service to find the minimum
        // timestamp: (sender, id, timestamp).
        let mut oldest: Option<([u8; 16], [u8; 16], u64)> = None;

        for (sender, msgs) in service_map.iter() {
            for (id, stored) in msgs.iter() {
                match oldest {
                    Some((_, _, ts)) if stored.message.timestamp < ts => {
                        oldest = Some((*sender, *id, stored.message.timestamp));
                    }
                    None => {
                        oldest = Some((*sender, *id, stored.message.timestamp));
                    }
                    _ => {}
                }
            }
        }

        if let Some((sender, id, _)) = oldest {
            if let Some(sender_map) = service_map.get_mut(&sender) {
                sender_map.remove(&id);
                self.total_count = self.total_count.saturating_sub(1);
            }
        }
    }

    /// Evict oldest message from a sender.
    ///
    /// "Oldest" is by the message's own `timestamp` field, not `stored_at`.
    fn evict_oldest_from_sender(&mut self, service_id: u32, sender_address: [u8; 16]) {
        let Some(sender_map) = self
            .messages
            .get_mut(&service_id)
            .and_then(|s| s.get_mut(&sender_address))
        else {
            return;
        };

        let oldest = sender_map
            .iter()
            .min_by_key(|(_, m)| m.message.timestamp)
            .map(|(id, _)| *id);

        if let Some(id) = oldest {
            sender_map.remove(&id);
            self.total_count = self.total_count.saturating_sub(1);
        }
    }
}
|
||||
|
||||
impl Default for ServiceStore {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Current wall-clock time as whole seconds since the unix epoch.
/// Falls back to 0 if the system clock is before the epoch.
fn now() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{identity::ServiceIdentity, message::ServiceMessage, service_ids::FAPP};

    /// Helper: a signed announce on the FAPP service with sequence `seq`.
    fn make_message(id: &ServiceIdentity, seq: u64) -> ServiceMessage {
        ServiceMessage::announce(id, FAPP, b"test".to_vec(), seq)
    }

    #[test]
    fn store_and_retrieve() {
        let mut store = ServiceStore::new();
        let id = ServiceIdentity::generate();
        let msg = make_message(&id, 1);

        assert!(store.store(msg.clone(), id.public_key()));
        assert_eq!(store.len(), 1);

        let retrieved = store.get(FAPP, &id.address(), &msg.id);
        assert!(retrieved.is_some());
    }

    #[test]
    fn duplicate_rejected() {
        let mut store = ServiceStore::new();
        let id = ServiceIdentity::generate();
        let msg = make_message(&id, 1);

        assert!(store.store(msg.clone(), id.public_key()));
        assert!(!store.store(msg.clone(), id.public_key())); // Duplicate
        assert_eq!(store.len(), 1);
    }

    #[test]
    fn higher_sequence_updates() {
        let mut store = ServiceStore::new();
        let id = ServiceIdentity::generate();
        let msg1 = make_message(&id, 1);
        let mut msg2 = make_message(&id, 2);
        msg2.id = msg1.id; // Same ID

        store.store(msg1.clone(), id.public_key());
        assert!(store.store(msg2.clone(), id.public_key())); // Updates

        // The stored copy must now carry the higher sequence number.
        let retrieved = store.get(FAPP, &id.address(), &msg1.id).unwrap();
        assert_eq!(retrieved.message.sequence, 2);
    }

    #[test]
    fn query_by_sender() {
        let mut store = ServiceStore::new();
        let id1 = ServiceIdentity::generate();
        let id2 = ServiceIdentity::generate();

        store.store(make_message(&id1, 1), id1.public_key());
        store.store(make_message(&id1, 2), id1.public_key());
        store.store(make_message(&id2, 1), id2.public_key());

        let sender1_msgs = store.by_sender(FAPP, &id1.address());
        assert_eq!(sender1_msgs.len(), 2);

        let sender2_msgs = store.by_sender(FAPP, &id2.address());
        assert_eq!(sender2_msgs.len(), 1);
    }

    #[test]
    fn remove_sender() {
        let mut store = ServiceStore::new();
        let id = ServiceIdentity::generate();

        store.store(make_message(&id, 1), id.public_key());
        store.store(make_message(&id, 2), id.public_key());
        assert_eq!(store.len(), 2);

        let removed = store.remove_sender(FAPP, &id.address());
        assert_eq!(removed, 2);
        assert_eq!(store.len(), 0);
    }
}
|
||||
290
crates/meshservice/src/verification.rs
Normal file
290
crates/meshservice/src/verification.rs
Normal file
@@ -0,0 +1,290 @@
|
||||
//! Verification framework for building trust in decentralized services.
|
||||
//!
|
||||
//! Verification levels:
|
||||
//! - 0: None (bare announce)
|
||||
//! - 1: Self-asserted (profile URL, metadata)
|
||||
//! - 2: Endorsed by trusted peers
|
||||
//! - 3: Registry-verified (KBV for therapists, trade registry for craftsmen)
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::identity::ServiceIdentity;
|
||||
|
||||
/// Verification levels (higher = more trusted).
///
/// Carried on the wire as a raw `u8` (see `Verification::level`); convert
/// back with `VerificationLevel::from`, which clamps values >= 3 to
/// `RegistryVerified`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
#[repr(u8)]
pub enum VerificationLevel {
    /// No verification at all (bare announce).
    #[default]
    None = 0,
    /// Self-asserted claims (profile URL, metadata).
    SelfAsserted = 1,
    /// Endorsed by trusted peers.
    PeerEndorsed = 2,
    /// Verified against an external registry.
    RegistryVerified = 3,
}
|
||||
|
||||
impl From<u8> for VerificationLevel {
|
||||
fn from(value: u8) -> Self {
|
||||
match value {
|
||||
1 => VerificationLevel::SelfAsserted,
|
||||
2 => VerificationLevel::PeerEndorsed,
|
||||
3.. => VerificationLevel::RegistryVerified,
|
||||
_ => VerificationLevel::None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A verification attestation attached to a service message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Verification {
    /// Verification level.
    // Raw wire form of `VerificationLevel`; convert with `VerificationLevel::from`.
    pub level: u8,
    /// Verifier's mesh address.
    pub verifier_address: [u8; 16],
    /// What is being verified (e.g., "license", "identity").
    pub claim: String,
    /// Optional external reference (URL, registry ID).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reference: Option<String>,
    /// Signature over (level || sender_address || claim).
    // NOTE(review): timestamp/expires/reference are NOT covered by this
    // signature — see the note on `signable_bytes`.
    pub signature: Vec<u8>,
    /// Timestamp of verification.
    pub timestamp: u64,
    /// Optional expiry timestamp.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub expires: Option<u64>,
}
|
||||
|
||||
impl Verification {
    /// Create a new peer endorsement.
    ///
    /// Convenience wrapper for [`Verification::new`] at level
    /// `PeerEndorsed` with no external reference.
    pub fn peer_endorsement(
        verifier: &ServiceIdentity,
        subject_address: &[u8; 16],
        claim: impl Into<String>,
    ) -> Self {
        Self::new(
            verifier,
            VerificationLevel::PeerEndorsed,
            subject_address,
            claim,
            None,
        )
    }

    /// Create a registry verification.
    ///
    /// Convenience wrapper for [`Verification::new`] at level
    /// `RegistryVerified` carrying an external registry reference.
    pub fn registry(
        verifier: &ServiceIdentity,
        subject_address: &[u8; 16],
        claim: impl Into<String>,
        reference: impl Into<String>,
    ) -> Self {
        Self::new(
            verifier,
            VerificationLevel::RegistryVerified,
            subject_address,
            claim,
            Some(reference.into()),
        )
    }

    /// Create a new verification.
    ///
    /// Signs (level || subject_address || claim) with the verifier's key
    /// and timestamps the attestation with the local clock. `expires`
    /// starts as `None`; set it via [`Verification::with_expiry`].
    pub fn new(
        verifier: &ServiceIdentity,
        level: VerificationLevel,
        subject_address: &[u8; 16],
        claim: impl Into<String>,
        reference: Option<String>,
    ) -> Self {
        use std::time::{SystemTime, UNIX_EPOCH};

        let claim = claim.into();
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();

        let signable = Self::signable_bytes(level as u8, subject_address, &claim);
        let signature = verifier.sign(&signable).to_vec();

        Self {
            level: level as u8,
            verifier_address: verifier.address(),
            claim,
            reference,
            signature,
            timestamp,
            expires: None,
        }
    }

    /// Set expiry time.
    pub fn with_expiry(mut self, expires: u64) -> Self {
        self.expires = Some(expires);
        self
    }

    /// Create signable bytes.
    ///
    /// NOTE(review): the signature covers ONLY (level || subject_address ||
    /// claim). `timestamp`, `expires`, and `reference` are unauthenticated
    /// and could be altered in transit — confirm the expiry check in
    /// `verify` is not relied on for security.
    fn signable_bytes(level: u8, subject_address: &[u8; 16], claim: &str) -> Vec<u8> {
        let mut buf = Vec::with_capacity(17 + claim.len());
        buf.push(level);
        buf.extend_from_slice(subject_address);
        buf.extend_from_slice(claim.as_bytes());
        buf
    }

    /// Verify this attestation.
    ///
    /// Checks, in order: the verifier address matches the supplied key,
    /// the attestation has not expired (local clock), and the signature
    /// over (level || subject_address || claim) is valid.
    pub fn verify(&self, verifier_public_key: &[u8; 32], subject_address: &[u8; 16]) -> bool {
        use crate::identity::compute_address;

        // Verify verifier address matches key
        if compute_address(verifier_public_key) != self.verifier_address {
            return false;
        }

        // Check expiry
        if let Some(expires) = self.expires {
            use std::time::{SystemTime, UNIX_EPOCH};
            let now = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs();
            if now > expires {
                return false;
            }
        }

        // Signature must be exactly 64 bytes to be checkable.
        let sig: [u8; 64] = match self.signature.as_slice().try_into() {
            Ok(s) => s,
            Err(_) => return false,
        };

        let signable = Self::signable_bytes(self.level, subject_address, &self.claim);
        ServiceIdentity::verify(verifier_public_key, &signable, &sig)
    }
}
|
||||
|
||||
/// Set of known trusted verifiers (registries, endorsers).
#[derive(Default)]
pub struct TrustedVerifiers {
    /// Known public keys with their trust level.
    // Linear scan on lookup; fine for the small sets this is meant to hold.
    verifiers: Vec<TrustedVerifier>,
}
|
||||
|
||||
/// A trusted verifier entry.
#[derive(Clone)]
pub struct TrustedVerifier {
    /// The verifier's signing public key.
    pub public_key: [u8; 32],
    /// Mesh address derived from `public_key` via `compute_address`.
    pub address: [u8; 16],
    /// Human-readable name for this verifier.
    pub name: String,
    /// Highest verification level this verifier is allowed to grant.
    pub max_level: VerificationLevel,
}
|
||||
|
||||
impl TrustedVerifiers {
|
||||
/// Create empty set.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Add a trusted verifier.
|
||||
pub fn add(
|
||||
&mut self,
|
||||
public_key: [u8; 32],
|
||||
name: impl Into<String>,
|
||||
max_level: VerificationLevel,
|
||||
) {
|
||||
use crate::identity::compute_address;
|
||||
|
||||
self.verifiers.push(TrustedVerifier {
|
||||
public_key,
|
||||
address: compute_address(&public_key),
|
||||
name: name.into(),
|
||||
max_level,
|
||||
});
|
||||
}
|
||||
|
||||
/// Find a verifier by address.
|
||||
pub fn find_by_address(&self, address: &[u8; 16]) -> Option<&TrustedVerifier> {
|
||||
self.verifiers.iter().find(|v| &v.address == address)
|
||||
}
|
||||
|
||||
/// Verify a verification against known trusted verifiers.
|
||||
/// Returns the effective level (or 0 if not trusted).
|
||||
pub fn check(&self, verification: &Verification, subject_address: &[u8; 16]) -> u8 {
|
||||
let Some(verifier) = self.find_by_address(&verification.verifier_address) else {
|
||||
return 0;
|
||||
};
|
||||
|
||||
// Level cannot exceed verifier's max
|
||||
let claimed_level = verification.level.min(verifier.max_level as u8);
|
||||
|
||||
// Actually verify the signature
|
||||
if verification.verify(&verifier.public_key, subject_address) {
|
||||
claimed_level
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the highest trusted verification level from a list.
|
||||
pub fn highest_level(
|
||||
&self,
|
||||
verifications: &[Verification],
|
||||
subject_address: &[u8; 16],
|
||||
) -> VerificationLevel {
|
||||
verifications
|
||||
.iter()
|
||||
.map(|v| self.check(v, subject_address))
|
||||
.max()
|
||||
.map(VerificationLevel::from)
|
||||
.unwrap_or(VerificationLevel::None)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn peer_endorsement_roundtrip() {
        let verifier = ServiceIdentity::generate();
        let subject_address = [1u8; 16];

        let v = Verification::peer_endorsement(&verifier, &subject_address, "good_actor");
        assert!(v.verify(&verifier.public_key(), &subject_address));
        assert_eq!(v.level, VerificationLevel::PeerEndorsed as u8);
    }

    #[test]
    fn trusted_verifiers_check() {
        let verifier = ServiceIdentity::generate();
        let subject_address = [2u8; 16];

        let mut trusted = TrustedVerifiers::new();
        trusted.add(verifier.public_key(), "Test Registry", VerificationLevel::RegistryVerified);

        let v = Verification::registry(&verifier, &subject_address, "licensed", "REG-12345");
        let level = trusted.check(&v, &subject_address);
        assert_eq!(level, VerificationLevel::RegistryVerified as u8);
    }

    #[test]
    fn untrusted_verifier_returns_zero() {
        let verifier = ServiceIdentity::generate();
        let subject_address = [3u8; 16];

        let trusted = TrustedVerifiers::new(); // Empty

        // Signature is valid, but the verifier is unknown to the set.
        let v = Verification::registry(&verifier, &subject_address, "licensed", "REG-999");
        let level = trusted.check(&v, &subject_address);
        assert_eq!(level, 0);
    }

    #[test]
    fn expired_verification_fails() {
        let verifier = ServiceIdentity::generate();
        let subject_address = [4u8; 16];

        let v = Verification::peer_endorsement(&verifier, &subject_address, "trusted")
            .with_expiry(1); // Expired in 1970

        assert!(!v.verify(&verifier.public_key(), &subject_address));
    }
}
|
||||
259
crates/meshservice/src/wire.rs
Normal file
259
crates/meshservice/src/wire.rs
Normal file
@@ -0,0 +1,259 @@
|
||||
//! Wire format for service messages.
|
||||
//!
|
||||
//! Binary format for efficient network transmission.
|
||||
//! Uses CBOR for payload encoding.
|
||||
|
||||
use std::io::{Cursor, Read};
|
||||
|
||||
use crate::error::ServiceError;
|
||||
use crate::message::{MessageType, ServiceMessage};
|
||||
|
||||
/// Wire message header (fixed 64 bytes).
|
||||
///
|
||||
/// ```text
|
||||
/// ┌─────────────────────────────────────────────────────┐
|
||||
/// │ 0-3 │ service_id (u32 LE) │
|
||||
/// │ 4 │ message_type (u8) │
|
||||
/// │ 5 │ version (u8) │
|
||||
/// │ 6-7 │ flags (u16 LE, reserved) │
|
||||
/// │ 8-23 │ message_id (16 bytes) │
|
||||
/// │ 24-39 │ sender_address (16 bytes) │
|
||||
/// │ 40-47 │ sequence (u64 LE) │
|
||||
/// │ 48-49 │ ttl_hours (u16 LE) │
|
||||
/// │ 50-57 │ timestamp (u64 LE) │
|
||||
/// │ 58 │ hop_count (u8) │
|
||||
/// │ 59 │ max_hops (u8) │
|
||||
/// │ 60-63 │ payload_len (u32 LE) │
|
||||
/// └─────────────────────────────────────────────────────┘
|
||||
/// Followed by:
|
||||
/// │ 64-... │ signature (64 bytes) │
|
||||
/// │ signature_end-.. │ payload (payload_len bytes) │
|
||||
/// │ payload_end-.. │ verifications (CBOR, optional) │
|
||||
/// ```
|
||||
|
||||
/// Fixed wire header length in bytes (offsets 0-63 in the diagram above).
const HEADER_SIZE: usize = 64;
/// Raw signature field length in bytes.
const SIGNATURE_SIZE: usize = 64;
|
||||
|
||||
/// Encode a ServiceMessage to bytes.
|
||||
pub fn encode(msg: &ServiceMessage) -> Result<Vec<u8>, ServiceError> {
|
||||
let verifications_bytes = if msg.verifications.is_empty() {
|
||||
Vec::new()
|
||||
} else {
|
||||
let mut buf = Vec::new();
|
||||
ciborium::into_writer(&msg.verifications, &mut buf)?;
|
||||
buf
|
||||
};
|
||||
|
||||
let total_size = HEADER_SIZE + SIGNATURE_SIZE + msg.payload.len() + verifications_bytes.len();
|
||||
let mut buf = Vec::with_capacity(total_size);
|
||||
|
||||
// Header
|
||||
buf.extend_from_slice(&msg.service_id.to_le_bytes()); // 0-3
|
||||
buf.push(msg.message_type as u8); // 4
|
||||
buf.push(msg.version); // 5
|
||||
buf.extend_from_slice(&0u16.to_le_bytes()); // 6-7 flags (reserved)
|
||||
buf.extend_from_slice(&msg.id); // 8-23
|
||||
buf.extend_from_slice(&msg.sender_address); // 24-39
|
||||
buf.extend_from_slice(&msg.sequence.to_le_bytes()); // 40-47
|
||||
buf.extend_from_slice(&msg.ttl_hours.to_le_bytes()); // 48-49
|
||||
buf.extend_from_slice(&msg.timestamp.to_le_bytes()); // 50-57
|
||||
buf.push(msg.hop_count); // 58
|
||||
buf.push(msg.max_hops); // 59
|
||||
buf.extend_from_slice(&(msg.payload.len() as u32).to_le_bytes()); // 60-63
|
||||
|
||||
// Signature
|
||||
if msg.signature.len() != SIGNATURE_SIZE {
|
||||
return Err(ServiceError::InvalidFormat(format!(
|
||||
"signature must be {} bytes, got {}",
|
||||
SIGNATURE_SIZE,
|
||||
msg.signature.len()
|
||||
)));
|
||||
}
|
||||
buf.extend_from_slice(&msg.signature);
|
||||
|
||||
// Payload
|
||||
buf.extend_from_slice(&msg.payload);
|
||||
|
||||
// Verifications (optional)
|
||||
buf.extend_from_slice(&verifications_bytes);
|
||||
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
/// Decode bytes to a ServiceMessage.
///
/// Parses the fixed 64-byte header, the 64-byte signature, a
/// `payload_len`-byte payload, and finally any trailing bytes as a
/// CBOR-encoded verification list.
///
/// # Errors
/// `ServiceError::InvalidFormat` on truncated input or an unknown message
/// type; `ServiceError::Serialization` if the trailing verifications are
/// not valid CBOR.
pub fn decode(data: &[u8]) -> Result<ServiceMessage, ServiceError> {
    // Must at least contain the fixed header plus the signature.
    if data.len() < HEADER_SIZE + SIGNATURE_SIZE {
        return Err(ServiceError::InvalidFormat("message too short".into()));
    }

    let mut cursor = Cursor::new(data);
    // Scratch buffers sized for the fixed-width header fields.
    let mut buf4 = [0u8; 4];
    let mut buf8 = [0u8; 8];
    let mut buf16 = [0u8; 16];
    let mut buf2 = [0u8; 2];

    // Read header
    cursor.read_exact(&mut buf4)?;
    let service_id = u32::from_le_bytes(buf4);

    let mut type_byte = [0u8; 1];
    cursor.read_exact(&mut type_byte)?;
    let message_type = MessageType::try_from(type_byte[0])
        .map_err(|_| ServiceError::InvalidFormat("invalid message type".into()))?;

    cursor.read_exact(&mut type_byte)?;
    let version = type_byte[0];

    cursor.read_exact(&mut buf2)?; // flags (ignored)

    cursor.read_exact(&mut buf16)?;
    let id = buf16;

    cursor.read_exact(&mut buf16)?;
    let sender_address = buf16;

    cursor.read_exact(&mut buf8)?;
    let sequence = u64::from_le_bytes(buf8);

    cursor.read_exact(&mut buf2)?;
    let ttl_hours = u16::from_le_bytes(buf2);

    cursor.read_exact(&mut buf8)?;
    let timestamp = u64::from_le_bytes(buf8);

    cursor.read_exact(&mut type_byte)?;
    let hop_count = type_byte[0];

    cursor.read_exact(&mut type_byte)?;
    let max_hops = type_byte[0];

    cursor.read_exact(&mut buf4)?;
    let payload_len = u32::from_le_bytes(buf4) as usize;

    // Read signature
    let mut signature = vec![0u8; SIGNATURE_SIZE];
    cursor.read_exact(&mut signature)?;

    // Read payload
    // Explicit length check gives a clearer error than read_exact's EOF.
    if data.len() < HEADER_SIZE + SIGNATURE_SIZE + payload_len {
        return Err(ServiceError::InvalidFormat("payload truncated".into()));
    }
    let mut payload = vec![0u8; payload_len];
    cursor.read_exact(&mut payload)?;

    // Read verifications (remaining bytes)
    let verifications = if cursor.position() < data.len() as u64 {
        let mut remaining = Vec::new();
        cursor.read_to_end(&mut remaining)?;
        if remaining.is_empty() {
            Vec::new()
        } else {
            ciborium::from_reader(&remaining[..])
                .map_err(|e| ServiceError::Serialization(e.to_string()))?
        }
    } else {
        Vec::new()
    };

    Ok(ServiceMessage {
        service_id,
        message_type,
        version,
        id,
        sender_address,
        payload,
        signature,
        verifications,
        sequence,
        ttl_hours,
        timestamp,
        hop_count,
        max_hops,
    })
}
|
||||
|
||||
// Implement std::io::Error conversion for Read trait
// (lets `?` propagate cursor read failures as format errors in decode()).
impl From<std::io::Error> for ServiceError {
    fn from(e: std::io::Error) -> Self {
        ServiceError::InvalidFormat(e.to_string())
    }
}
|
||||
|
||||
/// Encode a payload struct to CBOR.
///
/// # Errors
/// Propagates the ciborium serialization error via `ServiceError`.
pub fn encode_payload<T: serde::Serialize>(payload: &T) -> Result<Vec<u8>, ServiceError> {
    let mut buf = Vec::new();
    ciborium::into_writer(payload, &mut buf)?;
    Ok(buf)
}
|
||||
|
||||
/// Decode a payload from CBOR.
///
/// # Errors
/// Returns `ServiceError::Serialization` when the bytes are not valid CBOR
/// for `T`.
pub fn decode_payload<T: serde::de::DeserializeOwned>(data: &[u8]) -> Result<T, ServiceError> {
    ciborium::from_reader(data).map_err(|e| ServiceError::Serialization(e.to_string()))
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::identity::ServiceIdentity;
    use crate::service_ids::FAPP;
    use crate::verification::Verification;

    #[test]
    fn roundtrip_simple() {
        let id = ServiceIdentity::generate();
        let msg = ServiceMessage::announce(&id, FAPP, b"hello world".to_vec(), 42);

        let encoded = encode(&msg).unwrap();
        let decoded = decode(&encoded).unwrap();

        assert_eq!(decoded.service_id, FAPP);
        assert_eq!(decoded.message_type, MessageType::Announce);
        assert_eq!(decoded.sequence, 42);
        assert_eq!(decoded.payload, b"hello world");
        assert_eq!(decoded.signature, msg.signature);
    }

    #[test]
    fn roundtrip_with_verifications() {
        let id = ServiceIdentity::generate();
        let verifier = ServiceIdentity::generate();

        let mut msg = ServiceMessage::announce(&id, FAPP, b"payload".to_vec(), 1);
        msg.add_verification(Verification::peer_endorsement(
            &verifier,
            &id.address(),
            "trusted",
        ));

        // The trailing CBOR verification block must survive the roundtrip.
        let encoded = encode(&msg).unwrap();
        let decoded = decode(&encoded).unwrap();

        assert_eq!(decoded.verifications.len(), 1);
        assert_eq!(decoded.verifications[0].claim, "trusted");
    }

    #[test]
    fn payload_codec() {
        #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq)]
        struct TestPayload {
            name: String,
            value: i32,
        }

        let payload = TestPayload {
            name: "test".into(),
            value: 123,
        };

        let encoded = encode_payload(&payload).unwrap();
        let decoded: TestPayload = decode_payload(&encoded).unwrap();

        assert_eq!(payload, decoded);
    }

    #[test]
    fn truncated_rejected() {
        // Shorter than header + signature must be rejected, not panic.
        let result = decode(&[0u8; 10]);
        assert!(matches!(result, Err(ServiceError::InvalidFormat(_))));
    }
}
|
||||
@@ -1,58 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-client"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "CLI client for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
|
||||
# Serialisation + RPC
|
||||
capnp = { workspace = true }
|
||||
capnp-rpc = { workspace = true }
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE PAKE
|
||||
opaque-ke = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
anyhow = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Crypto — for fingerprint verification in fetch-key subcommand
|
||||
sha2 = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
quinn = { workspace = true }
|
||||
quinn-proto = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
|
||||
# Logging
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
# CLI
|
||||
clap = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
dashmap = { workspace = true }
|
||||
assert_cmd = "2"
|
||||
tempfile = "3"
|
||||
portpicker = "0.1"
|
||||
rand = "0.8"
|
||||
hex = "0.4"
|
||||
@@ -1,13 +0,0 @@
|
||||
/// Render a byte slice as lowercase hex, two characters per byte.
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
    use std::fmt::Write;

    let data = bytes.as_ref();
    let mut out = String::with_capacity(data.len() * 2);
    for byte in data {
        // Writing into a String is infallible.
        let _ = write!(out, "{byte:02x}");
    }
    out
}
|
||||
|
||||
/// Parse a hex string (two digits per byte, upper- or lowercase) into bytes.
///
/// # Errors
/// Returns `"odd-length hex string"` when the input has an odd number of
/// bytes, or `"invalid hex character"` for any non-hex-digit byte.
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
    // BUGFIX: the original sliced the string with `&s[i..i + 2]`, which
    // panics when byte index i+2 falls inside a multi-byte UTF-8 character
    // (e.g. decode("a€")). It also accepted oddities like a leading '+'
    // because it went through `u8::from_str_radix`. Operating on raw bytes
    // rejects all non-hex-digit input without panicking.
    let bytes = s.as_bytes();
    if bytes.len() % 2 != 0 {
        return Err("odd-length hex string");
    }
    bytes
        .chunks_exact(2)
        .map(|pair| {
            let hi = hex_nibble(pair[0])?;
            let lo = hex_nibble(pair[1])?;
            Ok((hi << 4) | lo)
        })
        .collect()
}

/// Convert one ASCII hex digit to its 0-15 value.
fn hex_nibble(b: u8) -> Result<u8, &'static str> {
    // Bytes >= 0x80 map to non-digit chars and fail `to_digit`.
    (b as char)
        .to_digit(16)
        .map(|d| d as u8)
        .ok_or("invalid hex character")
}
|
||||
@@ -1,81 +0,0 @@
|
||||
//! Retry with exponential backoff for transient RPC failures.
|
||||
|
||||
use std::future::Future;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::Rng;
|
||||
use tracing::warn;
|
||||
|
||||
/// Default maximum number of retry attempts (including the first try).
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// Default base delay in milliseconds for exponential backoff.
// Doubles per attempt, plus random jitter — see `retry_async`.
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;
|
||||
|
||||
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
|
||||
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
|
||||
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
|
||||
pub async fn retry_async<F, Fut, T, E, P>(
|
||||
op: F,
|
||||
max_retries: u32,
|
||||
base_delay_ms: u64,
|
||||
is_retriable: P,
|
||||
) -> Result<T, E>
|
||||
where
|
||||
F: Fn() -> Fut,
|
||||
Fut: Future<Output = Result<T, E>>,
|
||||
P: Fn(&E) -> bool,
|
||||
{
|
||||
let mut last_err = None;
|
||||
for attempt in 0..max_retries {
|
||||
match op().await {
|
||||
Ok(t) => return Ok(t),
|
||||
Err(e) => {
|
||||
last_err = Some(e);
|
||||
let err = last_err.as_ref().unwrap();
|
||||
if !is_retriable(err) || attempt + 1 >= max_retries {
|
||||
break;
|
||||
}
|
||||
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
|
||||
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
|
||||
let total_ms = delay_ms + jitter_ms;
|
||||
warn!(
|
||||
attempt = attempt + 1,
|
||||
max_retries,
|
||||
delay_ms = total_ms,
|
||||
"RPC failed, retrying after backoff"
|
||||
);
|
||||
tokio::time::sleep(Duration::from_millis(total_ms)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(last_err.expect("retry_async: last_err set when we break after Err"))
|
||||
}
|
||||
|
||||
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
|
||||
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
|
||||
/// When in doubt, returns `true` (retry).
|
||||
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
|
||||
let s = format!("{:#}", err);
|
||||
let s_lower = s.to_lowercase();
|
||||
// Do not retry: auth / permission
|
||||
if s_lower.contains("unauthorized")
|
||||
|| s_lower.contains("auth failed")
|
||||
|| s_lower.contains("access denied")
|
||||
|| s_lower.contains("401")
|
||||
|| s_lower.contains("forbidden")
|
||||
|| s_lower.contains("403")
|
||||
|| s_lower.contains("token")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Do not retry: bad request / invalid params
|
||||
if s_lower.contains("bad request")
|
||||
|| s_lower.contains("400")
|
||||
|| s_lower.contains("invalid param")
|
||||
|| s_lower.contains("fingerprint mismatch")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Retry: network, timeout, connection, server error, or anything else
|
||||
true
|
||||
}
|
||||
@@ -1,367 +0,0 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use quinn::{ClientConfig, Endpoint};
|
||||
use quinn_proto::crypto::rustls::QuicClientConfig;
|
||||
use rustls::pki_types::CertificateDer;
|
||||
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
|
||||
use quicnprotochat_core::HybridPublicKey;
|
||||
use quicnprotochat_proto::node_capnp::{auth, node_service};
|
||||
|
||||
use crate::AUTH_CONTEXT;
|
||||
|
||||
use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES};
|
||||
|
||||
/// Establish a QUIC/TLS connection and return a `NodeService` client.
///
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
///
/// # Errors
/// Fails if `server` is not `host:port`, the cert file can't be read or
/// parsed, the TLS config is invalid, or the QUIC handshake/stream fails.
pub async fn connect_node(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
) -> anyhow::Result<node_service::Client> {
    let addr: SocketAddr = server
        .parse()
        .with_context(|| format!("server must be host:port, got {server}"))?;

    // Trust exactly the provided certificate (self-signed in dev setups).
    let cert_bytes = std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
    let mut roots = RootCertStore::empty();
    roots
        .add(CertificateDer::from(cert_bytes))
        .context("add root cert")?;

    let mut tls = RustlsClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    // ALPN must agree with what the server advertises, or the handshake fails.
    tls.alpn_protocols = vec![b"capnp".to_vec()];

    let crypto = QuicClientConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;

    // Bind an ephemeral local UDP port for the client side of the connection.
    let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
    let mut endpoint = Endpoint::client(bind_addr)?;
    endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));

    let connection = endpoint
        .connect(addr, server_name)
        .context("quic connect init")?
        .await
        .context("quic connect failed")?;

    // A single bidirectional stream carries the whole capnp RPC session.
    let (send, recv) = connection.open_bi().await.context("open bi stream")?;

    let network = twoparty::VatNetwork::new(
        recv.compat(),
        send.compat_write(),
        Side::Client,
        Default::default(),
    );

    let mut rpc_system = RpcSystem::new(Box::new(network), None);
    let client: node_service::Client = rpc_system.bootstrap(Side::Server);

    // Drive the RPC state machine in the background; spawn_local because the
    // future is !Send (hence the LocalSet requirement above).
    tokio::task::spawn_local(rpc_system);

    Ok(client)
}
|
||||
|
||||
/// Fill a capnp `auth` struct from the process-global auth context.
///
/// # Errors
/// Fails if `init_auth` was never called, i.e. `AUTH_CONTEXT` is unset.
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
    let ctx = AUTH_CONTEXT.get().ok_or_else(|| {
        anyhow::anyhow!("init_auth must be called with a non-empty token before RPCs")
    })?;
    auth.set_version(ctx.version);
    auth.set_access_token(&ctx.access_token);
    auth.set_device_id(&ctx.device_id);
    Ok(())
}
|
||||
|
||||
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
///
/// # Errors
/// Fails on RPC/transport errors, a malformed response, or when the
/// server-echoed fingerprint differs from the local SHA-256 of `package`.
pub async fn upload_key_package(
    client: &node_service::Client,
    identity_key: &[u8],
    package: &[u8],
) -> anyhow::Result<()> {
    let mut req = client.upload_key_package_request();
    {
        // Scope the builder so its borrow of `req` ends before `send()`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        p.set_package(package);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }

    let resp = req
        .send()
        .promise
        .await
        .context("upload_key_package RPC failed")?;

    let server_fp = resp
        .get()
        .context("upload_key_package: bad response")?
        .get_fingerprint()
        .context("upload_key_package: missing fingerprint")?
        .to_vec();

    // Compare against a locally computed hash so a corrupted or tampered
    // upload is detected instead of silently accepted.
    let local_fp = super::state::sha256(package);
    anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
    Ok(())
}
|
||||
|
||||
/// Fetch a KeyPackage for `identity_key` from the AS.
///
/// # Errors
/// Fails on RPC/transport errors or when the response lacks the package field.
pub async fn fetch_key_package(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Vec<u8>> {
    let mut req = client.fetch_key_package_request();
    {
        // Scope the builder so its borrow of `req` ends before `send()`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }

    let resp = req
        .send()
        .promise
        .await
        .context("fetch_key_package RPC failed")?;

    let pkg = resp
        .get()
        .context("fetch_key_package: bad response")?
        .get_package()
        .context("fetch_key_package: missing package field")?
        .to_vec();

    Ok(pkg)
}
|
||||
|
||||
/// Enqueue an opaque payload to the DS for `recipient_key`.
/// Returns the per-inbox sequence number assigned by the server.
/// Retries on transient failures with exponential backoff.
pub async fn enqueue(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<u64> {
    // Own the inputs so the retry closure can rebuild the request each attempt.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            // Per-attempt clones: the closure must produce a fresh, self-contained future.
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    // Empty channel id selects the default inbox channel.
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch and drain all payloads for `recipient_key`.
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_all(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so the retry closure can rebuild the request each attempt.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            // Per-attempt clones: the closure must produce a fresh, self-contained future.
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    // Empty channel id selects the default inbox channel.
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch: bad response")?
                    .get_payloads()
                    .context("fetch: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp message into
                // owned Vecs so the response buffer can be dropped.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Long-poll for payloads with optional timeout (ms).
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_wait(
    client: &node_service::Client,
    recipient_key: &[u8],
    timeout_ms: u64,
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Own the inputs so the retry closure can rebuild the request each attempt.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            // Per-attempt clones: the closure must produce a fresh, self-contained future.
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let timeout_ms = timeout_ms;
            async move {
                let mut req = client.fetch_wait_request();
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    // Server holds the request open up to this many milliseconds
                    // when the inbox is empty.
                    p.set_timeout_ms(timeout_ms);
                    // Empty channel id selects the default inbox channel.
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch_wait RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch_wait: bad response")?
                    .get_payloads()
                    .context("fetch_wait: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp message into
                // owned Vecs so the response buffer can be dropped.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch_wait: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
///
/// # Errors
/// Fails on RPC/transport errors or when the auth context is unset.
pub async fn upload_hybrid_key(
    client: &node_service::Client,
    identity_key: &[u8],
    hybrid_pk: &HybridPublicKey,
) -> anyhow::Result<()> {
    let mut req = client.upload_hybrid_key_request();
    {
        // Scope the builder so its borrow of `req` ends before `send()`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        p.set_hybrid_public_key(&hybrid_pk.to_bytes());
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }
    // Response carries no payload we need; success of the promise is enough.
    req.send()
        .promise
        .await
        .context("upload_hybrid_key RPC failed")?;
    Ok(())
}
|
||||
|
||||
/// Fetch a peer's hybrid public key from the server.
///
/// Returns `None` if the peer has not uploaded a hybrid key (the server
/// signals this with an empty key field).
///
/// # Errors
/// Fails on RPC/transport errors, a malformed response, or when the
/// returned bytes do not parse as a `HybridPublicKey`.
pub async fn fetch_hybrid_key(
    client: &node_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Option<HybridPublicKey>> {
    let mut req = client.fetch_hybrid_key_request();
    {
        // Scope the builder so its borrow of `req` ends before `send()`.
        let mut p = req.get();
        p.set_identity_key(identity_key);
        let mut auth = p.reborrow().init_auth();
        set_auth(&mut auth)?;
    }

    let resp = req
        .send()
        .promise
        .await
        .context("fetch_hybrid_key RPC failed")?;

    let pk_bytes = resp
        .get()
        .context("fetch_hybrid_key: bad response")?
        .get_hybrid_public_key()
        .context("fetch_hybrid_key: missing field")?
        .to_vec();

    // Empty bytes is the "no key registered" sentinel, not an error.
    if pk_bytes.is_empty() {
        return Ok(None);
    }

    let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
    Ok(Some(pk))
}
|
||||
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
pub fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&quicnprotochat_core::HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
quicnprotochat_core::hybrid_decrypt(kp, payload).map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
/// Return the current Unix timestamp in milliseconds.
///
/// Yields 0 when the system clock reads earlier than the Unix epoch
/// (the zero `Duration` default), matching the original's `unwrap_or_default`.
pub fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};

    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}
|
||||
@@ -1,56 +0,0 @@
|
||||
//! quicnprotochat CLI client library.
|
||||
//!
|
||||
//! # KeyPackage expiry and refresh
|
||||
//!
|
||||
//! KeyPackages are single-use (consumed when someone fetches them for an invite) and the server
|
||||
//! may enforce a TTL (e.g. 24 hours). To stay invitable, run `quicnprotochat refresh-keypackage`
|
||||
//! periodically (e.g. before the server TTL) or after your KeyPackage was consumed:
|
||||
//!
|
||||
//! ```bash
|
||||
//! quicnprotochat refresh-keypackage --state quicnprotochat-state.bin --server 127.0.0.1:7000
|
||||
//! ```
|
||||
//!
|
||||
//! Use the same `--access-token` (or `QUICNPROTOCHAT_ACCESS_TOKEN`) as for other authenticated
|
||||
//! commands. See the [running-the-client](https://docs.quicnprotochat.dev/getting-started/running-the-client)
|
||||
//! docs for details.
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
pub mod client;
|
||||
|
||||
pub use client::commands::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
|
||||
cmd_health_json, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register,
|
||||
cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami,
|
||||
receive_pending_plaintexts, whoami_json,
|
||||
};
|
||||
|
||||
pub use client::rpc::{connect_node, enqueue, fetch_wait};
|
||||
|
||||
// Global auth context initialized once per process.
// `OnceLock` means the first successful `init_auth` wins; later calls are no-ops.
pub(crate) static AUTH_CONTEXT: OnceLock<ClientAuth> = OnceLock::new();
|
||||
|
||||
/// Per-process authentication material attached to every RPC via `set_auth`.
#[derive(Clone, Debug)]
pub struct ClientAuth {
    // Auth wire-format version (set to 1 by `from_parts`).
    pub(crate) version: u16,
    // Bearer/OPAQUE session token bytes; may be empty for unauthenticated RPCs.
    pub(crate) access_token: Vec<u8>,
    // Optional device identifier bytes; empty when none was supplied.
    pub(crate) device_id: Vec<u8>,
}
|
||||
|
||||
impl ClientAuth {
|
||||
/// Build a client auth context from optional token and device id.
|
||||
pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
|
||||
let token = access_token.into_bytes();
|
||||
let device = device_id.unwrap_or_default().into_bytes();
|
||||
Self {
|
||||
version: 1,
|
||||
access_token: token,
|
||||
device_id: device,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Initialize the global auth context; subsequent calls are ignored.
pub fn init_auth(ctx: ClientAuth) {
    // `OnceLock::set` returns Err when already initialized — deliberately
    // discarded so repeated initialization (e.g. in tests) is a harmless no-op.
    let _ = AUTH_CONTEXT.set(ctx);
}
|
||||
@@ -1,519 +0,0 @@
|
||||
//! quicnprotochat CLI client.
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
|
||||
cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register, cmd_register_state,
|
||||
cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami, init_auth, ClientAuth,
|
||||
};
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Top-level CLI arguments; all flags are `global` so they may appear
/// before or after the subcommand.
#[derive(Debug, Parser)]
#[command(name = "quicnprotochat", about = "quicnprotochat CLI client", version)]
struct Args {
    /// Path to the server's TLS certificate (self-signed by default).
    #[arg(
        long,
        global = true,
        default_value = "data/server-cert.der",
        env = "QUICNPROTOCHAT_CA_CERT"
    )]
    ca_cert: PathBuf,

    /// Expected TLS server name (must match the certificate SAN).
    #[arg(
        long,
        global = true,
        default_value = "localhost",
        env = "QUICNPROTOCHAT_SERVER_NAME"
    )]
    server_name: String,

    /// Bearer token or OPAQUE session token for authenticated requests.
    /// Not required for register-user and login commands.
    #[arg(
        long,
        global = true,
        env = "QUICNPROTOCHAT_ACCESS_TOKEN",
        default_value = ""
    )]
    access_token: String,

    /// Optional device identifier (UUID bytes encoded as hex or raw string).
    #[arg(long, global = true, env = "QUICNPROTOCHAT_DEVICE_ID")]
    device_id: Option<String>,

    /// Password to encrypt/decrypt client state files (QPCE format).
    /// If set, state files are encrypted at rest with Argon2id + ChaCha20Poly1305.
    #[arg(long, global = true, env = "QUICNPROTOCHAT_STATE_PASSWORD")]
    state_password: Option<String>,

    /// Subcommand to execute.
    #[command(subcommand)]
    command: Command,
}
||||
|
||||
/// All CLI subcommands.
#[derive(Debug, Subcommand)]
enum Command {
    /// Register a new user via OPAQUE (password never leaves the client).
    RegisterUser {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Username for the new account.
        #[arg(long)]
        username: String,
        /// Password (will be used in OPAQUE PAKE; server never sees it).
        #[arg(long)]
        password: String,
    },

    /// Log in via OPAQUE and receive a session token.
    Login {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Username of the account to log in.
        #[arg(long)]
        username: String,
        /// Account password (used in OPAQUE PAKE; server never sees it).
        #[arg(long)]
        password: String,
        /// Hex-encoded Ed25519 identity key (64 hex chars). Optional if --state is provided.
        #[arg(long)]
        identity_key: Option<String>,
        /// State file to derive the identity key (requires same password if encrypted).
        #[arg(long)]
        state: Option<PathBuf>,
        /// Password for the encrypted state file (if any).
        #[arg(long)]
        state_password: Option<String>,
    },

    /// Show local identity key, fingerprint, group status, and hybrid key status.
    Whoami {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
    },

    /// Check server connectivity and print status.
    Health {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },

    /// Check if a peer has registered a hybrid key (non-consuming lookup).
    CheckKey {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,

        /// Peer's Ed25519 identity public key (64 hex chars = 32 bytes).
        identity_key: String,
    },

    /// Send a Ping to the server and print the round-trip time.
    Ping {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },

    /// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
    Register {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },

    /// Fetch a peer's KeyPackage from the Authentication Service.
    FetchKey {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,

        /// Target peer's Ed25519 identity public key (64 hex chars = 32 bytes).
        identity_key: String,
    },

    /// Run a two-party MLS demo (creator + joiner) against live AS and DS.
    DemoGroup {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },

    /// Upload the persistent identity's KeyPackage to the AS (uses state file).
    RegisterState {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,

        /// Authentication Service address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },

    /// Refresh the KeyPackage on the server (existing state only).
    /// Run periodically (e.g. before server TTL ~24h) or after your KeyPackage was consumed so others can invite you.
    RefreshKeypackage {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,

        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },

    /// Create a persistent group and save state to disk.
    CreateGroup {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,

        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,

        /// Group identifier (arbitrary bytes, typically a human-readable name).
        #[arg(long)]
        group_id: String,
    },

    /// Invite a peer into the group and deliver a Welcome via DS.
    Invite {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Peer identity public key (64 hex chars = 32 bytes).
        #[arg(long)]
        peer_key: String,
    },

    /// Join a group by fetching the Welcome from the DS.
    Join {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
    },

    /// Send an application message via the DS.
    Send {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Recipient identity key (hex, 32 bytes -> 64 chars). Omit when using --all.
        #[arg(long)]
        peer_key: Option<String>,
        /// Send to all other group members (N-way groups).
        #[arg(long)]
        all: bool,
        /// Plaintext message to send.
        #[arg(long)]
        msg: String,
    },

    /// Receive and decrypt all pending messages from the DS.
    Recv {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,

        /// Wait for up to this many milliseconds if no messages are queued.
        #[arg(long, default_value_t = 0)]
        wait_ms: u64,

        /// Continuously long-poll for messages.
        #[arg(long)]
        stream: bool,
    },

    /// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
    /// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
    Chat {
        /// State file path (identity + MLS state).
        #[arg(
            long,
            default_value = "quicnprotochat-state.bin",
            env = "QUICNPROTOCHAT_STATE"
        )]
        state: PathBuf,
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
        server: String,
        /// Peer identity key (hex, 64 chars). Omit in a two-person group to use the only other member.
        #[arg(long)]
        peer_key: Option<String>,
        /// How often to poll for incoming messages (milliseconds).
        #[arg(long, default_value_t = 500)]
        poll_interval_ms: u64,
    },
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialize auth context once for all RPCs (empty token OK for register-user/login).
|
||||
let auth_ctx = ClientAuth::from_parts(args.access_token.clone(), args.device_id.clone());
|
||||
init_auth(auth_ctx);
|
||||
|
||||
let state_pw = args.state_password.as_deref();
|
||||
|
||||
match args.command {
|
||||
Command::RegisterUser {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
None,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Login {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
identity_key,
|
||||
state,
|
||||
state_password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
identity_key.as_deref(),
|
||||
state.as_deref(),
|
||||
state_password.as_deref(),
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Whoami { state } => cmd_whoami(&state, state_pw),
|
||||
Command::Health { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_health(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::CheckKey {
|
||||
server,
|
||||
identity_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_check_key(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&identity_key,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Ping { server } => cmd_ping(&server, &args.ca_cert, &args.server_name).await,
|
||||
Command::Register { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::FetchKey {
|
||||
server,
|
||||
identity_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_fetch_key(
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&identity_key,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::DemoGroup { server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_demo_group(&server, &args.ca_cert, &args.server_name))
|
||||
.await
|
||||
}
|
||||
Command::RegisterState { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::RefreshKeypackage { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_refresh_keypackage(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::CreateGroup {
|
||||
state,
|
||||
server,
|
||||
group_id,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_create_group(&state, &server, &group_id, state_pw))
|
||||
.await
|
||||
}
|
||||
Command::Invite {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&peer_key,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Join { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_join(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Send {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
all,
|
||||
msg,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
peer_key.as_deref(),
|
||||
all,
|
||||
&msg,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Recv {
|
||||
state,
|
||||
server,
|
||||
wait_ms,
|
||||
stream,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_recv(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
wait_ms,
|
||||
stream,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Chat {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
poll_interval_ms,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_chat(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
peer_key.as_deref(),
|
||||
state_pw,
|
||||
poll_interval_ms,
|
||||
))
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,597 +0,0 @@
|
||||
// cargo_bin! only works for current package's binary; we spawn quicnprotochat-server from another package.
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::{path::PathBuf, process::Command, time::Duration};
|
||||
|
||||
use assert_cmd::cargo::cargo_bin;
|
||||
use portpicker::pick_unused_port;
|
||||
use rand::RngCore;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::sleep;
|
||||
use hex;
|
||||
|
||||
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
|
||||
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
fn ensure_rustls_provider() {
    // `install_default` errs if a provider was already installed; that is fine —
    // any installed provider satisfies rustls, so the result is discarded.
    let _ = rustls::crypto::ring::default_provider().install_default();
}
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_register_state,
|
||||
cmd_register_user, cmd_send, connect_node, enqueue, fetch_wait, init_auth,
|
||||
receive_pending_plaintexts, ClientAuth,
|
||||
};
|
||||
use quicnprotochat_core::IdentityKeypair;
|
||||
|
||||
/// Hex-encode `bytes` as a lowercase string (two chars per byte).
///
/// Writes into one preallocated buffer instead of allocating an intermediate
/// `String` per byte via `format!`.
fn hex_encode(bytes: &[u8]) -> String {
    use std::fmt::Write;

    let mut out = String::with_capacity(bytes.len() * 2);
    for b in bytes {
        // Writing into a String is infallible; the Result is discarded.
        let _ = write!(out, "{b:02x}");
    }
    out
}
|
||||
|
||||
/// Minimal deserialization view of the on-disk client state used by the test;
/// only the fields the test needs are declared.
#[derive(serde::Deserialize)]
struct StoredStateCompat {
    // 32-byte seed — presumably the Ed25519 identity keypair is re-derived
    // from this; TODO confirm against the client state module.
    identity_seed: [u8; 32],
    #[allow(dead_code)]
    group: Option<Vec<u8>>,
}
|
||||
|
||||
/// Poll the server with `cmd_ping` until it answers or we give up.
///
/// Tries up to 30 times, 200 ms apart (~6 s total). Runs each ping on a
/// `LocalSet` because capnp-rpc futures are `!Send`.
///
/// # Errors
/// Fails if the server never responds within the polling window.
async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) -> anyhow::Result<()> {
    let local = tokio::task::LocalSet::new();
    for _ in 0..30 {
        if local
            .run_until(cmd_ping(server, ca_cert, server_name))
            .await
            .is_ok()
        {
            return Ok(());
        }
        sleep(Duration::from_millis(200)).await;
    }
    anyhow::bail!("server health never became ready")
}
|
||||
|
||||
/// Creator and joiner register; creator creates group and invites joiner; joiner joins;
|
||||
/// creator sends a message; assert joiner's mailbox receives it.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
// Spawn server binary.
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
// Ensure we always terminate the child.
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let child_guard = ChildGuard(child);
|
||||
let _ = child_guard;
|
||||
|
||||
// Wait for server to be healthy and certs to be generated.
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
|
||||
// Set client auth context.
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
|
||||
let creator_state = base.join("creator.bin");
|
||||
let joiner_state = base.join("joiner.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&joiner_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(&creator_state, &server, "test-group", None))
|
||||
.await?;
|
||||
|
||||
let joiner_bytes = std::fs::read(&joiner_state)?;
|
||||
let joiner_state_compat: StoredStateCompat = bincode::deserialize(&joiner_bytes)?;
|
||||
let joiner_identity = IdentityKeypair::from_seed(joiner_state_compat.identity_seed);
|
||||
let joiner_pk_hex = hex_encode(&joiner_identity.public_key_bytes());
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&joiner_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(&joiner_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
Some(&joiner_pk_hex),
|
||||
false,
|
||||
"hello",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(async {
|
||||
let client = connect_node(&server, &ca_cert, "localhost").await?;
|
||||
let payloads = fetch_wait(&client, &joiner_identity.public_key_bytes(), 1000).await?;
|
||||
anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to joiner");
|
||||
Ok::<(), anyhow::Error>(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Three-party group: A creates group, invites B then C; B and C join; A sends, B and C receive;
|
||||
/// B sends, A and C receive.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_three_party_group_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let _child_guard = ChildGuard(child);
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
|
||||
let creator_state = base.join("creator.bin");
|
||||
let b_state = base.join("b.bin");
|
||||
let c_state = base.join("c.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let b_bytes = std::fs::read(&b_state)?;
|
||||
let b_compat: StoredStateCompat = bincode::deserialize(&b_bytes)?;
|
||||
let b_pk_hex = hex_encode(&IdentityKeypair::from_seed(b_compat.identity_seed).public_key_bytes());
|
||||
|
||||
let c_bytes = std::fs::read(&c_state)?;
|
||||
let c_compat: StoredStateCompat = bincode::deserialize(&c_bytes)?;
|
||||
let c_pk_hex = hex_encode(&IdentityKeypair::from_seed(c_compat.identity_seed).public_key_bytes());
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(&creator_state, &server, "test-group", None))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&b_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&c_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(&b_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_join(&c_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
true,
|
||||
"hello",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
sleep(Duration::from_millis(150)).await;
|
||||
|
||||
let b_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
let c_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
b_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||
"B did not receive 'hello', got {:?}",
|
||||
b_plaintexts
|
||||
);
|
||||
anyhow::ensure!(
|
||||
c_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||
"C did not receive 'hello', got {:?}",
|
||||
c_plaintexts
|
||||
);
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
true,
|
||||
"hi",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let a_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
let c_plaintexts2 = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
a_plaintexts.iter().any(|p| p.as_slice() == b"hi"),
|
||||
"A did not receive 'hi', got {:?}",
|
||||
a_plaintexts
|
||||
);
|
||||
anyhow::ensure!(
|
||||
c_plaintexts2.iter().any(|p| p.as_slice() == b"hi"),
|
||||
"C did not receive 'hi', got {:?}",
|
||||
c_plaintexts2
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Login should refuse if the presented identity key does not match the registered key.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_login_rejects_mismatched_identity() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
// Spawn server binary.
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let child_guard = ChildGuard(child);
|
||||
let _ = child_guard;
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
let state_path = base.join("user.bin");
|
||||
|
||||
// Register and persist state (includes identity key binding).
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state_path,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Register the user with the bound identity so login can enforce mismatches.
|
||||
let state_bytes = std::fs::read(&state_path)?;
|
||||
let stored_state: StoredStateCompat = bincode::deserialize(&state_bytes)?;
|
||||
let identity_hex = hex::encode(
|
||||
IdentityKeypair::from_seed(stored_state.identity_seed).public_key_bytes(),
|
||||
);
|
||||
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"user1",
|
||||
"pass",
|
||||
Some(&identity_hex),
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Craft an unrelated identity key and attempt login with it.
|
||||
let mut bogus_identity = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut bogus_identity);
|
||||
let bogus_hex = hex::encode(bogus_identity);
|
||||
|
||||
let result = local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"user1",
|
||||
"pass",
|
||||
Some(&bogus_hex),
|
||||
None,
|
||||
None,
|
||||
))
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(_) => anyhow::bail!("login unexpectedly succeeded with mismatched identity"),
|
||||
Err(e) => {
|
||||
// Show the full error chain so we can match the server's E016 response.
|
||||
let msg = format!("{e:#}");
|
||||
anyhow::ensure!(
|
||||
msg.contains("identity") || msg.contains("E016"),
|
||||
"login failed but not for identity mismatch: {msg}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sealed Sender: enqueue with valid token (no identity binding) succeeds; recipient can fetch.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_sealed_sender_enqueue_then_fetch() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.arg("--sealed-sender")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let _child_guard = ChildGuard(child);
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
let state_path = base.join("recipient.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state_path,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let state_bytes = std::fs::read(&state_path)?;
|
||||
let stored: StoredStateCompat = bincode::deserialize(&state_bytes)?;
|
||||
let recipient_key = IdentityKeypair::from_seed(stored.identity_seed).public_key_bytes();
|
||||
let identity_hex = hex_encode(&recipient_key);
|
||||
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"recipient",
|
||||
"pass",
|
||||
Some(&identity_hex),
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"recipient",
|
||||
"pass",
|
||||
Some(&identity_hex),
|
||||
None,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let client = local.run_until(connect_node(&server, &ca_cert, "localhost")).await?;
|
||||
local
|
||||
.run_until(enqueue(&client, &recipient_key, b"sealed-payload"))
|
||||
.await?;
|
||||
|
||||
let payloads = local
|
||||
.run_until(fetch_wait(&client, &recipient_key, 500))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
payloads.len() == 1 && payloads[0].1.as_slice() == b"sealed-payload",
|
||||
"expected one payload 'sealed-payload', got {:?}",
|
||||
payloads
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Crypto primitives, MLS state machine, and hybrid post-quantum KEM for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
# Crypto — classical
|
||||
x25519-dalek = { workspace = true }
|
||||
ed25519-dalek = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hkdf = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Crypto — post-quantum hybrid KEM (M7)
|
||||
ml-kem = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE password-authenticated key exchange
|
||||
opaque-ke = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
|
||||
# Crypto — MLS (M2)
|
||||
openmls = { workspace = true }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
openmls_traits = { workspace = true }
|
||||
tls_codec = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
# Serialisation
|
||||
capnp = { workspace = true }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
|
||||
# Async runtime
|
||||
tokio = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
thiserror = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true }
|
||||
@@ -1,258 +0,0 @@
|
||||
//! Rich application-layer message format for MLS application payloads.
|
||||
//!
|
||||
//! The server sees only opaque ciphertext; structure lives in this client-defined
|
||||
//! plaintext schema. All messages use: version byte (1) + message_type byte + type-specific payload.
|
||||
//!
|
||||
//! # Message ID
|
||||
//!
|
||||
//! `message_id` is assigned by the sender (16 random bytes) and included in the
|
||||
//! serialized payload for Chat (and implied for Reply/Reaction/ReadReceipt via ref_msg_id).
|
||||
//! Recipients can store message_ids to reference them in replies or reactions.
|
||||
|
||||
use crate::error::CoreError;
|
||||
use rand::RngCore;
|
||||
|
||||
/// Current schema version.
|
||||
pub const VERSION: u8 = 1;
|
||||
|
||||
/// Message type discriminant (one byte).
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub enum MessageType {
|
||||
Chat = 0x01,
|
||||
Reply = 0x02,
|
||||
Reaction = 0x03,
|
||||
ReadReceipt = 0x04,
|
||||
Typing = 0x05,
|
||||
}
|
||||
|
||||
impl MessageType {
|
||||
fn from_byte(b: u8) -> Option<Self> {
|
||||
match b {
|
||||
0x01 => Some(MessageType::Chat),
|
||||
0x02 => Some(MessageType::Reply),
|
||||
0x03 => Some(MessageType::Reaction),
|
||||
0x04 => Some(MessageType::ReadReceipt),
|
||||
0x05 => Some(MessageType::Typing),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parsed application message (one of the rich types).
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum AppMessage {
|
||||
/// Plain chat: body (UTF-8). message_id is included so recipients can store and reference it.
|
||||
Chat {
|
||||
message_id: [u8; 16],
|
||||
body: Vec<u8>,
|
||||
},
|
||||
Reply {
|
||||
ref_msg_id: [u8; 16],
|
||||
body: Vec<u8>,
|
||||
},
|
||||
Reaction {
|
||||
ref_msg_id: [u8; 16],
|
||||
emoji: Vec<u8>,
|
||||
},
|
||||
ReadReceipt {
|
||||
msg_id: [u8; 16],
|
||||
},
|
||||
Typing {
|
||||
/// 0 = stopped, 1 = typing
|
||||
active: u8,
|
||||
},
|
||||
}
|
||||
|
||||
/// Generate a new 16-byte message ID (e.g. for Chat/Reply so recipients can reference it).
|
||||
pub fn generate_message_id() -> [u8; 16] {
|
||||
let mut id = [0u8; 16];
|
||||
rand::rngs::OsRng.fill_bytes(&mut id);
|
||||
id
|
||||
}
|
||||
|
||||
// ── Layout (minimal, no Cap'n Proto) ─────────────────────────────────────────
|
||||
//
|
||||
// All messages: [version: 1][type: 1][payload...]
|
||||
//
|
||||
// Chat: [msg_id: 16][body_len: 2 BE][body]
|
||||
// Reply: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||
// Reaction: [ref_msg_id: 16][emoji_len: 1][emoji]
|
||||
// ReadReceipt: [msg_id: 16]
|
||||
// Typing: [active: 1] 0 = stopped, 1 = typing
|
||||
|
||||
/// Serialize a rich message into the application payload format.
|
||||
pub fn serialize(msg_type: MessageType, payload: &[u8]) -> Vec<u8> {
|
||||
let mut out = Vec::with_capacity(2 + payload.len());
|
||||
out.push(VERSION);
|
||||
out.push(msg_type as u8);
|
||||
out.extend_from_slice(payload);
|
||||
out
|
||||
}
|
||||
|
||||
/// Serialize a Chat message (generates message_id internally; pass None to generate, or Some(id) when replying with a known id).
|
||||
pub fn serialize_chat(body: &[u8], message_id: Option<[u8; 16]>) -> Vec<u8> {
|
||||
let id = message_id.unwrap_or_else(generate_message_id);
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
serialize(MessageType::Chat, &payload)
|
||||
}
|
||||
|
||||
/// Serialize a Reply message.
|
||||
pub fn serialize_reply(ref_msg_id: [u8; 16], body: &[u8]) -> Vec<u8> {
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
serialize(MessageType::Reply, &payload)
|
||||
}
|
||||
|
||||
/// Serialize a Reaction message.
|
||||
pub fn serialize_reaction(ref_msg_id: [u8; 16], emoji: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if emoji.len() > 255 {
|
||||
return Err(CoreError::AppMessage("emoji length > 255".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 1 + emoji.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.push(emoji.len() as u8);
|
||||
payload.extend_from_slice(emoji);
|
||||
Ok(serialize(MessageType::Reaction, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a ReadReceipt message.
|
||||
pub fn serialize_read_receipt(msg_id: [u8; 16]) -> Vec<u8> {
|
||||
serialize(MessageType::ReadReceipt, &msg_id)
|
||||
}
|
||||
|
||||
/// Serialize a Typing message (active: 0 = stopped, 1 = typing).
|
||||
pub fn serialize_typing(active: u8) -> Vec<u8> {
|
||||
let payload = [active];
|
||||
serialize(MessageType::Typing, &payload)
|
||||
}
|
||||
|
||||
/// Parse bytes into (MessageType, AppMessage). Fails if version/type unknown or payload too short.
|
||||
pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
|
||||
if bytes.len() < 2 {
|
||||
return Err(CoreError::AppMessage("payload too short (need version + type)".into()));
|
||||
}
|
||||
let version = bytes[0];
|
||||
if version != VERSION {
|
||||
return Err(CoreError::AppMessage(format!("unsupported version {version}").into()));
|
||||
}
|
||||
let msg_type = MessageType::from_byte(bytes[1])
|
||||
.ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1]).into()))?;
|
||||
let payload = &bytes[2..];
|
||||
|
||||
let app = match msg_type {
|
||||
MessageType::Chat => parse_chat(payload)?,
|
||||
MessageType::Reply => parse_reply(payload)?,
|
||||
MessageType::Reaction => parse_reaction(payload)?,
|
||||
MessageType::ReadReceipt => parse_read_receipt(payload)?,
|
||||
MessageType::Typing => parse_typing(payload)?,
|
||||
};
|
||||
Ok((msg_type, app))
|
||||
}
|
||||
|
||||
fn parse_chat(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Chat payload too short".into()));
|
||||
}
|
||||
let mut message_id = [0u8; 16];
|
||||
message_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Chat body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Chat { message_id, body })
|
||||
}
|
||||
|
||||
fn parse_reply(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Reply payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Reply body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Reply { ref_msg_id, body })
|
||||
}
|
||||
|
||||
fn parse_reaction(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 1 {
|
||||
return Err(CoreError::AppMessage("Reaction payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let emoji_len = payload[16] as usize;
|
||||
if payload.len() < 17 + emoji_len {
|
||||
return Err(CoreError::AppMessage("Reaction emoji length exceeds payload".into()));
|
||||
}
|
||||
let emoji = payload[17..17 + emoji_len].to_vec();
|
||||
Ok(AppMessage::Reaction { ref_msg_id, emoji })
|
||||
}
|
||||
|
||||
fn parse_read_receipt(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 {
|
||||
return Err(CoreError::AppMessage("ReadReceipt payload too short".into()));
|
||||
}
|
||||
let mut msg_id = [0u8; 16];
|
||||
msg_id.copy_from_slice(&payload[..16]);
|
||||
Ok(AppMessage::ReadReceipt { msg_id })
|
||||
}
|
||||
|
||||
fn parse_typing(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.is_empty() {
|
||||
return Err(CoreError::AppMessage("Typing payload empty".into()));
|
||||
}
|
||||
Ok(AppMessage::Typing { active: payload[0] })
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn roundtrip_chat() {
|
||||
let body = b"hello";
|
||||
let encoded = serialize_chat(body, None);
|
||||
let (t, msg) = parse(&encoded).unwrap();
|
||||
assert_eq!(t, MessageType::Chat);
|
||||
match &msg {
|
||||
AppMessage::Chat { message_id: _, body: b } => assert_eq!(b.as_slice(), body),
|
||||
_ => panic!("expected Chat"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn roundtrip_reply() {
|
||||
let ref_id = [1u8; 16];
|
||||
let body = b"reply text";
|
||||
let encoded = serialize_reply(ref_id, body);
|
||||
let (t, msg) = parse(&encoded).unwrap();
|
||||
assert_eq!(t, MessageType::Reply);
|
||||
match &msg {
|
||||
AppMessage::Reply { ref_msg_id, body: b } => {
|
||||
assert_eq!(ref_msg_id, &ref_id);
|
||||
assert_eq!(b.as_slice(), body);
|
||||
}
|
||||
_ => panic!("expected Reply"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn roundtrip_typing() {
|
||||
let encoded = serialize_typing(1);
|
||||
let (t, msg) = parse(&encoded).unwrap();
|
||||
assert_eq!(t, MessageType::Typing);
|
||||
match &msg {
|
||||
AppMessage::Typing { active } => assert_eq!(*active, 1),
|
||||
_ => panic!("expected Typing"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,515 +0,0 @@
|
||||
//! MLS group state machine.
|
||||
//!
|
||||
//! # Design
|
||||
//!
|
||||
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus the per-client
|
||||
//! [`StoreCrypto`] backend. The backend is **persistent** — it holds the
|
||||
//! in-memory key store that maps init-key references to HPKE private keys.
|
||||
//! openmls's `new_from_welcome` reads those private keys from the key store to
|
||||
//! decrypt the Welcome, so the same backend instance must be used from
|
||||
//! `generate_key_package` through `join_group`.
|
||||
//!
|
||||
//! # Wire format
|
||||
//!
|
||||
//! All MLS messages are serialised/deserialised using TLS presentation language
|
||||
//! encoding (`tls_codec`). The resulting byte vectors are what the transport
|
||||
//! layer (and the Delivery Service) sees.
|
||||
//!
|
||||
//! # MLS ciphersuite
|
||||
//!
|
||||
//! `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519` — same as M2.
|
||||
//!
|
||||
//! # Ratchet tree
|
||||
//!
|
||||
//! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded
|
||||
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
|
||||
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
|
||||
|
||||
use std::{path::Path, sync::Arc};
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
||||
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
|
||||
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
|
||||
TlsSerializeTrait,
|
||||
};
|
||||
use openmls_traits::OpenMlsCryptoProvider;
|
||||
|
||||
use crate::{
|
||||
error::CoreError,
|
||||
identity::IdentityKeypair,
|
||||
keystore::{DiskKeyStore, StoreCrypto},
|
||||
};
|
||||
|
||||
// ── Constants ─────────────────────────────────────────────────────────────────
|
||||
|
||||
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
// ── GroupMember ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Per-client MLS state: identity keypair, crypto backend, and optional group.
|
||||
///
|
||||
/// # Lifecycle
|
||||
///
|
||||
/// ```text
|
||||
/// GroupMember::new(identity)
|
||||
/// ├─ generate_key_package() → upload to AS
|
||||
/// ├─ create_group(group_id) → become sole member
|
||||
/// │ └─ add_member(kp) → invite a peer; returns (commit, welcome)
|
||||
/// └─ join_group(welcome) → join after receiving a Welcome
|
||||
/// ├─ send_message(msg) → encrypt application data
|
||||
/// └─ receive_message(b) → decrypt; returns Some(plaintext) or None
|
||||
/// ```
|
||||
pub struct GroupMember {
|
||||
/// Persistent crypto backend. Holds the in-memory key store with HPKE
|
||||
/// private keys created during `generate_key_package`.
|
||||
backend: StoreCrypto,
|
||||
/// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
|
||||
identity: Arc<IdentityKeypair>,
|
||||
/// Active MLS group, if any.
|
||||
group: Option<MlsGroup>,
|
||||
/// Shared group configuration (wire format, ratchet tree extension, etc.).
|
||||
config: MlsGroupConfig,
|
||||
}
|
||||
|
||||
impl GroupMember {
|
||||
/// Create a new `GroupMember` with a fresh crypto backend.
|
||||
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
|
||||
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
|
||||
}
|
||||
|
||||
/// Create a `GroupMember` with a persistent keystore at `path`.
|
||||
pub fn new_persistent(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
path: impl AsRef<Path>,
|
||||
) -> Result<Self, CoreError> {
|
||||
let key_store = DiskKeyStore::persistent(path)
|
||||
.map_err(|e| CoreError::Io(format!("keystore: {e}")))?;
|
||||
Ok(Self::new_with_state(identity, key_store, None))
|
||||
}
|
||||
|
||||
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
|
||||
pub fn new_with_state(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
key_store: DiskKeyStore,
|
||||
group: Option<MlsGroup>,
|
||||
) -> Self {
|
||||
let config = MlsGroupConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
Self {
|
||||
backend: StoreCrypto::new(key_store),
|
||||
identity,
|
||||
group,
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
// ── KeyPackage ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Generate a fresh single-use MLS KeyPackage.
|
||||
///
|
||||
/// The HPKE init private key is stored in `self.backend`'s key store.
|
||||
/// **The same `GroupMember` instance must later call `join_group`** so
|
||||
/// that `new_from_welcome` can retrieve the private key.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// TLS-encoded KeyPackage bytes, ready for upload to the Authentication
|
||||
/// Service.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage.
|
||||
pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
|
||||
let credential_with_key = self.make_credential_with_key()?;
|
||||
|
||||
let key_package = KeyPackage::builder()
|
||||
.build(
|
||||
CryptoConfig::with_default_version(CIPHERSUITE),
|
||||
&self.backend,
|
||||
self.identity.as_ref(),
|
||||
credential_with_key,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
key_package
|
||||
.tls_serialize_detached()
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))
|
||||
}
|
||||
|
||||
// ── Group creation ────────────────────────────────────────────────────────
|
||||
|
||||
/// Create a new MLS group with `group_id` as the group identifier.
|
||||
///
|
||||
/// The caller becomes the sole member (epoch 0). Use `add_member` to
|
||||
/// invite additional members.
|
||||
///
|
||||
/// `group_id` can be any non-empty byte string; SHA-256 of a human-readable
|
||||
/// name is a good choice.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the group already exists or openmls fails.
|
||||
pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
|
||||
let credential_with_key = self.make_credential_with_key()?;
|
||||
let mls_id = GroupId::from_slice(group_id);
|
||||
|
||||
let group = MlsGroup::new_with_group_id(
|
||||
&self.backend,
|
||||
self.identity.as_ref(),
|
||||
&self.config,
|
||||
mls_id,
|
||||
credential_with_key,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
self.group = Some(group);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Membership ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Add a new member by their TLS-encoded KeyPackage bytes.
///
/// Produces a Commit (to update existing members' state) and a Welcome
/// (to bootstrap the new member). The caller is responsible for
/// distributing these:
///
/// - Send `commit_bytes` to all **existing** group members via the DS.
///   (In the 2-party case where the creator is the only member, this can
///   be discarded — the creator applies it locally via this method.)
/// - Send `welcome_bytes` to the **new** member via the DS.
///
/// This method also merges the pending Commit into the local group state
/// (advancing the epoch), so the caller is immediately ready to encrypt.
///
/// # Returns
///
/// `(commit_bytes, welcome_bytes)` — both TLS-encoded MLS messages.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the KeyPackage is malformed, no active
/// group exists, or openmls fails.
pub fn add_member(
    &mut self,
    // `mut` because tls_deserialize advances the slice as it reads.
    mut key_package_bytes: &[u8],
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
    let group = self
        .group
        .as_mut()
        .ok_or_else(|| CoreError::Mls("no active group".into()))?;

    // Deserialise and validate the peer's KeyPackage. KeyPackage only derives
    // TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
    // which verifies the signature and returns a trusted KeyPackage.
    let key_package: KeyPackage =
        KeyPackageIn::tls_deserialize(&mut key_package_bytes)
            .map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
            .validate(self.backend.crypto(), ProtocolVersion::Mls10)
            .map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;

    // Create the Commit + Welcome. The third return value (GroupInfo) is for
    // external commits and is not needed here.
    let (commit_out, welcome_out, _group_info) = group
        .add_members(&self.backend, self.identity.as_ref(), &[key_package])
        .map_err(|e| CoreError::Mls(format!("add_members: {e:?}")))?;

    // Merge the pending Commit into our own state, advancing the epoch.
    // Must happen before we encrypt anything in the new epoch.
    group
        .merge_pending_commit(&self.backend)
        .map_err(|e| CoreError::Mls(format!("merge_pending_commit: {e:?}")))?;

    // Serialise both outputs for distribution by the caller.
    let commit_bytes = commit_out
        .to_bytes()
        .map_err(|e| CoreError::Mls(format!("commit serialise: {e:?}")))?;
    let welcome_bytes = welcome_out
        .to_bytes()
        .map_err(|e| CoreError::Mls(format!("welcome serialise: {e:?}")))?;

    Ok((commit_bytes, welcome_bytes))
}
|
||||
|
||||
/// Join an existing MLS group from a TLS-encoded Welcome message.
///
/// The caller must have previously called [`generate_key_package`] on
/// **this same instance** so that the HPKE init private key is in the
/// backend's key store.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the Welcome does not match any known
/// KeyPackage, or openmls validation fails.
///
/// [`generate_key_package`]: Self::generate_key_package
pub fn join_group(&mut self, mut welcome_bytes: &[u8]) -> Result<(), CoreError> {
    // Deserialise MlsMessageIn, then extract the inner Welcome.
    let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
        .map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;

    // into_welcome() is feature-gated in openmls 0.5; extract() is public.
    // Any non-Welcome body (PublicMessage, PrivateMessage, …) is rejected.
    let welcome = match msg_in.extract() {
        MlsMessageInBody::Welcome(w) => w,
        _ => return Err(CoreError::Mls("expected a Welcome message".into())),
    };

    // ratchet_tree = None because use_ratchet_tree_extension = true embeds
    // the tree inside the Welcome's GroupInfo extension.
    let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
        .map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;

    // NOTE: this replaces any previously active group on this instance.
    self.group = Some(group);
    Ok(())
}
|
||||
|
||||
// ── Application messages ──────────────────────────────────────────────────
|
||||
|
||||
/// Encrypt `plaintext` as an MLS Application message.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// TLS-encoded `MlsMessageOut` bytes (PrivateMessage variant).
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if there is no active group or encryption fails.
|
||||
pub fn send_message(&mut self, plaintext: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
let group = self
|
||||
.group
|
||||
.as_mut()
|
||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||
|
||||
let mls_msg: MlsMessageOut = group
|
||||
.create_message(&self.backend, self.identity.as_ref(), plaintext)
|
||||
.map_err(|e| CoreError::Mls(format!("create_message: {e:?}")))?;
|
||||
|
||||
mls_msg
|
||||
.to_bytes()
|
||||
.map_err(|e| CoreError::Mls(format!("message serialise: {e:?}")))
|
||||
}
|
||||
|
||||
/// Process an incoming TLS-encoded MLS message.
///
/// # Returns
///
/// - `Ok(Some(plaintext))` for Application messages.
/// - `Ok(None)` for Commit messages (group state is updated internally).
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the message is malformed, fails
/// authentication, or the group state is inconsistent.
pub fn receive_message(&mut self, mut bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
    let group = self
        .group
        .as_mut()
        .ok_or_else(|| CoreError::Mls("no active group".into()))?;

    let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
        .map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;

    // into_protocol_message() is feature-gated; extract() + manual construction is not.
    // Welcome/GroupInfo/KeyPackage bodies are not processable here and are rejected.
    let protocol_message = match msg_in.extract() {
        MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
        MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
        _ => return Err(CoreError::Mls("not a protocol message".into())),
    };

    // Authenticates, decrypts, and classifies the message.
    let processed = group
        .process_message(&self.backend, protocol_message)
        .map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;

    match processed.into_content() {
        // Decrypted application payload — hand it to the caller.
        ProcessedMessageContent::ApplicationMessage(app) => Ok(Some(app.into_bytes())),
        ProcessedMessageContent::StagedCommitMessage(staged) => {
            // Merge the Commit into the local state (epoch advances).
            group
                .merge_staged_commit(&self.backend, *staged)
                .map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
            Ok(None)
        }
        // Proposals are stored for a later Commit; nothing to return yet.
        ProcessedMessageContent::ProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
        ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
    }
}
|
||||
|
||||
/// Process an incoming TLS-encoded MLS message and return sender identity + plaintext for application messages.
///
/// Same as [`receive_message`], but for Application messages returns
/// `Some((sender_identity_bytes, plaintext))` so the client can display who sent the message.
/// `sender_identity_bytes` is the MLS credential identity (e.g. Ed25519 public key for Basic credential).
///
/// Returns `Ok(None)` for Commit and Proposal messages (group state is updated internally).
///
/// [`receive_message`]: Self::receive_message
pub fn receive_message_with_sender(
    &mut self,
    mut bytes: &[u8],
) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
    let group = self
        .group
        .as_mut()
        .ok_or_else(|| CoreError::Mls("no active group".into()))?;

    let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
        .map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;

    // Only PrivateMessage/PublicMessage bodies can be processed by the group.
    let protocol_message = match msg_in.extract() {
        MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
        MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
        _ => return Err(CoreError::Mls("not a protocol message".into())),
    };

    let processed = group
        .process_message(&self.backend, protocol_message)
        .map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;

    // Capture the sender identity before into_content() consumes `processed`.
    let sender_identity = processed.credential().identity().to_vec();

    match processed.into_content() {
        ProcessedMessageContent::ApplicationMessage(app) => {
            Ok(Some((sender_identity, app.into_bytes())))
        }
        ProcessedMessageContent::StagedCommitMessage(staged) => {
            // Merge the Commit into the local state (epoch advances).
            group
                .merge_staged_commit(&self.backend, *staged)
                .map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
            Ok(None)
        }
        // Proposals are stored for a later Commit; nothing to return yet.
        ProcessedMessageContent::ProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
        ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
    }
}
|
||||
|
||||
// ── Accessors ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// Return the MLS group ID bytes, or `None` if no group is active.
|
||||
pub fn group_id(&self) -> Option<Vec<u8>> {
|
||||
self.group
|
||||
.as_ref()
|
||||
.map(|g| g.group_id().as_slice().to_vec())
|
||||
}
|
||||
|
||||
/// Return a reference to the identity keypair.
///
/// Borrowed from the internal `Arc`; clone the `Arc` upstream if an owned
/// handle is needed.
pub fn identity(&self) -> &IdentityKeypair {
    &self.identity
}
|
||||
|
||||
/// Return the private seed of the identity (for persistence).
///
/// SECURITY: this copies the raw private key material out of its zeroizing
/// wrapper — callers must handle and erase it carefully.
pub fn identity_seed(&self) -> [u8; 32] {
    self.identity.seed_bytes()
}
|
||||
|
||||
/// Return a reference to the underlying crypto backend
/// (RustCrypto provider + disk-backed key store).
pub fn backend(&self) -> &StoreCrypto {
    &self.backend
}
|
||||
|
||||
/// Return a reference to the MLS group, if active
/// (`None` before `create_group`/`join_group`).
pub fn group_ref(&self) -> Option<&MlsGroup> {
    self.group.as_ref()
}
|
||||
|
||||
/// Return the identity (credential) bytes of all current group members.
|
||||
///
|
||||
/// Each entry is the raw credential payload (Ed25519 public key bytes)
|
||||
/// extracted from the member's MLS leaf node.
|
||||
pub fn member_identities(&self) -> Vec<Vec<u8>> {
|
||||
let group = match self.group.as_ref() {
|
||||
Some(g) => g,
|
||||
None => return Vec::new(),
|
||||
};
|
||||
group
|
||||
.members()
|
||||
.map(|m| m.credential.identity().to_vec())
|
||||
.collect()
|
||||
}
|
||||
|
||||
// ── Private helpers ───────────────────────────────────────────────────────
|
||||
|
||||
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
|
||||
let credential = Credential::new(
|
||||
self.identity.public_key_bytes().to_vec(),
|
||||
CredentialType::Basic,
|
||||
)
|
||||
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
|
||||
|
||||
Ok(CredentialWithKey {
|
||||
credential,
|
||||
signature_key: self.identity.public_key_bytes().to_vec().into(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ── Unit tests ────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// End-to-end two-party exchange: create group, invite, join, then
    /// message in both directions.
    #[test]
    fn two_party_mls_round_trip() {
        let creator_identity = Arc::new(IdentityKeypair::generate());
        let joiner_identity = Arc::new(IdentityKeypair::generate());

        let mut creator = GroupMember::new(Arc::clone(&creator_identity));
        let mut joiner = GroupMember::new(Arc::clone(&joiner_identity));

        // KeyPackage must be generated on the instance that will later join,
        // so the HPKE init key is in its key store.
        let joiner_kp = joiner.generate_key_package().expect("joiner KeyPackage");

        creator
            .create_group(b"test-group-m3")
            .expect("creator create group");

        // Two-party case: the Commit is applied locally inside add_member,
        // so only the Welcome needs delivering.
        let (_commit, welcome) = creator.add_member(&joiner_kp).expect("creator add joiner");
        joiner.join_group(&welcome).expect("joiner join group");

        // creator → joiner
        let wire = creator.send_message(b"hello").expect("creator send");
        let plain = joiner
            .receive_message(&wire)
            .expect("joiner recv")
            .expect("application message");
        assert_eq!(plain, b"hello");

        // joiner → creator
        let wire = joiner.send_message(b"hello back").expect("joiner send");
        let plain = creator
            .receive_message(&wire)
            .expect("creator recv")
            .expect("application message");
        assert_eq!(plain, b"hello back");
    }

    /// `group_id()` is `None` until a group exists, then echoes the chosen id.
    #[test]
    fn group_id_lifecycle() {
        let keys = Arc::new(IdentityKeypair::generate());
        let mut member = GroupMember::new(keys);

        assert!(member.group_id().is_none(), "no group before create");

        member.create_group(b"gid").unwrap();
        assert_eq!(
            member.group_id().unwrap(),
            b"gid".as_slice(),
            "group_id must match what was passed"
        );
    }
}
|
||||
@@ -1,135 +0,0 @@
|
||||
//! Ed25519 identity keypair for MLS credentials and AS registration.
|
||||
//!
|
||||
//! The [`IdentityKeypair`] is the long-term identity key embedded in MLS
|
||||
//! `BasicCredential`s. It is used for signing MLS messages and as the
|
||||
//! indexing key for the Authentication Service.
|
||||
//!
|
||||
//! # Zeroize
|
||||
//!
|
||||
//! The 32-byte private seed is stored as `Zeroizing<[u8; 32]>`, which zeroes
|
||||
//! the bytes on drop. `[u8; 32]` is `Copy + Default` and satisfies zeroize's
|
||||
//! `DefaultIsZeroes` constraint, avoiding a conflict with ed25519-dalek's
|
||||
//! `SigningKey` zeroize impl.
|
||||
//!
|
||||
//! # Fingerprint
|
||||
//!
|
||||
//! A 32-byte SHA-256 digest of the raw public key bytes is used as a compact,
|
||||
//! collision-resistant identifier for logging.
|
||||
|
||||
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
|
||||
use openmls_traits::signatures::Signer;
|
||||
use openmls_traits::types::{Error as MlsError, SignatureScheme};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
/// An Ed25519 identity keypair.
///
/// Created with [`IdentityKeypair::generate`] (or rebuilt via
/// [`IdentityKeypair::from_seed`]). The private signing key seed is zeroed
/// when this struct is dropped.
pub struct IdentityKeypair {
    /// Raw 32-byte private seed — zeroized on drop.
    ///
    /// Stored as bytes rather than `SigningKey` to satisfy zeroize's
    /// `DefaultIsZeroes` bound on `Zeroizing<T>`; the `SigningKey` is
    /// reconstructed on demand in `signing_key()`.
    seed: Zeroizing<[u8; 32]>,
    /// Corresponding 32-byte public verifying key (derived from `seed`).
    verifying: VerifyingKey,
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Recreate an identity keypair from a 32-byte seed.
|
||||
pub fn from_seed(seed: [u8; 32]) -> Self {
|
||||
let signing = SigningKey::from_bytes(&seed);
|
||||
let verifying = signing.verifying_key();
|
||||
Self {
|
||||
seed: Zeroizing::new(seed),
|
||||
verifying,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte private seed (for persistence).
|
||||
pub fn seed_bytes(&self) -> [u8; 32] {
|
||||
*self.seed
|
||||
}
|
||||
}
|
||||
|
||||
impl IdentityKeypair {
|
||||
/// Generate a fresh random Ed25519 identity keypair.
|
||||
pub fn generate() -> Self {
|
||||
use rand::rngs::OsRng;
|
||||
let signing = SigningKey::generate(&mut OsRng);
|
||||
let verifying = signing.verifying_key();
|
||||
let seed = Zeroizing::new(signing.to_bytes());
|
||||
Self { seed, verifying }
|
||||
}
|
||||
|
||||
/// Return the raw 32-byte Ed25519 public key.
|
||||
///
|
||||
/// This is the byte array used as `identityKey` in `auth.capnp` calls.
|
||||
pub fn public_key_bytes(&self) -> [u8; 32] {
|
||||
self.verifying.to_bytes()
|
||||
}
|
||||
|
||||
/// Return the SHA-256 fingerprint of the public key (32 bytes).
|
||||
pub fn fingerprint(&self) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(self.verifying.to_bytes());
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Reconstruct the `SigningKey` from the stored seed bytes.
|
||||
fn signing_key(&self) -> SigningKey {
|
||||
SigningKey::from_bytes(&self.seed)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement the openmls `Signer` trait so `IdentityKeypair` can be passed
|
||||
/// directly to `KeyPackage::builder().build(...)` without needing the external
|
||||
/// `openmls_basic_credential` crate.
|
||||
impl Signer for IdentityKeypair {
|
||||
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, MlsError> {
|
||||
let sk = self.signing_key();
|
||||
let sig: ed25519_dalek::Signature = sk.sign(payload);
|
||||
Ok(sig.to_bytes().to_vec())
|
||||
}
|
||||
|
||||
fn signature_scheme(&self) -> SignatureScheme {
|
||||
SignatureScheme::ED25519
|
||||
}
|
||||
}
|
||||
|
||||
// Serialise only the 32-byte seed; the verifying key is re-derived on
// deserialisation (see the Deserialize impl below's counterpart logic).
impl Serialize for IdentityKeypair {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_bytes(&self.seed[..])
    }
}
|
||||
|
||||
impl<'de> Deserialize<'de> for IdentityKeypair {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let bytes: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
|
||||
let seed: [u8; 32] = bytes
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
|
||||
Ok(IdentityKeypair::from_seed(seed))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for IdentityKeypair {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let fp = self.fingerprint();
|
||||
f.debug_struct("IdentityKeypair")
|
||||
.field(
|
||||
"fingerprint",
|
||||
&format!("{:02x}{:02x}{:02x}{:02x}…", fp[0], fp[1], fp[2], fp[3]),
|
||||
)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
sync::RwLock,
|
||||
};
|
||||
|
||||
use openmls_rust_crypto::RustCrypto;
|
||||
use openmls_traits::{
|
||||
key_store::{MlsEntity, OpenMlsKeyStore},
|
||||
OpenMlsCryptoProvider,
|
||||
};
|
||||
|
||||
/// A disk-backed key store implementing `OpenMlsKeyStore`.
///
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on
/// every store/delete so HPKE init keys survive process restarts.
#[derive(Debug)]
pub struct DiskKeyStore {
    /// `None` → ephemeral (in-memory only); `Some(p)` → flushed to `p`.
    path: Option<PathBuf>,
    /// key bytes → serialised MLS entity bytes; guarded for concurrent access.
    values: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
}
|
||||
|
||||
/// Errors the key store can produce: (de)serialisation failures and
/// filesystem failures (message carries the underlying io error text).
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum DiskKeyStoreError {
    #[error("serialization error")]
    Serialization,
    #[error("io error: {0}")]
    Io(String),
}
|
||||
|
||||
impl DiskKeyStore {
|
||||
/// In-memory keystore (no persistence).
|
||||
pub fn ephemeral() -> Self {
|
||||
Self {
|
||||
path: None,
|
||||
values: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Persistent keystore backed by `path`. Creates an empty store if missing.
|
||||
pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
|
||||
let path = path.as_ref().to_path_buf();
|
||||
let values = if path.exists() {
|
||||
let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
HashMap::new()
|
||||
} else {
|
||||
bincode::deserialize(&bytes).map_err(|_| DiskKeyStoreError::Serialization)?
|
||||
}
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
path: Some(path),
|
||||
values: RwLock::new(values),
|
||||
})
|
||||
}
|
||||
|
||||
fn flush(&self) -> Result<(), DiskKeyStoreError> {
|
||||
let Some(path) = &self.path else {
|
||||
return Ok(());
|
||||
};
|
||||
let values = self.values.read().unwrap();
|
||||
let bytes = bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
// Default is the ephemeral (in-memory) store — no files touched.
impl Default for DiskKeyStore {
    fn default() -> Self {
        Self::ephemeral()
    }
}
|
||||
|
||||
impl OpenMlsKeyStore for DiskKeyStore {
    type Error = DiskKeyStoreError;

    /// Serialise `v` as JSON under key `k`, then persist the whole map.
    fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> {
        let value = serde_json::to_vec(v).map_err(|_| DiskKeyStoreError::Serialization)?;
        let mut values = self.values.write().unwrap();
        values.insert(k.to_vec(), value);
        // Must release the write lock before flush(), which takes a read
        // lock on the same RwLock — holding both would deadlock.
        drop(values);
        self.flush()
    }

    /// Look up `k` and deserialise the stored JSON; `None` on a missing key
    /// or a value that fails to deserialise as `V`.
    fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> {
        let values = self.values.read().unwrap();
        values
            .get(k)
            .and_then(|bytes| serde_json::from_slice(bytes).ok())
    }

    /// Remove `k` (a no-op if absent) and persist the whole map.
    fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> {
        let mut values = self.values.write().unwrap();
        values.remove(k);
        // Same lock-ordering concern as in store(): drop before flush().
        drop(values);
        self.flush()
    }
}
|
||||
|
||||
/// Crypto provider that couples RustCrypto with a disk-backed key store.
///
/// Implements `OpenMlsCryptoProvider` below so it can be passed wherever
/// openmls expects a backend.
#[derive(Debug)]
pub struct StoreCrypto {
    // Serves both the crypto and RNG provider roles.
    crypto: RustCrypto,
    key_store: DiskKeyStore,
}
|
||||
|
||||
impl StoreCrypto {
|
||||
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: RustCrypto::default(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default backend uses an ephemeral (in-memory) key store — handy for tests.
impl Default for StoreCrypto {
    fn default() -> Self {
        Self::new(DiskKeyStore::ephemeral())
    }
}
|
||||
|
||||
impl OpenMlsCryptoProvider for StoreCrypto {
    type CryptoProvider = RustCrypto;
    // RustCrypto doubles as the randomness provider.
    type RandProvider = RustCrypto;
    type KeyStoreProvider = DiskKeyStore;

    fn crypto(&self) -> &Self::CryptoProvider {
        &self.crypto
    }

    fn rand(&self) -> &Self::RandProvider {
        &self.crypto
    }

    fn key_store(&self) -> &Self::KeyStoreProvider {
        &self.key_store
    }
}
|
||||
@@ -1,41 +0,0 @@
|
||||
//! Core cryptographic primitives, MLS group state machine, and hybrid
//! post-quantum KEM for quicnprotochat.
//!
//! # Module layout
//!
//! | Module | Responsibility |
//! |-----------------|------------------------------------------------------------------|
//! | `app_message` | Rich application payload (Chat, Reply, Reaction, ReadReceipt, Typing) |
//! | `error` | [`CoreError`] type |
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
//! | `group` | [`GroupMember`] — MLS group lifecycle (create/join/send/recv) |
//! | `hybrid_crypto` | [`HybridCrypto`] — crypto provider wiring for the hybrid KEM |
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
//! | `opaque_auth` | OPAQUE-based password authentication |

mod app_message;
mod error;
mod group;
pub mod hybrid_crypto;
pub mod hybrid_kem;
mod identity;
mod keypackage;
mod keystore;
pub mod opaque_auth;

// ── Public API ────────────────────────────────────────────────────────────────

pub use app_message::{
    serialize, serialize_chat, serialize_reaction, serialize_read_receipt, serialize_reply,
    serialize_typing, parse, generate_message_id, AppMessage, MessageType, VERSION as APP_MESSAGE_VERSION,
};
pub use error::CoreError;
pub use group::GroupMember;
pub use hybrid_kem::{
    hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
    HybridPublicKey,
};
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
pub use identity::IdentityKeypair;
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
pub use keystore::DiskKeyStore;
|
||||
@@ -1,22 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-gui"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Native GUI for quicnprotochat (Tauri 2)."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat-gui"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-client = { path = "../quicnprotochat-client" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
tauri = { version = "2", features = [] }
|
||||
tokio = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
tauri-build = "2"
|
||||
@@ -1,32 +0,0 @@
|
||||
# quicnprotochat-gui
|
||||
|
||||
Native GUI for quicnprotochat using [Tauri 2](https://v2.tauri.app/). The UI runs in a webview; all server-facing work (capnp-rpc, `node_service::Client`) runs on a **dedicated backend thread** with a tokio `LocalSet`, since that code is `!Send`.
|
||||
|
||||
## Backend threading model
|
||||
|
||||
- A single **backend thread** runs a tokio `LocalSet` and a request-response loop.
|
||||
- The UI thread sends commands over an `mpsc` channel: `Whoami { state_path, password }` or `Health { server, ca_cert, server_name }`.
|
||||
- For each request, the backend runs sync code (whoami) or `LocalSet::run_until(async { ... })` (health). It then sends `Result<String, String>` back on the provided reply channel.
|
||||
- Tauri commands (`whoami`, `health`) block on that reply so the frontend gets a simple async-style result.
|
||||
|
||||
## How to run
|
||||
|
||||
From the workspace root:
|
||||
|
||||
```bash
|
||||
cargo run -p quicnprotochat-gui
|
||||
```
|
||||
|
||||
**Linux:** Tauri uses GTK. Install development packages if the build fails, e.g.:
|
||||
|
||||
- Debian/Ubuntu: `sudo apt install libgtk-3-dev libwebkit2gtk-4.1-dev`
|
||||
- Fedora: `sudo dnf install gtk3-devel webkit2gtk4.1-devel`
|
||||
|
||||
## Frontend
|
||||
|
||||
The frontend is static HTML in `ui/index.html` (no npm or build step). It provides:
|
||||
|
||||
- **Whoami** – state path (and optional password); calls `whoami` and shows JSON (identity_key, fingerprint, etc.).
|
||||
- **Health** – server address; calls `health` and shows server status and RTT JSON.
|
||||
|
||||
Default CA cert and server name for health are the same as the CLI (`data/server-cert.der`, `localhost`) unless overridden via optional params.
|
||||
@@ -1,3 +0,0 @@
|
||||
// Tauri build script: generates the compile-time context (config,
// capabilities, icons) consumed by the `tauri` runtime crate.
fn main() {
    tauri_build::build()
}
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"$schema": "https://schema.tauri.app/config/2/capability",
|
||||
"identifier": "default",
|
||||
"description": "Capability for the main window (custom commands whoami, health are allowed by default)",
|
||||
"windows": ["main"],
|
||||
"permissions": [
|
||||
"core:default",
|
||||
"core:window:allow-close",
|
||||
"core:window:allow-set-title"
|
||||
]
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
{"default":{"identifier":"default","description":"Capability for the main window (custom commands whoami, health are allowed by default)","local":true,"windows":["main"],"permissions":["core:default","core:window:allow-close","core:window:allow-set-title"]}}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Binary file not shown.
|
Before Width: | Height: | Size: 2.1 KiB |
@@ -1,86 +0,0 @@
|
||||
//! Backend service running on a dedicated thread with a tokio LocalSet.
|
||||
//!
|
||||
//! All server-facing work (capnp-rpc, node_service::Client) is !Send and must run on this
|
||||
//! single thread. The UI thread sends commands over a channel; this thread runs
|
||||
//! `LocalSet::run_until` for each request and sends the result back.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
use quicnprotochat_client::{cmd_health_json, whoami_json};
|
||||
|
||||
/// Commands the UI can send to the backend thread.
pub enum BackendCommand {
    /// Read local identity state and report identity/fingerprint info.
    Whoami {
        // Path to the on-disk client state file.
        state_path: String,
        // Optional password protecting the state.
        password: Option<String>,
    },
    /// Ping the server over capnp-rpc and report status + RTT.
    Health {
        // Server address, e.g. "host:port".
        server: String,
        // CA certificate used to validate the server's TLS cert.
        ca_cert: PathBuf,
        // Expected TLS server name (SNI / cert validation).
        server_name: String,
    },
}
|
||||
|
||||
/// Response sent back to the UI: `Ok(json_string)` or `Err(error_text)`.
pub type BackendResponse = Result<String, String>;
|
||||
|
||||
/// Spawn the backend thread and return a sender to post commands and a join handle.
/// The backend runs a tokio LocalSet and processes one command at a time:
/// for each received command it runs `LocalSet::run_until(future)` (for async commands)
/// or runs sync code (whoami), then sends the result on the provided reply channel.
///
/// The thread exits when every clone of the returned sender has been dropped
/// (recv() then returns Err and the loop ends).
pub fn spawn_backend() -> (mpsc::Sender<(BackendCommand, mpsc::Sender<BackendResponse>)>, thread::JoinHandle<()>) {
    // Each command carries its own reply channel, giving request/response
    // semantics over a single mpsc queue.
    let (tx, rx) = mpsc::channel::<(BackendCommand, mpsc::Sender<BackendResponse>)>();

    let handle = thread::spawn(move || {
        // Current-thread runtime: the capnp-rpc futures are !Send and must
        // stay on this one thread.
        let rt = Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("backend tokio runtime");
        let local = LocalSet::new();

        // Sequential request loop: one command fully processed at a time.
        while let Ok((cmd, reply_tx)) = rx.recv() {
            let result = run_command(&local, &rt, cmd);
            // Receiver may have given up; a failed send is not an error here.
            let _ = reply_tx.send(result);
        }
    });

    (tx, handle)
}
|
||||
|
||||
/// Execute a single backend command, synchronously from the caller's view.
///
/// `Whoami` is plain sync code; `Health` drives an async future to
/// completion on the provided LocalSet + current-thread runtime.
fn run_command(
    local: &LocalSet,
    rt: &tokio::runtime::Runtime,
    cmd: BackendCommand,
) -> BackendResponse {
    match cmd {
        BackendCommand::Whoami { state_path, password } => {
            let path = PathBuf::from(&state_path);
            whoami_json(&path, password.as_deref()).map_err(|e| e.to_string())
        }
        BackendCommand::Health {
            server,
            ca_cert,
            server_name,
        } => {
            // Request-response: we run LocalSet::run_until for this single request so capnp-rpc
            // and connect_node stay on this thread (!Send).
            let fut = cmd_health_json(&server, &ca_cert, &server_name);
            rt.block_on(local.run_until(fut)).map_err(|e| e.to_string())
        }
    }
}
|
||||
|
||||
/// Default CA cert path (relative to cwd or absolute); same default as CLI.
|
||||
pub fn default_ca_cert() -> PathBuf {
|
||||
PathBuf::from("data/server-cert.der")
|
||||
}
|
||||
|
||||
/// Default TLS server name.
|
||||
pub fn default_server_name() -> String {
|
||||
"localhost".to_string()
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
//! quicnprotochat native GUI (Tauri 2).
|
||||
//!
|
||||
//! The backend runs on a dedicated thread with a tokio LocalSet; all server-facing
|
||||
//! work (capnp-rpc, node_service::Client) is dispatched there. Tauri commands
|
||||
//! block on the request-response channel until the backend returns.
|
||||
|
||||
mod backend;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc;
|
||||
|
||||
use backend::{spawn_backend, BackendCommand};
|
||||
|
||||
/// Shared state: sender to the backend thread.
|
||||
struct BackendState {
|
||||
tx: mpsc::Sender<(BackendCommand, mpsc::Sender<backend::BackendResponse>)>,
|
||||
}
|
||||
|
||||
/// Runs whoami on the backend thread and returns JSON string (identity_key, fingerprint, etc.).
|
||||
#[tauri::command]
|
||||
fn whoami(
|
||||
state: tauri::State<BackendState>,
|
||||
state_path: String,
|
||||
password: Option<String>,
|
||||
) -> Result<String, String> {
|
||||
let (reply_tx, reply_rx) = mpsc::channel();
|
||||
state
|
||||
.tx
|
||||
.send((
|
||||
BackendCommand::Whoami {
|
||||
state_path,
|
||||
password,
|
||||
},
|
||||
reply_tx,
|
||||
))
|
||||
.map_err(|e| e.to_string())?;
|
||||
reply_rx.recv().map_err(|e| e.to_string())?
|
||||
}
|
||||
|
||||
/// Runs health check on the backend thread (LocalSet::run_until) and returns status JSON.
|
||||
#[tauri::command]
|
||||
fn health(
|
||||
state: tauri::State<BackendState>,
|
||||
server: String,
|
||||
ca_cert: Option<String>,
|
||||
server_name: Option<String>,
|
||||
) -> Result<String, String> {
|
||||
let ca_cert = ca_cert
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(backend::default_ca_cert);
|
||||
let server_name = server_name.unwrap_or_else(backend::default_server_name);
|
||||
let (reply_tx, reply_rx) = mpsc::channel();
|
||||
state
|
||||
.tx
|
||||
.send((
|
||||
BackendCommand::Health {
|
||||
server,
|
||||
ca_cert,
|
||||
server_name,
|
||||
},
|
||||
reply_tx,
|
||||
))
|
||||
.map_err(|e| e.to_string())?;
|
||||
reply_rx.recv().map_err(|e| e.to_string())?
|
||||
}
|
||||
|
||||
#[cfg_attr(mobile, tauri::mobile_entry_point)]
|
||||
pub fn run() {
|
||||
let (backend_tx, _backend_handle) = spawn_backend();
|
||||
|
||||
tauri::Builder::default()
|
||||
.manage(BackendState { tx: backend_tx })
|
||||
.invoke_handler(tauri::generate_handler![whoami, health])
|
||||
.run(tauri::generate_context!())
|
||||
.expect("error while running tauri application");
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
//! Desktop entry point for quicnprotochat-gui.
|
||||
|
||||
fn main() {
|
||||
quicnprotochat_gui::run()
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
{
|
||||
"$schema": "https://schema.tauri.app/config/2",
|
||||
"productName": "quicnprotochat-gui",
|
||||
"identifier": "chat.quicnproto.gui",
|
||||
"build": {
|
||||
"frontendDist": "./ui",
|
||||
"beforeBuildCommand": "",
|
||||
"beforeDevCommand": ""
|
||||
},
|
||||
"app": {
|
||||
"windows": [
|
||||
{
|
||||
"title": "quicnprotochat",
|
||||
"width": 640,
|
||||
"height": 480
|
||||
}
|
||||
],
|
||||
"security": {
|
||||
"csp": null
|
||||
}
|
||||
},
|
||||
"bundle": {},
|
||||
"plugins": {}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>quicnprotochat</title>
|
||||
<style>
|
||||
body { font-family: system-ui, sans-serif; margin: 1rem; }
|
||||
button { margin: 0.25rem; padding: 0.5rem 1rem; cursor: pointer; }
|
||||
#output { white-space: pre-wrap; background: #f0f0f0; padding: 0.75rem; margin-top: 1rem; min-height: 4rem; border-radius: 4px; }
|
||||
.error { color: #c00; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>quicnprotochat</h1>
|
||||
<p>
|
||||
<button id="whoami">Whoami</button>
|
||||
<button id="health">Health</button>
|
||||
</p>
|
||||
<label>State path: <input id="statePath" type="text" value="quicnprotochat-state.bin" size="32" /></label>
|
||||
<br />
|
||||
<label>Server: <input id="server" type="text" value="127.0.0.1:7000" size="24" /></label>
|
||||
<div id="output">Click Whoami or Health. Results appear here.</div>
|
||||
|
||||
<script>
|
||||
const output = document.getElementById('output');
|
||||
const statePath = document.getElementById('statePath');
|
||||
const server = document.getElementById('server');
|
||||
|
||||
function show(result, isError = false) {
|
||||
output.textContent = result;
|
||||
output.className = isError ? 'error' : '';
|
||||
}
|
||||
|
||||
const invoke = window.__TAURI__?.core?.invoke;
|
||||
if (!invoke) {
|
||||
show('Tauri API not available (not running inside Tauri?).', true);
|
||||
} else {
|
||||
document.getElementById('whoami').addEventListener('click', function () {
|
||||
show('Running whoami…');
|
||||
invoke('whoami', { statePath: statePath.value.trim(), password: null })
|
||||
.then(function (s) { show(s); })
|
||||
.catch(function (e) { show(String(e), true); });
|
||||
});
|
||||
document.getElementById('health').addEventListener('click', function () {
|
||||
show('Running health…');
|
||||
invoke('health', { server: server.value.trim() })
|
||||
.then(function (s) { show(s); })
|
||||
.catch(function (e) { show(String(e), true); });
|
||||
});
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,12 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-p2p"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "P2P transport layer for quicnprotochat using iroh."
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
iroh = "0.96"
|
||||
tokio = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
@@ -1,186 +0,0 @@
|
||||
//! P2P transport layer for quicnprotochat using iroh.
|
||||
//!
|
||||
//! Provides direct peer-to-peer QUIC connections with NAT traversal via iroh
|
||||
//! relay servers. When both peers are online, messages bypass the central
|
||||
//! server entirely.
|
||||
//!
|
||||
//! # Architecture
|
||||
//!
|
||||
//! ```text
|
||||
//! Client A ── iroh direct (QUIC) ── Client B (preferred: low latency)
|
||||
//! │ │
|
||||
//! └── QUIC/TLS ── Server ── QUIC/TLS ┘ (fallback: store-and-forward)
|
||||
//! ```
|
||||
|
||||
use iroh::{Endpoint, EndpointAddr, PublicKey, SecretKey};
|
||||
|
||||
/// ALPN protocol identifier for quicnprotochat P2P messaging.
|
||||
const P2P_ALPN: &[u8] = b"quicnprotochat/p2p/1";
|
||||
|
||||
/// A P2P node backed by an iroh endpoint.
|
||||
///
|
||||
/// Manages direct QUIC connections to peers with automatic NAT traversal.
|
||||
pub struct P2pNode {
|
||||
endpoint: Endpoint,
|
||||
}
|
||||
|
||||
/// Received P2P message with sender information.
|
||||
pub struct P2pMessage {
|
||||
pub sender: PublicKey,
|
||||
pub payload: Vec<u8>,
|
||||
}
|
||||
|
||||
impl P2pNode {
|
||||
/// Start a new P2P node.
|
||||
///
|
||||
/// Generates a fresh identity or reuses a provided secret key.
|
||||
pub async fn start(secret_key: Option<SecretKey>) -> anyhow::Result<Self> {
|
||||
let mut builder = Endpoint::builder();
|
||||
if let Some(sk) = secret_key {
|
||||
builder = builder.secret_key(sk);
|
||||
}
|
||||
builder = builder.alpns(vec![P2P_ALPN.to_vec()]);
|
||||
|
||||
let endpoint = builder.bind().await?;
|
||||
|
||||
tracing::info!(
|
||||
node_id = %endpoint.id().fmt_short(),
|
||||
"P2P node started"
|
||||
);
|
||||
|
||||
Ok(Self { endpoint })
|
||||
}
|
||||
|
||||
/// This node's public key (used as node ID for peer discovery).
|
||||
pub fn node_id(&self) -> PublicKey {
|
||||
self.endpoint.id()
|
||||
}
|
||||
|
||||
/// This node's secret key (for persistence across restarts).
|
||||
pub fn secret_key(&self) -> SecretKey {
|
||||
self.endpoint.secret_key().clone()
|
||||
}
|
||||
|
||||
/// Get the node's network address information for publishing to discovery.
|
||||
pub fn endpoint_addr(&self) -> EndpointAddr {
|
||||
self.endpoint.addr()
|
||||
}
|
||||
|
||||
/// Send a payload directly to a peer via P2P QUIC.
|
||||
pub async fn send(&self, peer: impl Into<EndpointAddr>, payload: &[u8]) -> anyhow::Result<()> {
|
||||
let peer = peer.into();
|
||||
let conn = self.endpoint.connect(peer, P2P_ALPN).await?;
|
||||
|
||||
let mut send = conn.open_uni().await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
// Simple framing: 4-byte length prefix + payload.
|
||||
let len = (payload.len() as u32).to_be_bytes();
|
||||
send.write_all(&len)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
send.write_all(payload)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
send.finish().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
// Wait until the peer has consumed the stream before dropping.
|
||||
send.stopped().await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
tracing::debug!(
|
||||
peer = %conn.remote_id().fmt_short(),
|
||||
bytes = payload.len(),
|
||||
"P2P message sent"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Accept a single incoming P2P message.
|
||||
///
|
||||
/// Blocks until a peer connects and sends data.
|
||||
pub async fn recv(&self) -> anyhow::Result<P2pMessage> {
|
||||
let incoming = self
|
||||
.endpoint
|
||||
.accept()
|
||||
.await
|
||||
.ok_or_else(|| anyhow::anyhow!("no more incoming connections"))?;
|
||||
|
||||
let conn = incoming.await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let sender = conn.remote_id();
|
||||
|
||||
let mut recv = conn
|
||||
.accept_uni()
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
// Read length-prefixed payload.
|
||||
let mut len_buf = [0u8; 4];
|
||||
recv.read_exact(&mut len_buf)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let len = u32::from_be_bytes(len_buf) as usize;
|
||||
|
||||
if len > 5 * 1024 * 1024 {
|
||||
anyhow::bail!("P2P payload too large: {len} bytes");
|
||||
}
|
||||
|
||||
let mut payload = vec![0u8; len];
|
||||
recv.read_exact(&mut payload)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
tracing::debug!(
|
||||
peer = %sender.fmt_short(),
|
||||
bytes = len,
|
||||
"P2P message received"
|
||||
);
|
||||
|
||||
Ok(P2pMessage { sender, payload })
|
||||
}
|
||||
|
||||
/// Gracefully shut down the P2P node.
|
||||
pub async fn close(self) {
|
||||
self.endpoint.close().await;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use iroh::RelayMode;
|
||||
|
||||
/// Create a local-only P2P node with relays disabled (for testing).
|
||||
async fn local_node() -> P2pNode {
|
||||
let endpoint = Endpoint::builder()
|
||||
.alpns(vec![P2P_ALPN.to_vec()])
|
||||
.relay_mode(RelayMode::Disabled)
|
||||
.bind()
|
||||
.await
|
||||
.unwrap();
|
||||
P2pNode { endpoint }
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn p2p_round_trip() {
|
||||
let sender = local_node().await;
|
||||
let receiver = local_node().await;
|
||||
|
||||
let receiver_addr = receiver.endpoint_addr();
|
||||
let sender_id = sender.node_id();
|
||||
let payload = b"hello via P2P";
|
||||
|
||||
let recv_handle = tokio::spawn(async move {
|
||||
let msg = receiver.recv().await.unwrap();
|
||||
assert_eq!(msg.payload, payload.to_vec());
|
||||
assert_eq!(msg.sender, sender_id);
|
||||
});
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
|
||||
|
||||
sender.send(receiver_addr, payload).await.unwrap();
|
||||
|
||||
recv_handle.await.unwrap();
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
sender.close().await;
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-proto"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat. No crypto, no I/O."
|
||||
license = "MIT"
|
||||
|
||||
# build.rs invokes capnpc to generate Rust source from .capnp schemas.
|
||||
build = "build.rs"
|
||||
|
||||
[dependencies]
|
||||
capnp = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
capnpc = { workspace = true }
|
||||
@@ -1,54 +0,0 @@
|
||||
//! Build script for quicnprotochat-proto.
|
||||
//!
|
||||
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
|
||||
//! located in the workspace-root `schemas/` directory.
|
||||
//!
|
||||
//! # Prerequisites
|
||||
//!
|
||||
//! The `capnp` CLI must be installed and on `PATH`.
|
||||
//!
|
||||
//! Debian/Ubuntu: apt-get install capnproto
|
||||
//! macOS: brew install capnp
|
||||
//! Docker: see docker/Dockerfile
|
||||
|
||||
use std::{env, path::PathBuf};
|
||||
|
||||
fn main() {
|
||||
let manifest_dir =
|
||||
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
|
||||
|
||||
// Workspace root is two levels above this crate (quicnprotochat/crates/quicnprotochat-proto).
|
||||
let workspace_root = manifest_dir
|
||||
.join("../..")
|
||||
.canonicalize()
|
||||
.expect("could not canonicalize workspace root path");
|
||||
|
||||
let schemas_dir = workspace_root.join("schemas");
|
||||
|
||||
// Re-run this build script whenever any schema file changes.
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("auth.capnp").display()
|
||||
);
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("delivery.capnp").display()
|
||||
);
|
||||
println!(
|
||||
"cargo:rerun-if-changed={}",
|
||||
schemas_dir.join("node.capnp").display()
|
||||
);
|
||||
|
||||
capnpc::CompilerCommand::new()
|
||||
// Treat `schemas/` as the include root so that inter-schema imports
|
||||
// resolve correctly.
|
||||
.src_prefix(&schemas_dir)
|
||||
.file(schemas_dir.join("auth.capnp"))
|
||||
.file(schemas_dir.join("delivery.capnp"))
|
||||
.file(schemas_dir.join("node.capnp"))
|
||||
.run()
|
||||
.expect(
|
||||
"Cap'n Proto schema compilation failed. \
|
||||
Is `capnp` installed? (apt-get install capnproto / brew install capnp)",
|
||||
);
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
|
||||
//!
|
||||
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
|
||||
#![allow(unused_parens)]
|
||||
|
||||
//! # Design constraints
|
||||
//!
|
||||
//! This crate is intentionally restricted:
|
||||
//! - **No crypto** — key material never enters this crate.
|
||||
//! - **No I/O** — callers own transport; this crate only converts bytes ↔ types.
|
||||
//! - **No async** — pure synchronous data-layer code.
|
||||
//!
|
||||
//! # Generated code
|
||||
//!
|
||||
//! `build.rs` invokes `capnpc` at compile time and writes generated Rust source
|
||||
//! into `$OUT_DIR`. The `include!` macros below splice that code in as a module.
|
||||
|
||||
// ── Generated types ───────────────────────────────────────────────────────────
|
||||
|
||||
/// Cap'n Proto generated types for `schemas/auth.capnp`.
|
||||
///
|
||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
||||
pub mod auth_capnp {
|
||||
include!(concat!(env!("OUT_DIR"), "/auth_capnp.rs"));
|
||||
}
|
||||
|
||||
/// Cap'n Proto generated types for `schemas/delivery.capnp`.
|
||||
///
|
||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
||||
pub mod delivery_capnp {
|
||||
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
|
||||
}
|
||||
|
||||
/// Cap'n Proto generated types for `schemas/node.capnp`.
|
||||
///
|
||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
||||
pub mod node_capnp {
|
||||
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
|
||||
}
|
||||
|
||||
// ── Low-level byte ↔ message conversions ──────────────────────────────────────
|
||||
|
||||
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
|
||||
///
|
||||
/// The output includes the segment table header. For transport, the
|
||||
/// `quicnprotochat-core` frame codec prepends a 4-byte little-endian length field.
|
||||
pub fn to_bytes<A: capnp::message::Allocator>(
|
||||
msg: &capnp::message::Builder<A>,
|
||||
) -> Result<Vec<u8>, capnp::Error> {
|
||||
let mut buf = Vec::new();
|
||||
capnp::serialize::write_message(&mut buf, msg)?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
/// Deserialise unpacked wire bytes into a message with owned segments.
|
||||
///
|
||||
/// Uses `ReaderOptions::new()` (default limits: 64 MiB, 512 nesting levels).
|
||||
/// Callers that receive data from untrusted peers should consider tightening
|
||||
/// the traversal limit via `ReaderOptions::traversal_limit_in_words`.
|
||||
pub fn from_bytes(
|
||||
bytes: &[u8],
|
||||
) -> Result<capnp::message::Reader<capnp::serialize::OwnedSegments>, capnp::Error> {
|
||||
let mut cursor = std::io::Cursor::new(bytes);
|
||||
capnp::serialize::read_message(&mut cursor, capnp::message::ReaderOptions::new())
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
[package]
|
||||
name = "quicnprotochat-server"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Delivery Service and Authentication Service for quicnprotochat."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat-server"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
|
||||
# Serialisation + RPC
|
||||
capnp = { workspace = true }
|
||||
capnp-rpc = { workspace = true }
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
|
||||
# Server utilities
|
||||
dashmap = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
quinn = { workspace = true }
|
||||
quinn-proto = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
rcgen = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE PAKE
|
||||
opaque-ke = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
subtle = { workspace = true }
|
||||
|
||||
# Database
|
||||
rusqlite = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
anyhow = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
|
||||
# CLI
|
||||
clap = { workspace = true }
|
||||
toml = { version = "0.8" }
|
||||
|
||||
# Metrics (Prometheus)
|
||||
metrics = "0.22"
|
||||
metrics-exporter-prometheus = "0.15"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
@@ -1,187 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
use serde::Deserialize;
|
||||
|
||||
pub const DEFAULT_LISTEN: &str = "0.0.0.0:7000";
|
||||
pub const DEFAULT_DATA_DIR: &str = "data";
|
||||
pub const DEFAULT_TLS_CERT: &str = "data/server-cert.der";
|
||||
pub const DEFAULT_TLS_KEY: &str = "data/server-key.der";
|
||||
pub const DEFAULT_STORE_BACKEND: &str = "file";
|
||||
pub const DEFAULT_DB_PATH: &str = "data/quicnprotochat.db";
|
||||
|
||||
#[derive(Debug, Default, Deserialize)]
|
||||
pub struct FileConfig {
|
||||
pub listen: Option<String>,
|
||||
pub data_dir: Option<String>,
|
||||
pub tls_cert: Option<PathBuf>,
|
||||
pub tls_key: Option<PathBuf>,
|
||||
pub auth_token: Option<String>,
|
||||
pub allow_insecure_auth: Option<bool>,
|
||||
/// When true, enqueue does not require an identity-bound session: only a valid token is required.
|
||||
/// The server does not associate the request with a specific sender (Sealed Sender).
|
||||
#[serde(default)]
|
||||
pub sealed_sender: Option<bool>,
|
||||
pub store_backend: Option<String>,
|
||||
pub db_path: Option<PathBuf>,
|
||||
pub db_key: Option<String>,
|
||||
/// Metrics HTTP listen address (e.g. "0.0.0.0:9090"). If set, /metrics is served there.
|
||||
pub metrics_listen: Option<String>,
|
||||
/// When true and metrics_listen is set, start the metrics server.
|
||||
#[serde(default)]
|
||||
pub metrics_enabled: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EffectiveConfig {
|
||||
pub listen: String,
|
||||
pub data_dir: String,
|
||||
pub tls_cert: PathBuf,
|
||||
pub tls_key: PathBuf,
|
||||
pub auth_token: Option<String>,
|
||||
pub allow_insecure_auth: bool,
|
||||
/// When true, enqueue does not require identity; valid token only (Sealed Sender).
|
||||
pub sealed_sender: bool,
|
||||
pub store_backend: String,
|
||||
pub db_path: PathBuf,
|
||||
pub db_key: String,
|
||||
/// If Some(addr), metrics server listens here (e.g. "0.0.0.0:9090").
|
||||
pub metrics_listen: Option<String>,
|
||||
/// Start metrics server only when true and metrics_listen is set.
|
||||
pub metrics_enabled: bool,
|
||||
}
|
||||
|
||||
pub fn load_config(path: Option<&Path>) -> anyhow::Result<FileConfig> {
|
||||
let path = match path {
|
||||
Some(p) => PathBuf::from(p),
|
||||
None => PathBuf::from("quicnprotochat-server.toml"),
|
||||
};
|
||||
|
||||
if !path.exists() {
|
||||
return Ok(FileConfig::default());
|
||||
}
|
||||
|
||||
let contents =
|
||||
std::fs::read_to_string(&path).with_context(|| format!("read config file {path:?}"))?;
|
||||
let cfg: FileConfig =
|
||||
toml::from_str(&contents).with_context(|| format!("parse config file {path:?}"))?;
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
|
||||
let listen = if args.listen == DEFAULT_LISTEN {
|
||||
file.listen
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_LISTEN.to_string())
|
||||
} else {
|
||||
args.listen.clone()
|
||||
};
|
||||
|
||||
let data_dir = if args.data_dir == DEFAULT_DATA_DIR {
|
||||
file.data_dir
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
|
||||
} else {
|
||||
args.data_dir.clone()
|
||||
};
|
||||
|
||||
let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) {
|
||||
file.tls_cert
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
|
||||
} else {
|
||||
args.tls_cert.clone()
|
||||
};
|
||||
|
||||
let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) {
|
||||
file.tls_key
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
|
||||
} else {
|
||||
args.tls_key.clone()
|
||||
};
|
||||
|
||||
let auth_token = if args.auth_token.is_some() {
|
||||
args.auth_token.clone()
|
||||
} else {
|
||||
file.auth_token.clone()
|
||||
};
|
||||
|
||||
let allow_insecure_auth = if args.allow_insecure_auth {
|
||||
true
|
||||
} else {
|
||||
file.allow_insecure_auth.unwrap_or(false)
|
||||
};
|
||||
|
||||
let sealed_sender = args.sealed_sender || file.sealed_sender.unwrap_or(false);
|
||||
|
||||
let store_backend = if args.store_backend == DEFAULT_STORE_BACKEND {
|
||||
file.store_backend
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_STORE_BACKEND.to_string())
|
||||
} else {
|
||||
args.store_backend.clone()
|
||||
};
|
||||
|
||||
let db_path = if args.db_path == PathBuf::from(DEFAULT_DB_PATH) {
|
||||
file.db_path
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_DB_PATH))
|
||||
} else {
|
||||
args.db_path.clone()
|
||||
};
|
||||
|
||||
let db_key = if args.db_key.is_empty() {
|
||||
file.db_key.clone().unwrap_or_else(|| args.db_key.clone())
|
||||
} else {
|
||||
args.db_key.clone()
|
||||
};
|
||||
|
||||
let metrics_listen = args
|
||||
.metrics_listen
|
||||
.clone()
|
||||
.or_else(|| file.metrics_listen.clone());
|
||||
let metrics_enabled = args
|
||||
.metrics_enabled
|
||||
.or(file.metrics_enabled)
|
||||
.unwrap_or(metrics_listen.is_some());
|
||||
|
||||
EffectiveConfig {
|
||||
listen,
|
||||
data_dir,
|
||||
tls_cert,
|
||||
tls_key,
|
||||
auth_token,
|
||||
allow_insecure_auth,
|
||||
sealed_sender,
|
||||
store_backend,
|
||||
db_path,
|
||||
db_key,
|
||||
metrics_listen,
|
||||
metrics_enabled,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
|
||||
let token = effective
|
||||
.auth_token
|
||||
.as_deref()
|
||||
.filter(|s| !s.is_empty())
|
||||
.ok_or_else(|| {
|
||||
anyhow::anyhow!("production requires QUICNPROTOCHAT_AUTH_TOKEN (non-empty)")
|
||||
})?;
|
||||
if token == "devtoken" {
|
||||
anyhow::bail!(
|
||||
"production forbids auth_token 'devtoken'; set a strong QUICNPROTOCHAT_AUTH_TOKEN"
|
||||
);
|
||||
}
|
||||
if effective.store_backend == "sql" && effective.db_key.is_empty() {
|
||||
anyhow::bail!("production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY");
|
||||
}
|
||||
if !effective.tls_cert.exists() || !effective.tls_key.exists() {
|
||||
anyhow::bail!(
|
||||
"production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,298 +0,0 @@
|
||||
//! quicnprotochat-server — unified Authentication + Delivery service.
|
||||
//!
|
||||
//! The server hosts Authentication + Delivery services over QUIC + Cap'n Proto.
|
||||
|
||||
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::Parser;
|
||||
use dashmap::DashMap;
|
||||
use opaque_ke::ServerSetup;
|
||||
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
||||
use quinn::Endpoint;
|
||||
use rand::rngs::OsRng;
|
||||
use tokio::sync::Notify;
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
mod auth;
|
||||
mod config;
|
||||
mod error_codes;
|
||||
mod metrics;
|
||||
mod node_service;
|
||||
mod sql_store;
|
||||
mod tls;
|
||||
mod storage;
|
||||
|
||||
use auth::{AuthConfig, PendingLogin, RateEntry, SessionInfo};
|
||||
use config::{
|
||||
load_config, merge_config, validate_production_config, DEFAULT_DATA_DIR, DEFAULT_DB_PATH,
|
||||
DEFAULT_LISTEN, DEFAULT_STORE_BACKEND, DEFAULT_TLS_CERT, DEFAULT_TLS_KEY,
|
||||
};
|
||||
use node_service::{handle_node_connection, spawn_cleanup_task};
|
||||
use sql_store::SqlStore;
|
||||
use storage::{FileBackedStore, Store};
|
||||
use tls::build_server_config;
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(
|
||||
name = "quicnprotochat-server",
|
||||
about = "quicnprotochat Delivery Service + Authentication Service",
|
||||
version
|
||||
)]
|
||||
struct Args {
|
||||
/// Optional path to a TOML config file (fields map to CLI flags).
|
||||
#[arg(long, env = "QUICNPROTOCHAT_CONFIG")]
|
||||
config: Option<PathBuf>,
|
||||
|
||||
/// QUIC listen address (host:port).
|
||||
#[arg(long, default_value = DEFAULT_LISTEN, env = "QUICNPROTOCHAT_LISTEN")]
|
||||
listen: String,
|
||||
|
||||
/// Directory for persisted server data (KeyPackages + delivery queues).
|
||||
#[arg(long, default_value = DEFAULT_DATA_DIR, env = "QUICNPROTOCHAT_DATA_DIR")]
|
||||
data_dir: String,
|
||||
|
||||
/// TLS certificate path (generated automatically if missing).
|
||||
#[arg(long, default_value = DEFAULT_TLS_CERT, env = "QUICNPROTOCHAT_TLS_CERT")]
|
||||
tls_cert: PathBuf,
|
||||
|
||||
/// TLS private key path (generated automatically if missing).
|
||||
#[arg(long, default_value = DEFAULT_TLS_KEY, env = "QUICNPROTOCHAT_TLS_KEY")]
|
||||
tls_key: PathBuf,
|
||||
|
||||
/// Required bearer token for auth.version=1 requests. Use --allow-insecure-auth to run without it (dev only).
|
||||
#[arg(long, env = "QUICNPROTOCHAT_AUTH_TOKEN")]
|
||||
auth_token: Option<String>,
|
||||
|
||||
/// Allow running without QUICNPROTOCHAT_AUTH_TOKEN (development only).
|
||||
#[arg(long, env = "QUICNPROTOCHAT_ALLOW_INSECURE_AUTH", default_value_t = false)]
|
||||
allow_insecure_auth: bool,
|
||||
|
||||
/// Enable Sealed Sender: enqueue does not require identity-bound session, only a valid token.
|
||||
#[arg(long, env = "QUICNPROTOCHAT_SEALED_SENDER", default_value_t = false)]
|
||||
sealed_sender: bool,
|
||||
|
||||
/// Storage backend: "file" (bincode) or "sql" (SQLCipher-encrypted).
|
||||
#[arg(long, default_value = DEFAULT_STORE_BACKEND, env = "QUICNPROTOCHAT_STORE_BACKEND")]
|
||||
store_backend: String,
|
||||
|
||||
/// Path to the SQLCipher database file (only used when --store-backend=sql).
|
||||
#[arg(long, default_value = DEFAULT_DB_PATH, env = "QUICNPROTOCHAT_DB_PATH")]
|
||||
db_path: PathBuf,
|
||||
|
||||
/// SQLCipher encryption key. Empty string disables encryption.
|
||||
#[arg(long, default_value = "", env = "QUICNPROTOCHAT_DB_KEY")]
|
||||
db_key: String,
|
||||
|
||||
/// Metrics HTTP listen address (e.g. 0.0.0.0:9090). If set and metrics enabled, /metrics is served.
|
||||
#[arg(long, env = "QUICNPROTOCHAT_METRICS_LISTEN")]
|
||||
metrics_listen: Option<String>,
|
||||
|
||||
/// Enable metrics server when metrics_listen is set.
|
||||
#[arg(long, env = "QUICNPROTOCHAT_METRICS_ENABLED")]
|
||||
metrics_enabled: Option<bool>,
|
||||
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let args = Args::parse();
|
||||
let file_cfg = load_config(args.config.as_deref())?;
|
||||
let effective = merge_config(&args, &file_cfg);
|
||||
|
||||
let production = std::env::var("QUICNPROTOCHAT_PRODUCTION")
|
||||
.map(|v| matches!(v.to_lowercase().as_str(), "1" | "true" | "yes"))
|
||||
.unwrap_or(false);
|
||||
if production {
|
||||
validate_production_config(&effective)?;
|
||||
}
|
||||
|
||||
// Optional metrics server: only start when metrics_enabled and metrics_listen are set.
|
||||
if effective.metrics_enabled {
|
||||
if let Some(addr_str) = &effective.metrics_listen {
|
||||
let addr: std::net::SocketAddr = addr_str
|
||||
.parse()
|
||||
.context("metrics_listen must be host:port (e.g. 0.0.0.0:9090)")?;
|
||||
metrics_exporter_prometheus::PrometheusBuilder::new()
|
||||
.with_http_listener(addr)
|
||||
.install()
|
||||
.context("failed to install Prometheus metrics exporter")?;
|
||||
tracing::info!(addr = %addr_str, "metrics server listening on /metrics");
|
||||
}
|
||||
}
|
||||
|
||||
// In non-production, require an explicit opt-out before running without a static token.
|
||||
if !production
|
||||
&& effective
|
||||
.auth_token
|
||||
.as_deref()
|
||||
.map(|s| s.is_empty())
|
||||
.unwrap_or(true)
|
||||
&& !effective.allow_insecure_auth
|
||||
{
|
||||
anyhow::bail!(
|
||||
"missing QUICNPROTOCHAT_AUTH_TOKEN; set one or pass --allow-insecure-auth for development"
|
||||
);
|
||||
}
|
||||
|
||||
if effective.allow_insecure_auth
|
||||
&& effective
|
||||
.auth_token
|
||||
.as_deref()
|
||||
.map(|s| s.is_empty())
|
||||
.unwrap_or(true)
|
||||
{
|
||||
tracing::warn!("running without QUICNPROTOCHAT_AUTH_TOKEN (allow-insecure-auth enabled); development only");
|
||||
}
|
||||
|
||||
let listen: SocketAddr = effective
|
||||
.listen
|
||||
.parse()
|
||||
.context("--listen must be host:port")?;
|
||||
|
||||
let server_config = build_server_config(&effective.tls_cert, &effective.tls_key, production)
|
||||
.context("failed to build TLS/QUIC server config")?;
|
||||
|
||||
// Shared storage — persisted to disk for restart safety.
|
||||
let store: Arc<dyn Store> = match effective.store_backend.as_str() {
|
||||
"sql" => {
|
||||
if let Some(parent) = effective.db_path.parent() {
|
||||
std::fs::create_dir_all(parent).context("create db dir")?;
|
||||
}
|
||||
tracing::info!(
|
||||
path = %effective.db_path.display(),
|
||||
encrypted = !effective.db_key.is_empty(),
|
||||
"opening SQLCipher store"
|
||||
);
|
||||
if effective.db_key.is_empty() {
|
||||
tracing::warn!("db_key is empty; SQL store will be plaintext (development only)");
|
||||
}
|
||||
Arc::new(SqlStore::open(&effective.db_path, &effective.db_key)?)
|
||||
}
|
||||
"file" | _ => {
|
||||
tracing::info!(dir = %effective.data_dir, "opening file-backed store");
|
||||
Arc::new(FileBackedStore::open(&effective.data_dir)?)
|
||||
}
|
||||
};
|
||||
|
||||
let auth_cfg = Arc::new(AuthConfig::new(
|
||||
effective.auth_token.clone(),
|
||||
effective.allow_insecure_auth,
|
||||
));
|
||||
let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = Arc::new(DashMap::new());
|
||||
|
||||
// OPAQUE ServerSetup: load from storage or generate fresh.
|
||||
let opaque_setup: Arc<ServerSetup<OpaqueSuite>> = match store.get_server_setup() {
|
||||
Ok(Some(bytes)) => {
|
||||
let setup = ServerSetup::<OpaqueSuite>::deserialize(&bytes)
|
||||
.map_err(|e| anyhow::anyhow!("corrupt OPAQUE server setup: {e}"))?;
|
||||
tracing::info!("loaded persisted OPAQUE ServerSetup");
|
||||
Arc::new(setup)
|
||||
}
|
||||
Ok(None) => {
|
||||
let setup = ServerSetup::<OpaqueSuite>::new(&mut OsRng);
|
||||
let bytes = setup.serialize().to_vec();
|
||||
store
|
||||
.store_server_setup(bytes)
|
||||
.context("persist OPAQUE ServerSetup")?;
|
||||
tracing::info!("generated and persisted new OPAQUE ServerSetup");
|
||||
Arc::new(setup)
|
||||
}
|
||||
Err(e) => return Err(anyhow::anyhow!("load OPAQUE server setup: {e}")),
|
||||
};
|
||||
|
||||
let pending_logins: Arc<DashMap<String, PendingLogin>> = Arc::new(DashMap::new());
|
||||
let sessions: Arc<DashMap<Vec<u8>, SessionInfo>> = Arc::new(DashMap::new());
|
||||
let rate_limits: Arc<DashMap<Vec<u8>, RateEntry>> = Arc::new(DashMap::new());
|
||||
|
||||
// Background cleanup task (expire sessions, pending logins, rate limits, and stale messages).
|
||||
spawn_cleanup_task(
|
||||
Arc::clone(&sessions),
|
||||
Arc::clone(&pending_logins),
|
||||
Arc::clone(&rate_limits),
|
||||
Arc::clone(&store),
|
||||
);
|
||||
|
||||
let endpoint = Endpoint::server(server_config, listen)?;
|
||||
|
||||
tracing::info!(
|
||||
addr = %effective.listen,
|
||||
"accepting QUIC connections"
|
||||
);
|
||||
|
||||
// capnp-rpc is !Send (Rc internals), so all RPC tasks must stay on a LocalSet.
|
||||
let local = LocalSet::new();
|
||||
local
|
||||
.run_until(async move {
|
||||
loop {
|
||||
tokio::select! {
|
||||
biased;
|
||||
|
||||
incoming = endpoint.accept() => {
|
||||
let incoming = match incoming {
|
||||
Some(i) => i,
|
||||
None => break,
|
||||
};
|
||||
|
||||
let connecting = match incoming.accept() {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to accept incoming connection");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let store = Arc::clone(&store);
|
||||
let waiters = Arc::clone(&waiters);
|
||||
let auth_cfg = Arc::clone(&auth_cfg);
|
||||
let opaque_setup = Arc::clone(&opaque_setup);
|
||||
let pending_logins = Arc::clone(&pending_logins);
|
||||
let sessions = Arc::clone(&sessions);
|
||||
let rate_limits = Arc::clone(&rate_limits);
|
||||
let sealed_sender = effective.sealed_sender;
|
||||
|
||||
tokio::task::spawn_local(async move {
|
||||
if let Err(e) = handle_node_connection(
|
||||
connecting,
|
||||
store,
|
||||
waiters,
|
||||
auth_cfg,
|
||||
opaque_setup,
|
||||
pending_logins,
|
||||
sessions,
|
||||
rate_limits,
|
||||
sealed_sender,
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "connection error");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
tracing::info!("shutdown signal received, draining QUIC connections");
|
||||
endpoint.close(0u32.into(), b"server shutdown");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok::<(), anyhow::Error>(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,318 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use capnp::capability::Promise;
|
||||
use dashmap::DashMap;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
use tokio::sync::Notify;
|
||||
use tokio::time::timeout;
|
||||
|
||||
use crate::auth::{
|
||||
check_rate_limit, coded_error, fmt_hex, require_identity_or_request, validate_auth_context,
|
||||
};
|
||||
use crate::error_codes::*;
|
||||
use crate::metrics;
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
use super::{NodeServiceImpl, CURRENT_WIRE_VERSION};
|
||||
|
||||
// Audit events here must not include secrets: no payload content, no full recipient/token bytes (prefix only).
|
||||
|
||||
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
|
||||
const MAX_QUEUE_DEPTH: usize = 1000;
|
||||
|
||||
fn storage_err(err: StorageError) -> capnp::Error {
|
||||
coded_error(E009_STORAGE_ERROR, err)
|
||||
}
|
||||
|
||||
pub fn fill_payloads_wait(
|
||||
results: &mut node_service::FetchWaitResults,
|
||||
messages: Vec<(u64, Vec<u8>)>,
|
||||
) {
|
||||
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||
for (i, (seq, data)) in messages.iter().enumerate() {
|
||||
let mut entry = list.reborrow().get(i as u32);
|
||||
entry.set_seq(*seq);
|
||||
entry.set_data(data);
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeServiceImpl {
|
||||
pub fn handle_enqueue(
|
||||
&mut self,
|
||||
params: node_service::EnqueueParams,
|
||||
mut results: node_service::EnqueueResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let recipient_key = match p.get_recipient_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let payload = match p.get_payload() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
|
||||
let version = p.get_version();
|
||||
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if recipient_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||
));
|
||||
}
|
||||
if payload.is_empty() {
|
||||
return Promise::err(coded_error(E005_PAYLOAD_EMPTY, "payload must not be empty"));
|
||||
}
|
||||
if payload.len() > MAX_PAYLOAD_BYTES {
|
||||
return Promise::err(coded_error(
|
||||
E006_PAYLOAD_TOO_LARGE,
|
||||
format!("payload exceeds max size ({} bytes)", MAX_PAYLOAD_BYTES),
|
||||
));
|
||||
}
|
||||
if version != CURRENT_WIRE_VERSION {
|
||||
return Promise::err(coded_error(
|
||||
E012_WIRE_VERSION,
|
||||
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||
));
|
||||
}
|
||||
|
||||
if let Err(e) = check_rate_limit(&self.rate_limits, &auth_ctx.token) {
|
||||
// Audit: rate limit hit — do not log token or identity.
|
||||
tracing::warn!("rate_limit_hit");
|
||||
metrics::record_rate_limit_hit_total();
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
// When sealed_sender is true, enqueue does not require identity; valid token only.
|
||||
if !self.sealed_sender {
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&recipient_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
}
|
||||
|
||||
match self.store.queue_depth(&recipient_key, &channel_id) {
|
||||
Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
|
||||
return Promise::err(coded_error(
|
||||
E015_QUEUE_FULL,
|
||||
format!("queue depth {} exceeds limit {}", depth, MAX_QUEUE_DEPTH),
|
||||
));
|
||||
}
|
||||
Err(e) => return Promise::err(storage_err(e)),
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let payload_len = payload.len();
|
||||
let seq = match self
|
||||
.store
|
||||
.enqueue(&recipient_key, &channel_id, payload)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(seq) => seq,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
results.get().set_seq(seq);
|
||||
|
||||
// Metrics and audit. Audit events must not include secrets (no payload, no full keys).
|
||||
metrics::record_enqueue_total();
|
||||
metrics::record_enqueue_bytes(payload_len as u64);
|
||||
if let Ok(depth) = self.store.queue_depth(&recipient_key, &channel_id) {
|
||||
metrics::record_delivery_queue_depth(depth);
|
||||
}
|
||||
tracing::info!(
|
||||
recipient_prefix = %fmt_hex(&recipient_key[..4]),
|
||||
payload_len = payload_len,
|
||||
seq = seq,
|
||||
"audit: enqueue"
|
||||
);
|
||||
|
||||
crate::auth::waiter(&self.waiters, &recipient_key).notify_waiters();
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
pub fn handle_fetch(
|
||||
&mut self,
|
||||
params: node_service::FetchParams,
|
||||
mut results: node_service::FetchResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let recipient_key = match params.get() {
|
||||
Ok(p) => match p.get_recipient_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
},
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let channel_id = params
|
||||
.get()
|
||||
.ok()
|
||||
.and_then(|p| p.get_channel_id().ok())
|
||||
.map(|c| c.to_vec())
|
||||
.unwrap_or_default();
|
||||
let version = params
|
||||
.get()
|
||||
.ok()
|
||||
.map(|p| p.get_version())
|
||||
.unwrap_or(CURRENT_WIRE_VERSION);
|
||||
let limit = params.get().ok().map(|p| p.get_limit()).unwrap_or(0);
|
||||
let auth_ctx = match params
|
||||
.get()
|
||||
.ok()
|
||||
.map(|p| validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()))
|
||||
.transpose()
|
||||
{
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if recipient_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||
));
|
||||
}
|
||||
if version != CURRENT_WIRE_VERSION {
|
||||
return Promise::err(coded_error(
|
||||
E012_WIRE_VERSION,
|
||||
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||
));
|
||||
}
|
||||
|
||||
let auth_ctx = match auth_ctx {
|
||||
Some(ctx) => ctx,
|
||||
None => return Promise::err(coded_error(E003_INVALID_TOKEN, "auth required")),
|
||||
};
|
||||
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&recipient_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
let messages = if limit > 0 {
|
||||
match self
|
||||
.store
|
||||
.fetch_limited(&recipient_key, &channel_id, limit as usize)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(m) => m,
|
||||
Err(e) => return Promise::err(e),
|
||||
}
|
||||
} else {
|
||||
match self
|
||||
.store
|
||||
.fetch(&recipient_key, &channel_id)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(m) => m,
|
||||
Err(e) => return Promise::err(e),
|
||||
}
|
||||
};
|
||||
|
||||
// Audit: fetch — do not log payload or full keys.
|
||||
metrics::record_fetch_total();
|
||||
tracing::info!(
|
||||
recipient_prefix = %fmt_hex(&recipient_key[..4]),
|
||||
count = messages.len(),
|
||||
"audit: fetch"
|
||||
);
|
||||
|
||||
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||
for (i, (seq, data)) in messages.iter().enumerate() {
|
||||
let mut entry = list.reborrow().get(i as u32);
|
||||
entry.set_seq(*seq);
|
||||
entry.set_data(data);
|
||||
}
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
pub fn handle_fetch_wait(
|
||||
&mut self,
|
||||
params: node_service::FetchWaitParams,
|
||||
mut results: node_service::FetchWaitResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let recipient_key = match p.get_recipient_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
|
||||
let version = p.get_version();
|
||||
let timeout_ms = p.get_timeout_ms();
|
||||
let limit = p.get_limit();
|
||||
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if recipient_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||
));
|
||||
}
|
||||
if version != CURRENT_WIRE_VERSION {
|
||||
return Promise::err(coded_error(
|
||||
E012_WIRE_VERSION,
|
||||
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||
));
|
||||
}
|
||||
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&recipient_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
let store = Arc::clone(&self.store);
|
||||
let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = self.waiters.clone();
|
||||
|
||||
Promise::from_future(async move {
|
||||
let fetch_fn = |s: &Arc<dyn Store>, rk: &[u8], ch: &[u8], lim: u32| -> Result<Vec<(u64, Vec<u8>)>, capnp::Error> {
|
||||
if lim > 0 {
|
||||
s.fetch_limited(rk, ch, lim as usize).map_err(storage_err)
|
||||
} else {
|
||||
s.fetch(rk, ch).map_err(storage_err)
|
||||
}
|
||||
};
|
||||
|
||||
let messages = fetch_fn(&store, &recipient_key, &channel_id, limit)?;
|
||||
|
||||
if messages.is_empty() && timeout_ms > 0 {
|
||||
let waiter = waiters
|
||||
.entry(recipient_key.clone())
|
||||
.or_insert_with(|| Arc::new(Notify::new()))
|
||||
.clone();
|
||||
let _ = timeout(Duration::from_millis(timeout_ms), waiter.notified()).await;
|
||||
let msgs = fetch_fn(&store, &recipient_key, &channel_id, limit)?;
|
||||
fill_payloads_wait(&mut results, msgs);
|
||||
metrics::record_fetch_wait_total();
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
fill_payloads_wait(&mut results, messages);
|
||||
metrics::record_fetch_wait_total();
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,548 +0,0 @@
|
||||
//! SQLCipher-backed persistent storage.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use rusqlite::{params, Connection};
|
||||
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
/// Schema version after introducing the migration runner (existing DBs had 1).
|
||||
const SCHEMA_VERSION: i32 = 3;
|
||||
|
||||
/// Migrations: (migration_number, SQL). Files named NNN_name.sql, applied in order when N > user_version.
|
||||
const MIGRATIONS: &[(i32, &str)] = &[
|
||||
(1, include_str!("../migrations/001_initial.sql")),
|
||||
(3, include_str!("../migrations/002_add_seq.sql")),
|
||||
];
|
||||
|
||||
/// Runs pending migrations on an open connection: applies any migration whose number is greater
|
||||
/// than the current PRAGMA user_version, then sets user_version to SCHEMA_VERSION.
|
||||
fn run_migrations(conn: &Connection) -> Result<(), StorageError> {
|
||||
let current_version: i32 = conn
|
||||
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||
.map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
|
||||
|
||||
for (migration_num, sql) in MIGRATIONS {
|
||||
if *migration_num > current_version {
|
||||
conn.execute_batch(sql).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
}
|
||||
|
||||
conn.pragma_update(None, "user_version", SCHEMA_VERSION)
|
||||
.map_err(|e| StorageError::Db(format!("set user_version failed: {e}")))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// SQLCipher-encrypted storage backend.
|
||||
pub struct SqlStore {
|
||||
conn: Mutex<Connection>,
|
||||
}
|
||||
|
||||
impl SqlStore {
|
||||
fn lock_conn(&self) -> Result<std::sync::MutexGuard<'_, Connection>, StorageError> {
|
||||
self.conn
|
||||
.lock()
|
||||
.map_err(|e| StorageError::Db(format!("lock poisoned: {e}")))
|
||||
}
|
||||
|
||||
pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
|
||||
let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !key.is_empty() {
|
||||
conn.pragma_update(None, "key", key)
|
||||
.map_err(|e| StorageError::Db(format!("PRAGMA key failed: {e}")))?;
|
||||
}
|
||||
|
||||
conn.execute_batch(
|
||||
"PRAGMA journal_mode = WAL;
|
||||
PRAGMA synchronous = NORMAL;
|
||||
PRAGMA foreign_keys = ON;",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let current_version: i32 = conn
|
||||
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||
.map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
|
||||
|
||||
if current_version > SCHEMA_VERSION {
|
||||
return Err(StorageError::Db(format!(
|
||||
"database schema version {current_version} is newer than supported {SCHEMA_VERSION}"
|
||||
)));
|
||||
}
|
||||
|
||||
run_migrations(&conn)?;
|
||||
|
||||
Ok(Self {
|
||||
conn: Mutex::new(conn),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Store for SqlStore {
|
||||
fn upload_key_package(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
|
||||
params![identity_key, package],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, package_data FROM key_packages
|
||||
WHERE identity_key = ?1
|
||||
ORDER BY id ASC
|
||||
LIMIT 1",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let row = stmt
|
||||
.query_row(params![identity_key], |row| {
|
||||
Ok((row.get::<_, i64>(0)?, row.get::<_, Vec<u8>>(1)?))
|
||||
})
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
match row {
|
||||
Some((id, package)) => {
|
||||
conn.execute("DELETE FROM key_packages WHERE id = ?1", params![id])
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(Some(package))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn enqueue(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<u64, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
// Atomically get-and-increment the per-inbox sequence counter.
|
||||
// RETURNING gives us the post-update next_seq; the assigned seq is next_seq - 1.
|
||||
let seq: i64 = conn
|
||||
.query_row(
|
||||
"INSERT INTO delivery_seq_counters (recipient_key, channel_id, next_seq)
|
||||
VALUES (?1, ?2, 1)
|
||||
ON CONFLICT(recipient_key, channel_id) DO UPDATE SET next_seq = next_seq + 1
|
||||
RETURNING next_seq - 1",
|
||||
params![recipient_key, channel_id],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
conn.execute(
|
||||
"INSERT INTO deliveries (recipient_key, channel_id, seq, payload) VALUES (?1, ?2, ?3, ?4)",
|
||||
params![recipient_key, channel_id, seq, payload],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(seq as u64)
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, seq, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY seq ASC",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
.iter()
|
||||
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||
.collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, seq, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY seq ASC
|
||||
LIMIT ?3",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id, limit as i64], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
.iter()
|
||||
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||
.collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||
}
|
||||
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let count: i64 = conn
|
||||
.query_row(
|
||||
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
|
||||
params![recipient_key, channel_id],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(count as usize)
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let cutoff = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
.saturating_sub(max_age_secs);
|
||||
let deleted = conn
|
||||
.execute(
|
||||
"DELETE FROM deliveries WHERE created_at < ?1",
|
||||
params![cutoff as i64],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(deleted)
|
||||
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
|
||||
params![identity_key, hybrid_pk],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![identity_key], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
|
||||
params![setup],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT setup_data FROM server_setup WHERE id = 1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row([], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
|
||||
params![username, record],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT opaque_record FROM users WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![username], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let exists: bool = conn
|
||||
.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
|
||||
params![username],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(exists)
|
||||
}
|
||||
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
|
||||
params![username, identity_key],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![username], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
|
||||
params![identity_key, node_addr],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![identity_key], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience extension for `rusqlite::OptionalExtension`.
|
||||
trait OptionalExt<T> {
|
||||
fn optional(self) -> Result<Option<T>, rusqlite::Error>;
|
||||
}
|
||||
|
||||
impl<T> OptionalExt<T> for Result<T, rusqlite::Error> {
|
||||
fn optional(self) -> Result<Option<T>, rusqlite::Error> {
|
||||
match self {
|
||||
Ok(v) => Ok(Some(v)),
|
||||
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
fn open_in_memory() -> SqlStore {
|
||||
SqlStore::open(":memory:", "").unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sets_user_version_after_migrate() {
|
||||
let dir = tempfile::tempdir().expect("tempdir");
|
||||
let db_path: PathBuf = dir.path().join("store.db");
|
||||
|
||||
{
|
||||
let store = SqlStore::open(&db_path, "").expect("open store");
|
||||
let _guard = store.lock_conn().unwrap();
|
||||
}
|
||||
|
||||
let conn = rusqlite::Connection::open(&db_path).expect("reopen db");
|
||||
let version: i32 = conn
|
||||
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||
.expect("read user_version");
|
||||
|
||||
assert_eq!(version, SCHEMA_VERSION);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn key_package_fifo() {
|
||||
let store = open_in_memory();
|
||||
let identity = [1u8; 32];
|
||||
|
||||
store
|
||||
.upload_key_package(&identity, b"kp1".to_vec())
|
||||
.unwrap();
|
||||
store
|
||||
.upload_key_package(&identity, b"kp2".to_vec())
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
store.fetch_key_package(&identity).unwrap(),
|
||||
Some(b"kp1".to_vec())
|
||||
);
|
||||
assert_eq!(
|
||||
store.fetch_key_package(&identity).unwrap(),
|
||||
Some(b"kp2".to_vec())
|
||||
);
|
||||
assert_eq!(store.fetch_key_package(&identity).unwrap(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn delivery_round_trip() {
|
||||
let store = open_in_memory();
|
||||
let rk = [1u8; 32];
|
||||
let ch = b"channel-1";
|
||||
|
||||
let seq0 = store.enqueue(&rk, ch, b"msg1".to_vec()).unwrap();
|
||||
let seq1 = store.enqueue(&rk, ch, b"msg2".to_vec()).unwrap();
|
||||
assert_eq!(seq0, 0);
|
||||
assert_eq!(seq1, 1);
|
||||
|
||||
let msgs = store.fetch(&rk, ch).unwrap();
|
||||
assert_eq!(msgs, vec![(0u64, b"msg1".to_vec()), (1u64, b"msg2".to_vec())]);
|
||||
|
||||
assert!(store.fetch(&rk, ch).unwrap().is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fetch_limited_partial_drain() {
|
||||
let store = open_in_memory();
|
||||
let rk = [5u8; 32];
|
||||
let ch = b"ch";
|
||||
|
||||
store.enqueue(&rk, ch, b"a".to_vec()).unwrap();
|
||||
store.enqueue(&rk, ch, b"b".to_vec()).unwrap();
|
||||
store.enqueue(&rk, ch, b"c".to_vec()).unwrap();
|
||||
|
||||
let msgs = store.fetch_limited(&rk, ch, 2).unwrap();
|
||||
assert_eq!(msgs, vec![(0u64, b"a".to_vec()), (1u64, b"b".to_vec())]);
|
||||
|
||||
let remaining = store.fetch(&rk, ch).unwrap();
|
||||
assert_eq!(remaining, vec![(2u64, b"c".to_vec())]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn queue_depth_count() {
|
||||
let store = open_in_memory();
|
||||
let rk = [6u8; 32];
|
||||
let ch = b"ch";
|
||||
|
||||
assert_eq!(store.queue_depth(&rk, ch).unwrap(), 0);
|
||||
store.enqueue(&rk, ch, b"x".to_vec()).unwrap();
|
||||
store.enqueue(&rk, ch, b"y".to_vec()).unwrap();
|
||||
assert_eq!(store.queue_depth(&rk, ch).unwrap(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn has_user_record_check() {
|
||||
let store = open_in_memory();
|
||||
assert!(!store.has_user_record("user1").unwrap());
|
||||
store
|
||||
.store_user_record("user1", b"record".to_vec())
|
||||
.unwrap();
|
||||
assert!(store.has_user_record("user1").unwrap());
|
||||
assert!(!store.has_user_record("user2").unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn user_identity_key_round_trip() {
|
||||
let store = open_in_memory();
|
||||
assert!(store.get_user_identity_key("user1").unwrap().is_none());
|
||||
store
|
||||
.store_user_identity_key("user1", vec![1u8; 32])
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
store.get_user_identity_key("user1").unwrap(),
|
||||
Some(vec![1u8; 32])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hybrid_key_round_trip() {
|
||||
let store = open_in_memory();
|
||||
let ik = [2u8; 32];
|
||||
let pk = b"hybrid_public_key_data".to_vec();
|
||||
|
||||
store.upload_hybrid_key(&ik, pk.clone()).unwrap();
|
||||
assert_eq!(store.fetch_hybrid_key(&ik).unwrap(), Some(pk));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn separate_channels_isolated() {
|
||||
let store = open_in_memory();
|
||||
let rk = [4u8; 32];
|
||||
|
||||
store.enqueue(&rk, b"ch-a", b"a1".to_vec()).unwrap();
|
||||
store.enqueue(&rk, b"ch-b", b"b1".to_vec()).unwrap();
|
||||
|
||||
let a_msgs = store.fetch(&rk, b"ch-a").unwrap();
|
||||
assert_eq!(a_msgs, vec![(0u64, b"a1".to_vec())]);
|
||||
|
||||
let b_msgs = store.fetch(&rk, b"ch-b").unwrap();
|
||||
assert_eq!(b_msgs, vec![(0u64, b"b1".to_vec())]);
|
||||
}
|
||||
}
|
||||
@@ -1,490 +0,0 @@
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
fs,
|
||||
hash::Hash,
|
||||
path::{Path, PathBuf},
|
||||
sync::Mutex,
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum StorageError {
|
||||
#[error("io error: {0}")]
|
||||
Io(String),
|
||||
#[error("serialization error")]
|
||||
Serde,
|
||||
#[error("database error: {0}")]
|
||||
Db(String),
|
||||
}
|
||||
|
||||
fn lock<T>(m: &Mutex<T>) -> Result<std::sync::MutexGuard<'_, T>, StorageError> {
|
||||
m.lock()
|
||||
.map_err(|e| StorageError::Io(format!("lock poisoned: {e}")))
|
||||
}
|
||||
|
||||
// ── Store trait ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
|
||||
pub trait Store: Send + Sync {
|
||||
fn upload_key_package(&self, identity_key: &[u8], package: Vec<u8>)
|
||||
-> Result<(), StorageError>;
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Enqueue a payload and return the monotonically increasing per-inbox sequence number
|
||||
/// assigned to this message. Clients sort by seq before MLS processing.
|
||||
fn enqueue(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<u64, StorageError>;
|
||||
|
||||
/// Fetch and drain all queued messages, returning `(seq, payload)` pairs ordered by seq.
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
|
||||
|
||||
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
|
||||
/// Returns `(seq, payload)` pairs ordered by seq.
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
|
||||
|
||||
/// Return the number of queued messages for (recipient, channel) (Fix 7).
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;
|
||||
|
||||
/// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Store the OPAQUE `ServerSetup` (generated once, loaded on restart).
|
||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError>;
|
||||
|
||||
/// Load the persisted `ServerSetup`, if any.
|
||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Store an OPAQUE user record (serialized `ServerRegistration`).
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError>;
|
||||
|
||||
/// Retrieve an OPAQUE user record by username.
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Check if a user record already exists (Fix 5).
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;
|
||||
|
||||
/// Store identity key for a user (Fix 2).
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
|
||||
/// Retrieve identity key for a user (Fix 2).
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Publish a P2P endpoint address for an identity key.
|
||||
fn publish_endpoint(&self, identity_key: &[u8], node_addr: Vec<u8>)
|
||||
-> Result<(), StorageError>;
|
||||
|
||||
/// Resolve a peer's P2P endpoint address.
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
}
|
||||
|
||||
// ── ChannelKey ───────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug)]
|
||||
pub struct ChannelKey {
|
||||
pub channel_id: Vec<u8>,
|
||||
pub recipient_key: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Hash for ChannelKey {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
self.channel_id.hash(state);
|
||||
self.recipient_key.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
// ── FileBackedStore ──────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
struct QueueMapV1 {
|
||||
map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
struct QueueMapV2 {
|
||||
map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Clone)]
|
||||
struct SeqEntry {
|
||||
seq: u64,
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
/// V3 delivery store: each queue entry carries a monotonic per-inbox sequence number.
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
struct QueueMapV3 {
|
||||
map: HashMap<ChannelKey, VecDeque<SeqEntry>>,
|
||||
next_seq: HashMap<ChannelKey, u64>,
|
||||
}
|
||||
|
||||
/// File-backed storage for KeyPackages and delivery queues.
|
||||
///
|
||||
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
|
||||
pub struct FileBackedStore {
|
||||
kp_path: PathBuf,
|
||||
ds_path: PathBuf,
|
||||
hk_path: PathBuf,
|
||||
setup_path: PathBuf,
|
||||
users_path: PathBuf,
|
||||
identity_keys_path: PathBuf,
|
||||
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
|
||||
deliveries: Mutex<QueueMapV3>,
|
||||
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
|
||||
users: Mutex<HashMap<String, Vec<u8>>>,
|
||||
identity_keys: Mutex<HashMap<String, Vec<u8>>>,
|
||||
endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl FileBackedStore {
|
||||
pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
|
||||
let dir = dir.as_ref();
|
||||
if !dir.exists() {
|
||||
fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
let kp_path = dir.join("keypackages.bin");
|
||||
let ds_path = dir.join("deliveries.bin");
|
||||
let hk_path = dir.join("hybridkeys.bin");
|
||||
let setup_path = dir.join("server_setup.bin");
|
||||
let users_path = dir.join("users.bin");
|
||||
let identity_keys_path = dir.join("identity_keys.bin");
|
||||
|
||||
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
||||
let deliveries = Mutex::new(Self::load_delivery_map_v3(&ds_path)?);
|
||||
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
||||
let users = Mutex::new(Self::load_users(&users_path)?);
|
||||
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
||||
|
||||
Ok(Self {
|
||||
kp_path,
|
||||
ds_path,
|
||||
hk_path,
|
||||
setup_path,
|
||||
users_path,
|
||||
identity_keys_path,
|
||||
key_packages,
|
||||
deliveries,
|
||||
hybrid_keys,
|
||||
users,
|
||||
identity_keys,
|
||||
endpoints: Mutex::new(HashMap::new()),
|
||||
})
|
||||
}
|
||||
|
||||
fn load_kp_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let map: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
||||
Ok(map.map)
|
||||
}
|
||||
|
||||
fn flush_kp_map(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
|
||||
) -> Result<(), StorageError> {
|
||||
let payload = QueueMapV1 { map: map.clone() };
|
||||
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
/// Load deliveries as V3. Falls back to V2 format (assigns seqs starting at 0).
|
||||
fn load_delivery_map_v3(path: &Path) -> Result<QueueMapV3, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
// Try V3 first.
|
||||
if let Ok(v3) = bincode::deserialize::<QueueMapV3>(&bytes) {
|
||||
return Ok(v3);
|
||||
}
|
||||
// Fall back to V2: assign ascending seqs starting at 0 per channel.
|
||||
let v2 = bincode::deserialize::<QueueMapV2>(&bytes)
|
||||
.map_err(|_| StorageError::Io("deliveries file: unrecognised format".into()))?;
|
||||
let mut v3 = QueueMapV3::default();
|
||||
for (key, queue) in v2.map {
|
||||
let entries: VecDeque<SeqEntry> = queue
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(i, data)| SeqEntry { seq: i as u64, data })
|
||||
.collect();
|
||||
let next = entries.len() as u64;
|
||||
v3.next_seq.insert(key.clone(), next);
|
||||
v3.map.insert(key, entries);
|
||||
}
|
||||
Ok(v3)
|
||||
}
|
||||
|
||||
fn flush_delivery_map(&self, path: &Path, map: &QueueMapV3) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_hybrid_keys(path: &Path) -> Result<HashMap<Vec<u8>, Vec<u8>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_hybrid_keys(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<Vec<u8>, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_users(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_users(&self, path: &Path, map: &HashMap<String, Vec<u8>>) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
||||
Self::load_users(path)
|
||||
}
|
||||
|
||||
fn flush_map_string_bytes(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<String, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
self.flush_users(path, map)
|
||||
}
|
||||
}
|
||||
|
||||
impl Store for FileBackedStore {
|
||||
fn upload_key_package(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.key_packages)?;
|
||||
map.entry(identity_key.to_vec())
|
||||
.or_default()
|
||||
.push_back(package);
|
||||
self.flush_kp_map(&self.kp_path, &*map)
|
||||
}
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let mut map = lock(&self.key_packages)?;
|
||||
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
|
||||
self.flush_kp_map(&self.kp_path, &*map)?;
|
||||
Ok(package)
|
||||
}
|
||||
|
||||
fn enqueue(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<u64, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let seq = *inner.next_seq.entry(key.clone()).or_insert(0);
|
||||
*inner.next_seq.get_mut(&key).unwrap() = seq + 1;
|
||||
inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(seq)
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||
.map
|
||||
.get_mut(&key)
|
||||
.map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
|
||||
.unwrap_or_default();
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||
.map
|
||||
.get_mut(&key)
|
||||
.map(|q| {
|
||||
let count = limit.min(q.len());
|
||||
q.drain(..count).map(|e| (e.seq, e.data)).collect()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
let inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
Ok(inner.map.get(&key).map(|q| q.len()).unwrap_or(0))
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
// FileBackedStore does not track timestamps per message — no-op.
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.hybrid_keys)?;
|
||||
map.insert(identity_key.to_vec(), hybrid_pk);
|
||||
self.flush_hybrid_keys(&self.hk_path, &*map)
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.hybrid_keys)?;
|
||||
Ok(map.get(identity_key).cloned())
|
||||
}
|
||||
|
||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
||||
if let Some(parent) = self.setup_path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(&self.setup_path, setup).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
if !self.setup_path.exists() {
|
||||
return Ok(None);
|
||||
}
|
||||
let bytes = fs::read(&self.setup_path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
Ok(Some(bytes))
|
||||
}
|
||||
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.users)?;
|
||||
map.insert(username.to_string(), record);
|
||||
self.flush_users(&self.users_path, &*map)
|
||||
}
|
||||
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.users)?;
|
||||
Ok(map.get(username).cloned())
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let map = lock(&self.users)?;
|
||||
Ok(map.contains_key(username))
|
||||
}
|
||||
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.identity_keys)?;
|
||||
map.insert(username.to_string(), identity_key);
|
||||
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.identity_keys)?;
|
||||
Ok(map.get(username).cloned())
|
||||
}
|
||||
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.endpoints)?;
|
||||
map.insert(identity_key.to_vec(), node_addr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.endpoints)?;
|
||||
Ok(map.get(identity_key).cloned())
|
||||
}
|
||||
}
|
||||
104
crates/quicprochat-client/Cargo.toml
Normal file
104
crates/quicprochat-client/Cargo.toml
Normal file
@@ -0,0 +1,104 @@
|
||||
[package]
|
||||
name = "quicprochat-client"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
description = "CLI client for quicprochat."
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "qpc"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicprochat-core = { path = "../quicprochat-core" }
|
||||
quicprochat-proto = { path = "../quicprochat-proto" }
|
||||
quicprochat-kt = { path = "../quicprochat-kt" }
|
||||
openmls_rust_crypto = { workspace = true }
|
||||
|
||||
# Serialisation + RPC
|
||||
capnp = { workspace = true }
|
||||
capnp-rpc = { workspace = true }
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
|
||||
# Crypto — OPAQUE PAKE
|
||||
opaque-ke = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
|
||||
# Error handling
|
||||
anyhow = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
# Crypto — for fingerprint verification in fetch-key subcommand
|
||||
sha2 = { workspace = true }
|
||||
argon2 = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
ciborium = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
quinn = { workspace = true }
|
||||
quinn-proto = { workspace = true }
|
||||
rustls = { workspace = true }
|
||||
|
||||
# Logging
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
# CLI + config
|
||||
clap = { workspace = true }
|
||||
toml = { workspace = true }
|
||||
|
||||
# Local message/conversation storage
|
||||
rusqlite = { workspace = true }
|
||||
|
||||
# Hex encoding/decoding
|
||||
hex = { workspace = true }
|
||||
|
||||
# Secure password prompting (no echo)
|
||||
rpassword = "5"
|
||||
|
||||
# mDNS discovery for mesh mode (Freifunk). Only compiled with --features mesh.
|
||||
mdns-sd = { version = "0.12", optional = true }
|
||||
|
||||
# Optional P2P transport for direct node-to-node messaging.
|
||||
quicprochat-p2p = { path = "../quicprochat-p2p", optional = true }
|
||||
|
||||
# Optional TUI dependencies (Ratatui full-screen interface).
|
||||
ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] }
|
||||
crossterm = { version = "0.28", optional = true }
|
||||
|
||||
# YAML playbook parsing (only compiled with --features playbook).
|
||||
serde_yaml = { version = "0.9", optional = true }
|
||||
|
||||
# v2 SDK-based CLI (thin shell over quicprochat-sdk).
|
||||
quicprochat-sdk = { path = "../quicprochat-sdk", optional = true }
|
||||
quicprochat-rpc = { path = "../quicprochat-rpc", optional = true }
|
||||
rustyline = { workspace = true, optional = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
# Enable mesh-mode features: mDNS local peer discovery + P2P transport.
|
||||
# Build: cargo build -p quicprochat-client --features mesh
|
||||
mesh = ["dep:mdns-sd", "dep:quicprochat-p2p"]
|
||||
# Enable full-screen Ratatui TUI: cargo build -p quicprochat-client --features tui
|
||||
tui = ["dep:ratatui", "dep:crossterm"]
|
||||
# Enable playbook (scripted command execution): YAML parser + serde derives.
|
||||
# Build: cargo build -p quicprochat-client --features playbook
|
||||
playbook = ["dep:serde_yaml"]
|
||||
# v2 CLI over SDK: cargo build -p quicprochat-client --features v2
|
||||
v2 = ["dep:quicprochat-sdk", "dep:quicprochat-rpc", "dep:rustyline"]
|
||||
|
||||
[dev-dependencies]
|
||||
dashmap = { workspace = true }
|
||||
assert_cmd = "2"
|
||||
tempfile = "3"
|
||||
portpicker = "0.1"
|
||||
rand = "0.8"
|
||||
524
crates/quicprochat-client/src/client/command_engine.rs
Normal file
524
crates/quicprochat-client/src/client/command_engine.rs
Normal file
@@ -0,0 +1,524 @@
|
||||
//! Command engine: typed command enum, registry, and execution bridge.
|
||||
//!
|
||||
//! Maps every REPL slash command and lifecycle operation into a single `Command`
|
||||
//! enum with typed parameters. `CommandRegistry` parses raw input and delegates
|
||||
//! execution to the existing `cmd_*` handlers in `repl.rs`.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
|
||||
use super::repl::{Input, SlashCommand, parse_input};
|
||||
use super::session::SessionState;
|
||||
|
||||
// ── Comparison operator for assert conditions ────────────────────────────────
|
||||
|
||||
/// Comparison operator used in playbook assertions.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
|
||||
pub enum CmpOp {
|
||||
Eq,
|
||||
Ne,
|
||||
Gt,
|
||||
Lt,
|
||||
Gte,
|
||||
Lte,
|
||||
}
|
||||
|
||||
impl CmpOp {
|
||||
/// Evaluate this comparison: `lhs <op> rhs`.
|
||||
pub fn eval(&self, lhs: usize, rhs: usize) -> bool {
|
||||
match self {
|
||||
CmpOp::Eq => lhs == rhs,
|
||||
CmpOp::Ne => lhs != rhs,
|
||||
CmpOp::Gt => lhs > rhs,
|
||||
CmpOp::Lt => lhs < rhs,
|
||||
CmpOp::Gte => lhs >= rhs,
|
||||
CmpOp::Lte => lhs <= rhs,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Assert conditions for playbook testing ───────────────────────────────────
|
||||
|
||||
/// Conditions that can be asserted in a playbook step.
|
||||
#[derive(Debug, Clone)]
|
||||
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
|
||||
pub enum AssertCondition {
|
||||
Connected,
|
||||
LoggedIn,
|
||||
InConversation { name: String },
|
||||
MessageCount { op: CmpOp, count: usize },
|
||||
MemberCount { op: CmpOp, count: usize },
|
||||
Custom { expression: String },
|
||||
}
|
||||
|
||||
// ── Command enum ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// Every operation the client can perform, with typed parameters.
|
||||
///
|
||||
/// This is a superset of `SlashCommand` — it adds lifecycle operations
|
||||
/// (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`, `Assert`, `SetVar`)
|
||||
/// that are needed for non-interactive / playbook execution.
|
||||
#[derive(Debug, Clone)]
|
||||
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
|
||||
pub enum Command {
|
||||
// ── Lifecycle (not in SlashCommand) ──────────────────────────────────
|
||||
Connect {
|
||||
server: String,
|
||||
ca_cert: Option<String>,
|
||||
insecure: bool,
|
||||
},
|
||||
Login {
|
||||
username: String,
|
||||
password: String,
|
||||
},
|
||||
Register {
|
||||
username: String,
|
||||
password: String,
|
||||
},
|
||||
SendMessage {
|
||||
text: String,
|
||||
},
|
||||
Wait {
|
||||
duration_ms: u64,
|
||||
},
|
||||
Assert {
|
||||
condition: AssertCondition,
|
||||
},
|
||||
SetVar {
|
||||
name: String,
|
||||
value: String,
|
||||
},
|
||||
|
||||
// ── SlashCommand mirror ─────────────────────────────────────────────
|
||||
Help,
|
||||
Quit,
|
||||
Whoami,
|
||||
List,
|
||||
Switch { target: String },
|
||||
Dm { username: String },
|
||||
CreateGroup { name: String },
|
||||
Invite { target: String },
|
||||
Remove { target: String },
|
||||
Leave,
|
||||
Join,
|
||||
Members,
|
||||
GroupInfo,
|
||||
Rename { name: String },
|
||||
History { count: usize },
|
||||
|
||||
// Mesh
|
||||
MeshStart,
|
||||
MeshStop,
|
||||
MeshPeers,
|
||||
MeshServer { addr: String },
|
||||
MeshSend { peer_id: String, message: String },
|
||||
MeshBroadcast { topic: String, message: String },
|
||||
MeshSubscribe { topic: String },
|
||||
MeshRoute,
|
||||
MeshIdentity,
|
||||
MeshStore,
|
||||
MeshTrace { address: String },
|
||||
MeshStats,
|
||||
|
||||
// Security / crypto
|
||||
Verify { username: String },
|
||||
UpdateKey,
|
||||
Typing,
|
||||
TypingNotify { enabled: bool },
|
||||
React { emoji: String, index: Option<usize> },
|
||||
Edit { index: usize, new_text: String },
|
||||
Delete { index: usize },
|
||||
SendFile { path: String },
|
||||
Download { index: usize },
|
||||
DeleteAccount,
|
||||
Disappear { arg: Option<String> },
|
||||
Privacy { arg: Option<String> },
|
||||
VerifyFs,
|
||||
RotateAllKeys,
|
||||
Devices,
|
||||
RegisterDevice { name: String },
|
||||
RevokeDevice { id_prefix: String },
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Convert a `Command` to a `SlashCommand` when possible.
|
||||
///
|
||||
/// Returns `None` for lifecycle commands that have no `SlashCommand`
|
||||
/// equivalent (`Connect`, `Login`, `Register`, `SendMessage`, `Wait`,
|
||||
/// `Assert`, `SetVar`).
|
||||
pub(crate) fn to_slash(&self) -> Option<SlashCommand> {
|
||||
match self.clone() {
|
||||
// Lifecycle — no SlashCommand equivalent
|
||||
Command::Connect { .. }
|
||||
| Command::Login { .. }
|
||||
| Command::Register { .. }
|
||||
| Command::SendMessage { .. }
|
||||
| Command::Wait { .. }
|
||||
| Command::Assert { .. }
|
||||
| Command::SetVar { .. } => None,
|
||||
|
||||
// 1:1 mirror
|
||||
Command::Help => Some(SlashCommand::Help),
|
||||
Command::Quit => Some(SlashCommand::Quit),
|
||||
Command::Whoami => Some(SlashCommand::Whoami),
|
||||
Command::List => Some(SlashCommand::List),
|
||||
Command::Switch { target } => Some(SlashCommand::Switch { target }),
|
||||
Command::Dm { username } => Some(SlashCommand::Dm { username }),
|
||||
Command::CreateGroup { name } => Some(SlashCommand::CreateGroup { name }),
|
||||
Command::Invite { target } => Some(SlashCommand::Invite { target }),
|
||||
Command::Remove { target } => Some(SlashCommand::Remove { target }),
|
||||
Command::Leave => Some(SlashCommand::Leave),
|
||||
Command::Join => Some(SlashCommand::Join),
|
||||
Command::Members => Some(SlashCommand::Members),
|
||||
Command::GroupInfo => Some(SlashCommand::GroupInfo),
|
||||
Command::Rename { name } => Some(SlashCommand::Rename { name }),
|
||||
Command::History { count } => Some(SlashCommand::History { count }),
|
||||
Command::MeshStart => Some(SlashCommand::MeshStart),
|
||||
Command::MeshStop => Some(SlashCommand::MeshStop),
|
||||
Command::MeshPeers => Some(SlashCommand::MeshPeers),
|
||||
Command::MeshServer { addr } => Some(SlashCommand::MeshServer { addr }),
|
||||
Command::MeshSend { peer_id, message } => {
|
||||
Some(SlashCommand::MeshSend { peer_id, message })
|
||||
}
|
||||
Command::MeshBroadcast { topic, message } => {
|
||||
Some(SlashCommand::MeshBroadcast { topic, message })
|
||||
}
|
||||
Command::MeshSubscribe { topic } => Some(SlashCommand::MeshSubscribe { topic }),
|
||||
Command::MeshRoute => Some(SlashCommand::MeshRoute),
|
||||
Command::MeshIdentity => Some(SlashCommand::MeshIdentity),
|
||||
Command::MeshStore => Some(SlashCommand::MeshStore),
|
||||
Command::MeshTrace { address } => Some(SlashCommand::MeshTrace { address }),
|
||||
Command::MeshStats => Some(SlashCommand::MeshStats),
|
||||
Command::Verify { username } => Some(SlashCommand::Verify { username }),
|
||||
Command::UpdateKey => Some(SlashCommand::UpdateKey),
|
||||
Command::Typing => Some(SlashCommand::Typing),
|
||||
Command::TypingNotify { enabled } => Some(SlashCommand::TypingNotify { enabled }),
|
||||
Command::React { emoji, index } => Some(SlashCommand::React { emoji, index }),
|
||||
Command::Edit { index, new_text } => Some(SlashCommand::Edit { index, new_text }),
|
||||
Command::Delete { index } => Some(SlashCommand::Delete { index }),
|
||||
Command::SendFile { path } => Some(SlashCommand::SendFile { path }),
|
||||
Command::Download { index } => Some(SlashCommand::Download { index }),
|
||||
Command::DeleteAccount => Some(SlashCommand::DeleteAccount),
|
||||
Command::Disappear { arg } => Some(SlashCommand::Disappear { arg }),
|
||||
Command::Privacy { arg } => Some(SlashCommand::Privacy { arg }),
|
||||
Command::VerifyFs => Some(SlashCommand::VerifyFs),
|
||||
Command::RotateAllKeys => Some(SlashCommand::RotateAllKeys),
|
||||
Command::Devices => Some(SlashCommand::Devices),
|
||||
Command::RegisterDevice { name } => Some(SlashCommand::RegisterDevice { name }),
|
||||
Command::RevokeDevice { id_prefix } => {
|
||||
Some(SlashCommand::RevokeDevice { id_prefix })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── CommandResult ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Outcome of executing a single `Command`.
///
/// `success` is the primary signal. `output` and `error` carry optional
/// human-readable text, while `data` carries structured key-value pairs
/// (used for variable capture in playbooks).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "playbook", derive(serde::Serialize, serde::Deserialize))]
pub struct CommandResult {
    // True when the command completed without error.
    pub success: bool,
    // Optional human-readable output text.
    pub output: Option<String>,
    // Error message; populated when `success` is false.
    pub error: Option<String>,
    /// Structured key-value outputs for variable capture in playbooks.
    pub data: HashMap<String, String>,
}
|
||||
|
||||
impl CommandResult {
|
||||
fn ok() -> Self {
|
||||
Self {
|
||||
success: true,
|
||||
output: None,
|
||||
error: None,
|
||||
data: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn err(msg: String) -> Self {
|
||||
Self {
|
||||
success: false,
|
||||
output: None,
|
||||
error: Some(msg),
|
||||
data: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── CommandRegistry ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Parses raw input into `Command` and delegates execution to the existing
/// REPL handlers.
///
/// Stateless marker type: `parse` and `execute` are associated functions
/// that operate on the caller-provided `SessionState` and client.
pub struct CommandRegistry;
|
||||
|
||||
impl CommandRegistry {
|
||||
/// Parse a raw input line into a `Command`.
|
||||
///
|
||||
/// Returns `None` for empty input. Returns `Some(Command::SendMessage)`
|
||||
/// for plain chat text. Slash commands are parsed via the existing
|
||||
/// `parse_input` function.
|
||||
pub fn parse(line: &str) -> Option<Command> {
|
||||
match parse_input(line) {
|
||||
Input::Empty => None,
|
||||
Input::ChatMessage(text) => Some(Command::SendMessage { text }),
|
||||
Input::Slash(sc) => Some(slash_to_command(sc)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute a `Command`, delegating slash commands to the existing
|
||||
/// `handle_slash` dispatch and handling lifecycle commands directly.
|
||||
///
|
||||
/// Currently, output from `cmd_*` handlers goes to stdout (unchanged).
|
||||
/// `CommandResult` captures success/failure status; stdout capture can
|
||||
/// be added later.
|
||||
pub async fn execute(
|
||||
cmd: &Command,
|
||||
session: &mut SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> CommandResult {
|
||||
match cmd {
|
||||
Command::Wait { duration_ms } => {
|
||||
tokio::time::sleep(std::time::Duration::from_millis(*duration_ms)).await;
|
||||
CommandResult::ok()
|
||||
}
|
||||
Command::SetVar { name, value } => {
|
||||
let mut result = CommandResult::ok();
|
||||
result.data.insert(name.clone(), value.clone());
|
||||
result
|
||||
}
|
||||
Command::Assert { condition } => execute_assert(condition, session),
|
||||
Command::Connect { .. } | Command::Login { .. } | Command::Register { .. } => {
|
||||
// These lifecycle commands require external context (endpoint,
|
||||
// OPAQUE state) that lives outside SessionState. The playbook
|
||||
// executor will handle them directly; calling execute() for
|
||||
// them is an error.
|
||||
CommandResult::err(
|
||||
"lifecycle commands (connect/login/register) must be handled by the playbook executor".into(),
|
||||
)
|
||||
}
|
||||
Command::SendMessage { text } => {
|
||||
match super::repl::do_send(session, client, text).await {
|
||||
Ok(()) => CommandResult::ok(),
|
||||
Err(e) => CommandResult::err(format!("{e:#}")),
|
||||
}
|
||||
}
|
||||
Command::Quit => CommandResult::ok(),
|
||||
other => {
|
||||
// All remaining variants have a SlashCommand equivalent.
|
||||
if let Some(sc) = other.to_slash() {
|
||||
match execute_slash(session, client, sc).await {
|
||||
Ok(()) => CommandResult::ok(),
|
||||
Err(e) => CommandResult::err(format!("{e:#}")),
|
||||
}
|
||||
} else {
|
||||
CommandResult::err("command has no slash equivalent".into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Conversion helpers ──────────────────────────────────────────────────────
|
||||
|
||||
/// Convert a `SlashCommand` into the corresponding `Command`.
|
||||
fn slash_to_command(sc: SlashCommand) -> Command {
|
||||
match sc {
|
||||
SlashCommand::Help => Command::Help,
|
||||
SlashCommand::Quit => Command::Quit,
|
||||
SlashCommand::Whoami => Command::Whoami,
|
||||
SlashCommand::List => Command::List,
|
||||
SlashCommand::Switch { target } => Command::Switch { target },
|
||||
SlashCommand::Dm { username } => Command::Dm { username },
|
||||
SlashCommand::CreateGroup { name } => Command::CreateGroup { name },
|
||||
SlashCommand::Invite { target } => Command::Invite { target },
|
||||
SlashCommand::Remove { target } => Command::Remove { target },
|
||||
SlashCommand::Leave => Command::Leave,
|
||||
SlashCommand::Join => Command::Join,
|
||||
SlashCommand::Members => Command::Members,
|
||||
SlashCommand::GroupInfo => Command::GroupInfo,
|
||||
SlashCommand::Rename { name } => Command::Rename { name },
|
||||
SlashCommand::History { count } => Command::History { count },
|
||||
SlashCommand::MeshStart => Command::MeshStart,
|
||||
SlashCommand::MeshStop => Command::MeshStop,
|
||||
SlashCommand::MeshPeers => Command::MeshPeers,
|
||||
SlashCommand::MeshServer { addr } => Command::MeshServer { addr },
|
||||
SlashCommand::MeshSend { peer_id, message } => Command::MeshSend { peer_id, message },
|
||||
SlashCommand::MeshBroadcast { topic, message } => {
|
||||
Command::MeshBroadcast { topic, message }
|
||||
}
|
||||
SlashCommand::MeshSubscribe { topic } => Command::MeshSubscribe { topic },
|
||||
SlashCommand::MeshRoute => Command::MeshRoute,
|
||||
SlashCommand::MeshIdentity => Command::MeshIdentity,
|
||||
SlashCommand::MeshStore => Command::MeshStore,
|
||||
SlashCommand::MeshTrace { address } => Command::MeshTrace { address },
|
||||
SlashCommand::MeshStats => Command::MeshStats,
|
||||
SlashCommand::Verify { username } => Command::Verify { username },
|
||||
SlashCommand::UpdateKey => Command::UpdateKey,
|
||||
SlashCommand::Typing => Command::Typing,
|
||||
SlashCommand::TypingNotify { enabled } => Command::TypingNotify { enabled },
|
||||
SlashCommand::React { emoji, index } => Command::React { emoji, index },
|
||||
SlashCommand::Edit { index, new_text } => Command::Edit { index, new_text },
|
||||
SlashCommand::Delete { index } => Command::Delete { index },
|
||||
SlashCommand::SendFile { path } => Command::SendFile { path },
|
||||
SlashCommand::Download { index } => Command::Download { index },
|
||||
SlashCommand::DeleteAccount => Command::DeleteAccount,
|
||||
SlashCommand::Disappear { arg } => Command::Disappear { arg },
|
||||
SlashCommand::Privacy { arg } => Command::Privacy { arg },
|
||||
SlashCommand::VerifyFs => Command::VerifyFs,
|
||||
SlashCommand::RotateAllKeys => Command::RotateAllKeys,
|
||||
SlashCommand::Devices => Command::Devices,
|
||||
SlashCommand::RegisterDevice { name } => Command::RegisterDevice { name },
|
||||
SlashCommand::RevokeDevice { id_prefix } => Command::RevokeDevice { id_prefix },
|
||||
}
|
||||
}
|
||||
|
||||
// ── Execution helpers ───────────────────────────────────────────────────────
|
||||
|
||||
/// Execute a `SlashCommand` using the existing `cmd_*` handlers from `repl.rs`.
///
/// This duplicates the dispatch table from `handle_slash` but returns
/// `anyhow::Result<()>` instead of printing errors inline — the caller
/// decides how to surface errors.
///
/// NOTE(review): handlers print their output to stdout directly; only the
/// success/failure status is propagated through the return value.
async fn execute_slash(
    session: &mut SessionState,
    client: &node_service::Client,
    cmd: SlashCommand,
) -> anyhow::Result<()> {
    use super::repl::*;
    match cmd {
        SlashCommand::Help => {
            print_help();
            Ok(())
        }
        // Quit is a no-op here; loop termination is the caller's job.
        SlashCommand::Quit => Ok(()),
        SlashCommand::Whoami => cmd_whoami(session),
        SlashCommand::List => cmd_list(session),
        SlashCommand::Switch { target } => cmd_switch(session, &target),
        // Conversation / group management.
        SlashCommand::Dm { username } => cmd_dm(session, client, &username).await,
        SlashCommand::CreateGroup { name } => cmd_create_group(session, &name),
        SlashCommand::Invite { target } => cmd_invite(session, client, &target).await,
        SlashCommand::Remove { target } => cmd_remove(session, client, &target).await,
        SlashCommand::Leave => cmd_leave(session, client).await,
        SlashCommand::Join => cmd_join(session, client).await,
        SlashCommand::Members => cmd_members(session, client).await,
        SlashCommand::GroupInfo => cmd_group_info(session, client).await,
        SlashCommand::Rename { name } => cmd_rename(session, &name),
        SlashCommand::History { count } => cmd_history(session, count),
        // Mesh networking.
        SlashCommand::MeshStart => cmd_mesh_start(session).await,
        SlashCommand::MeshStop => cmd_mesh_stop(session).await,
        SlashCommand::MeshPeers => cmd_mesh_peers(),
        SlashCommand::MeshServer { addr } => {
            // No runtime action; only prints a hint on how to reconnect.
            super::display::print_status(&format!(
                "mesh server hint: reconnect with --server {addr} to use this node"
            ));
            Ok(())
        }
        SlashCommand::MeshSend { peer_id, message } => cmd_mesh_send(session, &peer_id, &message).await,
        SlashCommand::MeshBroadcast { topic, message } => cmd_mesh_broadcast(session, &topic, &message).await,
        SlashCommand::MeshSubscribe { topic } => cmd_mesh_subscribe(session, &topic),
        SlashCommand::MeshRoute => cmd_mesh_route(session),
        SlashCommand::MeshIdentity => cmd_mesh_identity(session),
        SlashCommand::MeshStore => cmd_mesh_store(session),
        SlashCommand::MeshTrace { address } => cmd_mesh_trace(session, &address),
        SlashCommand::MeshStats => cmd_mesh_stats(session),
        // Keys & verification.
        SlashCommand::Verify { username } => cmd_verify(session, client, &username).await,
        SlashCommand::UpdateKey => cmd_update_key(session, client).await,
        SlashCommand::Typing => cmd_typing(session, client).await,
        SlashCommand::TypingNotify { enabled } => {
            // Handled inline: just toggles the session flag and reports.
            session.typing_notify_enabled = enabled;
            super::display::print_status(&format!(
                "typing notifications {}",
                if enabled { "enabled" } else { "disabled" }
            ));
            Ok(())
        }
        // Message operations.
        SlashCommand::React { emoji, index } => cmd_react(session, client, &emoji, index).await,
        SlashCommand::Edit { index, new_text } => {
            cmd_edit(session, client, index, &new_text).await
        }
        SlashCommand::Delete { index } => cmd_delete(session, client, index).await,
        SlashCommand::SendFile { path } => cmd_send_file(session, client, &path).await,
        SlashCommand::Download { index } => cmd_download(session, client, index).await,
        // Account & privacy.
        SlashCommand::DeleteAccount => cmd_delete_account(session, client).await,
        SlashCommand::Disappear { arg } => cmd_disappear(session, arg.as_deref()),
        SlashCommand::Privacy { arg } => cmd_privacy(session, arg.as_deref()),
        SlashCommand::VerifyFs => cmd_verify_fs(session),
        SlashCommand::RotateAllKeys => cmd_rotate_all_keys(session, client).await,
        // Device management.
        SlashCommand::Devices => cmd_devices(client).await,
        SlashCommand::RegisterDevice { name } => cmd_register_device(client, &name).await,
        SlashCommand::RevokeDevice { id_prefix } => cmd_revoke_device(client, &id_prefix).await,
    }
}
|
||||
|
||||
/// Assert a condition against the current session state.
|
||||
fn execute_assert(condition: &AssertCondition, session: &SessionState) -> CommandResult {
|
||||
match condition {
|
||||
AssertCondition::Connected => {
|
||||
// We have a session => we got past connect. Always true when
|
||||
// execute() is called with a valid client reference.
|
||||
CommandResult::ok()
|
||||
}
|
||||
AssertCondition::LoggedIn => {
|
||||
let guard = crate::AUTH_CONTEXT
|
||||
.read()
|
||||
.expect("AUTH_CONTEXT poisoned");
|
||||
if guard.is_some() {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err("not logged in".into())
|
||||
}
|
||||
}
|
||||
AssertCondition::InConversation { name } => {
|
||||
if let Some(display) = session.active_display_name() {
|
||||
if display.contains(name.as_str()) {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err(format!(
|
||||
"active conversation is '{display}', expected '{name}'"
|
||||
))
|
||||
}
|
||||
} else {
|
||||
CommandResult::err("no active conversation".into())
|
||||
}
|
||||
}
|
||||
AssertCondition::MessageCount { op, count } => {
|
||||
let actual = session
|
||||
.active_conversation
|
||||
.as_ref()
|
||||
.and_then(|id| session.conv_store.load_all_messages(id).ok())
|
||||
.map(|msgs| msgs.len())
|
||||
.unwrap_or(0);
|
||||
if op.eval(actual, *count) {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err(format!(
|
||||
"message count assertion failed: {actual} {op:?} {count}"
|
||||
))
|
||||
}
|
||||
}
|
||||
AssertCondition::MemberCount { op, count } => {
|
||||
let actual = session
|
||||
.active_conversation
|
||||
.as_ref()
|
||||
.and_then(|id| session.members.get(id))
|
||||
.map(|m| m.member_identities().len())
|
||||
.unwrap_or(0);
|
||||
if op.eval(actual, *count) {
|
||||
CommandResult::ok()
|
||||
} else {
|
||||
CommandResult::err(format!(
|
||||
"member count assertion failed: {actual} {op:?} {count}"
|
||||
))
|
||||
}
|
||||
}
|
||||
AssertCondition::Custom { expression } => {
|
||||
// Custom expressions are not evaluated yet; always pass.
|
||||
let mut result = CommandResult::ok();
|
||||
result.data.insert("expression".into(), expression.clone());
|
||||
result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,9 +5,9 @@ use opaque_ke::{
|
||||
ClientLogin, ClientLoginFinishParameters, ClientRegistration,
|
||||
ClientRegistrationFinishParameters, CredentialResponse, RegistrationResponse,
|
||||
};
|
||||
use quicnprotochat_core::{
|
||||
use quicprochat_core::{
|
||||
generate_key_package, hybrid_decrypt, hybrid_encrypt, opaque_auth::OpaqueSuite,
|
||||
GroupMember, HybridKeypair, IdentityKeypair,
|
||||
GroupMember, HybridKeypair, IdentityKeypair, ReceivedMessage,
|
||||
};
|
||||
|
||||
use super::{
|
||||
@@ -27,8 +27,8 @@ pub fn cmd_whoami(state_path: &Path, password: Option<&str>) -> anyhow::Result<(
|
||||
let pk_bytes = identity.public_key_bytes();
|
||||
let fingerprint = sha256(&pk_bytes);
|
||||
|
||||
println!("identity_key : {}", hex::encode(&pk_bytes));
|
||||
println!("fingerprint : {}", hex::encode(&fingerprint));
|
||||
println!("identity_key : {}", hex::encode(pk_bytes));
|
||||
println!("fingerprint : {}", hex::encode(fingerprint));
|
||||
println!(
|
||||
"hybrid_key : {}",
|
||||
if state.hybrid_key.is_some() {
|
||||
@@ -203,6 +203,7 @@ pub async fn cmd_register_user(
|
||||
}
|
||||
|
||||
/// Log in via the OPAQUE protocol and receive a session token.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn cmd_login(
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
@@ -310,6 +311,129 @@ fn derive_identity_for_login(
|
||||
))
|
||||
}
|
||||
|
||||
// ── OPAQUE helpers (used by both one-shot commands and REPL bootstrap) ───────
|
||||
|
||||
/// Perform OPAQUE registration. Returns Ok(()) on success.
|
||||
/// The error message contains "E018" if the user already exists.
|
||||
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
|
||||
pub(crate) async fn opaque_register(
|
||||
client: &quicprochat_proto::node_capnp::node_service::Client,
|
||||
username: &str,
|
||||
password: &str,
|
||||
identity_key: Option<&[u8]>,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
|
||||
let reg_start = ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
|
||||
|
||||
let mut req = client.opaque_register_start_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_request(®_start.message.serialize());
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_register_start RPC failed")?;
|
||||
let response_bytes = resp
|
||||
.get()
|
||||
.context("register_start: bad response")?
|
||||
.get_response()
|
||||
.context("register_start: missing response")?
|
||||
.to_vec();
|
||||
|
||||
let reg_response = RegistrationResponse::<OpaqueSuite>::deserialize(&response_bytes)
|
||||
.map_err(|e| anyhow::anyhow!("invalid registration response: {e}"))?;
|
||||
|
||||
let reg_finish = reg_start
|
||||
.state
|
||||
.finish(
|
||||
&mut rng,
|
||||
password.as_bytes(),
|
||||
reg_response,
|
||||
ClientRegistrationFinishParameters::<OpaqueSuite>::default(),
|
||||
)
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE register finish: {e}"))?;
|
||||
|
||||
let mut req = client.opaque_register_finish_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_upload(®_finish.message.serialize());
|
||||
if let Some(ik) = identity_key {
|
||||
p.set_identity_key(ik);
|
||||
} else {
|
||||
p.set_identity_key(&[]);
|
||||
}
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_register_finish RPC failed")?;
|
||||
let success = resp
|
||||
.get()
|
||||
.context("register_finish: bad response")?
|
||||
.get_success();
|
||||
|
||||
anyhow::ensure!(success, "server rejected registration");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Perform OPAQUE login and return the raw session token bytes.
|
||||
/// Does NOT require init_auth() — OPAQUE RPCs are unauthenticated.
|
||||
pub async fn opaque_login(
|
||||
client: &quicprochat_proto::node_capnp::node_service::Client,
|
||||
username: &str,
|
||||
password: &str,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
|
||||
let login_start = ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
|
||||
|
||||
let mut req = client.opaque_login_start_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_request(&login_start.message.serialize());
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_login_start RPC failed")?;
|
||||
let response_bytes = resp
|
||||
.get()
|
||||
.context("login_start: bad response")?
|
||||
.get_response()
|
||||
.context("login_start: missing response")?
|
||||
.to_vec();
|
||||
|
||||
let credential_response = CredentialResponse::<OpaqueSuite>::deserialize(&response_bytes)
|
||||
.map_err(|e| anyhow::anyhow!("invalid credential response: {e}"))?;
|
||||
|
||||
let login_finish = login_start
|
||||
.state
|
||||
.finish(
|
||||
&mut rng,
|
||||
password.as_bytes(),
|
||||
credential_response,
|
||||
ClientLoginFinishParameters::<OpaqueSuite>::default(),
|
||||
)
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE login finish (bad password?): {e}"))?;
|
||||
|
||||
let mut req = client.opaque_login_finish_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
p.set_finalization(&login_finish.message.serialize());
|
||||
p.set_identity_key(identity_key);
|
||||
}
|
||||
let resp = req.send().promise.await.context("opaque_login_finish RPC failed")?;
|
||||
let session_token = resp
|
||||
.get()
|
||||
.context("login_finish: bad response")?
|
||||
.get_session_token()
|
||||
.context("login_finish: missing session_token")?
|
||||
.to_vec();
|
||||
|
||||
anyhow::ensure!(!session_token.is_empty(), "server returned empty session token");
|
||||
Ok(session_token)
|
||||
}
|
||||
|
||||
/// Generate a KeyPackage for a fresh identity and upload it to the AS.
|
||||
pub async fn cmd_register(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
|
||||
let identity = IdentityKeypair::generate();
|
||||
@@ -399,7 +523,7 @@ async fn do_upload_keypackage(
|
||||
|
||||
anyhow::ensure!(server_fp == fingerprint, "fingerprint mismatch");
|
||||
|
||||
if let Some(ref hkp) = hybrid_kp {
|
||||
if let Some(hkp) = &hybrid_kp {
|
||||
upload_hybrid_key(
|
||||
&node_client,
|
||||
&member.identity().public_key_bytes(),
|
||||
@@ -523,8 +647,8 @@ pub async fn cmd_fetch_key(
|
||||
|
||||
/// Run a two-party MLS demo against the unified server.
|
||||
pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
|
||||
let creator_state_path = PathBuf::from("quicnprotochat-demo-creator.bin");
|
||||
let joiner_state_path = PathBuf::from("quicnprotochat-demo-joiner.bin");
|
||||
let creator_state_path = PathBuf::from("qpc-demo-creator.bin");
|
||||
let joiner_state_path = PathBuf::from("qpc-demo-joiner.bin");
|
||||
|
||||
let (mut creator, creator_hybrid_opt) =
|
||||
load_or_init_state(&creator_state_path, None)?.into_parts(&creator_state_path)?;
|
||||
@@ -574,7 +698,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
.await?
|
||||
.context("joiner hybrid key not found")?;
|
||||
let wrapped_welcome =
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &welcome).context("hybrid encrypt welcome")?;
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &welcome, b"", b"").context("hybrid encrypt welcome")?;
|
||||
enqueue(&creator_ds, &joiner_identity, &wrapped_welcome).await?;
|
||||
|
||||
let welcome_payloads = fetch_all(&joiner_ds, &joiner_identity).await?;
|
||||
@@ -584,7 +708,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
.context("Welcome was not delivered to joiner via DS")?;
|
||||
|
||||
let welcome_bytes =
|
||||
hybrid_decrypt(&joiner_hybrid, &raw_welcome).context("hybrid decrypt welcome failed")?;
|
||||
hybrid_decrypt(&joiner_hybrid, &raw_welcome, b"", b"").context("hybrid decrypt welcome failed")?;
|
||||
joiner
|
||||
.join_group(&welcome_bytes)
|
||||
.context("join_group failed")?;
|
||||
@@ -593,7 +717,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
.send_message(b"hello")
|
||||
.context("send_message failed")?;
|
||||
let wrapped_creator_joiner =
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &ct_creator_to_joiner).context("hybrid encrypt failed")?;
|
||||
hybrid_encrypt(&joiner_hybrid_pk, &ct_creator_to_joiner, b"", b"").context("hybrid encrypt failed")?;
|
||||
enqueue(&creator_ds, &joiner_identity, &wrapped_creator_joiner).await?;
|
||||
|
||||
let joiner_msgs = fetch_all(&joiner_ds, &joiner_identity).await?;
|
||||
@@ -601,10 +725,11 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
.first()
|
||||
.context("joiner: missing ciphertext from DS")?;
|
||||
let inner_creator_joiner =
|
||||
hybrid_decrypt(&joiner_hybrid, raw_creator_joiner).context("hybrid decrypt failed")?;
|
||||
let plaintext_creator_joiner = joiner
|
||||
.receive_message(&inner_creator_joiner)?
|
||||
.context("expected application message")?;
|
||||
hybrid_decrypt(&joiner_hybrid, raw_creator_joiner, b"", b"").context("hybrid decrypt failed")?;
|
||||
let plaintext_creator_joiner = match joiner.receive_message(&inner_creator_joiner)? {
|
||||
ReceivedMessage::Application(pt) => pt,
|
||||
other => anyhow::bail!("expected application message, got {other:?}"),
|
||||
};
|
||||
println!(
|
||||
"creator -> joiner plaintext: {}",
|
||||
String::from_utf8_lossy(&plaintext_creator_joiner)
|
||||
@@ -617,7 +742,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
.send_message(b"hello back")
|
||||
.context("send_message failed")?;
|
||||
let wrapped_joiner_creator =
|
||||
hybrid_encrypt(&creator_hybrid_pk, &ct_joiner_to_creator).context("hybrid encrypt failed")?;
|
||||
hybrid_encrypt(&creator_hybrid_pk, &ct_joiner_to_creator, b"", b"").context("hybrid encrypt failed")?;
|
||||
enqueue(&joiner_ds, &creator_identity, &wrapped_joiner_creator).await?;
|
||||
|
||||
let creator_msgs = fetch_all(&creator_ds, &creator_identity).await?;
|
||||
@@ -625,10 +750,11 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
.first()
|
||||
.context("creator: missing ciphertext from DS")?;
|
||||
let inner_joiner_creator =
|
||||
hybrid_decrypt(&creator_hybrid, raw_joiner_creator).context("hybrid decrypt failed")?;
|
||||
let plaintext_joiner_creator = creator
|
||||
.receive_message(&inner_joiner_creator)?
|
||||
.context("expected application message")?;
|
||||
hybrid_decrypt(&creator_hybrid, raw_joiner_creator, b"", b"").context("hybrid decrypt failed")?;
|
||||
let plaintext_joiner_creator = match creator.receive_message(&inner_joiner_creator)? {
|
||||
ReceivedMessage::Application(pt) => pt,
|
||||
other => anyhow::bail!("expected application message, got {other:?}"),
|
||||
};
|
||||
println!(
|
||||
"joiner -> creator plaintext: {}",
|
||||
String::from_utf8_lossy(&plaintext_joiner_creator)
|
||||
@@ -701,7 +827,7 @@ pub async fn cmd_invite(
|
||||
}
|
||||
let peer_hpk = fetch_hybrid_key(&node_client, mk).await?;
|
||||
let commit_payload = if let Some(ref pk) = peer_hpk {
|
||||
hybrid_encrypt(pk, &commit).context("hybrid encrypt commit")?
|
||||
hybrid_encrypt(pk, &commit, b"", b"").context("hybrid encrypt commit")?
|
||||
} else {
|
||||
commit.clone()
|
||||
};
|
||||
@@ -710,7 +836,7 @@ pub async fn cmd_invite(
|
||||
|
||||
let peer_hybrid_pk = fetch_hybrid_key(&node_client, &peer_key).await?;
|
||||
let payload = if let Some(ref pk) = peer_hybrid_pk {
|
||||
hybrid_encrypt(pk, &welcome).context("hybrid encrypt welcome failed")?
|
||||
hybrid_encrypt(pk, &welcome, b"", b"").context("hybrid encrypt welcome failed")?
|
||||
} else {
|
||||
welcome
|
||||
};
|
||||
@@ -774,12 +900,22 @@ pub async fn cmd_join(
|
||||
let _ = member.receive_message(&mls_payload);
|
||||
}
|
||||
|
||||
// Auto-replenish KeyPackage after join consumed the original one.
|
||||
let tls_bytes = member
|
||||
.generate_key_package()
|
||||
.context("KeyPackage replenishment failed")?;
|
||||
upload_key_package(&node_client, &member.identity().public_key_bytes(), &tls_bytes)
|
||||
.await
|
||||
.context("KeyPackage replenishment upload failed")?;
|
||||
println!("KeyPackage auto-replenished after join");
|
||||
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
println!("joined group successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Send an application message via DS (single recipient or broadcast to all other members).
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn cmd_send(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
@@ -820,7 +956,7 @@ pub async fn cmd_send(
|
||||
for recipient in &recipients {
|
||||
let peer_hybrid_pk = fetch_hybrid_key(&node_client, recipient).await?;
|
||||
let payload = if let Some(ref pk) = peer_hybrid_pk {
|
||||
hybrid_encrypt(pk, &ct).context("hybrid encrypt failed")?
|
||||
hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt failed")?
|
||||
} else {
|
||||
ct.clone()
|
||||
};
|
||||
@@ -871,7 +1007,7 @@ pub async fn cmd_recv(
|
||||
// application messages that depend on the resulting epoch.
|
||||
payloads.sort_by_key(|(seq, _)| *seq);
|
||||
|
||||
let mut retry_mls: Vec<Vec<u8>> = Vec::new();
|
||||
let mut pending: Vec<(usize, Vec<u8>)> = Vec::new();
|
||||
for (idx, (_, payload)) in payloads.iter().enumerate() {
|
||||
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
|
||||
Ok(b) => b,
|
||||
@@ -881,20 +1017,34 @@ pub async fn cmd_recv(
|
||||
}
|
||||
};
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(Some(pt)) => println!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt)),
|
||||
Ok(None) => println!("[{idx}] commit applied"),
|
||||
Err(_) => retry_mls.push(mls_payload),
|
||||
Ok(ReceivedMessage::Application(pt)) => println!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt)),
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => println!("[{idx}] commit applied"),
|
||||
Err(_) => pending.push((idx, mls_payload)),
|
||||
}
|
||||
}
|
||||
// Retry messages that failed on the first pass (e.g. app messages whose
|
||||
// epoch was not yet advanced until a commit earlier in the batch was applied).
|
||||
for mls_payload in &retry_mls {
|
||||
match member.receive_message(mls_payload) {
|
||||
Ok(Some(pt)) => println!("[retry] plaintext: {}", String::from_utf8_lossy(&pt)),
|
||||
Ok(None) => {}
|
||||
Err(e) => println!("[retry] error: {e}"),
|
||||
// Retry until no more progress (handles multi-epoch batches).
|
||||
loop {
|
||||
let before = pending.len();
|
||||
pending.retain(|(idx, mls_payload)| {
|
||||
match member.receive_message(mls_payload) {
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
println!("[{idx}/retry] plaintext: {}", String::from_utf8_lossy(&pt));
|
||||
false
|
||||
}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
|
||||
println!("[{idx}/retry] commit applied");
|
||||
false
|
||||
}
|
||||
Err(_) => true,
|
||||
}
|
||||
});
|
||||
if pending.len() == before {
|
||||
break; // No progress — remaining messages are unprocessable
|
||||
}
|
||||
}
|
||||
for (idx, _) in &pending {
|
||||
println!("[{idx}] error: unprocessable after all retries");
|
||||
}
|
||||
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
|
||||
@@ -906,8 +1056,8 @@ pub async fn cmd_recv(
|
||||
|
||||
/// Fetch pending payloads, process in order (merge commits, collect plaintexts), save state.
|
||||
/// Returns only application-message plaintexts. Used by E2E tests and callers that need returned messages.
|
||||
/// Uses two passes so that if the server delivers an application message before a Commit, the second pass
|
||||
/// processes it after commits are merged.
|
||||
/// Retries in a loop until no more progress, handling multi-epoch batches where commits must be
|
||||
/// applied before later application messages can be decrypted.
|
||||
pub async fn receive_pending_plaintexts(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
@@ -925,21 +1075,33 @@ pub async fn receive_pending_plaintexts(
|
||||
payloads.sort_by_key(|(seq, _)| *seq);
|
||||
|
||||
let mut plaintexts = Vec::new();
|
||||
let mut retry_mls: Vec<Vec<u8>> = Vec::new();
|
||||
let mut pending: Vec<Vec<u8>> = Vec::new();
|
||||
for (_, payload) in &payloads {
|
||||
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
|
||||
Ok(b) => b,
|
||||
Err(_) => continue,
|
||||
};
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(Some(pt)) => plaintexts.push(pt),
|
||||
Ok(None) => {}
|
||||
Err(_) => retry_mls.push(mls_payload),
|
||||
Ok(ReceivedMessage::Application(pt)) => plaintexts.push(pt),
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {}
|
||||
Err(_) => pending.push(mls_payload),
|
||||
}
|
||||
}
|
||||
for mls_payload in &retry_mls {
|
||||
if let Ok(Some(pt)) = member.receive_message(mls_payload) {
|
||||
plaintexts.push(pt);
|
||||
// Retry until no more progress (handles multi-epoch batches).
|
||||
loop {
|
||||
let before = pending.len();
|
||||
pending.retain(|mls_payload| {
|
||||
match member.receive_message(mls_payload) {
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
plaintexts.push(pt);
|
||||
false
|
||||
}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => false,
|
||||
Err(_) => true,
|
||||
}
|
||||
});
|
||||
if pending.len() == before {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -955,8 +1117,8 @@ pub fn whoami_json(state_path: &Path, password: Option<&str>) -> anyhow::Result<
|
||||
let fingerprint = sha256(&pk_bytes);
|
||||
Ok(format!(
|
||||
r#"{{"identity_key":"{}", "fingerprint":"{}", "hybrid_key":{}, "group":{}}}"#,
|
||||
hex::encode(&pk_bytes),
|
||||
hex::encode(&fingerprint),
|
||||
hex::encode(pk_bytes),
|
||||
hex::encode(fingerprint),
|
||||
state.hybrid_key.is_some(),
|
||||
state.group.is_some(),
|
||||
))
|
||||
@@ -1069,7 +1231,7 @@ pub async fn cmd_chat(
|
||||
.context("send_message failed")?;
|
||||
let peer_hybrid_pk = fetch_hybrid_key(&client, &peer_key).await?;
|
||||
let payload = if let Some(ref pk) = peer_hybrid_pk {
|
||||
hybrid_encrypt(pk, &ct).context("hybrid encrypt failed")?
|
||||
hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt failed")?
|
||||
} else {
|
||||
ct
|
||||
};
|
||||
@@ -1085,21 +1247,39 @@ pub async fn cmd_chat(
|
||||
_ = poll.tick() => {
|
||||
let mut payloads = fetch_wait(&client, &identity_bytes, 0).await?;
|
||||
payloads.sort_by_key(|(seq, _)| *seq);
|
||||
let mut retry_payloads: Vec<Vec<u8>> = Vec::new();
|
||||
for (_, payload) in &payloads {
|
||||
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
|
||||
Ok(b) => b,
|
||||
Err(_) => continue,
|
||||
};
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(Some(pt)) => {
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
let s = String::from_utf8_lossy(&pt);
|
||||
println!("\r\n[peer] {s}\n> ");
|
||||
std::io::stdout().flush().context("flush stdout")?;
|
||||
}
|
||||
Ok(None) => {}
|
||||
Err(_) => {}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {}
|
||||
Err(_) => retry_payloads.push(mls_payload),
|
||||
}
|
||||
}
|
||||
// Retry failed messages (epoch may have advanced from commits in this batch)
|
||||
loop {
|
||||
let before = retry_payloads.len();
|
||||
retry_payloads.retain(|mls_payload| {
|
||||
match member.receive_message(mls_payload) {
|
||||
Ok(ReceivedMessage::Application(pt)) => {
|
||||
let s = String::from_utf8_lossy(&pt);
|
||||
println!("\r\n[peer] {s}\n> ");
|
||||
let _ = std::io::stdout().flush();
|
||||
false
|
||||
}
|
||||
Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => false,
|
||||
Err(_) => true,
|
||||
}
|
||||
});
|
||||
if retry_payloads.len() == before { break; }
|
||||
}
|
||||
if !payloads.is_empty() {
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
}
|
||||
@@ -1110,3 +1290,111 @@ pub async fn cmd_chat(
|
||||
println!();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Transcript export ─────────────────────────────────────────────────────────
|
||||
|
||||
/// Export the message history for a conversation to an encrypted, tamper-evident
|
||||
/// transcript file.
|
||||
///
|
||||
/// `conv_db` is the path to the conversation SQLite database (`.convdb` file).
|
||||
/// `conv_id_hex` is the 32-hex-character conversation ID to export.
|
||||
/// `output` is the path for the `.qpct` transcript file to write.
|
||||
/// `transcript_password` is used to derive the encryption key (Argon2id).
|
||||
/// `db_password` is the optional SQLCipher password for the conversation database.
|
||||
pub fn cmd_export(
|
||||
conv_db: &Path,
|
||||
conv_id_hex: &str,
|
||||
output: &Path,
|
||||
transcript_password: &str,
|
||||
db_password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
use quicprochat_core::{TranscriptRecord, TranscriptWriter};
|
||||
use super::conversation::{ConversationId, ConversationStore};
|
||||
|
||||
// Decode conversation ID from hex.
|
||||
let id_bytes = hex::decode(conv_id_hex)
|
||||
.map_err(|e| anyhow::anyhow!("conv-id must be 32 hex characters (16 bytes): {e}"))?;
|
||||
let conv_id = ConversationId::from_slice(&id_bytes)
|
||||
.ok_or_else(|| anyhow::anyhow!("conv-id must be exactly 16 bytes (32 hex chars), got {} bytes", id_bytes.len()))?;
|
||||
|
||||
// Open conversation database.
|
||||
let store = ConversationStore::open(conv_db, db_password)
|
||||
.context("open conversation database")?;
|
||||
|
||||
// Load conversation metadata (to display name in output).
|
||||
let conv = store
|
||||
.load_conversation(&conv_id)?
|
||||
.with_context(|| format!("conversation '{conv_id_hex}' not found in database"))?;
|
||||
|
||||
// Load all messages (oldest first).
|
||||
let messages = store.load_all_messages(&conv_id)?;
|
||||
|
||||
if messages.is_empty() {
|
||||
println!("No messages in conversation '{}'.", conv.display_name);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create output file.
|
||||
if let Some(parent) = output.parent() {
|
||||
std::fs::create_dir_all(parent).ok();
|
||||
}
|
||||
let mut file = std::fs::File::create(output)
|
||||
.with_context(|| format!("create transcript file '{}'", output.display()))?;
|
||||
|
||||
// Write transcript header + records.
|
||||
let mut writer = TranscriptWriter::new(transcript_password, &mut file)
|
||||
.context("initialise transcript writer")?;
|
||||
|
||||
let mut written = 0u64;
|
||||
for (seq, msg) in messages.iter().enumerate() {
|
||||
writer
|
||||
.write_record(
|
||||
&TranscriptRecord {
|
||||
seq: seq as u64,
|
||||
sender_identity: &msg.sender_key,
|
||||
timestamp_ms: msg.timestamp_ms,
|
||||
plaintext: &msg.body,
|
||||
},
|
||||
&mut file,
|
||||
)
|
||||
.context("write transcript record")?;
|
||||
written += 1;
|
||||
}
|
||||
|
||||
println!(
|
||||
"Exported {} message(s) from '{}' to '{}'.",
|
||||
written,
|
||||
conv.display_name,
|
||||
output.display()
|
||||
);
|
||||
println!("Decrypt with: qpc export verify --input <file> --password <password>");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verify the hash-chain integrity of a transcript file without decrypting content.
|
||||
///
|
||||
/// Prints a summary. Does not require the encryption password (structural check only).
|
||||
pub fn cmd_export_verify(input: &Path) -> anyhow::Result<()> {
|
||||
use quicprochat_core::{validate_transcript_structure, ChainVerdict};
|
||||
|
||||
let data = std::fs::read(input)
|
||||
.with_context(|| format!("read transcript file '{}'", input.display()))?;
|
||||
|
||||
match validate_transcript_structure(&data)? {
|
||||
ChainVerdict::Ok { records } => {
|
||||
println!(
|
||||
"OK: transcript '{}' is structurally valid. {} record(s) found, hash chain intact.",
|
||||
input.display(),
|
||||
records
|
||||
);
|
||||
}
|
||||
ChainVerdict::Broken => {
|
||||
anyhow::bail!(
|
||||
"FAIL: hash chain is broken in '{}' — file may have been tampered with.",
|
||||
input.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
798
crates/quicprochat-client/src/client/conversation.rs
Normal file
798
crates/quicprochat-client/src/client/conversation.rs
Normal file
@@ -0,0 +1,798 @@
|
||||
//! Multi-conversation state backed by SQLite (SQLCipher-encrypted when a
|
||||
//! password is provided).
|
||||
//!
|
||||
//! Each conversation (DM or group) has its own MLS group blob, keystore blob,
|
||||
//! member list, and message history.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use rand::RngCore;
|
||||
use rusqlite::{params, Connection, OptionalExtension};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
// ── Types ────────────────────────────────────────────────────────────────────
|
||||
|
||||
/// 16-byte conversation identifier.
|
||||
/// - DMs: the channel_id returned by `createChannel` (server-assigned UUID).
|
||||
/// - Groups: SHA-256(group_name)[..16].
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct ConversationId(pub [u8; 16]);
|
||||
|
||||
impl ConversationId {
|
||||
pub fn from_slice(s: &[u8]) -> Option<Self> {
|
||||
if s.len() == 16 {
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(s);
|
||||
Some(Self(buf))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive a conversation ID from a group name via SHA-256 truncation.
|
||||
pub fn from_group_name(name: &str) -> Self {
|
||||
use sha2::{Sha256, Digest};
|
||||
let hash = Sha256::digest(name.as_bytes());
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(&hash[..16]);
|
||||
Self(buf)
|
||||
}
|
||||
|
||||
pub fn hex(&self) -> String {
|
||||
hex::encode(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum ConversationKind {
|
||||
/// 1:1 DM channel with a specific peer.
|
||||
Dm {
|
||||
peer_key: Vec<u8>,
|
||||
peer_username: Option<String>,
|
||||
},
|
||||
/// Named group with N members.
|
||||
Group { name: String },
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Conversation {
|
||||
pub id: ConversationId,
|
||||
pub kind: ConversationKind,
|
||||
pub display_name: String,
|
||||
/// Serialized MLS group (bincode).
|
||||
pub mls_group_blob: Option<Vec<u8>>,
|
||||
/// Serialized keystore (bincode HashMap).
|
||||
pub keystore_blob: Option<Vec<u8>>,
|
||||
/// Member identity keys (bincode Vec<Vec<u8>>).
|
||||
pub member_keys: Vec<Vec<u8>>,
|
||||
pub unread_count: u32,
|
||||
pub last_activity_ms: u64,
|
||||
pub created_at_ms: u64,
|
||||
/// Whether this conversation uses hybrid (X25519 + ML-KEM-768) MLS keys.
|
||||
pub is_hybrid: bool,
|
||||
/// Highest server-side delivery sequence number seen.
|
||||
pub last_seen_seq: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct StoredMessage {
|
||||
pub conversation_id: ConversationId,
|
||||
pub message_id: Option<[u8; 16]>,
|
||||
pub sender_key: Vec<u8>,
|
||||
pub sender_name: Option<String>,
|
||||
pub body: String,
|
||||
pub msg_type: String,
|
||||
pub ref_msg_id: Option<[u8; 16]>,
|
||||
pub timestamp_ms: u64,
|
||||
pub is_outgoing: bool,
|
||||
}
|
||||
|
||||
// ── Key derivation (Argon2id, matching state.rs parameters) ─────────────────
|
||||
|
||||
const ARGON2_M_COST: u32 = 19 * 1024;
|
||||
const ARGON2_T_COST: u32 = 2;
|
||||
const ARGON2_P_COST: u32 = 1;
|
||||
const SALT_LEN: usize = 16;
|
||||
|
||||
/// Derive a 32-byte SQLCipher key from the user password and a random salt.
|
||||
fn derive_convdb_key(password: &str, salt: &[u8]) -> anyhow::Result<Zeroizing<[u8; 32]>> {
|
||||
let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(32))
|
||||
.map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = Zeroizing::new([0u8; 32]);
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut *key)
|
||||
.map_err(|e| anyhow::anyhow!("convdb key derivation: {e}"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Read or create a 16-byte random salt at `salt_path` (mode 0o600).
|
||||
fn get_or_create_salt(salt_path: &Path) -> anyhow::Result<Vec<u8>> {
|
||||
if salt_path.exists() {
|
||||
let bytes = std::fs::read(salt_path).context("read convdb salt")?;
|
||||
anyhow::ensure!(bytes.len() == SALT_LEN, "invalid convdb salt length");
|
||||
return Ok(bytes);
|
||||
}
|
||||
let mut salt = vec![0u8; SALT_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||
std::fs::write(salt_path, &salt).context("write convdb salt")?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
std::fs::set_permissions(salt_path, std::fs::Permissions::from_mode(0o600)).ok();
|
||||
}
|
||||
Ok(salt)
|
||||
}
|
||||
|
||||
// ── ConversationStore ────────────────────────────────────────────────────────
|
||||
|
||||
pub struct ConversationStore {
|
||||
conn: Connection,
|
||||
}
|
||||
|
||||
impl ConversationStore {
|
||||
/// Open or create the conversation database at `db_path`.
|
||||
/// If `password` is `Some`, the database is encrypted with SQLCipher using
|
||||
/// an Argon2id-derived key. Existing unencrypted databases are migrated
|
||||
/// transparently.
|
||||
pub fn open(db_path: &Path, password: Option<&str>) -> anyhow::Result<Self> {
|
||||
if let Some(parent) = db_path.parent() {
|
||||
std::fs::create_dir_all(parent).ok();
|
||||
}
|
||||
|
||||
match password {
|
||||
Some(pw) => Self::open_encrypted(db_path, pw),
|
||||
None => Self::open_plain(db_path),
|
||||
}
|
||||
}
|
||||
|
||||
fn open_plain(db_path: &Path) -> anyhow::Result<Self> {
|
||||
let conn = Connection::open(db_path).context("open conversation db")?;
|
||||
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
|
||||
.context("set pragmas")?;
|
||||
Self::migrate(&conn)?;
|
||||
Ok(Self { conn })
|
||||
}
|
||||
|
||||
fn open_encrypted(db_path: &Path, password: &str) -> anyhow::Result<Self> {
|
||||
let salt_path = PathBuf::from(format!("{}-salt", db_path.display()));
|
||||
let already_encrypted = salt_path.exists();
|
||||
|
||||
// Migrate an existing unencrypted database before opening with encryption.
|
||||
if db_path.exists() && !already_encrypted {
|
||||
Self::migrate_plain_to_encrypted(db_path, &salt_path, password)?;
|
||||
// After migration, salt file exists and DB is encrypted — fall through.
|
||||
}
|
||||
|
||||
let salt = get_or_create_salt(&salt_path)?;
|
||||
let key = derive_convdb_key(password, &salt)?;
|
||||
#[allow(clippy::needless_borrows_for_generic_args)]
|
||||
let hex_key = Zeroizing::new(hex::encode(&*key));
|
||||
|
||||
let conn = Connection::open(db_path).context("open conversation db")?;
|
||||
conn.pragma_update(None, "key", format!("x'{}'", &*hex_key))
|
||||
.context("set SQLCipher key")?;
|
||||
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
|
||||
.context("set pragmas")?;
|
||||
Self::migrate(&conn)?;
|
||||
Ok(Self { conn })
|
||||
}
|
||||
|
||||
/// Migrate an unencrypted `.convdb` to an encrypted one in-place.
|
||||
fn migrate_plain_to_encrypted(
|
||||
db_path: &Path,
|
||||
salt_path: &Path,
|
||||
password: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
let salt = get_or_create_salt(salt_path)?;
|
||||
let key = derive_convdb_key(password, &salt)?;
|
||||
#[allow(clippy::needless_borrows_for_generic_args)]
|
||||
let hex_key = Zeroizing::new(hex::encode(&*key));
|
||||
|
||||
let enc_path = db_path.with_extension("convdb-enc");
|
||||
|
||||
// Open the existing plaintext database.
|
||||
let plain = Connection::open(db_path).context("open plain convdb for migration")?;
|
||||
plain.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;").ok();
|
||||
|
||||
// Attach a new encrypted database and export into it.
|
||||
// Sanitize the path to prevent SQL injection (ATTACH does not support parameterized paths).
|
||||
let enc_path_str = enc_path.display().to_string();
|
||||
anyhow::ensure!(
|
||||
!enc_path_str.contains('\''),
|
||||
"database path must not contain single quotes: {enc_path_str}"
|
||||
);
|
||||
plain
|
||||
.execute_batch(&format!(
|
||||
"ATTACH DATABASE '{enc_path_str}' AS encrypted KEY \"x'{}'\";",
|
||||
&*hex_key
|
||||
))
|
||||
.context("attach encrypted db for migration")?;
|
||||
plain
|
||||
.execute_batch("SELECT sqlcipher_export('encrypted');")
|
||||
.context("sqlcipher_export to encrypted db")?;
|
||||
plain
|
||||
.execute_batch("DETACH DATABASE encrypted;")
|
||||
.context("detach encrypted db")?;
|
||||
|
||||
drop(plain);
|
||||
|
||||
// Swap files: encrypted → original.
|
||||
std::fs::rename(&enc_path, db_path).context("replace convdb with encrypted version")?;
|
||||
// Clean up WAL/SHM left from the plaintext open.
|
||||
let wal = PathBuf::from(format!("{}-wal", db_path.display()));
|
||||
let shm = PathBuf::from(format!("{}-shm", db_path.display()));
|
||||
std::fs::remove_file(&wal).ok();
|
||||
std::fs::remove_file(&shm).ok();
|
||||
|
||||
tracing::info!("migrated conversation database to encrypted storage");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn migrate(conn: &Connection) -> anyhow::Result<()> {
|
||||
conn.execute_batch(
|
||||
"CREATE TABLE IF NOT EXISTS conversations (
|
||||
id BLOB PRIMARY KEY,
|
||||
kind TEXT NOT NULL,
|
||||
display_name TEXT NOT NULL,
|
||||
peer_key BLOB,
|
||||
peer_username TEXT,
|
||||
group_name TEXT,
|
||||
mls_group_blob BLOB,
|
||||
keystore_blob BLOB,
|
||||
member_keys BLOB,
|
||||
unread_count INTEGER NOT NULL DEFAULT 0,
|
||||
last_activity_ms INTEGER NOT NULL DEFAULT 0,
|
||||
created_at_ms INTEGER NOT NULL DEFAULT 0
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS messages (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
conversation_id BLOB NOT NULL REFERENCES conversations(id),
|
||||
message_id BLOB,
|
||||
sender_key BLOB NOT NULL,
|
||||
sender_name TEXT,
|
||||
body TEXT NOT NULL,
|
||||
msg_type TEXT NOT NULL,
|
||||
ref_msg_id BLOB,
|
||||
timestamp_ms INTEGER NOT NULL,
|
||||
is_outgoing INTEGER NOT NULL DEFAULT 0
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conv
|
||||
ON messages(conversation_id, timestamp_ms);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS outbox (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
conversation_id BLOB NOT NULL,
|
||||
recipient_key BLOB NOT NULL,
|
||||
payload BLOB NOT NULL,
|
||||
created_at_ms INTEGER NOT NULL,
|
||||
retry_count INTEGER NOT NULL DEFAULT 0,
|
||||
status TEXT NOT NULL DEFAULT 'pending'
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_outbox_status
|
||||
ON outbox(status, created_at_ms);",
|
||||
)
|
||||
.context("migrate conversation db")?;
|
||||
|
||||
// Additive migrations for new columns (safe to re-run; errors ignored if column already exists).
|
||||
conn.execute_batch("ALTER TABLE conversations ADD COLUMN is_hybrid INTEGER NOT NULL DEFAULT 0;").ok();
|
||||
conn.execute_batch("ALTER TABLE conversations ADD COLUMN last_seen_seq INTEGER NOT NULL DEFAULT 0;").ok();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Conversation CRUD ────────────────────────────────────────────────
|
||||
|
||||
pub fn save_conversation(&self, conv: &Conversation) -> anyhow::Result<()> {
|
||||
let (kind_str, peer_key, peer_username, group_name) = match &conv.kind {
|
||||
ConversationKind::Dm {
|
||||
peer_key,
|
||||
peer_username,
|
||||
} => ("dm", Some(peer_key.as_slice()), peer_username.as_deref(), None),
|
||||
ConversationKind::Group { name } => ("group", None, None, Some(name.as_str())),
|
||||
};
|
||||
let member_keys_blob = bincode::serialize(&conv.member_keys)
|
||||
.context("serialize member_keys")?;
|
||||
|
||||
self.conn.execute(
|
||||
"INSERT INTO conversations
|
||||
(id, kind, display_name, peer_key, peer_username, group_name,
|
||||
mls_group_blob, keystore_blob, member_keys, unread_count,
|
||||
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
display_name = excluded.display_name,
|
||||
mls_group_blob = excluded.mls_group_blob,
|
||||
keystore_blob = excluded.keystore_blob,
|
||||
member_keys = excluded.member_keys,
|
||||
unread_count = excluded.unread_count,
|
||||
last_activity_ms = excluded.last_activity_ms,
|
||||
is_hybrid = excluded.is_hybrid,
|
||||
last_seen_seq = excluded.last_seen_seq",
|
||||
params![
|
||||
conv.id.0.as_slice(),
|
||||
kind_str,
|
||||
conv.display_name,
|
||||
peer_key,
|
||||
peer_username,
|
||||
group_name,
|
||||
conv.mls_group_blob,
|
||||
conv.keystore_blob,
|
||||
member_keys_blob,
|
||||
conv.unread_count,
|
||||
conv.last_activity_ms,
|
||||
conv.created_at_ms,
|
||||
conv.is_hybrid as i32,
|
||||
conv.last_seen_seq as i64,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_conversation(&self, id: &ConversationId) -> anyhow::Result<Option<Conversation>> {
|
||||
self.conn
|
||||
.query_row(
|
||||
"SELECT kind, display_name, peer_key, peer_username, group_name,
|
||||
mls_group_blob, keystore_blob, member_keys, unread_count,
|
||||
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
|
||||
FROM conversations WHERE id = ?1",
|
||||
params![id.0.as_slice()],
|
||||
|row| {
|
||||
let kind_str: String = row.get(0)?;
|
||||
let display_name: String = row.get(1)?;
|
||||
let peer_key: Option<Vec<u8>> = row.get(2)?;
|
||||
let peer_username: Option<String> = row.get(3)?;
|
||||
let group_name: Option<String> = row.get(4)?;
|
||||
let mls_group_blob: Option<Vec<u8>> = row.get(5)?;
|
||||
let keystore_blob: Option<Vec<u8>> = row.get(6)?;
|
||||
let member_keys_blob: Option<Vec<u8>> = row.get(7)?;
|
||||
let unread_count: u32 = row.get(8)?;
|
||||
let last_activity_ms: u64 = row.get(9)?;
|
||||
let created_at_ms: u64 = row.get(10)?;
|
||||
let is_hybrid_int: i32 = row.get(11)?;
|
||||
let last_seen_seq: i64 = row.get(12)?;
|
||||
|
||||
let kind = if kind_str == "dm" {
|
||||
ConversationKind::Dm {
|
||||
peer_key: peer_key.unwrap_or_default(),
|
||||
peer_username,
|
||||
}
|
||||
} else {
|
||||
ConversationKind::Group {
|
||||
name: group_name.unwrap_or_default(),
|
||||
}
|
||||
};
|
||||
|
||||
let member_keys: Vec<Vec<u8>> = member_keys_blob
|
||||
.and_then(|b| match bincode::deserialize(&b) {
|
||||
Ok(v) => Some(v),
|
||||
Err(e) => {
|
||||
tracing::warn!(conv = %hex::encode(id.0), "bincode deserialize member_keys failed: {e}");
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
Ok(Conversation {
|
||||
id: id.clone(),
|
||||
kind,
|
||||
display_name,
|
||||
mls_group_blob,
|
||||
keystore_blob,
|
||||
member_keys,
|
||||
unread_count,
|
||||
last_activity_ms,
|
||||
created_at_ms,
|
||||
is_hybrid: is_hybrid_int != 0,
|
||||
last_seen_seq: last_seen_seq as u64,
|
||||
})
|
||||
},
|
||||
)
|
||||
.optional()
|
||||
.context("load conversation")
|
||||
}
|
||||
|
||||
pub fn list_conversations(&self) -> anyhow::Result<Vec<Conversation>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT id, kind, display_name, peer_key, peer_username, group_name,
|
||||
mls_group_blob, keystore_blob, member_keys, unread_count,
|
||||
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
|
||||
FROM conversations ORDER BY last_activity_ms DESC",
|
||||
)?;
|
||||
let rows = stmt.query_map([], |row| {
|
||||
let id_blob: Vec<u8> = row.get(0)?;
|
||||
let kind_str: String = row.get(1)?;
|
||||
let display_name: String = row.get(2)?;
|
||||
let peer_key: Option<Vec<u8>> = row.get(3)?;
|
||||
let peer_username: Option<String> = row.get(4)?;
|
||||
let group_name: Option<String> = row.get(5)?;
|
||||
let mls_group_blob: Option<Vec<u8>> = row.get(6)?;
|
||||
let keystore_blob: Option<Vec<u8>> = row.get(7)?;
|
||||
let member_keys_blob: Option<Vec<u8>> = row.get(8)?;
|
||||
let unread_count: u32 = row.get(9)?;
|
||||
let last_activity_ms: u64 = row.get(10)?;
|
||||
let created_at_ms: u64 = row.get(11)?;
|
||||
let is_hybrid_int: i32 = row.get(12)?;
|
||||
let last_seen_seq: i64 = row.get(13)?;
|
||||
|
||||
let id = ConversationId::from_slice(&id_blob).unwrap_or(ConversationId([0; 16]));
|
||||
let kind = if kind_str == "dm" {
|
||||
ConversationKind::Dm {
|
||||
peer_key: peer_key.unwrap_or_default(),
|
||||
peer_username,
|
||||
}
|
||||
} else {
|
||||
ConversationKind::Group {
|
||||
name: group_name.unwrap_or_default(),
|
||||
}
|
||||
};
|
||||
let member_keys: Vec<Vec<u8>> = member_keys_blob
|
||||
.and_then(|b| match bincode::deserialize(&b) {
|
||||
Ok(v) => Some(v),
|
||||
Err(e) => {
|
||||
tracing::warn!(conv = %hex::encode(&id_blob), "bincode deserialize member_keys failed: {e}");
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
Ok(Conversation {
|
||||
id,
|
||||
kind,
|
||||
display_name,
|
||||
mls_group_blob,
|
||||
keystore_blob,
|
||||
member_keys,
|
||||
unread_count,
|
||||
last_activity_ms,
|
||||
created_at_ms,
|
||||
is_hybrid: is_hybrid_int != 0,
|
||||
last_seen_seq: last_seen_seq as u64,
|
||||
})
|
||||
})?;
|
||||
|
||||
let mut convs = Vec::new();
|
||||
for row in rows {
|
||||
convs.push(row?);
|
||||
}
|
||||
Ok(convs)
|
||||
}
|
||||
|
||||
/// Find a DM conversation by the peer's identity key.
|
||||
pub fn find_dm_by_peer(&self, peer_key: &[u8]) -> anyhow::Result<Option<Conversation>> {
|
||||
let id_blob: Option<Vec<u8>> = self
|
||||
.conn
|
||||
.query_row(
|
||||
"SELECT id FROM conversations WHERE kind = 'dm' AND peer_key = ?1",
|
||||
params![peer_key],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
match id_blob {
|
||||
Some(blob) => {
|
||||
let id = ConversationId::from_slice(&blob)
|
||||
.context("invalid conversation id in db")?;
|
||||
self.load_conversation(&id)
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Find a group conversation by name.
|
||||
pub fn find_group_by_name(&self, name: &str) -> anyhow::Result<Option<Conversation>> {
|
||||
let id_blob: Option<Vec<u8>> = self
|
||||
.conn
|
||||
.query_row(
|
||||
"SELECT id FROM conversations WHERE kind = 'group' AND group_name = ?1",
|
||||
params![name],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.optional()?;
|
||||
|
||||
match id_blob {
|
||||
Some(blob) => {
|
||||
let id = ConversationId::from_slice(&blob)
|
||||
.context("invalid conversation id in db")?;
|
||||
self.load_conversation(&id)
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn increment_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET unread_count = unread_count + 1 WHERE id = ?1",
|
||||
params![id.0.as_slice()],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn reset_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET unread_count = 0 WHERE id = ?1",
|
||||
params![id.0.as_slice()],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_activity(&self, id: &ConversationId, ts_ms: u64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET last_activity_ms = ?2 WHERE id = ?1 AND last_activity_ms < ?2",
|
||||
params![id.0.as_slice(), ts_ms],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Message CRUD ─────────────────────────────────────────────────────
|
||||
|
||||
pub fn save_message(&self, msg: &StoredMessage) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO messages
|
||||
(conversation_id, message_id, sender_key, sender_name, body,
|
||||
msg_type, ref_msg_id, timestamp_ms, is_outgoing)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
|
||||
params![
|
||||
msg.conversation_id.0.as_slice(),
|
||||
msg.message_id.as_ref().map(|id| id.as_slice()),
|
||||
msg.sender_key,
|
||||
msg.sender_name,
|
||||
msg.body,
|
||||
msg.msg_type,
|
||||
msg.ref_msg_id.as_ref().map(|id| id.as_slice()),
|
||||
msg.timestamp_ms,
|
||||
msg.is_outgoing as i32,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_recent_messages(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
limit: usize,
|
||||
) -> anyhow::Result<Vec<StoredMessage>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT message_id, sender_key, sender_name, body, msg_type,
|
||||
ref_msg_id, timestamp_ms, is_outgoing
|
||||
FROM messages
|
||||
WHERE conversation_id = ?1
|
||||
ORDER BY timestamp_ms DESC
|
||||
LIMIT ?2",
|
||||
)?;
|
||||
let rows = stmt.query_map(params![conv_id.0.as_slice(), limit.min(u32::MAX as usize) as u32], |row| {
|
||||
let message_id: Option<Vec<u8>> = row.get(0)?;
|
||||
let sender_key: Vec<u8> = row.get(1)?;
|
||||
let sender_name: Option<String> = row.get(2)?;
|
||||
let body: String = row.get(3)?;
|
||||
let msg_type: String = row.get(4)?;
|
||||
let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
|
||||
let timestamp_ms: u64 = row.get(6)?;
|
||||
let is_outgoing: i32 = row.get(7)?;
|
||||
|
||||
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
|
||||
if v.len() == 16 {
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(v);
|
||||
Some(buf)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
Ok(StoredMessage {
|
||||
conversation_id: conv_id.clone(),
|
||||
message_id: message_id.as_deref().and_then(to_16),
|
||||
sender_key,
|
||||
sender_name,
|
||||
body,
|
||||
msg_type,
|
||||
ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
|
||||
timestamp_ms,
|
||||
is_outgoing: is_outgoing != 0,
|
||||
})
|
||||
})?;
|
||||
|
||||
let mut msgs = Vec::new();
|
||||
for row in rows {
|
||||
msgs.push(row?);
|
||||
}
|
||||
// Reverse so oldest first
|
||||
msgs.reverse();
|
||||
Ok(msgs)
|
||||
}
|
||||
|
||||
/// Load all messages for a conversation, oldest first (no limit).
|
||||
pub fn load_all_messages(&self, conv_id: &ConversationId) -> anyhow::Result<Vec<StoredMessage>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT message_id, sender_key, sender_name, body, msg_type,
|
||||
ref_msg_id, timestamp_ms, is_outgoing
|
||||
FROM messages
|
||||
WHERE conversation_id = ?1
|
||||
ORDER BY timestamp_ms ASC, id ASC",
|
||||
)?;
|
||||
let rows = stmt.query_map(params![conv_id.0.as_slice()], |row| {
|
||||
let message_id: Option<Vec<u8>> = row.get(0)?;
|
||||
let sender_key: Vec<u8> = row.get(1)?;
|
||||
let sender_name: Option<String> = row.get(2)?;
|
||||
let body: String = row.get(3)?;
|
||||
let msg_type: String = row.get(4)?;
|
||||
let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
|
||||
let timestamp_ms: u64 = row.get(6)?;
|
||||
let is_outgoing: i32 = row.get(7)?;
|
||||
|
||||
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
|
||||
if v.len() == 16 {
|
||||
let mut buf = [0u8; 16];
|
||||
buf.copy_from_slice(v);
|
||||
Some(buf)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
Ok(StoredMessage {
|
||||
conversation_id: conv_id.clone(),
|
||||
message_id: message_id.as_deref().and_then(to_16),
|
||||
sender_key,
|
||||
sender_name,
|
||||
body,
|
||||
msg_type,
|
||||
ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
|
||||
timestamp_ms,
|
||||
is_outgoing: is_outgoing != 0,
|
||||
})
|
||||
})?;
|
||||
|
||||
let mut msgs = Vec::new();
|
||||
for row in rows {
|
||||
msgs.push(row?);
|
||||
}
|
||||
Ok(msgs)
|
||||
}
|
||||
|
||||
/// Update the body of an existing message (for edits).
|
||||
pub fn update_message_body(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
message_id: &[u8; 16],
|
||||
new_body: &str,
|
||||
) -> anyhow::Result<bool> {
|
||||
let rows = self.conn.execute(
|
||||
"UPDATE messages SET body = ?3 WHERE conversation_id = ?1 AND message_id = ?2",
|
||||
params![conv_id.0.as_slice(), message_id.as_slice(), new_body],
|
||||
)?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
/// Mark a message as deleted (sets body to "[deleted]" and msg_type to "deleted").
|
||||
pub fn delete_message(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
message_id: &[u8; 16],
|
||||
) -> anyhow::Result<bool> {
|
||||
let rows = self.conn.execute(
|
||||
"UPDATE messages SET body = '[deleted]', msg_type = 'deleted' WHERE conversation_id = ?1 AND message_id = ?2",
|
||||
params![conv_id.0.as_slice(), message_id.as_slice()],
|
||||
)?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
/// Save a message, deduplicating by message_id within the same conversation.
|
||||
/// Returns `true` if the message was saved (new), `false` if it was a duplicate.
|
||||
pub fn save_message_dedup(&self, msg: &StoredMessage) -> anyhow::Result<bool> {
|
||||
if let Some(ref mid) = msg.message_id {
|
||||
let exists: bool = self.conn.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM messages WHERE message_id = ?1 AND conversation_id = ?2)",
|
||||
params![mid.as_slice(), msg.conversation_id.0.as_slice()],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
if exists {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
self.save_message(msg)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
// ── Sequence tracking ──────────────────────────────────────────────
|
||||
|
||||
pub fn update_last_seen_seq(&self, id: &ConversationId, seq: u64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE conversations SET last_seen_seq = ?2 WHERE id = ?1 AND last_seen_seq < ?2",
|
||||
params![id.0.as_slice(), seq as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Outbox (offline queue) ────────────────────────────────────────
|
||||
|
||||
pub fn enqueue_outbox(
|
||||
&self,
|
||||
conv_id: &ConversationId,
|
||||
recipient_key: &[u8],
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO outbox (conversation_id, recipient_key, payload, created_at_ms)
|
||||
VALUES (?1, ?2, ?3, ?4)",
|
||||
params![conv_id.0.as_slice(), recipient_key, payload, now_ms() as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_pending_outbox(&self) -> anyhow::Result<Vec<OutboxEntry>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT id, conversation_id, recipient_key, payload, retry_count
|
||||
FROM outbox WHERE status = 'pending' ORDER BY created_at_ms",
|
||||
)?;
|
||||
let rows = stmt.query_map([], |row| {
|
||||
let id: i64 = row.get(0)?;
|
||||
let conv_blob: Vec<u8> = row.get(1)?;
|
||||
let recipient_key: Vec<u8> = row.get(2)?;
|
||||
let payload: Vec<u8> = row.get(3)?;
|
||||
let retry_count: u32 = row.get(4)?;
|
||||
Ok(OutboxEntry {
|
||||
id,
|
||||
conversation_id: ConversationId::from_slice(&conv_blob)
|
||||
.unwrap_or(ConversationId([0; 16])),
|
||||
recipient_key,
|
||||
payload,
|
||||
retry_count,
|
||||
})
|
||||
})?;
|
||||
let mut entries = Vec::new();
|
||||
for row in rows {
|
||||
entries.push(row?);
|
||||
}
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
pub fn mark_outbox_sent(&self, id: i64) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE outbox SET status = 'sent' WHERE id = ?1",
|
||||
params![id],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn mark_outbox_failed(&self, id: i64, retry_count: u32) -> anyhow::Result<()> {
|
||||
let new_status = if retry_count > 5 { "failed" } else { "pending" };
|
||||
self.conn.execute(
|
||||
"UPDATE outbox SET retry_count = ?2, status = ?3 WHERE id = ?1",
|
||||
params![id, retry_count, new_status],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete messages older than `cutoff_ms` (epoch milliseconds) across all conversations.
|
||||
pub fn delete_messages_before(&self, cutoff_ms: u64) -> anyhow::Result<usize> {
|
||||
let rows = self.conn.execute(
|
||||
"DELETE FROM messages WHERE timestamp_ms < ?1",
|
||||
params![cutoff_ms as i64],
|
||||
)?;
|
||||
Ok(rows)
|
||||
}
|
||||
}
|
||||
|
||||
/// An entry in the offline outbox queue.
#[derive(Clone, Debug)]
pub struct OutboxEntry {
    /// Database row id (primary key of the `outbox` table).
    pub id: i64,
    /// Conversation the payload belongs to.
    pub conversation_id: ConversationId,
    /// Key bytes identifying the intended recipient.
    pub recipient_key: Vec<u8>,
    /// Opaque payload to deliver once a connection is available.
    pub payload: Vec<u8>,
    /// Number of delivery attempts recorded so far.
    pub retry_count: u32,
}
|
||||
|
||||
/// Current wall-clock time as milliseconds since the Unix epoch.
///
/// Returns 0 if the system clock reports a time before the epoch.
pub fn now_ms() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}
|
||||
82
crates/quicprochat-client/src/client/display.rs
Normal file
82
crates/quicprochat-client/src/client/display.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
//! Terminal display helpers for the REPL.
|
||||
|
||||
use super::conversation::StoredMessage;
|
||||
use super::session::SessionState;
|
||||
|
||||
// ANSI SGR escape codes used for REPL output styling.
const RESET: &str = "\x1b[0m"; // clear all attributes
const BOLD: &str = "\x1b[1m";
const DIM: &str = "\x1b[2m";
const GREEN: &str = "\x1b[32m"; // outgoing messages
const CYAN: &str = "\x1b[36m"; // incoming sender names
const YELLOW: &str = "\x1b[33m"; // unread counts and errors
|
||||
|
||||
/// Print the REPL prompt showing the active conversation and unread count.
|
||||
pub fn print_prompt(session: &SessionState) {
|
||||
use std::io::Write;
|
||||
let name = session
|
||||
.active_display_name()
|
||||
.unwrap_or_else(|| "no conversation".into());
|
||||
let unread = session.total_unread();
|
||||
if unread > 0 {
|
||||
print!("{DIM}[{RESET}{BOLD}{name}{RESET} {YELLOW}{unread} unread{RESET}{DIM}]{RESET} > ");
|
||||
} else {
|
||||
print!("{DIM}[{RESET}{BOLD}{name}{RESET}{DIM}]{RESET} > ");
|
||||
}
|
||||
let _ = std::io::stdout().flush();
|
||||
}
|
||||
|
||||
/// Print an incoming or outgoing message.
|
||||
pub fn print_message(msg: &StoredMessage) {
|
||||
let body = if msg.msg_type == "reaction" {
|
||||
format!("reacted {}", msg.body)
|
||||
} else {
|
||||
msg.body.clone()
|
||||
};
|
||||
if msg.is_outgoing {
|
||||
println!("\r{GREEN}> {body}{RESET}");
|
||||
} else {
|
||||
let fallback = hex::encode(&msg.sender_key[..4]);
|
||||
let sender = msg.sender_name.as_deref().unwrap_or(&fallback);
|
||||
println!("\r{CYAN}{BOLD}[{sender}]{RESET} {body}");
|
||||
}
|
||||
}
|
||||
|
||||
/// Print a message received in real-time (clears current line first).
pub fn print_incoming(sender: &str, body: &str) {
    use std::io::Write;
    // Clear current line, print message, then re-show prompt context
    // ("\r\x1b[2K" = carriage return + ANSI erase-entire-line).
    print!("\r\x1b[2K");
    println!("{CYAN}{BOLD}[{sender}]{RESET} {body}");
    let _ = std::io::stdout().flush();
}
|
||||
|
||||
/// Print a system/status message in dimmed text.
pub fn print_status(msg: &str) {
    println!("{DIM} {msg}{RESET}");
}
|
||||
|
||||
/// Print a transient typing indicator (clears current line first).
pub fn print_typing(sender: &str) {
    use std::io::Write;
    // Erase the current line so the indicator overwrites any pending input.
    print!("\r\x1b[2K");
    println!("{DIM} {sender} is typing...{RESET}");
    let _ = std::io::stdout().flush();
}
|
||||
|
||||
/// Print an error message in yellow.
pub fn print_error(msg: &str) {
    println!("{YELLOW} error: {msg}{RESET}");
}
|
||||
|
||||
/// Format a conversation list entry for `/list`.
|
||||
pub fn format_conv_line(display_name: &str, kind: &str, unread: u32, members: usize) -> String {
|
||||
let unread_str = if unread > 0 {
|
||||
format!(" {YELLOW}({unread} new){RESET}")
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
format!(
|
||||
" {BOLD}{display_name}{RESET} {DIM}[{kind}, {members} members]{RESET}{unread_str}"
|
||||
)
|
||||
}
|
||||
7
crates/quicprochat-client/src/client/hex.rs
Normal file
7
crates/quicprochat-client/src/client/hex.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
/// Hex-encode arbitrary bytes into a string.
///
/// Thin wrapper around the `hex` crate so client modules only depend on
/// this local module.
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
    hex::encode(bytes)
}
|
||||
|
||||
/// Decode a hex string into bytes.
///
/// Any decoding failure from the underlying `hex` crate is collapsed into a
/// single static error message.
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
    hex::decode(s).map_err(|_| "invalid hex string")
}
|
||||
148
crates/quicprochat-client/src/client/mesh_discovery.rs
Normal file
148
crates/quicprochat-client/src/client/mesh_discovery.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
//! mDNS-based peer discovery for Freifunk / community mesh deployments.
|
||||
//!
|
||||
//! Browse for `_quicprochat._udp.local.` services on the local network and
|
||||
//! surface them as [`DiscoveredPeer`] structs. Servers announce themselves
|
||||
//! automatically on startup; this module lets clients find them without manual
|
||||
//! configuration.
|
||||
//!
|
||||
//! # Usage
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use quicprochat_client::client::mesh_discovery::MeshDiscovery;
|
||||
//!
|
||||
//! let disc = MeshDiscovery::start()?;
|
||||
//! // Give mDNS time to collect announcements before reading.
|
||||
//! std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
//! for peer in disc.peers() {
|
||||
//! println!("found: {} at {}", peer.domain, peer.server_addr);
|
||||
//! }
|
||||
//! # Ok::<(), quicprochat_client::client::mesh_discovery::MeshDiscoveryError>(())
|
||||
//! ```
|
||||
|
||||
#[cfg(feature = "mesh")]
|
||||
use mdns_sd::{ServiceDaemon, ServiceEvent};
|
||||
use std::net::SocketAddr;
|
||||
#[cfg(feature = "mesh")]
|
||||
use std::sync::{Arc, Mutex};
|
||||
#[cfg(feature = "mesh")]
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// A qpc server discovered on the local network via mDNS.
#[derive(Debug, Clone)]
pub struct DiscoveredPeer {
    /// Federation domain of the remote server (e.g. `"node1.freifunk.net"`),
    /// taken from the announcement's `domain` TXT record (or the mDNS
    /// fullname when that record is absent).
    pub domain: String,
    /// QUIC RPC address to connect to, parsed from the `server` TXT record.
    pub server_addr: SocketAddr,
}
|
||||
|
||||
/// A running mDNS browse session.
///
/// Starts immediately on construction; drop to stop browsing.
pub struct MeshDiscovery {
    /// Keeps the mDNS daemon alive; never read directly.
    #[cfg(feature = "mesh")]
    _daemon: ServiceDaemon,
    /// Peers discovered so far, keyed by federation domain. Written by the
    /// background event thread, read via [`MeshDiscovery::peers`].
    #[cfg(feature = "mesh")]
    peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>>,
}
|
||||
|
||||
/// Errors surfaced while starting mesh discovery.
#[derive(thiserror::Error, Debug)]
pub enum MeshDiscoveryError {
    /// The mDNS service daemon could not be created.
    #[error("mDNS daemon failed to start: {0}")]
    DaemonError(String),
    /// Browsing for the `_quicprochat._udp.local.` service type failed.
    #[error("mDNS browse failed: {0}")]
    BrowseError(String),
    /// Built without the `mesh` cargo feature; discovery is unavailable.
    #[error("mesh feature not compiled (rebuild with --features mesh)")]
    FeatureDisabled,
}
|
||||
|
||||
impl MeshDiscovery {
    /// Start browsing for `_quicprochat._udp.local.` services.
    ///
    /// Returns immediately; peers are collected in the background.
    /// Returns [`MeshDiscoveryError::FeatureDisabled`] when built without the
    /// `mesh` feature.
    pub fn start() -> Result<Self, MeshDiscoveryError> {
        #[cfg(feature = "mesh")]
        {
            Self::start_inner()
        }
        #[cfg(not(feature = "mesh"))]
        {
            Err(MeshDiscoveryError::FeatureDisabled)
        }
    }

    /// Real implementation, compiled only with the `mesh` feature: create
    /// the daemon, start a browse, and spawn a thread that folds mDNS
    /// events into the shared peer map.
    #[cfg(feature = "mesh")]
    fn start_inner() -> Result<Self, MeshDiscoveryError> {
        let daemon = ServiceDaemon::new()
            .map_err(|e| MeshDiscoveryError::DaemonError(e.to_string()))?;

        let receiver = daemon
            .browse("_quicprochat._udp.local.")
            .map_err(|e| MeshDiscoveryError::BrowseError(e.to_string()))?;

        let peers: Arc<Mutex<HashMap<String, DiscoveredPeer>>> =
            Arc::new(Mutex::new(HashMap::new()));
        let peers_bg = Arc::clone(&peers);

        // Process mDNS events in a background thread (ServiceDaemon is sync).
        // NOTE(review): the thread exits when the event channel closes —
        // presumably on daemon shutdown/drop; confirm against mdns-sd docs.
        std::thread::spawn(move || {
            for event in receiver {
                match event {
                    ServiceEvent::ServiceResolved(info) => {
                        // Extract the qpc server address from TXT records.
                        // "server" carries the QUIC address; "domain" the
                        // federation domain, falling back to the mDNS
                        // fullname when absent. Announcements without a
                        // parseable "server" record are silently ignored.
                        let server_addr_str = info
                            .get_property_val_str("server")
                            .map(|s| s.to_string());
                        let domain = info
                            .get_property_val_str("domain")
                            .map(|s| s.to_string())
                            .unwrap_or_else(|| info.get_fullname().to_string());

                        if let Some(addr_str) = server_addr_str {
                            if let Ok(addr) = addr_str.parse::<SocketAddr>() {
                                let peer = DiscoveredPeer {
                                    domain: domain.clone(),
                                    server_addr: addr,
                                };
                                if let Ok(mut map) = peers_bg.lock() {
                                    map.insert(domain, peer);
                                }
                            }
                        }
                    }
                    ServiceEvent::ServiceRemoved(_ty, fullname) => {
                        // NOTE(review): eviction matches by substring — a
                        // peer whose domain came from a TXT record and does
                        // not appear in the mDNS fullname will never be
                        // removed here; consider keying the map on fullname.
                        if let Ok(mut map) = peers_bg.lock() {
                            map.retain(|_, p| {
                                !fullname.contains(&p.domain)
                            });
                        }
                    }
                    // Other events (SearchStarted, SearchStopped) are informational.
                    _ => {}
                }
            }
        });

        Ok(Self {
            _daemon: daemon,
            peers,
        })
    }

    /// Return a snapshot of all peers discovered so far.
    ///
    /// Returns an empty list when built without the `mesh` feature, or when
    /// the peer-map mutex is poisoned.
    pub fn peers(&self) -> Vec<DiscoveredPeer> {
        #[cfg(feature = "mesh")]
        {
            self.peers
                .lock()
                .map(|m| m.values().cloned().collect())
                .unwrap_or_default()
        }
        #[cfg(not(feature = "mesh"))]
        {
            vec![]
        }
    }
}
|
||||
@@ -1,8 +1,23 @@
|
||||
pub mod command_engine;
|
||||
pub mod commands;
|
||||
pub mod conversation;
|
||||
pub mod display;
|
||||
pub mod hex;
|
||||
pub mod mesh_discovery;
|
||||
#[cfg(feature = "playbook")]
|
||||
pub mod playbook;
|
||||
pub mod repl;
|
||||
pub mod retry;
|
||||
pub mod rpc;
|
||||
pub mod session;
|
||||
pub mod state;
|
||||
pub mod token_cache;
|
||||
#[cfg(feature = "tui")]
|
||||
pub mod tui;
|
||||
#[cfg(feature = "v2")]
|
||||
pub mod v2_repl;
|
||||
#[cfg(all(feature = "v2", feature = "tui"))]
|
||||
pub mod v2_tui;
|
||||
|
||||
pub use commands::*;
|
||||
pub use rpc::{connect_node, enqueue, fetch_all, fetch_hybrid_key, fetch_key_package, fetch_wait, upload_hybrid_key, upload_key_package};
|
||||
872
crates/quicprochat-client/src/client/playbook.rs
Normal file
872
crates/quicprochat-client/src/client/playbook.rs
Normal file
@@ -0,0 +1,872 @@
|
||||
//! YAML playbook parser and executor.
|
||||
//!
|
||||
//! Playbooks describe a sequence of client commands in YAML format.
|
||||
//! They support variable substitution, assertions, loops, and per-step
|
||||
//! error handling policies.
|
||||
//!
|
||||
//! ```yaml
|
||||
//! name: "smoke test"
|
||||
//! steps:
|
||||
//! - command: dm
|
||||
//! args: { username: "bob" }
|
||||
//! - command: send
|
||||
//! args: { text: "Hello from playbook" }
|
||||
//! - command: assert
|
||||
//! condition: message_count
|
||||
//! op: gte
|
||||
//! value: 1
|
||||
//! ```
|
||||
//!
|
||||
//! Requires the `playbook` cargo feature.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::{Context, bail};
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::command_engine::{AssertCondition, CmpOp, Command, CommandRegistry};
|
||||
use super::session::SessionState;
|
||||
|
||||
// ── Playbook structs ────────────────────────────────────────────────────────
|
||||
|
||||
/// A parsed YAML playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Playbook {
    /// Human-readable playbook name; echoed in the execution report.
    pub name: String,
    /// Optional free-form description.
    #[serde(default)]
    pub description: Option<String>,
    /// Initial variables available for `$var` substitution in step args.
    #[serde(default)]
    pub variables: HashMap<String, String>,
    /// Steps executed in order.
    pub steps: Vec<PlaybookStep>,
}
|
||||
|
||||
/// A single step in a playbook.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookStep {
    /// Command name (e.g. `"dm"`, `"send"`, `"assert"`); mapped to a typed
    /// `Command` by `step_to_command`.
    pub command: String,
    /// Named command arguments; string values undergo `$var` substitution.
    #[serde(default)]
    pub args: HashMap<String, serde_yaml::Value>,
    /// For assert steps: the condition name.
    #[serde(default)]
    pub condition: Option<String>,
    /// For assert steps: comparison operator.
    #[serde(default)]
    pub op: Option<String>,
    /// For assert steps: expected value.
    #[serde(default)]
    pub value: Option<serde_yaml::Value>,
    /// Capture the command output into this variable name.
    #[serde(default)]
    pub capture: Option<String>,
    /// Error handling policy for this step.
    #[serde(default)]
    pub on_error: OnError,
    /// Optional loop specification.
    #[serde(rename = "loop", default)]
    pub loop_spec: Option<LoopSpec>,
}
|
||||
|
||||
/// What to do when a step fails.
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum OnError {
    /// Abort the run; remaining steps are reported as skipped.
    #[default]
    Fail,
    /// Record the failed step as skipped and keep going.
    Skip,
    /// Record the failed step as failed and keep going.
    Continue,
}
|
||||
|
||||
/// Loop specification for repeating a step.
///
/// The step runs once per value of the inclusive range `from..=to`, with
/// the current value bound to the variable named `var`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoopSpec {
    /// Variable name bound to the current iteration value.
    pub var: String,
    /// First iteration value (inclusive).
    pub from: usize,
    /// Last iteration value (inclusive).
    pub to: usize,
}
|
||||
|
||||
// ── Report structs ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Summary of a playbook execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlaybookReport {
    /// Name of the executed playbook.
    pub name: String,
    /// Steps accounted for, including loop iterations and steps skipped
    /// after an aborting failure.
    pub total_steps: usize,
    /// Steps that succeeded.
    pub passed: usize,
    /// Steps that failed.
    pub failed: usize,
    /// Steps that were skipped.
    pub skipped: usize,
    /// Wall-clock time for the whole run.
    pub duration: Duration,
    /// Per-step outcomes, in execution order.
    pub step_results: Vec<StepResult>,
}
|
||||
|
||||
impl PlaybookReport {
    /// True if all steps passed (no failures).
    ///
    /// Skipped steps do not count as failures.
    pub fn all_passed(&self) -> bool {
        self.failed == 0
    }
}
|
||||
|
||||
impl std::fmt::Display for PlaybookReport {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
writeln!(f, "Playbook: {}", self.name)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Result: {} passed, {} failed, {} skipped ({} total)",
|
||||
self.passed, self.failed, self.skipped, self.total_steps,
|
||||
)?;
|
||||
writeln!(f, "Duration: {:.2}s", self.duration.as_secs_f64())?;
|
||||
for sr in &self.step_results {
|
||||
let status = if sr.success { "OK" } else { "FAIL" };
|
||||
write!(
|
||||
f,
|
||||
" [{}/{}] {} ... {} ({:.1}ms)",
|
||||
sr.step_index + 1,
|
||||
self.total_steps,
|
||||
sr.command,
|
||||
status,
|
||||
sr.duration.as_secs_f64() * 1000.0,
|
||||
)?;
|
||||
if let Some(ref e) = sr.error {
|
||||
write!(f, " — {e}")?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of a single step execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StepResult {
    /// Zero-based position in execution order (each loop iteration gets its
    /// own index).
    pub step_index: usize,
    /// Command name from the playbook step.
    pub command: String,
    /// Whether the step succeeded.
    pub success: bool,
    /// Time spent on this step.
    pub duration: Duration,
    /// Command output, when any was produced.
    pub output: Option<String>,
    /// Error message on failure.
    pub error: Option<String>,
}
|
||||
|
||||
// ── PlaybookRunner ──────────────────────────────────────────────────────────
|
||||
|
||||
/// Executes a parsed `Playbook` step-by-step.
pub struct PlaybookRunner {
    /// The playbook being executed.
    playbook: Playbook,
    /// Live variable table: the playbook's declared `variables` plus values
    /// added via `set_var`, loop bindings, and `capture`d step outputs.
    vars: HashMap<String, String>,
}
|
||||
|
||||
impl PlaybookRunner {
|
||||
/// Load a playbook from a YAML file.
|
||||
pub fn from_file(path: &Path) -> anyhow::Result<Self> {
|
||||
let content =
|
||||
std::fs::read_to_string(path).with_context(|| format!("read {}", path.display()))?;
|
||||
Self::from_str(&content)
|
||||
}
|
||||
|
||||
/// Parse a playbook from a YAML string.
|
||||
pub fn from_str(yaml: &str) -> anyhow::Result<Self> {
|
||||
let playbook: Playbook =
|
||||
serde_yaml::from_str(yaml).context("parse playbook YAML")?;
|
||||
let vars = playbook.variables.clone();
|
||||
Ok(Self { playbook, vars })
|
||||
}
|
||||
|
||||
/// Override or add variables before execution.
|
||||
pub fn set_var(&mut self, name: impl Into<String>, value: impl Into<String>) {
|
||||
self.vars.insert(name.into(), value.into());
|
||||
}
|
||||
|
||||
/// Execute all steps, returning a report.
|
||||
pub async fn run(
|
||||
&mut self,
|
||||
session: &mut SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> PlaybookReport {
|
||||
let start = Instant::now();
|
||||
let total = self.expanded_step_count();
|
||||
let mut results = Vec::new();
|
||||
let mut passed = 0usize;
|
||||
let mut failed = 0usize;
|
||||
let mut skipped = 0usize;
|
||||
let mut step_idx = 0usize;
|
||||
let mut abort = false;
|
||||
|
||||
for step in &self.playbook.steps.clone() {
|
||||
if abort {
|
||||
skipped += 1;
|
||||
results.push(StepResult {
|
||||
step_index: step_idx,
|
||||
command: step.command.clone(),
|
||||
success: false,
|
||||
duration: Duration::ZERO,
|
||||
output: None,
|
||||
error: Some("skipped (prior failure)".into()),
|
||||
});
|
||||
step_idx += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(ref ls) = step.loop_spec {
|
||||
for i in ls.from..=ls.to {
|
||||
self.vars.insert(ls.var.clone(), i.to_string());
|
||||
let sr = self.execute_step(step, step_idx, total, session, client).await;
|
||||
if sr.success {
|
||||
passed += 1;
|
||||
} else {
|
||||
failed += 1;
|
||||
if step.on_error == OnError::Fail {
|
||||
abort = true;
|
||||
}
|
||||
}
|
||||
results.push(sr);
|
||||
step_idx += 1;
|
||||
if abort {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let sr = self.execute_step(step, step_idx, total, session, client).await;
|
||||
if sr.success {
|
||||
passed += 1;
|
||||
} else {
|
||||
match step.on_error {
|
||||
OnError::Fail => {
|
||||
failed += 1;
|
||||
abort = true;
|
||||
}
|
||||
OnError::Skip => skipped += 1,
|
||||
OnError::Continue => failed += 1,
|
||||
}
|
||||
}
|
||||
results.push(sr);
|
||||
step_idx += 1;
|
||||
}
|
||||
}
|
||||
|
||||
PlaybookReport {
|
||||
name: self.playbook.name.clone(),
|
||||
total_steps: step_idx,
|
||||
passed,
|
||||
failed,
|
||||
skipped,
|
||||
duration: start.elapsed(),
|
||||
step_results: results,
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute a single step: build the typed command, run it through the
/// command registry, and capture any requested output into variables.
async fn execute_step(
    &mut self,
    step: &PlaybookStep,
    index: usize,
    total: usize,
    session: &mut SessionState,
    client: &node_service::Client,
) -> StepResult {
    let t = Instant::now();
    // Argument resolution can fail (missing args, bad types); report that
    // as a failed step without invoking the registry at all.
    let cmd = match self.step_to_command(step) {
        Ok(c) => c,
        Err(e) => {
            return StepResult {
                step_index: index,
                command: step.command.clone(),
                success: false,
                duration: t.elapsed(),
                output: None,
                error: Some(format!("{e:#}")),
            };
        }
    };

    // Progress goes to stderr so stdout stays clean for command output.
    eprintln!(
        "[{}/{}] {} ...",
        index + 1,
        total,
        step.command,
    );

    let cr = CommandRegistry::execute(&cmd, session, client).await;

    // Capture output into variable if requested.
    if let Some(ref var_name) = step.capture {
        if let Some(ref out) = cr.output {
            self.vars.insert(var_name.clone(), out.clone());
        }
        // Structured result fields are exposed as "<capture>.<key>".
        for (k, v) in &cr.data {
            self.vars.insert(format!("{var_name}.{k}"), v.clone());
        }
    }

    StepResult {
        step_index: index,
        command: step.command.clone(),
        success: cr.success,
        duration: t.elapsed(),
        output: cr.output,
        error: cr.error,
    }
}
|
||||
|
||||
/// Convert a YAML step into a typed `Command`.
///
/// String arguments are resolved with `$var` substitution; several commands
/// accept aliases (e.g. `send`/`send-message`, `quit`/`exit`). Unknown
/// command names are an error.
fn step_to_command(&self, step: &PlaybookStep) -> anyhow::Result<Command> {
    let cmd_name = step.command.as_str();
    match cmd_name {
        // ── Lifecycle commands ────────────────────────────────────────
        "connect" => Ok(Command::Connect {
            server: self.resolve_str(&step.args, "server")?,
            ca_cert: self.opt_str(&step.args, "ca_cert"),
            insecure: self.opt_bool(&step.args, "insecure"),
        }),
        "login" => Ok(Command::Login {
            username: self.resolve_str(&step.args, "username")?,
            password: self.resolve_str(&step.args, "password")?,
        }),
        "register" => Ok(Command::Register {
            username: self.resolve_str(&step.args, "username")?,
            password: self.resolve_str(&step.args, "password")?,
        }),
        "send" | "send-message" => Ok(Command::SendMessage {
            text: self.resolve_str(&step.args, "text")?,
        }),
        "wait" => Ok(Command::Wait {
            duration_ms: self.resolve_u64(&step.args, "duration_ms")?,
        }),
        "set-var" | "setvar" => Ok(Command::SetVar {
            name: self.resolve_str(&step.args, "name")?,
            value: self.resolve_str(&step.args, "value")?,
        }),
        "assert" => {
            // Asserts use the step-level condition/op/value fields rather
            // than `args`.
            let condition = self.build_assert_condition(step)?;
            Ok(Command::Assert { condition })
        }

        // ── Session / identity ───────────────────────────────────────
        "help" => Ok(Command::Help),
        "quit" | "exit" => Ok(Command::Quit),
        "whoami" => Ok(Command::Whoami),
        "list" | "ls" => Ok(Command::List),
        "switch" | "sw" => Ok(Command::Switch {
            target: self.resolve_str(&step.args, "target")?,
        }),
        "dm" => Ok(Command::Dm {
            username: self.resolve_str(&step.args, "username")?,
        }),
        "create-group" | "cg" => Ok(Command::CreateGroup {
            name: self.resolve_str(&step.args, "name")?,
        }),
        "invite" => Ok(Command::Invite {
            target: self.resolve_str(&step.args, "target")?,
        }),
        "remove" | "kick" => Ok(Command::Remove {
            target: self.resolve_str(&step.args, "target")?,
        }),
        "leave" => Ok(Command::Leave),
        "join" => Ok(Command::Join),
        "members" => Ok(Command::Members),
        "group-info" | "gi" => Ok(Command::GroupInfo),
        "rename" => Ok(Command::Rename {
            name: self.resolve_str(&step.args, "name")?,
        }),
        "history" | "hist" => Ok(Command::History {
            // Default history depth when no count is given.
            count: self.opt_usize(&step.args, "count").unwrap_or(20),
        }),

        // ── Security / crypto ────────────────────────────────────────
        "verify" => Ok(Command::Verify {
            username: self.resolve_str(&step.args, "username")?,
        }),
        "update-key" | "rotate-key" => Ok(Command::UpdateKey),
        "typing" => Ok(Command::Typing),
        "typing-notify" => Ok(Command::TypingNotify {
            enabled: self.opt_bool(&step.args, "enabled"),
        }),
        "react" => Ok(Command::React {
            emoji: self.resolve_str(&step.args, "emoji")?,
            index: self.opt_usize(&step.args, "index"),
        }),
        "edit" => Ok(Command::Edit {
            index: self.resolve_usize(&step.args, "index")?,
            new_text: self.resolve_str(&step.args, "new_text")?,
        }),
        "delete" | "del" => Ok(Command::Delete {
            index: self.resolve_usize(&step.args, "index")?,
        }),
        "send-file" | "sf" => Ok(Command::SendFile {
            path: self.resolve_str(&step.args, "path")?,
        }),
        "download" | "dl" => Ok(Command::Download {
            index: self.resolve_usize(&step.args, "index")?,
        }),
        "delete-account" => Ok(Command::DeleteAccount),
        "disappear" => Ok(Command::Disappear {
            arg: self.opt_str(&step.args, "duration"),
        }),
        "privacy" => Ok(Command::Privacy {
            arg: self.opt_str(&step.args, "setting"),
        }),
        "verify-fs" => Ok(Command::VerifyFs),
        "rotate-all-keys" => Ok(Command::RotateAllKeys),
        "devices" => Ok(Command::Devices),
        "register-device" => Ok(Command::RegisterDevice {
            name: self.resolve_str(&step.args, "name")?,
        }),
        "revoke-device" => Ok(Command::RevokeDevice {
            id_prefix: self.resolve_str(&step.args, "id_prefix")?,
        }),

        // ── Mesh ─────────────────────────────────────────────────────
        "mesh-peers" => Ok(Command::MeshPeers),
        "mesh-server" => Ok(Command::MeshServer {
            addr: self.resolve_str(&step.args, "addr")?,
        }),
        "mesh-send" => Ok(Command::MeshSend {
            peer_id: self.resolve_str(&step.args, "peer_id")?,
            message: self.resolve_str(&step.args, "message")?,
        }),
        "mesh-broadcast" => Ok(Command::MeshBroadcast {
            topic: self.resolve_str(&step.args, "topic")?,
            message: self.resolve_str(&step.args, "message")?,
        }),
        "mesh-subscribe" => Ok(Command::MeshSubscribe {
            topic: self.resolve_str(&step.args, "topic")?,
        }),
        "mesh-route" => Ok(Command::MeshRoute),
        "mesh-identity" | "mesh-id" => Ok(Command::MeshIdentity),
        "mesh-store" => Ok(Command::MeshStore),
        "mesh-trace" => Ok(Command::MeshTrace {
            address: self.resolve_str(&step.args, "address")?,
        }),
        "mesh-stats" => Ok(Command::MeshStats),

        other => bail!("unknown command: {other}"),
    }
}
|
||||
|
||||
/// Build an `AssertCondition` from a playbook step.
///
/// Unknown condition names are not an error: they are passed through as
/// [`AssertCondition::Custom`] for the command engine to evaluate.
fn build_assert_condition(&self, step: &PlaybookStep) -> anyhow::Result<AssertCondition> {
    let cond = step
        .condition
        .as_deref()
        .context("assert step requires 'condition' field")?;
    match cond {
        "connected" => Ok(AssertCondition::Connected),
        "logged_in" => Ok(AssertCondition::LoggedIn),
        "in_conversation" => {
            // Accept the conversation name either as an explicit 'name' arg
            // or, as a fallback, from the step's 'value' field.
            let name = self.resolve_str(&step.args, "name")
                .or_else(|_| step.value.as_ref()
                    .and_then(|v| v.as_str())
                    .map(|s| self.substitute(s))
                    .context("assert in_conversation requires 'name' arg or 'value'"))?;
            Ok(AssertCondition::InConversation { name })
        }
        "message_count" => {
            // The operator defaults to >= so a bare `value: N` means
            // "at least N".
            let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
            let count = step
                .value
                .as_ref()
                .and_then(|v| v.as_u64())
                .context("message_count assert requires numeric 'value'")?
                as usize;
            Ok(AssertCondition::MessageCount { op, count })
        }
        "member_count" => {
            let op = self.parse_cmp_op(step.op.as_deref().unwrap_or("gte"))?;
            let count = step
                .value
                .as_ref()
                .and_then(|v| v.as_u64())
                .context("member_count assert requires numeric 'value'")?
                as usize;
            Ok(AssertCondition::MemberCount { op, count })
        }
        other => Ok(AssertCondition::Custom {
            expression: other.to_string(),
        }),
    }
}
|
||||
|
||||
fn parse_cmp_op(&self, s: &str) -> anyhow::Result<CmpOp> {
|
||||
match s {
|
||||
"eq" | "==" => Ok(CmpOp::Eq),
|
||||
"ne" | "!=" => Ok(CmpOp::Ne),
|
||||
"gt" | ">" => Ok(CmpOp::Gt),
|
||||
"lt" | "<" => Ok(CmpOp::Lt),
|
||||
"gte" | ">=" => Ok(CmpOp::Gte),
|
||||
"lte" | "<=" => Ok(CmpOp::Lte),
|
||||
other => bail!("unknown comparison operator: {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
// ── Variable substitution helpers ────────────────────────────────────
|
||||
|
||||
/// Substitute `$varname` and `${VAR:-default}` in a string.
///
/// Lookup order for `${...}` forms: runner variables, then process
/// environment, then the `:-` default (expanding to the empty string when
/// none of those resolve). Bare `$name` forms consult only runner
/// variables; an unknown name is left verbatim as `$name`.
fn substitute(&self, s: &str) -> String {
    let mut result = String::with_capacity(s.len());
    let mut chars = s.chars().peekable();
    while let Some(c) = chars.next() {
        if c == '$' {
            if chars.peek() == Some(&'{') {
                // Braced form: ${KEY} or ${KEY:-default}.
                chars.next(); // consume '{'
                let mut key = String::new();
                let mut default = None;
                while let Some(&ch) = chars.peek() {
                    if ch == '}' {
                        chars.next();
                        break;
                    }
                    // ":-" introduces a default; everything up to '}' is
                    // the default text.
                    if ch == ':' && chars.clone().nth(1) == Some('-') {
                        chars.next(); // consume ':'
                        chars.next(); // consume '-'
                        let mut def = String::new();
                        while let Some(&dch) = chars.peek() {
                            if dch == '}' {
                                chars.next();
                                break;
                            }
                            def.push(dch);
                            chars.next();
                        }
                        default = Some(def);
                        break;
                    }
                    key.push(ch);
                    chars.next();
                }
                if let Some(val) = self.vars.get(&key) {
                    result.push_str(val);
                } else if let Ok(val) = std::env::var(&key) {
                    result.push_str(&val);
                } else if let Some(def) = default {
                    result.push_str(&def);
                }
            } else {
                // Bare form: $name — name is [A-Za-z0-9_]+.
                let mut key = String::new();
                while let Some(&ch) = chars.peek() {
                    if ch.is_alphanumeric() || ch == '_' {
                        key.push(ch);
                        chars.next();
                    } else {
                        break;
                    }
                }
                if let Some(val) = self.vars.get(&key) {
                    result.push_str(val);
                } else {
                    // Unknown bare variable: emit it unchanged.
                    result.push('$');
                    result.push_str(&key);
                }
            }
        } else {
            result.push(c);
        }
    }
    result
}
|
||||
|
||||
/// Resolve a required string argument with variable substitution.
|
||||
fn resolve_str(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
match val {
|
||||
serde_yaml::Value::String(s) => Ok(self.substitute(s)),
|
||||
serde_yaml::Value::Number(n) => Ok(n.to_string()),
|
||||
serde_yaml::Value::Bool(b) => Ok(b.to_string()),
|
||||
other => Ok(format!("{other:?}")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve an optional string argument.
|
||||
fn opt_str(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> Option<String> {
|
||||
args.get(key).map(|v| match v {
|
||||
serde_yaml::Value::String(s) => self.substitute(s),
|
||||
serde_yaml::Value::Number(n) => n.to_string(),
|
||||
serde_yaml::Value::Bool(b) => b.to_string(),
|
||||
other => format!("{other:?}"),
|
||||
})
|
||||
}
|
||||
|
||||
/// Resolve an optional bool argument (defaults to false).
|
||||
fn opt_bool(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> bool {
|
||||
args.get(key)
|
||||
.and_then(|v| v.as_bool())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Resolve a required usize argument.
|
||||
fn resolve_usize(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<usize> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
val.as_u64()
|
||||
.map(|n| n as usize)
|
||||
.with_context(|| format!("argument '{key}' must be a positive integer"))
|
||||
}
|
||||
|
||||
/// Resolve a required u64 argument.
|
||||
fn resolve_u64(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> anyhow::Result<u64> {
|
||||
let val = args
|
||||
.get(key)
|
||||
.with_context(|| format!("missing required argument: {key}"))?;
|
||||
val.as_u64()
|
||||
.with_context(|| format!("argument '{key}' must be a positive integer"))
|
||||
}
|
||||
|
||||
/// Resolve an optional usize argument.
|
||||
fn opt_usize(
|
||||
&self,
|
||||
args: &HashMap<String, serde_yaml::Value>,
|
||||
key: &str,
|
||||
) -> Option<usize> {
|
||||
args.get(key).and_then(|v| v.as_u64()).map(|n| n as usize)
|
||||
}
|
||||
|
||||
/// Count total expanded steps (including loop iterations).
|
||||
fn expanded_step_count(&self) -> usize {
|
||||
self.playbook
|
||||
.steps
|
||||
.iter()
|
||||
.map(|s| {
|
||||
if let Some(ref ls) = s.loop_spec {
|
||||
if ls.to >= ls.from {
|
||||
ls.to - ls.from + 1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
} else {
|
||||
1
|
||||
}
|
||||
})
|
||||
.sum()
|
||||
}
|
||||
}
|
||||
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
// Unit tests for playbook parsing, variable substitution, and step-to-command
// mapping.
//
// NOTE(review): the indentation inside the YAML raw-string literals below
// looks flattened (likely lost during extraction) — verify nesting of
// `args:`, `variables:` and `loop:` keys against the original file.
#[cfg(test)]
mod tests {
    use super::*;

    // A playbook needs only a name and a list of steps.
    #[test]
    fn parse_minimal_playbook() {
        let yaml = r#"
name: "test"
steps:
- command: whoami
- command: list
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.name, "test");
        assert_eq!(runner.playbook.steps.len(), 2);
        assert_eq!(runner.playbook.steps[0].command, "whoami");
    }

    // `variables:` entries are loaded into the runner's substitution map.
    #[test]
    fn parse_playbook_with_variables() {
        let yaml = r#"
name: "var test"
variables:
user: alice
server: "127.0.0.1:5001"
steps:
- command: dm
args:
username: "$user"
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.vars["user"], "alice");
        assert_eq!(runner.vars["server"], "127.0.0.1:5001");
    }

    // Covers `$name`, `${name}`, `${missing:-default}` and no-op inputs.
    #[test]
    fn variable_substitution() {
        let mut vars = HashMap::new();
        vars.insert("name".to_string(), "alice".to_string());
        vars.insert("port".to_string(), "5001".to_string());
        let runner = PlaybookRunner {
            playbook: Playbook {
                name: "test".into(),
                description: None,
                variables: HashMap::new(),
                steps: vec![],
            },
            vars,
        };
        assert_eq!(runner.substitute("hello $name"), "hello alice");
        assert_eq!(runner.substitute("port=$port!"), "port=5001!");
        assert_eq!(runner.substitute("${name}@server"), "alice@server");
        assert_eq!(
            runner.substitute("${missing:-default}"),
            "default"
        );
        assert_eq!(runner.substitute("no vars here"), "no vars here");
    }

    // Each YAML step maps to the corresponding `Command` variant, with
    // variable substitution applied to string args.
    #[test]
    fn step_to_command_mapping() {
        let yaml = r#"
name: "mapping test"
variables:
user: bob
steps:
- command: dm
args:
username: "$user"
- command: send
args:
text: "hello"
- command: history
args:
count: 10
- command: wait
args:
duration_ms: 500
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd0 = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        assert!(matches!(cmd0, Command::Dm { username } if username == "bob"));

        let cmd1 = runner.step_to_command(&runner.playbook.steps[1]).unwrap();
        assert!(matches!(cmd1, Command::SendMessage { text } if text == "hello"));

        let cmd2 = runner.step_to_command(&runner.playbook.steps[2]).unwrap();
        assert!(matches!(cmd2, Command::History { count: 10 }));

        let cmd3 = runner.step_to_command(&runner.playbook.steps[3]).unwrap();
        assert!(matches!(cmd3, Command::Wait { duration_ms: 500 }));
    }

    // An `assert` step parses into Command::Assert with its condition/op/value.
    #[test]
    fn parse_assert_step() {
        let yaml = r#"
name: "assert test"
steps:
- command: assert
condition: message_count
op: gte
value: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        let cmd = runner.step_to_command(&runner.playbook.steps[0]).unwrap();
        match cmd {
            Command::Assert {
                condition: AssertCondition::MessageCount { op, count },
            } => {
                assert_eq!(op, CmpOp::Gte);
                assert_eq!(count, 5);
            }
            other => panic!("expected Assert MessageCount, got {other:?}"),
        }
    }

    // A `loop:` spec expands one step into (to - from + 1) iterations.
    #[test]
    fn parse_loop_spec() {
        let yaml = r#"
name: "loop test"
steps:
- command: send
args:
text: "msg $i"
loop:
var: i
from: 1
to: 5
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.expanded_step_count(), 5);
        let ls = runner.playbook.steps[0].loop_spec.as_ref().unwrap();
        assert_eq!(ls.var, "i");
        assert_eq!(ls.from, 1);
        assert_eq!(ls.to, 5);
    }

    // `on_error` defaults to Fail when omitted; explicit values are honored.
    #[test]
    fn on_error_defaults_to_fail() {
        let yaml = r#"
name: "error test"
steps:
- command: whoami
- command: list
on_error: continue
- command: quit
on_error: skip
"#;
        let runner = PlaybookRunner::from_str(yaml).unwrap();
        assert_eq!(runner.playbook.steps[0].on_error, OnError::Fail);
        assert_eq!(runner.playbook.steps[1].on_error, OnError::Continue);
        assert_eq!(runner.playbook.steps[2].on_error, OnError::Skip);
    }

    // Comparison operators accept both word and symbol spellings.
    #[test]
    fn cmp_op_parsing() {
        let runner = PlaybookRunner::from_str("name: t\nsteps: []").unwrap();
        assert!(matches!(runner.parse_cmp_op("eq"), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("=="), Ok(CmpOp::Eq)));
        assert!(matches!(runner.parse_cmp_op("gte"), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op(">="), Ok(CmpOp::Gte)));
        assert!(matches!(runner.parse_cmp_op("<"), Ok(CmpOp::Lt)));
        assert!(runner.parse_cmp_op("invalid").is_err());
    }

    // The Display impl of PlaybookReport shows a summary line and a
    // per-step "[i/n] cmd ... PASS/FAIL" listing.
    #[test]
    fn report_display() {
        let report = PlaybookReport {
            name: "test".into(),
            total_steps: 3,
            passed: 2,
            failed: 1,
            skipped: 0,
            duration: Duration::from_millis(150),
            step_results: vec![
                StepResult {
                    step_index: 0,
                    command: "whoami".into(),
                    success: true,
                    duration: Duration::from_millis(10),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 1,
                    command: "dm".into(),
                    success: true,
                    duration: Duration::from_millis(50),
                    output: None,
                    error: None,
                },
                StepResult {
                    step_index: 2,
                    command: "assert".into(),
                    success: false,
                    duration: Duration::from_millis(1),
                    output: None,
                    error: Some("message count 0 < 1".into()),
                },
            ],
        };
        let s = format!("{report}");
        assert!(s.contains("2 passed, 1 failed"));
        assert!(s.contains("[3/3] assert ... FAIL"));
    }
}
|
||||
3540
crates/quicprochat-client/src/client/repl.rs
Normal file
3540
crates/quicprochat-client/src/client/repl.rs
Normal file
File diff suppressed because it is too large
Load Diff
207
crates/quicprochat-client/src/client/retry.rs
Normal file
207
crates/quicprochat-client/src/client/retry.rs
Normal file
@@ -0,0 +1,207 @@
|
||||
//! Retry with exponential backoff for transient RPC failures.
|
||||
|
||||
use std::future::Future;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::Rng;
|
||||
use tracing::warn;
|
||||
|
||||
/// Default maximum number of retry attempts (including the first try).
|
||||
pub const DEFAULT_MAX_RETRIES: u32 = 3;
|
||||
/// Default base delay in milliseconds for exponential backoff.
|
||||
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;
|
||||
|
||||
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
|
||||
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
|
||||
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
|
||||
pub async fn retry_async<F, Fut, T, E, P>(
|
||||
op: F,
|
||||
max_retries: u32,
|
||||
base_delay_ms: u64,
|
||||
is_retriable: P,
|
||||
) -> Result<T, E>
|
||||
where
|
||||
F: Fn() -> Fut,
|
||||
Fut: Future<Output = Result<T, E>>,
|
||||
P: Fn(&E) -> bool,
|
||||
{
|
||||
let mut last_err: Option<E> = None;
|
||||
for attempt in 0..max_retries {
|
||||
match op().await {
|
||||
Ok(t) => return Ok(t),
|
||||
Err(e) => {
|
||||
if !is_retriable(&e) || attempt + 1 >= max_retries {
|
||||
return Err(e);
|
||||
}
|
||||
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
|
||||
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
|
||||
let total_ms = delay_ms + jitter_ms;
|
||||
warn!(
|
||||
attempt = attempt + 1,
|
||||
max_retries,
|
||||
delay_ms = total_ms,
|
||||
"RPC failed, retrying after backoff"
|
||||
);
|
||||
last_err = Some(e);
|
||||
tokio::time::sleep(Duration::from_millis(total_ms)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
match last_err {
|
||||
Some(e) => Err(e),
|
||||
None => unreachable!(
|
||||
"retry_async: last_err is always Some when loop exits after an Err"
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
|
||||
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
|
||||
/// When in doubt, returns `true` (retry).
|
||||
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
|
||||
let s = format!("{:#}", err);
|
||||
let s_lower = s.to_lowercase();
|
||||
// Do not retry: auth / permission
|
||||
if s_lower.contains("unauthorized")
|
||||
|| s_lower.contains("auth failed")
|
||||
|| s_lower.contains("access denied")
|
||||
|| s_lower.contains("401")
|
||||
|| s_lower.contains("forbidden")
|
||||
|| s_lower.contains("403")
|
||||
|| s_lower.contains("token")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Do not retry: bad request / invalid params
|
||||
if s_lower.contains("bad request")
|
||||
|| s_lower.contains("400")
|
||||
|| s_lower.contains("invalid param")
|
||||
|| s_lower.contains("fingerprint mismatch")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Retry: network, timeout, connection, server error, or anything else
|
||||
true
|
||||
}
|
||||
|
||||
// Unit tests for `retry_async` attempt accounting and for the error
// classifier. Delays use 1 ms so the suite stays fast.
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // An operation that succeeds on the first try returns without retrying.
    #[tokio::test]
    async fn retry_success_first_attempt() {
        let result = retry_async(|| async { Ok::<_, String>(42) }, 3, 10, |_| true).await;
        assert_eq!(result.unwrap(), 42);
    }

    // One transient failure then success: the op must run exactly twice.
    #[tokio::test]
    async fn retry_succeeds_after_one_failure() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    let n = c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    if n == 0 {
                        Err("transient failure".to_string())
                    } else {
                        Ok(99)
                    }
                }
            },
            3,
            1, // minimal delay for test speed
            |_| true,
        )
        .await;
        assert_eq!(result.unwrap(), 99);
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 2);
    }

    // A non-retriable error must be returned after a single attempt,
    // regardless of how many retries are allowed.
    #[tokio::test]
    async fn retry_non_retriable_fails_immediately() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("permanent error")
                }
            },
            5,
            1,
            |_: &&str| false, // nothing is retriable
        )
        .await;
        assert!(result.is_err());
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 1);
    }

    // A persistently failing retriable op runs exactly `max_retries` times
    // and then yields the last error.
    #[tokio::test]
    async fn retry_exhausts_all_attempts() {
        let counter = std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0));
        let c = counter.clone();
        let result = retry_async(
            || {
                let c = c.clone();
                async move {
                    c.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                    Err::<(), _>("still failing")
                }
            },
            3,
            1,
            |_| true,
        )
        .await;
        assert!(result.is_err());
        assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 3);
    }

    // Spot-checks the substring classifier across all three categories.
    #[test]
    fn anyhow_is_retriable_classifications() {
        // Auth errors should NOT be retriable
        let auth_errors = [
            "unauthorized access",
            "HTTP 401 Unauthorized",
            "forbidden resource",
            "HTTP 403 Forbidden",
            "auth failed for user",
            "access denied",
            "invalid token",
        ];
        for msg in &auth_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }

        // Bad-request errors should NOT be retriable
        let bad_req_errors = [
            "bad request: missing field",
            "HTTP 400 Bad Request",
            "invalid param: username",
            "fingerprint mismatch",
        ];
        for msg in &bad_req_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(!anyhow_is_retriable(&err), "expected non-retriable: {msg}");
        }

        // Transient errors SHOULD be retriable
        let transient_errors = [
            "connection refused",
            "network timeout",
            "server error 500",
            "stream reset",
            "something unknown happened",
        ];
        for msg in &transient_errors {
            let err = anyhow::anyhow!("{msg}");
            assert!(anyhow_is_retriable(&err), "expected retriable: {msg}");
        }
    }
}
|
||||
978
crates/quicprochat-client/src/client/rpc.rs
Normal file
978
crates/quicprochat-client/src/client/rpc.rs
Normal file
@@ -0,0 +1,978 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use quinn::{ClientConfig, Endpoint};
|
||||
use quinn_proto::crypto::rustls::QuicClientConfig;
|
||||
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
||||
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
|
||||
use quicprochat_core::HybridPublicKey;
|
||||
use quicprochat_proto::node_capnp::{auth, node_service};
|
||||
|
||||
use crate::{AUTH_CONTEXT, INSECURE_SKIP_VERIFY};
|
||||
|
||||
use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES};
|
||||
|
||||
/// Cap'n Proto traversal limit (words). 4 Mi words = 32 MiB; bounds DoS from deeply nested or large messages.
|
||||
const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024;
|
||||
|
||||
/// A [`rustls::client::danger::ServerCertVerifier`] that accepts any certificate.
///
/// **Development only.** Using this in production disables all TLS guarantees.
#[derive(Debug)]
struct InsecureServerCertVerifier;

impl rustls::client::danger::ServerCertVerifier for InsecureServerCertVerifier {
    // Unconditionally accept the presented certificate chain — no name,
    // expiry, or trust-anchor checks are performed.
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        _now: UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }

    // Unconditionally accept TLS 1.2 handshake signatures.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    // Unconditionally accept TLS 1.3 handshake signatures.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    // Advertise the ring provider's default signature schemes so the
    // handshake negotiates normally even though verification is skipped.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}
|
||||
|
||||
/// Establish a QUIC/TLS connection and return a `NodeService` client.
|
||||
///
|
||||
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
|
||||
///
|
||||
/// Reads [`INSECURE_SKIP_VERIFY`] to decide whether to bypass certificate
|
||||
/// verification (set once at startup via [`crate::set_insecure_skip_verify`]).
|
||||
pub async fn connect_node(
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
) -> anyhow::Result<node_service::Client> {
|
||||
let skip = INSECURE_SKIP_VERIFY.load(std::sync::atomic::Ordering::Relaxed);
|
||||
connect_node_opt(server, ca_cert, server_name, skip).await
|
||||
}
|
||||
|
||||
/// Like [`connect_node`] but with an explicit `insecure_skip_verify` toggle.
///
/// When `insecure_skip_verify` is `true`, certificate verification is disabled entirely.
/// This is intended for development and testing only.
pub async fn connect_node_opt(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    insecure_skip_verify: bool,
) -> anyhow::Result<node_service::Client> {
    let addr: SocketAddr = server
        .parse()
        .with_context(|| format!("server must be host:port, got {server}"))?;

    // Build the TLS config: either trust-anything (dev only) or a single
    // pinned root CA loaded from `ca_cert`.
    let mut tls = if insecure_skip_verify {
        RustlsClientConfig::builder()
            .dangerous()
            .with_custom_certificate_verifier(Arc::new(InsecureServerCertVerifier))
            .with_no_client_auth()
    } else {
        let cert_bytes =
            std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
        let mut roots = RootCertStore::empty();
        roots
            .add(CertificateDer::from(cert_bytes))
            .context("add root cert")?;
        RustlsClientConfig::builder()
            .with_root_certificates(roots)
            .with_no_client_auth()
    };
    // ALPN must match the server's advertised protocol.
    tls.alpn_protocols = vec![b"capnp".to_vec()];

    let crypto = QuicClientConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;

    // Bind an ephemeral local UDP port for the client endpoint.
    let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
    let mut endpoint = Endpoint::client(bind_addr)?;
    endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));

    let connection = endpoint
        .connect(addr, server_name)
        .context("quic connect init")?
        .await
        .context("quic connect failed")?;

    // A single bidirectional stream carries the entire capnp RPC session.
    let (send, recv) = connection.open_bi().await.context("open bi stream")?;

    // Cap message traversal to bound memory/DoS from oversized responses.
    let mut reader_opts = capnp::message::ReaderOptions::new();
    reader_opts.traversal_limit_in_words(Some(CAPNP_TRAVERSAL_LIMIT_WORDS));
    let network = twoparty::VatNetwork::new(
        recv.compat(),
        send.compat_write(),
        Side::Client,
        reader_opts,
    );

    let mut rpc_system = RpcSystem::new(Box::new(network), None);
    let client: node_service::Client = rpc_system.bootstrap(Side::Server);

    // Drive the RPC event loop on the current LocalSet (capnp-rpc is !Send);
    // the task runs for the lifetime of the connection.
    tokio::task::spawn_local(rpc_system);

    Ok(client)
}
|
||||
|
||||
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
|
||||
let guard = AUTH_CONTEXT
|
||||
.read()
|
||||
.map_err(|e| anyhow::anyhow!("AUTH_CONTEXT lock poisoned: {e}"))?;
|
||||
let ctx = guard.as_ref().ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"init_auth must be called before RPCs (use a bearer or session token for authenticated commands)"
|
||||
)
|
||||
})?;
|
||||
auth.set_version(ctx.version);
|
||||
auth.set_access_token(&ctx.access_token);
|
||||
auth.set_device_id(&ctx.device_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
|
||||
pub async fn upload_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
package: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_package(package);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_key_package RPC failed")?;
|
||||
|
||||
let server_fp = resp
|
||||
.get()
|
||||
.context("upload_key_package: bad response")?
|
||||
.get_fingerprint()
|
||||
.context("upload_key_package: missing fingerprint")?
|
||||
.to_vec();
|
||||
|
||||
let local_fp = super::state::sha256(package);
|
||||
anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a KeyPackage for `identity_key` from the AS.
|
||||
pub async fn fetch_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.fetch_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_key_package RPC failed")?;
|
||||
|
||||
let pkg = resp
|
||||
.get()
|
||||
.context("fetch_key_package: bad response")?
|
||||
.get_package()
|
||||
.context("fetch_key_package: missing package field")?
|
||||
.to_vec();
|
||||
|
||||
Ok(pkg)
|
||||
}
|
||||
|
||||
/// Enqueue an opaque payload to the DS for `recipient_key`.
|
||||
/// Returns the per-inbox sequence number assigned by the server.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn enqueue(
|
||||
client: &node_service::Client,
|
||||
recipient_key: &[u8],
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<u64> {
|
||||
enqueue_with_ttl(client, recipient_key, payload, None).await
|
||||
}
|
||||
|
||||
/// Enqueue with an optional TTL (seconds). 0 or None means no expiry.
///
/// Retries transient failures via `retry_async`; returns the per-inbox
/// sequence number assigned by the server.
pub async fn enqueue_with_ttl(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
    ttl_secs: Option<u32>,
) -> anyhow::Result<u64> {
    // Owned copies: the retry closure may be invoked several times, and each
    // attempt's `async move` block needs its own captured data.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                // Scope the params borrow so `req` is free for send() below.
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    if let Some(ttl) = ttl_secs {
                        p.set_ttl_secs(ttl);
                    }
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch and drain all payloads for `recipient_key`.
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_all(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Owned copies: the retry closure may be invoked several times and each
    // attempt's `async move` block needs its own captured data.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_request();
                // Scope the params borrow so `req` is free for send() below.
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch: bad response")?
                    .get_payloads()
                    .context("fetch: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp reader into
                // owned Vecs so the response buffer can be dropped.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Long-poll for payloads with optional timeout (ms).
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_wait(
    client: &node_service::Client,
    recipient_key: &[u8],
    timeout_ms: u64,
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Owned copies: the retry closure may be invoked several times and each
    // attempt's `async move` block needs its own captured data.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.fetch_wait_request();
                // Scope the params borrow so `req` is free for send() below.
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_timeout_ms(timeout_ms);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // fetch all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("fetch_wait RPC failed")?;

                let list = resp
                    .get()
                    .context("fetch_wait: bad response")?
                    .get_payloads()
                    .context("fetch_wait: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp reader into
                // owned Vecs so the response buffer can be dropped.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("fetch_wait: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
|
||||
pub async fn upload_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: &HybridPublicKey,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_hybrid_key RPC failed")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a peer's hybrid public key from the server.
|
||||
///
|
||||
/// Returns `None` if the peer has not uploaded a hybrid key.
|
||||
pub async fn fetch_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Option<HybridPublicKey>> {
|
||||
let mut req = client.fetch_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_hybrid_key RPC failed")?;
|
||||
|
||||
let pk_bytes = resp
|
||||
.get()
|
||||
.context("fetch_hybrid_key: bad response")?
|
||||
.get_hybrid_public_key()
|
||||
.context("fetch_hybrid_key: missing field")?
|
||||
.to_vec();
|
||||
|
||||
if pk_bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
|
||||
Ok(Some(pk))
|
||||
}
|
||||
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
pub fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&quicprochat_core::HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
quicprochat_core::hybrid_decrypt(kp, payload, b"", b"").map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
/// Peek at queued payloads without removing them.
/// Returns `(seq, payload)` pairs sorted by seq.
/// Retries on transient failures with exponential backoff.
pub async fn peek(
    client: &node_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
    // Owned copies: the retry closure may be invoked several times and each
    // attempt's `async move` block needs its own captured data.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.peek_request();
                // Scope the params borrow so `req` is free for send() below.
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_limit(0); // peek all
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req.send().promise.await.context("peek RPC failed")?;

                let list = resp
                    .get()
                    .context("peek: bad response")?
                    .get_payloads()
                    .context("peek: missing payloads")?;

                // Copy each (seq, data) envelope out of the capnp reader into
                // owned Vecs so the response buffer can be dropped.
                let mut payloads = Vec::with_capacity(list.len() as usize);
                for i in 0..list.len() {
                    let entry = list.get(i);
                    let seq = entry.get_seq();
                    let data = entry
                        .get_data()
                        .context("peek: envelope data read failed")?
                        .to_vec();
                    payloads.push((seq, data));
                }

                Ok(payloads)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Acknowledge all messages up to and including `seq_up_to`.
/// Retries on transient failures with exponential backoff.
pub async fn ack(
    client: &node_service::Client,
    recipient_key: &[u8],
    seq_up_to: u64,
) -> anyhow::Result<()> {
    // Owned copies: the retry closure may be invoked several times and each
    // attempt's `async move` block needs its own captured data.
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    retry_async(
        || {
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            async move {
                let mut req = client.ack_request();
                // Scope the params borrow so `req` is free for send() below.
                {
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    p.set_seq_up_to(seq_up_to);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                req.send().promise.await.context("ack RPC failed")?;
                Ok(())
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch multiple peers' hybrid keys in a single round-trip.
/// Returns `None` for peers who have not uploaded a hybrid key.
/// Retries on transient failures with exponential backoff.
pub async fn fetch_hybrid_keys(
    client: &node_service::Client,
    identity_keys: &[&[u8]],
) -> anyhow::Result<Vec<Option<HybridPublicKey>>> {
    // Owned copies: the retry closure may be invoked several times and each
    // attempt's `async move` block needs its own captured data.
    let client = client.clone();
    let identity_keys: Vec<Vec<u8>> = identity_keys.iter().map(|k| k.to_vec()).collect();
    retry_async(
        || {
            let client = client.clone();
            let identity_keys = identity_keys.clone();
            async move {
                let mut req = client.fetch_hybrid_keys_request();
                // Scope the params borrow so `req` is free for send() below.
                {
                    let mut p = req.get();
                    let mut list = p.reborrow().init_identity_keys(identity_keys.len() as u32);
                    for (i, ik) in identity_keys.iter().enumerate() {
                        list.set(i as u32, ik);
                    }
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }

                let resp = req
                    .send()
                    .promise
                    .await
                    .context("fetch_hybrid_keys RPC failed")?;

                let keys = resp
                    .get()
                    .context("fetch_hybrid_keys: bad response")?
                    .get_keys()
                    .context("fetch_hybrid_keys: missing keys")?;

                // Empty bytes = that peer has no hybrid key (None); otherwise
                // the bytes must parse as a valid hybrid public key.
                let mut result = Vec::with_capacity(keys.len() as usize);
                for i in 0..keys.len() {
                    let pk_bytes = keys
                        .get(i)
                        .context("fetch_hybrid_keys: key read failed")?
                        .to_vec();
                    if pk_bytes.is_empty() {
                        result.push(None);
                    } else {
                        let pk = HybridPublicKey::from_bytes(&pk_bytes)
                            .context("invalid hybrid public key")?;
                        result.push(Some(pk));
                    }
                }

                Ok(result)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Enqueue the same payload to multiple recipients in a single round-trip.
|
||||
/// Returns per-recipient sequence numbers.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn batch_enqueue(
|
||||
client: &node_service::Client,
|
||||
recipient_keys: &[&[u8]],
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u64>> {
|
||||
let client = client.clone();
|
||||
let recipient_keys: Vec<Vec<u8>> = recipient_keys.iter().map(|k| k.to_vec()).collect();
|
||||
let payload = payload.to_vec();
|
||||
retry_async(
|
||||
|| {
|
||||
let client = client.clone();
|
||||
let recipient_keys = recipient_keys.clone();
|
||||
let payload = payload.clone();
|
||||
async move {
|
||||
let mut req = client.batch_enqueue_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut list = p.reborrow().init_recipient_keys(recipient_keys.len() as u32);
|
||||
for (i, rk) in recipient_keys.iter().enumerate() {
|
||||
list.set(i as u32, rk);
|
||||
}
|
||||
p.set_payload(&payload);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("batch_enqueue RPC failed")?;
|
||||
|
||||
let seqs = resp
|
||||
.get()
|
||||
.context("batch_enqueue: bad response")?
|
||||
.get_seqs()
|
||||
.context("batch_enqueue: missing seqs")?;
|
||||
|
||||
let mut result = Vec::with_capacity(seqs.len() as usize);
|
||||
for i in 0..seqs.len() {
|
||||
result.push(seqs.get(i));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
},
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_BASE_DELAY_MS,
|
||||
anyhow_is_retriable,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Resolve a username to its Ed25519 identity key (32 bytes).
|
||||
///
|
||||
/// When the server returns a non-empty `inclusionProof`, the client verifies it
|
||||
/// against the identity key using the Key Transparency Merkle proof. Proof
|
||||
/// verification failure is treated as a hard error (the server is misbehaving).
|
||||
/// If the server sends no proof (empty field), the key is returned as-is —
|
||||
/// callers can decide whether to require proofs for security-critical flows.
|
||||
///
|
||||
/// Returns `None` if the username is not registered.
|
||||
pub async fn resolve_user(
|
||||
client: &node_service::Client,
|
||||
username: &str,
|
||||
) -> anyhow::Result<Option<Vec<u8>>> {
|
||||
let mut req = client.resolve_user_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_username(username);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("resolve_user RPC failed")?;
|
||||
|
||||
let reader = resp.get().context("resolve_user: bad response")?;
|
||||
|
||||
let key = reader
|
||||
.get_identity_key()
|
||||
.context("resolve_user: missing identity_key field")?
|
||||
.to_vec();
|
||||
|
||||
if key.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Verify the KT inclusion proof when the server sends one.
|
||||
let proof_bytes = reader
|
||||
.get_inclusion_proof()
|
||||
.context("resolve_user: missing inclusion_proof field")?
|
||||
.to_vec();
|
||||
|
||||
if !proof_bytes.is_empty() {
|
||||
let proof = quicprochat_kt::InclusionProof::from_bytes(&proof_bytes)
|
||||
.context("resolve_user: inclusion proof deserialise failed")?;
|
||||
quicprochat_kt::verify_inclusion(&proof, username, &key)
|
||||
.context("resolve_user: KT inclusion proof verification FAILED — possible key mislabelling")?;
|
||||
}
|
||||
|
||||
Ok(Some(key))
|
||||
}
|
||||
|
||||
/// Reverse lookup: resolve an identity key to the registered username.
|
||||
/// Returns `None` if no username is associated with the key.
|
||||
pub async fn resolve_identity(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Option<String>> {
|
||||
let mut req = client.resolve_identity_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("resolve_identity RPC failed")?;
|
||||
|
||||
let username = resp
|
||||
.get()
|
||||
.context("resolve_identity: bad response")?
|
||||
.get_username()
|
||||
.context("resolve_identity: missing field")?
|
||||
.to_str()
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
if username.is_empty() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(username))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a 1:1 DM channel with a peer.
|
||||
///
|
||||
/// Returns `(channel_id, was_new)` where `channel_id` is the stable 16-byte identifier and
|
||||
/// `was_new` is `true` iff this call created the channel for the first time. When `was_new` is
|
||||
/// `false`, the channel already existed (created by the peer), and the caller should wait for
|
||||
/// the peer's MLS Welcome to arrive via the background poller rather than creating a new MLS group.
|
||||
pub async fn create_channel(
|
||||
client: &node_service::Client,
|
||||
peer_key: &[u8],
|
||||
) -> anyhow::Result<(Vec<u8>, bool)> {
|
||||
let mut req = client.create_channel_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_peer_key(peer_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("create_channel RPC failed")?;
|
||||
|
||||
let reader = resp.get().context("create_channel: bad response")?;
|
||||
let channel_id = reader
|
||||
.get_channel_id()
|
||||
.context("create_channel: missing channel_id")?
|
||||
.to_vec();
|
||||
let was_new = reader.get_was_new();
|
||||
|
||||
Ok((channel_id, was_new))
|
||||
}
|
||||
|
||||
/// Upload a single chunk of a blob to the server.
|
||||
///
|
||||
/// `blob_hash` is the expected SHA-256 hash (32 bytes) of the complete blob.
|
||||
/// Returns the `blob_id` once the server has received and verified the final chunk.
|
||||
pub async fn upload_blob_chunk(
|
||||
client: &node_service::Client,
|
||||
blob_hash: &[u8],
|
||||
chunk: &[u8],
|
||||
offset: u64,
|
||||
total_size: u64,
|
||||
mime_type: &str,
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.upload_blob_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
p.set_blob_hash(blob_hash);
|
||||
p.set_chunk(chunk);
|
||||
p.set_offset(offset);
|
||||
p.set_total_size(total_size);
|
||||
p.set_mime_type(mime_type);
|
||||
}
|
||||
let resp = req.send().promise.await.context("upload_blob RPC failed")?;
|
||||
let blob_id = resp
|
||||
.get()
|
||||
.context("upload_blob: bad response")?
|
||||
.get_blob_id()
|
||||
.context("upload_blob: missing blob_id")?
|
||||
.to_vec();
|
||||
Ok(blob_id)
|
||||
}
|
||||
|
||||
/// Download a single chunk of a blob from the server.
|
||||
///
|
||||
/// Returns `(chunk_bytes, total_size, mime_type)`.
|
||||
pub async fn download_blob_chunk(
|
||||
client: &node_service::Client,
|
||||
blob_id: &[u8],
|
||||
offset: u64,
|
||||
length: u32,
|
||||
) -> anyhow::Result<(Vec<u8>, u64, String)> {
|
||||
let mut req = client.download_blob_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
p.set_blob_id(blob_id);
|
||||
p.set_offset(offset);
|
||||
p.set_length(length);
|
||||
}
|
||||
let resp = req.send().promise.await.context("download_blob RPC failed")?;
|
||||
let reader = resp.get().context("download_blob: bad response")?;
|
||||
let chunk = reader.get_chunk().context("download_blob: missing chunk")?.to_vec();
|
||||
let total_size = reader.get_total_size();
|
||||
let mime_type = reader
|
||||
.get_mime_type()
|
||||
.context("download_blob: missing mime_type")?
|
||||
.to_str()
|
||||
.unwrap_or("application/octet-stream")
|
||||
.to_string();
|
||||
Ok((chunk, total_size, mime_type))
|
||||
}
|
||||
|
||||
/// Delete the authenticated user's account on the server.
|
||||
/// Requires an identity-bound session (OPAQUE login).
|
||||
pub async fn delete_account(
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.delete_account_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("delete_account RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("delete_account: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// Register a device for the authenticated identity.
|
||||
pub async fn register_device(
|
||||
client: &node_service::Client,
|
||||
device_id: &[u8],
|
||||
device_name: &str,
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.register_device_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_device_id(device_id);
|
||||
p.set_device_name(device_name);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("register_device RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("register_device: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// List all registered devices for the authenticated identity.
|
||||
pub async fn list_devices(
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<Vec<(Vec<u8>, String, u64)>> {
|
||||
let mut req = client.list_devices_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("list_devices RPC failed")?;
|
||||
|
||||
let devices = resp
|
||||
.get()
|
||||
.context("list_devices: bad response")?
|
||||
.get_devices()
|
||||
.context("list_devices: missing devices field")?;
|
||||
|
||||
let mut result = Vec::with_capacity(devices.len() as usize);
|
||||
for i in 0..devices.len() {
|
||||
let entry = devices.get(i);
|
||||
let device_id = entry
|
||||
.get_device_id()
|
||||
.context("list_devices: missing device_id")?
|
||||
.to_vec();
|
||||
let device_name = entry
|
||||
.get_device_name()
|
||||
.context("list_devices: missing device_name")?
|
||||
.to_str()
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
let registered_at = entry.get_registered_at();
|
||||
result.push((device_id, device_name, registered_at));
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Revoke (remove) a registered device.
|
||||
pub async fn revoke_device(
|
||||
client: &node_service::Client,
|
||||
device_id: &[u8],
|
||||
) -> anyhow::Result<bool> {
|
||||
let mut req = client.revoke_device_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_device_id(device_id);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("revoke_device RPC failed")?;
|
||||
|
||||
let success = resp
|
||||
.get()
|
||||
.context("revoke_device: bad response")?
|
||||
.get_success();
|
||||
|
||||
Ok(success)
|
||||
}
|
||||
|
||||
/// Return the current Unix timestamp in milliseconds.
|
||||
pub fn current_timestamp_ms() -> u64 {
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as u64
|
||||
}
|
||||
305
crates/quicprochat-client/src/client/session.rs
Normal file
305
crates/quicprochat-client/src/client/session.rs
Normal file
@@ -0,0 +1,305 @@
|
||||
//! Runtime session state for the interactive REPL.
|
||||
//!
|
||||
//! Wraps the legacy `StoredState` (identity + hybrid key) and adds
|
||||
//! multi-conversation management via `ConversationStore`.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, IdentityKeypair};
|
||||
|
||||
use super::conversation::{
|
||||
now_ms, Conversation, ConversationId, ConversationKind, ConversationStore,
|
||||
};
|
||||
use super::state::load_or_init_state;
|
||||
|
||||
/// Runtime state for an interactive REPL session.
|
||||
pub struct SessionState {
|
||||
/// Long-term identity keypair.
|
||||
pub identity: Arc<IdentityKeypair>,
|
||||
/// Post-quantum hybrid keypair.
|
||||
pub hybrid_kp: Option<HybridKeypair>,
|
||||
/// Path to the legacy state file (for backward compat with one-shot commands).
|
||||
pub state_path: PathBuf,
|
||||
/// Optional password for the legacy state file. Zeroized on drop. (M9)
|
||||
pub password: Option<Zeroizing<String>>,
|
||||
/// SQLite-backed conversation + message store.
|
||||
pub conv_store: ConversationStore,
|
||||
/// Currently active conversation.
|
||||
pub active_conversation: Option<ConversationId>,
|
||||
/// In-memory GroupMember instances keyed by conversation ID.
|
||||
pub members: HashMap<ConversationId, GroupMember>,
|
||||
/// Holds the GroupMember whose KeyPackage was uploaded to the server.
|
||||
/// Its keystore contains the HPKE init private key needed to decrypt
|
||||
/// incoming Welcome messages. Consumed on auto-join, then replenished.
|
||||
pub pending_member: Option<GroupMember>,
|
||||
/// Whether to display typing indicators from others (session preference).
|
||||
pub typing_notify_enabled: bool,
|
||||
/// Tracks who is currently typing and when the indicator was last received.
|
||||
/// Entries older than 10 seconds are considered expired.
|
||||
pub typing_indicators: HashMap<String, Instant>,
|
||||
/// Per-conversation disappearing message TTL in seconds. None = messages persist.
|
||||
pub disappear_ttl: HashMap<ConversationId, u32>,
|
||||
/// When true, /members and /group-info redact identity keys as `[redacted-XXXX]`.
|
||||
pub redact_keys: bool,
|
||||
/// When Some(secs), auto-clear local messages older than this duration.
|
||||
pub auto_clear_secs: Option<u32>,
|
||||
/// When true, send periodic dummy messages for traffic analysis resistance.
|
||||
pub padding_enabled: bool,
|
||||
/// Last epoch at which we sent a message (for /verify-fs).
|
||||
pub last_send_epoch: Option<u64>,
|
||||
/// P2P node for direct mesh messaging (requires `--features mesh`).
|
||||
#[cfg(feature = "mesh")]
|
||||
pub p2p_node: Option<Arc<quicprochat_p2p::P2pNode>>,
|
||||
}
|
||||
|
||||
impl SessionState {
|
||||
/// Load identity from the legacy state file, open the conversation store,
|
||||
/// and migrate any existing single-group state into the conversation DB.
|
||||
pub fn load(
|
||||
state_path: &Path,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<Self> {
|
||||
let state = load_or_init_state(state_path, password)?;
|
||||
|
||||
let identity = Arc::new(IdentityKeypair::from_seed(state.identity_seed));
|
||||
let hybrid_kp = state
|
||||
.hybrid_key
|
||||
.as_ref()
|
||||
.map(HybridKeypair::from_bytes)
|
||||
.transpose()
|
||||
.context("decode hybrid key")?;
|
||||
|
||||
// Open the conversation DB next to the state file.
|
||||
// When a state password is provided, encrypt the DB with SQLCipher.
|
||||
let db_path = state_path.with_extension("convdb");
|
||||
let conv_store = ConversationStore::open(&db_path, password)?;
|
||||
|
||||
let mut session = Self {
|
||||
identity,
|
||||
hybrid_kp,
|
||||
state_path: state_path.to_path_buf(),
|
||||
password: password.map(|p| Zeroizing::new(String::from(p))),
|
||||
conv_store,
|
||||
active_conversation: None,
|
||||
members: HashMap::new(),
|
||||
pending_member: None,
|
||||
typing_notify_enabled: true,
|
||||
typing_indicators: HashMap::new(),
|
||||
disappear_ttl: HashMap::new(),
|
||||
redact_keys: false,
|
||||
auto_clear_secs: None,
|
||||
padding_enabled: false,
|
||||
last_send_epoch: None,
|
||||
#[cfg(feature = "mesh")]
|
||||
p2p_node: None,
|
||||
};
|
||||
|
||||
// Migrate legacy single-group into conversations if present and not yet migrated.
|
||||
if state.group.is_some() {
|
||||
session.migrate_legacy_group(state_path, &state.group)?;
|
||||
}
|
||||
|
||||
// Load all existing conversations' GroupMembers into memory.
|
||||
session.load_all_members()?;
|
||||
|
||||
Ok(session)
|
||||
}
|
||||
|
||||
/// Migrate the legacy single-group from StoredState into the conversation DB.
|
||||
fn migrate_legacy_group(
|
||||
&mut self,
|
||||
_state_path: &Path,
|
||||
group_blob: &Option<Vec<u8>>,
|
||||
) -> anyhow::Result<()> {
|
||||
let blob = match group_blob {
|
||||
Some(b) => b,
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Legacy group blobs used openmls 0.5 serde format. After the 0.8
|
||||
// upgrade the blob format changed to storage-provider state. Attempt
|
||||
// to load from the new format; if that fails, skip the legacy group.
|
||||
let group_id_guess = &blob[..blob.len().min(16)];
|
||||
let member = match GroupMember::new_from_storage_bytes(
|
||||
Arc::clone(&self.identity),
|
||||
blob,
|
||||
group_id_guess,
|
||||
false, // legacy groups are classical
|
||||
) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "skipping incompatible legacy group blob (openmls version mismatch)");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let group_id_bytes = member.group_id().unwrap_or_default();
|
||||
|
||||
// Use the first 16 bytes of the group_id as the ConversationId.
|
||||
let conv_id = if group_id_bytes.len() >= 16 {
|
||||
ConversationId::from_slice(&group_id_bytes[..16])
|
||||
.unwrap_or(ConversationId([0; 16]))
|
||||
} else {
|
||||
ConversationId::from_group_name(&hex::encode(&group_id_bytes))
|
||||
};
|
||||
|
||||
// Check if already migrated.
|
||||
if self.conv_store.load_conversation(&conv_id)?.is_some() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let member_keys = member.member_identities();
|
||||
let short_id = &hex::encode(&group_id_bytes)[..8.min(group_id_bytes.len() * 2)];
|
||||
let conv = Conversation {
|
||||
id: conv_id.clone(),
|
||||
kind: ConversationKind::Group {
|
||||
name: format!("legacy-{short_id}"),
|
||||
},
|
||||
display_name: format!("legacy-{short_id}"),
|
||||
mls_group_blob: Some(blob.clone()),
|
||||
keystore_blob: None,
|
||||
member_keys,
|
||||
unread_count: 0,
|
||||
last_activity_ms: now_ms(),
|
||||
created_at_ms: now_ms(),
|
||||
is_hybrid: false,
|
||||
last_seen_seq: 0,
|
||||
};
|
||||
|
||||
self.conv_store.save_conversation(&conv)?;
|
||||
self.members.insert(conv_id, member);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load all conversations from the DB and create in-memory GroupMember instances.
|
||||
fn load_all_members(&mut self) -> anyhow::Result<()> {
|
||||
let convs = self.conv_store.list_conversations()?;
|
||||
for conv in convs {
|
||||
if self.members.contains_key(&conv.id) {
|
||||
continue;
|
||||
}
|
||||
let member = self.create_member_from_conv(&conv)?;
|
||||
self.members.insert(conv.id.clone(), member);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a GroupMember from a stored conversation.
|
||||
fn create_member_from_conv(&self, conv: &Conversation) -> anyhow::Result<GroupMember> {
|
||||
if let Some(blob) = conv.mls_group_blob.as_ref() {
|
||||
let group_id = conv.id.0.as_slice();
|
||||
let member = GroupMember::new_from_storage_bytes(
|
||||
Arc::clone(&self.identity),
|
||||
blob,
|
||||
group_id,
|
||||
conv.is_hybrid,
|
||||
)
|
||||
.context("restore MLS state from conversation db")?;
|
||||
Ok(member)
|
||||
} else {
|
||||
// No MLS state — create an empty member.
|
||||
let ks_path = self.keystore_path_for(&conv.id);
|
||||
let ks = DiskKeyStore::persistent(&ks_path)
|
||||
.unwrap_or_else(|e| {
|
||||
tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral");
|
||||
DiskKeyStore::ephemeral()
|
||||
});
|
||||
Ok(GroupMember::new_with_state(
|
||||
Arc::clone(&self.identity),
|
||||
ks,
|
||||
None,
|
||||
conv.is_hybrid,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Path for a per-conversation keystore file.
|
||||
fn keystore_path_for(&self, conv_id: &ConversationId) -> PathBuf {
|
||||
let dir = self.state_path.with_extension("keystores");
|
||||
dir.join(format!("{}.ks", conv_id.hex()))
|
||||
}
|
||||
|
||||
/// Persist a conversation's MLS group state back to the DB.
|
||||
pub fn save_member(&self, conv_id: &ConversationId) -> anyhow::Result<()> {
|
||||
let member = self.members.get(conv_id).context("no such conversation")?;
|
||||
let blob = member
|
||||
.serialize_mls_state()
|
||||
.context("serialize MLS state")?;
|
||||
|
||||
let member_keys = member.member_identities();
|
||||
|
||||
// Update the mls_group_blob and member_keys in the DB.
|
||||
if let Some(mut conv) = self.conv_store.load_conversation(conv_id)? {
|
||||
conv.mls_group_blob = blob;
|
||||
conv.member_keys = member_keys;
|
||||
self.conv_store.save_conversation(&conv)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Persist all in-memory group states back to the DB.
|
||||
pub fn save_all(&self) -> anyhow::Result<()> {
|
||||
for conv_id in self.members.keys() {
|
||||
if let Err(e) = self.save_member(conv_id) {
|
||||
tracing::warn!(conv = %conv_id.hex(), error = %e, "failed to save conversation");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Add a new conversation and its GroupMember to the session.
|
||||
pub fn add_conversation(
|
||||
&mut self,
|
||||
conv: Conversation,
|
||||
member: GroupMember,
|
||||
) -> anyhow::Result<()> {
|
||||
// Ensure keystore directory exists
|
||||
let ks_path = self.keystore_path_for(&conv.id);
|
||||
if let Some(parent) = ks_path.parent() {
|
||||
std::fs::create_dir_all(parent).ok();
|
||||
}
|
||||
|
||||
self.conv_store.save_conversation(&conv)?;
|
||||
self.members.insert(conv.id.clone(), member);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a mutable reference to a conversation's GroupMember.
|
||||
pub fn get_member_mut(&mut self, conv_id: &ConversationId) -> Option<&mut GroupMember> {
|
||||
self.members.get_mut(conv_id)
|
||||
}
|
||||
|
||||
/// Public key bytes for this identity.
|
||||
pub fn identity_bytes(&self) -> Vec<u8> {
|
||||
self.identity.public_key_bytes().to_vec()
|
||||
}
|
||||
|
||||
/// Short hex prefix of the identity key for display.
|
||||
pub fn identity_short(&self) -> String {
|
||||
hex::encode(&self.identity.public_key_bytes()[..4])
|
||||
}
|
||||
|
||||
/// Get display name of a conversation.
|
||||
pub fn active_display_name(&self) -> Option<String> {
|
||||
let id = self.active_conversation.as_ref()?;
|
||||
self.conv_store.load_conversation(id).ok().flatten().map(|c| c.display_name)
|
||||
}
|
||||
|
||||
/// Count total unread across all conversations.
|
||||
pub fn total_unread(&self) -> u32 {
|
||||
self.conv_store
|
||||
.list_conversations()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.map(|c| c.unread_count)
|
||||
.sum()
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@ use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::Argon2;
|
||||
use argon2::{Algorithm, Argon2, Params, Version};
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
@@ -10,7 +10,7 @@ use chacha20poly1305::{
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use quicnprotochat_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
|
||||
use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
|
||||
|
||||
/// Magic bytes for encrypted client state files.
|
||||
const STATE_MAGIC: &[u8; 4] = b"QPCE";
|
||||
@@ -27,17 +27,31 @@ pub struct StoredState {
|
||||
/// Cached member public keys for group participants.
|
||||
#[serde(default)]
|
||||
pub member_keys: Vec<Vec<u8>>,
|
||||
/// MLS group ID bytes, needed to reload the group from StorageProvider state.
|
||||
#[serde(default)]
|
||||
pub group_id: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl StoredState {
|
||||
pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> {
|
||||
let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
|
||||
let group = self
|
||||
.group
|
||||
.map(|bytes| bincode::deserialize(&bytes).context("decode group"))
|
||||
.transpose()?;
|
||||
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
|
||||
let member = GroupMember::new_with_state(identity, key_store, group);
|
||||
let hybrid = self.hybrid_key.is_some();
|
||||
|
||||
let member = match (self.group.as_ref(), self.group_id.as_ref()) {
|
||||
(Some(storage_bytes), Some(gid)) => {
|
||||
GroupMember::new_from_storage_bytes(
|
||||
identity,
|
||||
storage_bytes,
|
||||
gid,
|
||||
hybrid,
|
||||
)
|
||||
.context("restore MLS state from stored state")?
|
||||
}
|
||||
_ => {
|
||||
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
|
||||
GroupMember::new_with_state(identity, key_store, None, hybrid)
|
||||
}
|
||||
};
|
||||
|
||||
let hybrid_kp = self
|
||||
.hybrid_key
|
||||
@@ -49,23 +63,34 @@ impl StoredState {
|
||||
|
||||
pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
|
||||
let group = member
|
||||
.group_ref()
|
||||
.map(|g| bincode::serialize(g).context("serialize group"))
|
||||
.transpose()?;
|
||||
.serialize_mls_state()
|
||||
.context("serialize MLS state")?;
|
||||
|
||||
Ok(Self {
|
||||
identity_seed: member.identity_seed(),
|
||||
identity_seed: *member.identity_seed(),
|
||||
group,
|
||||
hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
|
||||
member_keys: Vec::new(),
|
||||
group_id: member.group_id(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive a 32-byte key from a password and salt using Argon2id.
|
||||
/// Argon2id parameters for client state key derivation (auditable; matches argon2 crate defaults).
|
||||
/// - Memory: 19 MiB (m_cost = 19*1024 KiB)
|
||||
/// - Time: 2 iterations
|
||||
/// - Parallelism: 1 lane
|
||||
const ARGON2_STATE_M_COST: u32 = 19 * 1024;
|
||||
const ARGON2_STATE_T_COST: u32 = 2;
|
||||
const ARGON2_STATE_P_COST: u32 = 1;
|
||||
|
||||
/// Derive a 32-byte key from a password and salt using Argon2id with explicit parameters.
|
||||
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
|
||||
let params = Params::new(ARGON2_STATE_M_COST, ARGON2_STATE_T_COST, ARGON2_STATE_P_COST, Some(32))
|
||||
.map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
|
||||
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||
let mut key = [0u8; 32];
|
||||
Argon2::default()
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut key)
|
||||
.map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
|
||||
Ok(key)
|
||||
@@ -79,8 +104,8 @@ pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>
|
||||
let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
|
||||
|
||||
let key = derive_state_key(password, &salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||
let key = zeroize::Zeroizing::new(derive_state_key(password, &salt)?);
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
@@ -108,8 +133,8 @@ pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
|
||||
let ciphertext = &data[header_len..];
|
||||
|
||||
let key = derive_state_key(password, salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||
let key = zeroize::Zeroizing::new(derive_state_key(password, salt)?);
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&*key));
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
let plaintext = cipher
|
||||
@@ -138,7 +163,7 @@ pub fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result
|
||||
let identity = IdentityKeypair::generate();
|
||||
let hybrid_kp = HybridKeypair::generate();
|
||||
let key_store = DiskKeyStore::persistent(keystore_path(path))?;
|
||||
let member = GroupMember::new_with_state(Arc::new(identity), key_store, None);
|
||||
let member = GroupMember::new_with_state(Arc::new(identity), key_store, None, false);
|
||||
let state = StoredState::from_parts(&member, Some(&hybrid_kp))?;
|
||||
write_state(path, &state, password)?;
|
||||
Ok(state)
|
||||
@@ -179,7 +204,9 @@ pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) ->
|
||||
plaintext
|
||||
};
|
||||
|
||||
std::fs::write(path, bytes).with_context(|| format!("write state {path:?}"))?;
|
||||
let tmp = path.with_extension("tmp");
|
||||
std::fs::write(&tmp, bytes).with_context(|| format!("write state temp {tmp:?}"))?;
|
||||
std::fs::rename(&tmp, path).with_context(|| format!("rename state {tmp:?} -> {path:?}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -203,6 +230,7 @@ pub fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -222,4 +250,60 @@ mod tests {
|
||||
let encrypted = encrypt_state("correct", plaintext).unwrap();
|
||||
assert!(decrypt_state("wrong", &encrypted).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn state_encrypt_decrypt_round_trip() {
|
||||
let state = StoredState {
|
||||
identity_seed: [42u8; 32],
|
||||
hybrid_key: None,
|
||||
group: None,
|
||||
member_keys: Vec::new(),
|
||||
group_id: None,
|
||||
};
|
||||
let password = "test-password";
|
||||
let plaintext = bincode::serialize(&state).unwrap();
|
||||
let encrypted = encrypt_state(password, &plaintext).unwrap();
|
||||
let decrypted = decrypt_state(password, &encrypted).unwrap();
|
||||
let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
|
||||
assert_eq!(recovered.identity_seed, state.identity_seed);
|
||||
assert!(recovered.hybrid_key.is_none());
|
||||
assert!(recovered.group.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn state_encrypt_decrypt_with_hybrid_key() {
|
||||
use zeroize::Zeroizing;
|
||||
let state = StoredState {
|
||||
identity_seed: [7u8; 32],
|
||||
hybrid_key: Some(HybridKeypairBytes {
|
||||
x25519_sk: Zeroizing::new([1u8; 32]),
|
||||
mlkem_dk: Zeroizing::new(vec![3u8; 2400]),
|
||||
mlkem_ek: vec![4u8; 1184],
|
||||
}),
|
||||
group: None,
|
||||
member_keys: Vec::new(),
|
||||
group_id: None,
|
||||
};
|
||||
let password = "another-password";
|
||||
let plaintext = bincode::serialize(&state).unwrap();
|
||||
let encrypted = encrypt_state(password, &plaintext).unwrap();
|
||||
let decrypted = decrypt_state(password, &encrypted).unwrap();
|
||||
let recovered: StoredState = bincode::deserialize(&decrypted).unwrap();
|
||||
assert_eq!(recovered.identity_seed, state.identity_seed);
|
||||
assert!(recovered.hybrid_key.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn state_wrong_password_fails() {
|
||||
let state = StoredState {
|
||||
identity_seed: [99u8; 32],
|
||||
hybrid_key: None,
|
||||
group: None,
|
||||
member_keys: Vec::new(),
|
||||
group_id: None,
|
||||
};
|
||||
let plaintext = bincode::serialize(&state).unwrap();
|
||||
let encrypted = encrypt_state("correct", &plaintext).unwrap();
|
||||
assert!(decrypt_state("wrong", &encrypted).is_err());
|
||||
}
|
||||
}
|
||||
179
crates/quicprochat-client/src/client/token_cache.rs
Normal file
179
crates/quicprochat-client/src/client/token_cache.rs
Normal file
@@ -0,0 +1,179 @@
|
||||
//! Cached session token stored next to the state file.
|
||||
//!
|
||||
//! File format (no password): two lines — username and hex-encoded session token.
|
||||
//! File format (with password): QPCE-encrypted version of the above.
|
||||
//! The token has a server-side 24h TTL; no client-side expiry tracking.
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
|
||||
use super::state::{decrypt_state, encrypt_state, is_encrypted_state};
|
||||
|
||||
/// A username + session-token pair loaded from the on-disk session cache.
pub struct CachedSession {
    /// Account name the token was issued for (first line of the cache file).
    pub username: String,
    /// Hex-encoded session token (second line; validated as hex on load).
    pub token_hex: String,
}
|
||||
|
||||
/// Derive the session cache path: `{state_path}.session`.
fn session_cache_path(state_path: &Path) -> PathBuf {
    let mut path = state_path.to_path_buf();
    path.set_extension("session");
    path
}
|
||||
|
||||
/// Parse the two-line format (username + token_hex) from plaintext bytes.
|
||||
fn parse_session_lines(text: &str) -> Option<CachedSession> {
|
||||
let mut lines = text.lines();
|
||||
let username = lines.next()?.trim().to_string();
|
||||
let token_hex = lines.next()?.trim().to_string();
|
||||
if username.is_empty() || token_hex.is_empty() {
|
||||
return None;
|
||||
}
|
||||
if hex::decode(&token_hex).is_err() {
|
||||
return None;
|
||||
}
|
||||
Some(CachedSession { username, token_hex })
|
||||
}
|
||||
|
||||
/// Load a cached session token. Returns None if file is missing or malformed.
|
||||
/// Decrypts if the file is QPCE-encrypted (requires `password`).
|
||||
pub fn load_cached_session(state_path: &Path, password: Option<&str>) -> Option<CachedSession> {
|
||||
let path = session_cache_path(state_path);
|
||||
let raw = std::fs::read(&path).ok()?;
|
||||
|
||||
if is_encrypted_state(&raw) {
|
||||
let pw = password?;
|
||||
let plaintext = decrypt_state(pw, &raw).ok()?;
|
||||
let text = String::from_utf8(plaintext).ok()?;
|
||||
parse_session_lines(&text)
|
||||
} else {
|
||||
let text = String::from_utf8(raw).ok()?;
|
||||
parse_session_lines(&text)
|
||||
}
|
||||
}
|
||||
|
||||
/// Save a session token to the cache file (mode 0o600 on Unix).
|
||||
/// Encrypts with QPCE if `password` is provided.
|
||||
pub fn save_cached_session(
|
||||
state_path: &Path,
|
||||
username: &str,
|
||||
token_hex: &str,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
let path = session_cache_path(state_path);
|
||||
let contents = format!("{username}\n{token_hex}\n");
|
||||
|
||||
let bytes = match password {
|
||||
Some(pw) => encrypt_state(pw, contents.as_bytes())?,
|
||||
None => {
|
||||
#[cfg(not(unix))]
|
||||
tracing::warn!(
|
||||
"storing session token as plaintext (no password set); \
|
||||
file permissions cannot be restricted on this platform"
|
||||
);
|
||||
contents.into_bytes()
|
||||
}
|
||||
};
|
||||
|
||||
std::fs::write(&path, bytes).with_context(|| format!("write session cache {path:?}"))?;
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let perms = std::fs::Permissions::from_mode(0o600);
|
||||
std::fs::set_permissions(&path, perms).ok();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove the cached session file.
|
||||
pub fn clear_cached_session(state_path: &Path) {
|
||||
let path = session_cache_path(state_path);
|
||||
std::fs::remove_file(&path).ok();
|
||||
}
|
||||
|
||||
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // Plaintext (no password) save → load round-trips both fields.
    #[test]
    fn plaintext_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"session-token-bytes");
        save_cached_session(&state_path, "alice", &token, None).unwrap();
        let loaded = load_cached_session(&state_path, None).unwrap();
        assert_eq!(loaded.username, "alice");
        assert_eq!(loaded.token_hex, token);
    }

    // With a password the cache is QPCE-encrypted on disk and decrypts on load.
    #[test]
    fn encrypted_round_trip() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let password = "strong-password";

        let token = hex::encode(b"encrypted-token");
        save_cached_session(&state_path, "bob", &token, Some(password)).unwrap();

        // Encrypted file should start with QPCE magic
        let raw = std::fs::read(session_cache_path(&state_path)).unwrap();
        assert_eq!(&raw[..4], b"QPCE");

        let loaded = load_cached_session(&state_path, Some(password)).unwrap();
        assert_eq!(loaded.username, "bob");
        assert_eq!(loaded.token_hex, token);
    }

    // Decryption failure (wrong password) surfaces as None, not a panic.
    #[test]
    fn wrong_password_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"secret-token");
        save_cached_session(&state_path, "carol", &token, Some("correct")).unwrap();
        let result = load_cached_session(&state_path, Some("wrong"));
        assert!(result.is_none());
    }

    // No cache file at all → None.
    #[test]
    fn missing_file_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("nonexistent.bin");
        assert!(load_cached_session(&state_path, None).is_none());
    }

    // clear_cached_session deletes the derived `.session` file.
    #[test]
    fn clear_removes_file() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");

        let token = hex::encode(b"to-be-deleted");
        save_cached_session(&state_path, "dave", &token, None).unwrap();
        assert!(session_cache_path(&state_path).exists());

        clear_cached_session(&state_path);
        assert!(!session_cache_path(&state_path).exists());
    }

    // Each malformed shape (bad hex, missing line, empty) is rejected as None.
    #[test]
    fn malformed_content_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let state_path = dir.path().join("state.bin");
        let cache_path = session_cache_path(&state_path);

        // Not valid hex on second line
        std::fs::write(&cache_path, "alice\nnot-hex-data\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());

        // Only one line
        std::fs::write(&cache_path, "alice\n").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());

        // Empty file
        std::fs::write(&cache_path, "").unwrap();
        assert!(load_cached_session(&state_path, None).is_none());
    }
}
|
||||
824
crates/quicprochat-client/src/client/tui/mod.rs
Normal file
824
crates/quicprochat-client/src/client/tui/mod.rs
Normal file
@@ -0,0 +1,824 @@
|
||||
//! Full-screen Ratatui TUI for quicprochat.
|
||||
//!
|
||||
//! Layout:
|
||||
//! ┌──────────────┬──────────────────────────────────────────┐
|
||||
//! │ Channels │ Messages │
|
||||
//! │ (20%) │ (80%) │
|
||||
//! │ │ │
|
||||
//! │ ├──────────────────────────────────────────┤
|
||||
//! │ │ Input bar │
|
||||
//! └──────────────┴──────────────────────────────────────────┘
|
||||
//!
|
||||
//! Keyboard:
|
||||
//! Enter — send message
|
||||
//! Up / Down — scroll message history
|
||||
//! Tab — next channel
|
||||
//! Shift+Tab — prev channel
|
||||
//! Ctrl+C / q — quit
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use crossterm::{
|
||||
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyModifiers},
|
||||
execute,
|
||||
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
|
||||
};
|
||||
use ratatui::{
|
||||
backend::CrosstermBackend,
|
||||
layout::{Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::{Line, Span},
|
||||
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
|
||||
Frame, Terminal,
|
||||
};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::interval;
|
||||
|
||||
use crate::{ClientAuth, init_auth};
|
||||
use super::commands::{opaque_login, opaque_register};
|
||||
use super::conversation::{now_ms, ConversationId, StoredMessage};
|
||||
use super::rpc::{
|
||||
connect_node, enqueue, fetch_hybrid_key, fetch_wait, try_hybrid_decrypt, upload_hybrid_key,
|
||||
upload_key_package,
|
||||
};
|
||||
use super::session::SessionState;
|
||||
use super::state::load_or_init_state;
|
||||
use super::token_cache::{load_cached_session, save_cached_session};
|
||||
|
||||
use quicprochat_core::{
|
||||
AppMessage, DiskKeyStore, GroupMember, IdentityKeypair, ReceivedMessage,
|
||||
hybrid_encrypt, parse as parse_app_msg, serialize_chat,
|
||||
};
|
||||
use quicprochat_proto::node_capnp::node_service;
|
||||
|
||||
// ── App events ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Events sent from background tasks into the main TUI loop.
enum TuiEvent {
    /// A key event from the terminal.
    Key(event::KeyEvent),
    /// New messages received from the server (conv_id, sender_short, body).
    /// `sender_short` is a hex-shortened sender key used for display.
    NewMessages(Vec<(ConversationId, String, String)>),
    /// Tick — redraw periodically even if nothing happened.
    Tick,
}
|
||||
|
||||
// ── Display message ───────────────────────────────────────────────────────────
|
||||
|
||||
/// One message as rendered in the message pane.
#[derive(Clone)]
struct DisplayMessage {
    /// Short sender label (e.g. "me(abcd1234)" or a hex-shortened key).
    sender: String,
    /// Message body text.
    body: String,
    /// Epoch milliseconds used for the `[HH:MM]` timestamp prefix.
    timestamp_ms: u64,
    /// True when this client sent the message (affects styling).
    is_outgoing: bool,
}
|
||||
|
||||
// ── App state ─────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Mutable UI state for the TUI event loop.
struct App {
    /// Channel (conversation) names shown in the sidebar.
    channel_names: Vec<String>,
    /// Conversation IDs, parallel to `channel_names`.
    channel_ids: Vec<ConversationId>,
    /// Unread message counts, parallel to `channel_names`.
    unread_counts: Vec<u32>,
    /// Index of the selected channel in the sidebar.
    selected_channel: usize,
    /// Messages for the currently active channel.
    messages: Vec<DisplayMessage>,
    /// Current input buffer.
    input: String,
    /// Scroll offset in lines from the bottom (0 = bottom / newest).
    scroll_offset: usize,
    /// Whether the user has requested quit.
    should_quit: bool,
    /// Short identity string for display.
    identity_short: String,
}
|
||||
|
||||
impl App {
|
||||
fn new(session: &SessionState) -> anyhow::Result<Self> {
|
||||
let convs = session.conv_store.list_conversations()?;
|
||||
let channel_names: Vec<String> = convs.iter().map(|c| c.display_name.clone()).collect();
|
||||
let channel_ids: Vec<ConversationId> = convs.iter().map(|c| c.id.clone()).collect();
|
||||
let unread_counts: Vec<u32> = convs.iter().map(|c| c.unread_count).collect();
|
||||
|
||||
Ok(Self {
|
||||
channel_names,
|
||||
channel_ids,
|
||||
unread_counts,
|
||||
selected_channel: 0,
|
||||
messages: Vec::new(),
|
||||
input: String::new(),
|
||||
scroll_offset: 0,
|
||||
should_quit: false,
|
||||
identity_short: session.identity_short(),
|
||||
})
|
||||
}
|
||||
|
||||
fn active_conv_id(&self) -> Option<&ConversationId> {
|
||||
self.channel_ids.get(self.selected_channel)
|
||||
}
|
||||
|
||||
/// Reload messages for the currently selected channel from the session store.
|
||||
fn reload_messages(&mut self, session: &SessionState) -> anyhow::Result<()> {
|
||||
let conv_id = match self.active_conv_id() {
|
||||
Some(id) => id.clone(),
|
||||
None => {
|
||||
self.messages.clear();
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
let stored = session.conv_store.load_recent_messages(&conv_id, 200)?;
|
||||
self.messages = stored
|
||||
.into_iter()
|
||||
.map(|m| {
|
||||
let sender = if m.is_outgoing {
|
||||
format!("me({})", &self.identity_short)
|
||||
} else if let Some(name) = &m.sender_name {
|
||||
name.clone()
|
||||
} else {
|
||||
// Shorten sender key to 8 hex chars.
|
||||
let hex_short = hex::encode(&m.sender_key[..m.sender_key.len().min(4)]);
|
||||
format!("{hex_short}")
|
||||
};
|
||||
DisplayMessage {
|
||||
sender,
|
||||
body: m.body,
|
||||
timestamp_ms: m.timestamp_ms,
|
||||
is_outgoing: m.is_outgoing,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
// Reset scroll to bottom on channel switch.
|
||||
self.scroll_offset = 0;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn select_next_channel(&mut self, session: &SessionState) {
|
||||
if self.channel_names.is_empty() {
|
||||
return;
|
||||
}
|
||||
self.selected_channel = (self.selected_channel + 1) % self.channel_names.len();
|
||||
let _ = self.reload_messages(session);
|
||||
}
|
||||
|
||||
fn select_prev_channel(&mut self, session: &SessionState) {
|
||||
if self.channel_names.is_empty() {
|
||||
return;
|
||||
}
|
||||
if self.selected_channel == 0 {
|
||||
self.selected_channel = self.channel_names.len() - 1;
|
||||
} else {
|
||||
self.selected_channel -= 1;
|
||||
}
|
||||
let _ = self.reload_messages(session);
|
||||
}
|
||||
|
||||
fn scroll_up(&mut self) {
|
||||
self.scroll_offset = self.scroll_offset.saturating_add(1);
|
||||
}
|
||||
|
||||
fn scroll_down(&mut self) {
|
||||
self.scroll_offset = self.scroll_offset.saturating_sub(1);
|
||||
}
|
||||
|
||||
/// Append newly received messages to the in-memory list (no DB reload needed
|
||||
/// since we already have them from the poll task, but we also save them via
|
||||
/// the session so they appear on reload).
|
||||
fn append_messages(&mut self, msgs: Vec<(ConversationId, String, String)>) {
|
||||
let active = self.active_conv_id().cloned();
|
||||
for (conv_id, sender, body) in msgs {
|
||||
if active.as_ref() == Some(&conv_id) {
|
||||
self.messages.push(DisplayMessage {
|
||||
sender,
|
||||
body,
|
||||
timestamp_ms: now_ms(),
|
||||
is_outgoing: false,
|
||||
});
|
||||
// Snap to bottom if user wasn't scrolled.
|
||||
if self.scroll_offset == 0 {
|
||||
// Already at bottom — nothing to do.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Drawing ───────────────────────────────────────────────────────────────────
|
||||
|
||||
fn ui(frame: &mut Frame, app: &App) {
|
||||
let size = frame.area();
|
||||
|
||||
// Top-level split: sidebar | main area.
|
||||
let h_chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([Constraint::Percentage(20), Constraint::Percentage(80)])
|
||||
.split(size);
|
||||
|
||||
// Main area split: messages | input bar.
|
||||
let v_chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Min(3), Constraint::Length(3)])
|
||||
.split(h_chunks[1]);
|
||||
|
||||
draw_sidebar(frame, app, h_chunks[0]);
|
||||
draw_messages(frame, app, v_chunks[0]);
|
||||
draw_input(frame, app, v_chunks[1]);
|
||||
}
|
||||
|
||||
fn draw_sidebar(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let items: Vec<ListItem> = app
|
||||
.channel_names
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, name)| {
|
||||
let unread = app.unread_counts.get(i).copied().unwrap_or(0);
|
||||
let is_selected = i == app.selected_channel;
|
||||
|
||||
let label = if unread > 0 && !is_selected {
|
||||
format!("{name} ({unread})")
|
||||
} else {
|
||||
name.clone()
|
||||
};
|
||||
|
||||
let style = if is_selected {
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD | Modifier::REVERSED)
|
||||
} else if unread > 0 {
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Cyan)
|
||||
};
|
||||
ListItem::new(Line::from(Span::styled(label, style)))
|
||||
})
|
||||
.collect();
|
||||
|
||||
let block = Block::default()
|
||||
.title(" Channels ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let mut list_state = ListState::default();
|
||||
if !app.channel_names.is_empty() {
|
||||
list_state.select(Some(app.selected_channel));
|
||||
}
|
||||
|
||||
frame.render_stateful_widget(
|
||||
List::new(items).block(block),
|
||||
area,
|
||||
&mut list_state,
|
||||
);
|
||||
}
|
||||
|
||||
fn draw_messages(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let channel_title = app
|
||||
.channel_names
|
||||
.get(app.selected_channel)
|
||||
.map(|n| format!(" {n} "))
|
||||
.unwrap_or_else(|| " Messages ".to_string());
|
||||
|
||||
let block = Block::default()
|
||||
.title(channel_title)
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let inner_height = area.height.saturating_sub(2) as usize;
|
||||
|
||||
// Build lines from messages (newest at bottom).
|
||||
let mut lines: Vec<Line> = app
|
||||
.messages
|
||||
.iter()
|
||||
.map(|m| {
|
||||
let ts = format_timestamp(m.timestamp_ms);
|
||||
let ts_span = Span::styled(ts, Style::default().fg(Color::DarkGray));
|
||||
|
||||
let sender_style = if m.is_outgoing {
|
||||
Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Green).add_modifier(Modifier::BOLD)
|
||||
};
|
||||
let sender_span = Span::styled(format!(" {} ", m.sender), sender_style);
|
||||
let body_span = Span::raw(m.body.clone());
|
||||
|
||||
Line::from(vec![ts_span, sender_span, body_span])
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Apply scroll: scroll_offset=0 means newest at bottom.
|
||||
let total = lines.len();
|
||||
let visible_start = if total > inner_height {
|
||||
let bottom = total - app.scroll_offset.min(total);
|
||||
bottom.saturating_sub(inner_height)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let visible_end = if total > inner_height {
|
||||
total - app.scroll_offset.min(total)
|
||||
} else {
|
||||
total
|
||||
};
|
||||
let visible_lines: Vec<Line> = lines
|
||||
.drain(visible_start..visible_end.min(lines.len()))
|
||||
.collect();
|
||||
|
||||
let paragraph = Paragraph::new(visible_lines)
|
||||
.block(block)
|
||||
.wrap(Wrap { trim: false });
|
||||
|
||||
frame.render_widget(paragraph, area);
|
||||
}
|
||||
|
||||
fn draw_input(frame: &mut Frame, app: &App, area: Rect) {
|
||||
let block = Block::default()
|
||||
.title(" Input (Enter=send, Tab=switch channel, q/Ctrl+C=quit) ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
let input_text = Paragraph::new(app.input.as_str())
|
||||
.block(block)
|
||||
.style(Style::default().fg(Color::White));
|
||||
|
||||
frame.render_widget(input_text, area);
|
||||
|
||||
// Position cursor at end of input.
|
||||
let cursor_x = area.x + 1 + app.input.len() as u16;
|
||||
let cursor_y = area.y + 1;
|
||||
if cursor_x < area.x + area.width - 1 {
|
||||
frame.set_cursor_position((cursor_x, cursor_y));
|
||||
}
|
||||
}
|
||||
|
||||
/// Format epoch milliseconds as a `"[HH:MM] "` prefix (time-of-day in UTC,
/// wrapped modulo 24 hours).
fn format_timestamp(ms: u64) -> String {
    let total_minutes = ms / 60_000;
    format!("[{:02}:{:02}] ", (total_minutes / 60) % 24, total_minutes % 60)
}
|
||||
|
||||
// ── Message polling task ──────────────────────────────────────────────────────
|
||||
|
||||
/// Background task that polls the server for new messages and sends them via `tx`.
///
/// Runs forever (until `tx` closes): every second it calls `fetch_wait`,
/// attempts hybrid decryption of each payload, then tries each known
/// conversation's `GroupMember` until one accepts the MLS message. Accepted
/// application messages are unsealed, parsed, persisted, and forwarded to the
/// UI loop as `TuiEvent::NewMessages`.
async fn poll_task(
    mut session: SessionState,
    client: node_service::Client,
    tx: mpsc::Sender<TuiEvent>,
) {
    let mut poll_interval = interval(Duration::from_millis(1000));
    // Don't burst-catch-up after a stall; just resume polling.
    poll_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);

    loop {
        poll_interval.tick().await;

        let identity_bytes = session.identity_bytes();
        // Transient fetch errors are ignored; we retry on the next tick.
        let payloads = match fetch_wait(&client, &identity_bytes, 0).await {
            Ok(p) => p,
            Err(_) => continue,
        };

        if payloads.is_empty() {
            continue;
        }

        let mut new_msgs: Vec<(ConversationId, String, String)> = Vec::new();
        let my_key = session.identity_bytes();

        // Process in server sequence order so MLS epochs advance correctly.
        let mut sorted = payloads;
        sorted.sort_by_key(|(seq, _)| *seq);

        for (_seq, payload) in &sorted {
            // Outer hybrid layer is optional: fall back to the raw payload if
            // decryption fails (sender may not have had our hybrid key).
            let mls_payload = match try_hybrid_decrypt(session.hybrid_kp.as_ref(), payload) {
                Ok(b) => b,
                Err(_) => payload.clone(),
            };

            // Snapshot IDs so we can take `get_mut` borrows inside the loop.
            let conv_ids: Vec<ConversationId> = session.members.keys().cloned().collect();

            // Try each conversation's group; the first member that accepts the
            // ciphertext claims the message (`break` below).
            for conv_id in &conv_ids {
                let member = match session.members.get_mut(conv_id) {
                    Some(m) => m,
                    None => continue,
                };

                match member.receive_message(&mls_payload) {
                    Ok(ReceivedMessage::Application(plaintext)) => {
                        // Undo metadata protection: unpad, then unseal the
                        // sender if the sealed-sender envelope is present.
                        // Fallbacks attribute the message to our own key.
                        let (sender_key, app_bytes) = {
                            let after_unpad = quicprochat_core::padding::unpad(&plaintext)
                                .unwrap_or_else(|_| plaintext.clone());

                            if quicprochat_core::sealed_sender::is_sealed(&after_unpad) {
                                match quicprochat_core::sealed_sender::unseal(&after_unpad) {
                                    Ok((sk, inner)) => (sk.to_vec(), inner),
                                    Err(_) => (my_key.clone(), after_unpad),
                                }
                            } else {
                                (my_key.clone(), after_unpad)
                            }
                        };

                        // Decode the structured app message; unparseable bytes
                        // are stored verbatim as a "chat" body.
                        let (body, msg_id, msg_type, ref_msg_id) =
                            match parse_app_msg(&app_bytes) {
                                Ok((_, AppMessage::Chat { message_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    Some(message_id),
                                    "chat",
                                    None,
                                ),
                                Ok((_, AppMessage::Reply { ref_msg_id, body })) => (
                                    String::from_utf8_lossy(&body).to_string(),
                                    None,
                                    "reply",
                                    Some(ref_msg_id),
                                ),
                                Ok((_, AppMessage::Reaction { ref_msg_id, emoji })) => (
                                    String::from_utf8_lossy(&emoji).to_string(),
                                    None,
                                    "reaction",
                                    Some(ref_msg_id),
                                ),
                                _ => (
                                    String::from_utf8_lossy(&app_bytes).to_string(),
                                    None,
                                    "chat",
                                    None,
                                ),
                            };

                        let stored = StoredMessage {
                            conversation_id: conv_id.clone(),
                            message_id: msg_id,
                            sender_key: sender_key.clone(),
                            sender_name: None,
                            body: body.clone(),
                            msg_type: msg_type.into(),
                            ref_msg_id,
                            timestamp_ms: now_ms(),
                            is_outgoing: false,
                        };

                        // Only notify the UI for messages we actually persisted.
                        if session.conv_store.save_message(&stored).is_ok() {
                            let sender_short = hex::encode(&sender_key[..sender_key.len().min(4)]);
                            new_msgs.push((conv_id.clone(), sender_short, body));
                        }

                        let _ = session.conv_store.update_activity(conv_id, now_ms());
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    // Group state advanced (commit/removal): persist and stop
                    // offering this payload to other conversations.
                    Ok(ReceivedMessage::StateChanged) | Ok(ReceivedMessage::SelfRemoved) => {
                        let _ = session.save_member(conv_id);
                        break;
                    }
                    _ => {}
                }
            }
        }

        if !new_msgs.is_empty() {
            // UI loop gone → channel closed → shut the task down.
            if tx.send(TuiEvent::NewMessages(new_msgs)).await.is_err() {
                break;
            }
        }
    }
}
|
||||
|
||||
// ── Send message ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Encrypt `text` for the group behind `conv_id`, fan it out to every other
/// member's server queue, and persist the outgoing message locally.
///
/// Pipeline: AppMessage-serialize → sealed-sender seal → pad → MLS encrypt →
/// (optional) per-recipient hybrid encrypt → enqueue.
async fn send_message(
    session: &mut SessionState,
    client: &node_service::Client,
    conv_id: &ConversationId,
    text: &str,
) -> anyhow::Result<()> {
    let my_key = session.identity_bytes();
    let identity = Arc::clone(&session.identity);

    let member = session
        .members
        .get_mut(conv_id)
        .context("no GroupMember for this conversation")?;

    // Wrap in structured AppMessage format.
    let app_payload = serialize_chat(text.as_bytes(), None)
        .context("serialize app message")?;

    // Metadata protection: seal + pad.
    let sealed = quicprochat_core::sealed_sender::seal(&identity, &app_payload);
    let padded = quicprochat_core::padding::pad(&sealed);

    let ct = member.send_message(&padded).context("MLS encrypt")?;

    // Everyone in the group except ourselves.
    let recipients: Vec<Vec<u8>> = member
        .member_identities()
        .into_iter()
        .filter(|id| id.as_slice() != my_key.as_slice())
        .collect();

    // NOTE(review): sequential per-recipient fetch+enqueue; a failure midway
    // leaves earlier recipients already queued. Presumably acceptable since
    // MLS ratchets tolerate redelivery — confirm.
    for recipient_key in &recipients {
        let peer_hybrid_pk = fetch_hybrid_key(client, recipient_key).await?;
        // Hybrid-wrap only when the recipient has published a hybrid key.
        let payload = if let Some(ref pk) = peer_hybrid_pk {
            hybrid_encrypt(pk, &ct, b"", b"").context("hybrid encrypt")?
        } else {
            ct.clone()
        };
        enqueue(client, recipient_key, &payload).await?;
    }

    // Extract message_id from what we just serialized.
    let msg_id = parse_app_msg(&app_payload)
        .ok()
        .and_then(|(_, m)| match m {
            AppMessage::Chat { message_id, .. } => Some(message_id),
            _ => None,
        });

    // Save outgoing message.
    let stored = StoredMessage {
        conversation_id: conv_id.clone(),
        message_id: msg_id,
        sender_key: my_key,
        sender_name: Some("you".into()),
        body: text.to_string(),
        msg_type: "chat".into(),
        ref_msg_id: None,
        timestamp_ms: now_ms(),
        is_outgoing: true,
    };
    session.conv_store.save_message(&stored)?;
    session.conv_store.update_activity(conv_id, now_ms())?;
    session.save_member(conv_id)?;

    Ok(())
}
|
||||
|
||||
// ── TUI entry point ───────────────────────────────────────────────────────────
|
||||
|
||||
/// Entry point for `qpc tui`. Sets up the terminal, runs the event loop, and
/// restores the terminal on exit.
///
/// Order matters: auth token resolution and session/RPC setup happen *before*
/// raw mode is enabled so their errors print normally; after `tui_loop`
/// returns, the terminal is restored unconditionally before any `?` can fire.
pub async fn run_tui(
    state_path: &Path,
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    password: Option<&str>,
    username: Option<&str>,
    opaque_password: Option<&str>,
    access_token: &str,
    device_id: Option<&str>,
) -> anyhow::Result<()> {
    // ── Auth ──────────────────────────────────────────────────────────────────
    let resolved_token = resolve_tui_access_token(
        state_path,
        server,
        ca_cert,
        server_name,
        password,
        username,
        opaque_password,
        access_token,
    )
    .await?;

    // Tokens are normally hex; anything else is passed through as raw bytes.
    let token_bytes = hex::decode(&resolved_token)
        .unwrap_or_else(|_| resolved_token.into_bytes());
    let auth_ctx = ClientAuth::from_raw(token_bytes, device_id.map(String::from));
    init_auth(auth_ctx);

    // ── Session + RPC ─────────────────────────────────────────────────────────
    let mut session = SessionState::load(state_path, password)?;
    let client = connect_node(server, ca_cert, server_name).await?;

    // Auto-upload KeyPackage (best effort — failure shouldn't block the UI).
    let _ = auto_upload_keys_tui(&session, &client).await;

    // ── Terminal setup ────────────────────────────────────────────────────────
    enable_raw_mode().context("enable raw mode")?;
    let mut stdout = std::io::stdout();
    execute!(stdout, EnterAlternateScreen, EnableMouseCapture)
        .context("enter alternate screen")?;
    let backend = CrosstermBackend::new(stdout);
    let mut terminal = Terminal::new(backend).context("create terminal")?;

    let result = tui_loop(&mut terminal, &mut session, client).await;

    // ── Terminal cleanup (always restore, even on error) ───────────────────
    disable_raw_mode().ok();
    execute!(
        terminal.backend_mut(),
        LeaveAlternateScreen,
        DisableMouseCapture
    )
    .ok();
    terminal.show_cursor().ok();

    // NOTE(review): if save_all fails, its error replaces any error held in
    // `result` — verify that is the intended precedence.
    session.save_all()?;

    result
}
|
||||
|
||||
async fn tui_loop(
|
||||
terminal: &mut Terminal<CrosstermBackend<std::io::Stdout>>,
|
||||
session: &mut SessionState,
|
||||
client: node_service::Client,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut app = App::new(session)?;
|
||||
app.reload_messages(session)?;
|
||||
|
||||
let (event_tx, mut event_rx) = mpsc::channel::<TuiEvent>(256);
|
||||
|
||||
// ── Keyboard event task ───────────────────────────────────────────────────
|
||||
let key_tx = event_tx.clone();
|
||||
tokio::task::spawn_local(async move {
|
||||
loop {
|
||||
// crossterm event polling — 50ms timeout so we can tick.
|
||||
match event::poll(Duration::from_millis(50)) {
|
||||
Ok(true) => {
|
||||
if let Ok(Event::Key(key)) = event::read() {
|
||||
if key_tx.send(TuiEvent::Key(key)).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(false) => {
|
||||
// No event — send a tick so the UI redraws.
|
||||
if key_tx.send(TuiEvent::Tick).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// ── Message poll task ─────────────────────────────────────────────────────
|
||||
// Clone session state for the poll task (it needs its own SessionState).
|
||||
let poll_session = SessionState::load(
|
||||
&session.state_path.clone(),
|
||||
session.password.as_ref().map(|p| p.as_str()),
|
||||
)?;
|
||||
let poll_tx = event_tx.clone();
|
||||
tokio::task::spawn_local(poll_task(poll_session, client.clone(), poll_tx));
|
||||
|
||||
// ── Main loop ─────────────────────────────────────────────────────────────
|
||||
loop {
|
||||
terminal.draw(|f| ui(f, &app)).context("draw")?;
|
||||
|
||||
match event_rx.recv().await {
|
||||
None => break,
|
||||
Some(TuiEvent::Tick) => {
|
||||
// Just redraw.
|
||||
}
|
||||
Some(TuiEvent::NewMessages(msgs)) => {
|
||||
app.append_messages(msgs);
|
||||
}
|
||||
Some(TuiEvent::Key(key)) => {
|
||||
match key.code {
|
||||
KeyCode::Char('c') if key.modifiers.contains(KeyModifiers::CONTROL) => {
|
||||
app.should_quit = true;
|
||||
}
|
||||
KeyCode::Char('q') if app.input.is_empty() => {
|
||||
app.should_quit = true;
|
||||
}
|
||||
KeyCode::Enter => {
|
||||
let text = app.input.trim().to_string();
|
||||
if !text.is_empty() {
|
||||
app.input.clear();
|
||||
if let Some(conv_id) = app.active_conv_id().cloned() {
|
||||
match send_message(session, &client, &conv_id, &text).await {
|
||||
Ok(()) => {
|
||||
// Add to in-memory list immediately.
|
||||
app.messages.push(DisplayMessage {
|
||||
sender: format!("me({})", app.identity_short),
|
||||
body: text,
|
||||
timestamp_ms: now_ms(),
|
||||
is_outgoing: true,
|
||||
});
|
||||
}
|
||||
Err(_e) => {
|
||||
// Silently drop — user will see nothing happened.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
KeyCode::Char(c) => {
|
||||
app.input.push(c);
|
||||
}
|
||||
KeyCode::Backspace => {
|
||||
app.input.pop();
|
||||
}
|
||||
KeyCode::Up => {
|
||||
app.scroll_up();
|
||||
}
|
||||
KeyCode::Down => {
|
||||
app.scroll_down();
|
||||
}
|
||||
KeyCode::Tab => {
|
||||
if key.modifiers.contains(KeyModifiers::SHIFT) {
|
||||
app.select_prev_channel(session);
|
||||
} else {
|
||||
app.select_next_channel(session);
|
||||
}
|
||||
app.reload_messages(session)?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if app.should_quit {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Startup helpers ───────────────────────────────────────────────────────────
|
||||
|
||||
async fn auto_upload_keys_tui(
|
||||
session: &SessionState,
|
||||
client: &node_service::Client,
|
||||
) -> anyhow::Result<()> {
|
||||
let ks_path = session.state_path.with_extension("pending.ks");
|
||||
let ks = DiskKeyStore::persistent(&ks_path).unwrap_or_else(|_| DiskKeyStore::ephemeral());
|
||||
let mut member = GroupMember::new_with_state(
|
||||
Arc::clone(&session.identity),
|
||||
ks,
|
||||
None,
|
||||
false,
|
||||
);
|
||||
let kp_bytes = member.generate_key_package().context("generate KeyPackage")?;
|
||||
let id_key = session.identity.public_key_bytes();
|
||||
upload_key_package(client, &id_key, &kp_bytes).await?;
|
||||
if let Some(ref hkp) = session.hybrid_kp {
|
||||
upload_hybrid_key(client, &id_key, &hkp.public_key()).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn resolve_tui_access_token(
|
||||
state_path: &Path,
|
||||
server: &str,
|
||||
ca_cert: &Path,
|
||||
server_name: &str,
|
||||
state_password: Option<&str>,
|
||||
username: Option<&str>,
|
||||
opaque_password: Option<&str>,
|
||||
cli_access_token: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
if !cli_access_token.is_empty() {
|
||||
return Ok(cli_access_token.to_string());
|
||||
}
|
||||
|
||||
if let Some(cached) = load_cached_session(state_path, state_password) {
|
||||
return Ok(cached.token_hex);
|
||||
}
|
||||
|
||||
let username = match username {
|
||||
Some(u) => u.to_string(),
|
||||
None => {
|
||||
use std::io::Write;
|
||||
eprint!("Username: ");
|
||||
std::io::stderr().flush().ok();
|
||||
let mut input = String::new();
|
||||
std::io::stdin()
|
||||
.read_line(&mut input)
|
||||
.context("failed to read username")?;
|
||||
let trimmed = input.trim().to_string();
|
||||
anyhow::ensure!(!trimmed.is_empty(), "username is required");
|
||||
trimmed
|
||||
}
|
||||
};
|
||||
|
||||
let opaque_password = match opaque_password {
|
||||
Some(p) => p.to_string(),
|
||||
None => rpassword::read_password().context("failed to read password")?,
|
||||
};
|
||||
|
||||
let state = load_or_init_state(state_path, state_password)?;
|
||||
let identity = IdentityKeypair::from_seed(state.identity_seed);
|
||||
let identity_key = identity.public_key_bytes().to_vec();
|
||||
|
||||
let node_client = connect_node(server, ca_cert, server_name).await?;
|
||||
|
||||
match opaque_register(&node_client, &username, &opaque_password, Some(&identity_key)).await {
|
||||
Ok(()) | Err(_) => {}
|
||||
}
|
||||
|
||||
let token_bytes = opaque_login(&node_client, &username, &opaque_password, &identity_key)
|
||||
.await
|
||||
.context("OPAQUE login failed")?;
|
||||
let token_hex = hex::encode(&token_bytes);
|
||||
|
||||
save_cached_session(state_path, &username, &token_hex, state_password)?;
|
||||
|
||||
Ok(token_hex)
|
||||
}
|
||||
1205
crates/quicprochat-client/src/client/v2_repl.rs
Normal file
1205
crates/quicprochat-client/src/client/v2_repl.rs
Normal file
File diff suppressed because it is too large
Load Diff
1282
crates/quicprochat-client/src/client/v2_tui.rs
Normal file
1282
crates/quicprochat-client/src/client/v2_tui.rs
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user