feat: v2 Phase 1 — foundation, proto schemas, RPC framework, SDK skeleton
New workspace structure with 9 crates. Adds: - proto/qpq/v1/*.proto: 11 protobuf schemas covering all 33 RPC methods - quicproquo-proto: dual codegen (capnp legacy + prost v2) - quicproquo-rpc: QUIC RPC framework (framing, server, client, middleware) - quicproquo-sdk: client SDK (QpqClient, events, conversation store) - quicproquo-server/domain/: protocol-agnostic domain types and services - justfile: build commands Wire format: [method_id:u16][req_id:u32][len:u32][protobuf] per QUIC stream. All 151 existing tests pass. Backward compatible with v1 capnp code.
This commit is contained in:
2918
Cargo.lock
generated
2918
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
24
Cargo.toml
24
Cargo.toml
@@ -5,16 +5,12 @@ members = [
|
|||||||
"crates/quicproquo-proto",
|
"crates/quicproquo-proto",
|
||||||
"crates/quicproquo-plugin-api",
|
"crates/quicproquo-plugin-api",
|
||||||
"crates/quicproquo-kt",
|
"crates/quicproquo-kt",
|
||||||
|
"crates/quicproquo-rpc",
|
||||||
|
"crates/quicproquo-sdk",
|
||||||
"crates/quicproquo-server",
|
"crates/quicproquo-server",
|
||||||
"crates/quicproquo-client",
|
"crates/quicproquo-client",
|
||||||
"crates/quicproquo-bot",
|
# P2P crate uses iroh (~90 extra deps). Only compiled when the `mesh`
|
||||||
"crates/quicproquo-gen",
|
# feature is enabled on quicproquo-client.
|
||||||
"crates/quicproquo-gui",
|
|
||||||
"crates/quicproquo-mobile",
|
|
||||||
"crates/quicproquo-ffi",
|
|
||||||
# P2P crate uses iroh (~90 extra deps). Kept in the workspace so it can be
|
|
||||||
# referenced as an optional dependency; only compiled when the `mesh` feature
|
|
||||||
# is enabled on quicproquo-client.
|
|
||||||
"crates/quicproquo-p2p",
|
"crates/quicproquo-p2p",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -29,7 +25,6 @@ openmls_traits = { version = "0.2" }
|
|||||||
# duplicate Serialize trait versions in the dependency graph.
|
# duplicate Serialize trait versions in the dependency graph.
|
||||||
tls_codec = { version = "0.3", features = ["derive"] }
|
tls_codec = { version = "0.3", features = ["derive"] }
|
||||||
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
|
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
|
||||||
# All three parameter sets (512/768/1024) are compiled in by default — no feature flag needed.
|
|
||||||
ml-kem = { version = "0.2" }
|
ml-kem = { version = "0.2" }
|
||||||
x25519-dalek = { version = "2", features = ["static_secrets"] }
|
x25519-dalek = { version = "2", features = ["static_secrets"] }
|
||||||
ed25519-dalek = { version = "2", features = ["rand_core"] }
|
ed25519-dalek = { version = "2", features = ["rand_core"] }
|
||||||
@@ -47,7 +42,12 @@ serde = { version = "1", features = ["derive"] }
|
|||||||
serde_json = { version = "1" }
|
serde_json = { version = "1" }
|
||||||
bincode = { version = "1" }
|
bincode = { version = "1" }
|
||||||
|
|
||||||
# ── Serialisation + RPC ───────────────────────────────────────────────────────
|
# ── Serialisation (v2: protobuf) ─────────────────────────────────────────────
|
||||||
|
prost = { version = "0.13" }
|
||||||
|
prost-types = { version = "0.13" }
|
||||||
|
prost-build = { version = "0.13" }
|
||||||
|
|
||||||
|
# ── Serialisation (v1 legacy — capnp, used by existing server/client) ────────
|
||||||
capnp = { version = "0.19" }
|
capnp = { version = "0.19" }
|
||||||
capnp-rpc = { version = "0.19" }
|
capnp-rpc = { version = "0.19" }
|
||||||
|
|
||||||
@@ -60,11 +60,15 @@ quinn-proto = { version = "0.11" }
|
|||||||
rustls = { version = "0.23", default-features = false, features = ["std", "ring"] }
|
rustls = { version = "0.23", default-features = false, features = ["std", "ring"] }
|
||||||
rcgen = { version = "0.13" }
|
rcgen = { version = "0.13" }
|
||||||
|
|
||||||
|
# ── Middleware ────────────────────────────────────────────────────────────────
|
||||||
|
tower = { version = "0.5", features = ["util", "limit", "timeout"] }
|
||||||
|
|
||||||
# ── Database ─────────────────────────────────────────────────────────────
|
# ── Database ─────────────────────────────────────────────────────────────
|
||||||
rusqlite = { version = "0.31", features = ["bundled-sqlcipher"] }
|
rusqlite = { version = "0.31", features = ["bundled-sqlcipher"] }
|
||||||
|
|
||||||
# ── Encoding ─────────────────────────────────────────────────────────────────
|
# ── Encoding ─────────────────────────────────────────────────────────────────
|
||||||
hex = { version = "0.4" }
|
hex = { version = "0.4" }
|
||||||
|
bytes = { version = "1" }
|
||||||
|
|
||||||
# ── Server utilities ──────────────────────────────────────────────────────────
|
# ── Server utilities ──────────────────────────────────────────────────────────
|
||||||
dashmap = { version = "5" }
|
dashmap = { version = "5" }
|
||||||
|
|||||||
@@ -1,22 +1,28 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "quicproquo-proto"
|
name = "quicproquo-proto"
|
||||||
version = "0.1.0"
|
version = "0.2.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicproquo. No crypto, no I/O."
|
description = "Protocol types for quicproquo — v1 Cap'n Proto (legacy) + v2 Protobuf (prost)"
|
||||||
license = "MIT"
|
|
||||||
|
|
||||||
# build.rs invokes capnpc to generate Rust source from .capnp schemas.
|
build = "build.rs"
|
||||||
build = "build.rs"
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
# v1 legacy (Cap'n Proto) — used by existing server/client until rewrite
|
||||||
capnp = { workspace = true }
|
capnp = { workspace = true }
|
||||||
|
|
||||||
|
# v2 (Protobuf via prost) — new RPC types
|
||||||
|
prost = { workspace = true }
|
||||||
|
prost-types = { workspace = true }
|
||||||
|
bytes = { workspace = true }
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
capnpc = { workspace = true }
|
||||||
|
prost-build = { workspace = true }
|
||||||
|
protobuf-src = "2"
|
||||||
|
|
||||||
[lints.rust]
|
[lints.rust]
|
||||||
unsafe_code = "warn"
|
unsafe_code = "warn"
|
||||||
|
|
||||||
[lints.clippy]
|
[lints.clippy]
|
||||||
# Generated Cap'n Proto code uses patterns that trigger clippy lints.
|
# Generated Cap'n Proto code uses patterns that trigger clippy lints.
|
||||||
unwrap_used = "allow"
|
unwrap_used = "allow"
|
||||||
|
|
||||||
[build-dependencies]
|
|
||||||
capnpc = { workspace = true }
|
|
||||||
|
|||||||
@@ -1,51 +1,30 @@
|
|||||||
//! Build script for quicproquo-proto.
|
//! Build script for quicproquo-proto.
|
||||||
//!
|
//!
|
||||||
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
|
//! Runs two code generators:
|
||||||
//! located in the workspace-root `schemas/` directory.
|
//! 1. Cap'n Proto (v1 legacy) — from `schemas/*.capnp`
|
||||||
//!
|
//! 2. Protobuf/prost (v2) — from `proto/qpq/v1/*.proto`
|
||||||
//! # Prerequisites
|
|
||||||
//!
|
|
||||||
//! The `capnp` CLI must be installed and on `PATH`.
|
|
||||||
//!
|
|
||||||
//! Debian/Ubuntu: apt-get install capnproto
|
|
||||||
//! macOS: brew install capnp
|
|
||||||
//! Docker: see docker/Dockerfile
|
|
||||||
|
|
||||||
use std::{env, path::PathBuf};
|
use std::{env, path::PathBuf};
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
let manifest_dir =
|
// Vendor protoc from protobuf-src so the build doesn't require system protoc.
|
||||||
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
|
std::env::set_var("PROTOC", protobuf_src::protoc());
|
||||||
|
|
||||||
// Workspace root is two levels above this crate (quicproquo/crates/quicproquo-proto).
|
let manifest_dir =
|
||||||
|
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR"));
|
||||||
let workspace_root = manifest_dir
|
let workspace_root = manifest_dir
|
||||||
.join("../..")
|
.join("../..")
|
||||||
.canonicalize()
|
.canonicalize()
|
||||||
.expect("could not canonicalize workspace root path");
|
.expect("canonicalize workspace root");
|
||||||
|
|
||||||
|
// ── v1: Cap'n Proto codegen ──────────────────────────────────────────────
|
||||||
let schemas_dir = workspace_root.join("schemas");
|
let schemas_dir = workspace_root.join("schemas");
|
||||||
|
|
||||||
// Re-run this build script whenever any schema file changes.
|
for schema in &["auth.capnp", "delivery.capnp", "node.capnp", "federation.capnp"] {
|
||||||
println!(
|
println!("cargo:rerun-if-changed={}", schemas_dir.join(schema).display());
|
||||||
"cargo:rerun-if-changed={}",
|
}
|
||||||
schemas_dir.join("auth.capnp").display()
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"cargo:rerun-if-changed={}",
|
|
||||||
schemas_dir.join("delivery.capnp").display()
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"cargo:rerun-if-changed={}",
|
|
||||||
schemas_dir.join("node.capnp").display()
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"cargo:rerun-if-changed={}",
|
|
||||||
schemas_dir.join("federation.capnp").display()
|
|
||||||
);
|
|
||||||
|
|
||||||
capnpc::CompilerCommand::new()
|
capnpc::CompilerCommand::new()
|
||||||
// Treat `schemas/` as the include root so that inter-schema imports
|
|
||||||
// resolve correctly.
|
|
||||||
.src_prefix(&schemas_dir)
|
.src_prefix(&schemas_dir)
|
||||||
.file(schemas_dir.join("auth.capnp"))
|
.file(schemas_dir.join("auth.capnp"))
|
||||||
.file(schemas_dir.join("delivery.capnp"))
|
.file(schemas_dir.join("delivery.capnp"))
|
||||||
@@ -56,4 +35,32 @@ fn main() {
|
|||||||
"Cap'n Proto schema compilation failed. \
|
"Cap'n Proto schema compilation failed. \
|
||||||
Is `capnp` installed? (apt-get install capnproto / brew install capnp)",
|
Is `capnp` installed? (apt-get install capnproto / brew install capnp)",
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// ── v2: Protobuf/prost codegen ───────────────────────────────────────────
|
||||||
|
let proto_dir = workspace_root.join("proto");
|
||||||
|
|
||||||
|
let proto_files = [
|
||||||
|
"qpq/v1/common.proto",
|
||||||
|
"qpq/v1/auth.proto",
|
||||||
|
"qpq/v1/delivery.proto",
|
||||||
|
"qpq/v1/keys.proto",
|
||||||
|
"qpq/v1/channel.proto",
|
||||||
|
"qpq/v1/user.proto",
|
||||||
|
"qpq/v1/blob.proto",
|
||||||
|
"qpq/v1/device.proto",
|
||||||
|
"qpq/v1/p2p.proto",
|
||||||
|
"qpq/v1/federation.proto",
|
||||||
|
"qpq/v1/push.proto",
|
||||||
|
];
|
||||||
|
|
||||||
|
let full_paths: Vec<PathBuf> = proto_files.iter().map(|f| proto_dir.join(f)).collect();
|
||||||
|
|
||||||
|
for path in &full_paths {
|
||||||
|
println!("cargo:rerun-if-changed={}", path.display());
|
||||||
|
}
|
||||||
|
|
||||||
|
prost_build::Config::new()
|
||||||
|
.out_dir(PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR")))
|
||||||
|
.compile_protos(&full_paths, &[&proto_dir])
|
||||||
|
.expect("prost compile_protos failed");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,56 +1,38 @@
|
|||||||
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicproquo.
|
//! Protocol types for quicproquo.
|
||||||
|
//!
|
||||||
|
//! This crate contains both:
|
||||||
|
//! - **v1 (legacy)**: Cap'n Proto generated types from `schemas/*.capnp`
|
||||||
|
//! - **v2**: Protobuf generated types from `proto/qpq/v1/*.proto`
|
||||||
//!
|
//!
|
||||||
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
|
|
||||||
#![allow(unused_parens)]
|
|
||||||
|
|
||||||
//! # Design constraints
|
//! # Design constraints
|
||||||
//!
|
//!
|
||||||
//! This crate is intentionally restricted:
|
|
||||||
//! - **No crypto** — key material never enters this crate.
|
//! - **No crypto** — key material never enters this crate.
|
||||||
//! - **No I/O** — callers own transport; this crate only converts bytes ↔ types.
|
//! - **No I/O** — callers own transport; this crate only converts bytes <-> types.
|
||||||
//! - **No async** — pure synchronous data-layer code.
|
//! - **No async** — pure synchronous data-layer code.
|
||||||
//!
|
|
||||||
//! # Generated code
|
|
||||||
//!
|
|
||||||
//! `build.rs` invokes `capnpc` at compile time and writes generated Rust source
|
|
||||||
//! into `$OUT_DIR`. The `include!` macros below splice that code in as a module.
|
|
||||||
|
|
||||||
// ── Generated types ───────────────────────────────────────────────────────────
|
// ════════════════════════════════════════════════════════════════════════════
|
||||||
|
// v1 (legacy): Cap'n Proto generated types
|
||||||
|
// ════════════════════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
#![allow(unused_parens)]
|
||||||
|
|
||||||
/// Cap'n Proto generated types for `schemas/auth.capnp`.
|
|
||||||
///
|
|
||||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
|
||||||
pub mod auth_capnp {
|
pub mod auth_capnp {
|
||||||
include!(concat!(env!("OUT_DIR"), "/auth_capnp.rs"));
|
include!(concat!(env!("OUT_DIR"), "/auth_capnp.rs"));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Cap'n Proto generated types for `schemas/delivery.capnp`.
|
|
||||||
///
|
|
||||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
|
||||||
pub mod delivery_capnp {
|
pub mod delivery_capnp {
|
||||||
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
|
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Cap'n Proto generated types for `schemas/node.capnp`.
|
|
||||||
///
|
|
||||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
|
||||||
pub mod node_capnp {
|
pub mod node_capnp {
|
||||||
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
|
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Cap'n Proto generated types for `schemas/federation.capnp`.
|
|
||||||
///
|
|
||||||
/// Do not edit this module by hand — it is entirely machine-generated.
|
|
||||||
pub mod federation_capnp {
|
pub mod federation_capnp {
|
||||||
include!(concat!(env!("OUT_DIR"), "/federation_capnp.rs"));
|
include!(concat!(env!("OUT_DIR"), "/federation_capnp.rs"));
|
||||||
}
|
}
|
||||||
|
|
||||||
// ── Low-level byte ↔ message conversions ──────────────────────────────────────
|
|
||||||
|
|
||||||
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
|
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
|
||||||
///
|
|
||||||
/// The output includes the segment table header. For transport, the
|
|
||||||
/// `quicproquo-core` frame codec prepends a 4-byte little-endian length field.
|
|
||||||
pub fn to_bytes<A: capnp::message::Allocator>(
|
pub fn to_bytes<A: capnp::message::Allocator>(
|
||||||
msg: &capnp::message::Builder<A>,
|
msg: &capnp::message::Builder<A>,
|
||||||
) -> Result<Vec<u8>, capnp::Error> {
|
) -> Result<Vec<u8>, capnp::Error> {
|
||||||
@@ -59,25 +41,17 @@ pub fn to_bytes<A: capnp::message::Allocator>(
|
|||||||
Ok(buf)
|
Ok(buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Deserialise unpacked wire bytes into a message with owned segments.
|
/// Deserialise unpacked wire bytes into a Cap'n Proto message.
|
||||||
///
|
|
||||||
/// Uses a stricter default traversal limit of 1 Mi words (~8 MiB) instead
|
|
||||||
/// of the Cap'n Proto default of 64 MiB, reducing DoS amplification from
|
|
||||||
/// untrusted input. Use [`from_bytes_with_options`] if you need a custom limit.
|
|
||||||
pub fn from_bytes(
|
pub fn from_bytes(
|
||||||
bytes: &[u8],
|
bytes: &[u8],
|
||||||
) -> Result<capnp::message::Reader<capnp::serialize::OwnedSegments>, capnp::Error> {
|
) -> Result<capnp::message::Reader<capnp::serialize::OwnedSegments>, capnp::Error> {
|
||||||
let mut options = capnp::message::ReaderOptions::new();
|
let mut options = capnp::message::ReaderOptions::new();
|
||||||
options.traversal_limit_in_words(Some(1_048_576)); // 1 Mi words = ~8 MiB
|
options.traversal_limit_in_words(Some(1_048_576));
|
||||||
let mut cursor = std::io::Cursor::new(bytes);
|
let mut cursor = std::io::Cursor::new(bytes);
|
||||||
capnp::serialize::read_message(&mut cursor, options)
|
capnp::serialize::read_message(&mut cursor, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Deserialise unpacked wire bytes with caller-specified [`ReaderOptions`].
|
/// Deserialise with custom [`ReaderOptions`].
|
||||||
///
|
|
||||||
/// Prefer [`from_bytes`] for typical use. Use this variant when you need to
|
|
||||||
/// raise the traversal limit for large messages (e.g. blob transfers) or
|
|
||||||
/// lower it further for tighter validation.
|
|
||||||
pub fn from_bytes_with_options(
|
pub fn from_bytes_with_options(
|
||||||
bytes: &[u8],
|
bytes: &[u8],
|
||||||
options: capnp::message::ReaderOptions,
|
options: capnp::message::ReaderOptions,
|
||||||
@@ -85,3 +59,79 @@ pub fn from_bytes_with_options(
|
|||||||
let mut cursor = std::io::Cursor::new(bytes);
|
let mut cursor = std::io::Cursor::new(bytes);
|
||||||
capnp::serialize::read_message(&mut cursor, options)
|
capnp::serialize::read_message(&mut cursor, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ════════════════════════════════════════════════════════════════════════════
|
||||||
|
// v2: Protobuf (prost) generated types
|
||||||
|
// ════════════════════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
/// Protobuf types for the v2 RPC protocol.
|
||||||
|
pub mod qpq {
|
||||||
|
pub mod v1 {
|
||||||
|
include!(concat!(env!("OUT_DIR"), "/qpq.v1.rs"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Method ID constants for the v2 RPC dispatch table.
|
||||||
|
pub mod method_ids {
|
||||||
|
// Auth (100-103)
|
||||||
|
pub const OPAQUE_REGISTER_START: u16 = 100;
|
||||||
|
pub const OPAQUE_REGISTER_FINISH: u16 = 101;
|
||||||
|
pub const OPAQUE_LOGIN_START: u16 = 102;
|
||||||
|
pub const OPAQUE_LOGIN_FINISH: u16 = 103;
|
||||||
|
|
||||||
|
// Delivery (200-205)
|
||||||
|
pub const ENQUEUE: u16 = 200;
|
||||||
|
pub const FETCH: u16 = 201;
|
||||||
|
pub const FETCH_WAIT: u16 = 202;
|
||||||
|
pub const PEEK: u16 = 203;
|
||||||
|
pub const ACK: u16 = 204;
|
||||||
|
pub const BATCH_ENQUEUE: u16 = 205;
|
||||||
|
|
||||||
|
// Keys (300-304)
|
||||||
|
pub const UPLOAD_KEY_PACKAGE: u16 = 300;
|
||||||
|
pub const FETCH_KEY_PACKAGE: u16 = 301;
|
||||||
|
pub const UPLOAD_HYBRID_KEY: u16 = 302;
|
||||||
|
pub const FETCH_HYBRID_KEY: u16 = 303;
|
||||||
|
pub const FETCH_HYBRID_KEYS: u16 = 304;
|
||||||
|
|
||||||
|
// Channel (400)
|
||||||
|
pub const CREATE_CHANNEL: u16 = 400;
|
||||||
|
|
||||||
|
// User (500-501)
|
||||||
|
pub const RESOLVE_USER: u16 = 500;
|
||||||
|
pub const RESOLVE_IDENTITY: u16 = 501;
|
||||||
|
|
||||||
|
// Blob (600-601)
|
||||||
|
pub const UPLOAD_BLOB: u16 = 600;
|
||||||
|
pub const DOWNLOAD_BLOB: u16 = 601;
|
||||||
|
|
||||||
|
// Device (700-702)
|
||||||
|
pub const REGISTER_DEVICE: u16 = 700;
|
||||||
|
pub const LIST_DEVICES: u16 = 701;
|
||||||
|
pub const REVOKE_DEVICE: u16 = 702;
|
||||||
|
|
||||||
|
// P2P (800-802)
|
||||||
|
pub const PUBLISH_ENDPOINT: u16 = 800;
|
||||||
|
pub const RESOLVE_ENDPOINT: u16 = 801;
|
||||||
|
pub const HEALTH: u16 = 802;
|
||||||
|
|
||||||
|
// Federation (900-905)
|
||||||
|
pub const RELAY_ENQUEUE: u16 = 900;
|
||||||
|
pub const RELAY_BATCH_ENQUEUE: u16 = 901;
|
||||||
|
pub const PROXY_FETCH_KEY_PACKAGE: u16 = 902;
|
||||||
|
pub const PROXY_FETCH_HYBRID_KEY: u16 = 903;
|
||||||
|
pub const PROXY_RESOLVE_USER: u16 = 904;
|
||||||
|
pub const FEDERATION_HEALTH: u16 = 905;
|
||||||
|
|
||||||
|
// Account (950)
|
||||||
|
pub const DELETE_ACCOUNT: u16 = 950;
|
||||||
|
|
||||||
|
// Push event types (1000+)
|
||||||
|
pub const PUSH_NEW_MESSAGE: u16 = 1000;
|
||||||
|
pub const PUSH_TYPING: u16 = 1001;
|
||||||
|
pub const PUSH_PRESENCE: u16 = 1002;
|
||||||
|
pub const PUSH_MEMBERSHIP: u16 = 1003;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub use prost;
|
||||||
|
pub use bytes;
|
||||||
|
|||||||
25
crates/quicproquo-rpc/Cargo.toml
Normal file
25
crates/quicproquo-rpc/Cargo.toml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
[package]
|
||||||
|
name = "quicproquo-rpc"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "QUIC RPC framework for quicproquo v2 — framing, dispatch, tower middleware"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
quicproquo-proto = { path = "../quicproquo-proto" }
|
||||||
|
prost = { workspace = true }
|
||||||
|
bytes = { workspace = true }
|
||||||
|
quinn = { workspace = true }
|
||||||
|
rustls = { workspace = true }
|
||||||
|
rcgen = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
futures = { workspace = true }
|
||||||
|
tower = { workspace = true }
|
||||||
|
tracing = { workspace = true }
|
||||||
|
thiserror = { workspace = true }
|
||||||
|
dashmap = { workspace = true }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
tokio = { workspace = true, features = ["test-util"] }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
175
crates/quicproquo-rpc/src/client.rs
Normal file
175
crates/quicproquo-rpc/src/client.rs
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
//! QUIC RPC client — connect to server, send requests, receive push events.
|
||||||
|
|
||||||
|
use std::sync::atomic::{AtomicU32, Ordering};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use bytes::{Bytes, BytesMut};
|
||||||
|
use quinn::{Connection, Endpoint};
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
use tracing::{debug, warn};
|
||||||
|
|
||||||
|
use crate::error::{RpcError, RpcStatus};
|
||||||
|
use crate::framing::{PushFrame, RequestFrame, ResponseFrame};
|
||||||
|
|
||||||
|
/// Configuration for the RPC client.
|
||||||
|
pub struct RpcClientConfig {
|
||||||
|
/// Server address to connect to.
|
||||||
|
pub server_addr: std::net::SocketAddr,
|
||||||
|
/// Server name for TLS verification.
|
||||||
|
pub server_name: String,
|
||||||
|
/// TLS client config (rustls).
|
||||||
|
pub tls_config: Arc<rustls::ClientConfig>,
|
||||||
|
/// ALPN protocol.
|
||||||
|
pub alpn: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A QUIC RPC client connection.
|
||||||
|
pub struct RpcClient {
|
||||||
|
connection: Connection,
|
||||||
|
next_request_id: AtomicU32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RpcClient {
|
||||||
|
/// Connect to the RPC server.
|
||||||
|
pub async fn connect(config: RpcClientConfig) -> Result<Self, RpcError> {
|
||||||
|
let mut tls = (*config.tls_config).clone();
|
||||||
|
tls.alpn_protocols = vec![config.alpn];
|
||||||
|
let quic_tls = quinn::crypto::rustls::QuicClientConfig::try_from(tls)
|
||||||
|
.map_err(|e| RpcError::Connection(format!("TLS config: {e}")))?;
|
||||||
|
|
||||||
|
let mut endpoint = Endpoint::client("0.0.0.0:0".parse().expect("valid addr"))
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new(quic_tls)));
|
||||||
|
|
||||||
|
let connection = endpoint
|
||||||
|
.connect(config.server_addr, &config.server_name)
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
|
||||||
|
debug!(remote = %connection.remote_address(), "connected to RPC server");
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
connection,
|
||||||
|
next_request_id: AtomicU32::new(1),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send an RPC request and wait for the response.
|
||||||
|
pub async fn call(
|
||||||
|
&self,
|
||||||
|
method_id: u16,
|
||||||
|
payload: Bytes,
|
||||||
|
) -> Result<Bytes, RpcError> {
|
||||||
|
let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed);
|
||||||
|
|
||||||
|
let (mut send, mut recv) = self
|
||||||
|
.connection
|
||||||
|
.open_bi()
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
|
||||||
|
// Send request.
|
||||||
|
let frame = RequestFrame {
|
||||||
|
method_id,
|
||||||
|
request_id,
|
||||||
|
payload,
|
||||||
|
};
|
||||||
|
let encoded = frame.encode();
|
||||||
|
send.write_all(&encoded)
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
send.finish().map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
|
||||||
|
// Read response.
|
||||||
|
let mut buf = BytesMut::new();
|
||||||
|
while let Some(chunk) = recv
|
||||||
|
.read_chunk(65536, true)
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?
|
||||||
|
{
|
||||||
|
buf.extend_from_slice(&chunk.bytes);
|
||||||
|
if buf.len() > crate::framing::MAX_PAYLOAD_SIZE + crate::framing::RESPONSE_HEADER_SIZE {
|
||||||
|
return Err(RpcError::PayloadTooLarge {
|
||||||
|
size: buf.len(),
|
||||||
|
max: crate::framing::MAX_PAYLOAD_SIZE,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let response = ResponseFrame::decode(&mut buf)?
|
||||||
|
.ok_or_else(|| RpcError::Decode("incomplete response frame".into()))?;
|
||||||
|
|
||||||
|
if response.request_id != request_id {
|
||||||
|
return Err(RpcError::Decode(format!(
|
||||||
|
"request_id mismatch: sent {request_id}, got {}",
|
||||||
|
response.request_id
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
match RpcStatus::from_u8(response.status) {
|
||||||
|
Some(RpcStatus::Ok) => Ok(response.payload),
|
||||||
|
Some(status) => Err(RpcError::Server {
|
||||||
|
status,
|
||||||
|
message: String::from_utf8_lossy(&response.payload).into_owned(),
|
||||||
|
}),
|
||||||
|
None => Err(RpcError::Decode(format!(
|
||||||
|
"unknown status byte: {}",
|
||||||
|
response.status
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Subscribe to server-push events. Returns a receiver channel.
|
||||||
|
/// Spawns a background task that reads uni-streams.
|
||||||
|
pub fn subscribe_push(&self) -> mpsc::UnboundedReceiver<PushFrame> {
|
||||||
|
let (tx, rx) = mpsc::unbounded_channel();
|
||||||
|
let conn = self.connection.clone();
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
match conn.accept_uni().await {
|
||||||
|
Ok(mut recv) => {
|
||||||
|
let mut buf = BytesMut::new();
|
||||||
|
loop {
|
||||||
|
match recv.read_chunk(65536, true).await {
|
||||||
|
Ok(Some(chunk)) => buf.extend_from_slice(&chunk.bytes),
|
||||||
|
Ok(None) => break,
|
||||||
|
Err(e) => {
|
||||||
|
debug!("push stream read error: {e}");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
match PushFrame::decode(&mut buf) {
|
||||||
|
Ok(Some(frame)) => {
|
||||||
|
if tx.send(frame).is_err() {
|
||||||
|
return; // receiver dropped
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(None) => debug!("incomplete push frame"),
|
||||||
|
Err(e) => debug!("push decode error: {e}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(quinn::ConnectionError::ApplicationClosed(_)) => break,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("accept_uni error: {e}");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
rx
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Close the connection gracefully.
|
||||||
|
pub fn close(&self) {
|
||||||
|
self.connection.close(0u32.into(), b"bye");
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the underlying QUIC connection (for advanced use).
|
||||||
|
pub fn connection(&self) -> &Connection {
|
||||||
|
&self.connection
|
||||||
|
}
|
||||||
|
}
|
||||||
68
crates/quicproquo-rpc/src/error.rs
Normal file
68
crates/quicproquo-rpc/src/error.rs
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
//! RPC error types.
|
||||||
|
|
||||||
|
/// Status codes for RPC responses.
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
|
#[repr(u8)]
|
||||||
|
pub enum RpcStatus {
|
||||||
|
/// Request succeeded.
|
||||||
|
Ok = 0,
|
||||||
|
/// Client sent a malformed request.
|
||||||
|
BadRequest = 1,
|
||||||
|
/// Authentication required or token invalid.
|
||||||
|
Unauthorized = 2,
|
||||||
|
/// Caller lacks permission for this operation.
|
||||||
|
Forbidden = 3,
|
||||||
|
/// Requested resource not found.
|
||||||
|
NotFound = 4,
|
||||||
|
/// Rate limit exceeded.
|
||||||
|
RateLimited = 5,
|
||||||
|
/// Internal server error.
|
||||||
|
Internal = 10,
|
||||||
|
/// Method not recognized.
|
||||||
|
UnknownMethod = 11,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RpcStatus {
|
||||||
|
/// Decode a status byte. Returns `None` for unknown values.
|
||||||
|
pub fn from_u8(v: u8) -> Option<Self> {
|
||||||
|
match v {
|
||||||
|
0 => Some(Self::Ok),
|
||||||
|
1 => Some(Self::BadRequest),
|
||||||
|
2 => Some(Self::Unauthorized),
|
||||||
|
3 => Some(Self::Forbidden),
|
||||||
|
4 => Some(Self::NotFound),
|
||||||
|
5 => Some(Self::RateLimited),
|
||||||
|
10 => Some(Self::Internal),
|
||||||
|
11 => Some(Self::UnknownMethod),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Errors that can occur in the RPC layer.
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum RpcError {
|
||||||
|
#[error("connection error: {0}")]
|
||||||
|
Connection(String),
|
||||||
|
|
||||||
|
#[error("encoding error: {0}")]
|
||||||
|
Encode(String),
|
||||||
|
|
||||||
|
#[error("decoding error: {0}")]
|
||||||
|
Decode(String),
|
||||||
|
|
||||||
|
#[error("server returned error status {status:?}: {message}")]
|
||||||
|
Server {
|
||||||
|
status: RpcStatus,
|
||||||
|
message: String,
|
||||||
|
},
|
||||||
|
|
||||||
|
#[error("request timed out")]
|
||||||
|
Timeout,
|
||||||
|
|
||||||
|
#[error("stream closed unexpectedly")]
|
||||||
|
StreamClosed,
|
||||||
|
|
||||||
|
#[error("payload too large: {size} bytes (max {max})")]
|
||||||
|
PayloadTooLarge { size: usize, max: usize },
|
||||||
|
}
|
||||||
280
crates/quicproquo-rpc/src/framing.rs
Normal file
280
crates/quicproquo-rpc/src/framing.rs
Normal file
@@ -0,0 +1,280 @@
|
|||||||
|
//! Wire format encoding and decoding for the quicproquo v2 RPC protocol.
|
||||||
|
//!
|
||||||
|
//! ## Request frame
|
||||||
|
//! ```text
|
||||||
|
//! [method_id: u16 BE][request_id: u32 BE][payload_len: u32 BE][protobuf bytes]
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! ## Response frame
|
||||||
|
//! ```text
|
||||||
|
//! [status: u8][request_id: u32 BE][payload_len: u32 BE][protobuf bytes]
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! ## Push frame (server → client, uni-stream)
|
||||||
|
//! ```text
|
||||||
|
//! [event_type: u16 BE][payload_len: u32 BE][protobuf bytes]
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||||
|
|
||||||
|
use crate::error::{RpcError, RpcStatus};
|
||||||
|
|
||||||
|
/// Maximum payload size: 4 MiB.
|
||||||
|
pub const MAX_PAYLOAD_SIZE: usize = 4 * 1024 * 1024;
|
||||||
|
|
||||||
|
/// Request header size: 2 (method) + 4 (req_id) + 4 (len) = 10 bytes.
|
||||||
|
pub const REQUEST_HEADER_SIZE: usize = 10;
|
||||||
|
|
||||||
|
/// Response header size: 1 (status) + 4 (req_id) + 4 (len) = 9 bytes.
|
||||||
|
pub const RESPONSE_HEADER_SIZE: usize = 9;
|
||||||
|
|
||||||
|
/// Push header size: 2 (event_type) + 4 (len) = 6 bytes.
|
||||||
|
pub const PUSH_HEADER_SIZE: usize = 6;
|
||||||
|
|
||||||
|
// ── Request ──────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// A decoded RPC request frame.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct RequestFrame {
|
||||||
|
pub method_id: u16,
|
||||||
|
pub request_id: u32,
|
||||||
|
pub payload: Bytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RequestFrame {
|
||||||
|
/// Encode this request into a byte buffer.
|
||||||
|
pub fn encode(&self) -> Bytes {
|
||||||
|
let mut buf = BytesMut::with_capacity(REQUEST_HEADER_SIZE + self.payload.len());
|
||||||
|
buf.put_u16(self.method_id);
|
||||||
|
buf.put_u32(self.request_id);
|
||||||
|
buf.put_u32(self.payload.len() as u32);
|
||||||
|
buf.put(self.payload.clone());
|
||||||
|
buf.freeze()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Decode a request frame from a byte buffer.
|
||||||
|
/// Returns `None` if the buffer does not contain a complete frame.
|
||||||
|
pub fn decode(buf: &mut BytesMut) -> Result<Option<Self>, RpcError> {
|
||||||
|
if buf.len() < REQUEST_HEADER_SIZE {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peek at payload_len without consuming.
|
||||||
|
let payload_len =
|
||||||
|
u32::from_be_bytes([buf[6], buf[7], buf[8], buf[9]]) as usize;
|
||||||
|
|
||||||
|
if payload_len > MAX_PAYLOAD_SIZE {
|
||||||
|
return Err(RpcError::PayloadTooLarge {
|
||||||
|
size: payload_len,
|
||||||
|
max: MAX_PAYLOAD_SIZE,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let total = REQUEST_HEADER_SIZE + payload_len;
|
||||||
|
if buf.len() < total {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
let method_id = buf.get_u16();
|
||||||
|
let request_id = buf.get_u32();
|
||||||
|
let _len = buf.get_u32();
|
||||||
|
let payload = buf.split_to(payload_len).freeze();
|
||||||
|
|
||||||
|
Ok(Some(Self {
|
||||||
|
method_id,
|
||||||
|
request_id,
|
||||||
|
payload,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Response ─────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// A decoded RPC response frame.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct ResponseFrame {
|
||||||
|
pub status: u8,
|
||||||
|
pub request_id: u32,
|
||||||
|
pub payload: Bytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ResponseFrame {
|
||||||
|
/// Encode this response into a byte buffer.
|
||||||
|
pub fn encode(&self) -> Bytes {
|
||||||
|
let mut buf = BytesMut::with_capacity(RESPONSE_HEADER_SIZE + self.payload.len());
|
||||||
|
buf.put_u8(self.status);
|
||||||
|
buf.put_u32(self.request_id);
|
||||||
|
buf.put_u32(self.payload.len() as u32);
|
||||||
|
buf.put(self.payload.clone());
|
||||||
|
buf.freeze()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Decode a response frame from a byte buffer.
|
||||||
|
pub fn decode(buf: &mut BytesMut) -> Result<Option<Self>, RpcError> {
|
||||||
|
if buf.len() < RESPONSE_HEADER_SIZE {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
let payload_len =
|
||||||
|
u32::from_be_bytes([buf[5], buf[6], buf[7], buf[8]]) as usize;
|
||||||
|
|
||||||
|
if payload_len > MAX_PAYLOAD_SIZE {
|
||||||
|
return Err(RpcError::PayloadTooLarge {
|
||||||
|
size: payload_len,
|
||||||
|
max: MAX_PAYLOAD_SIZE,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let total = RESPONSE_HEADER_SIZE + payload_len;
|
||||||
|
if buf.len() < total {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
let status = buf.get_u8();
|
||||||
|
let request_id = buf.get_u32();
|
||||||
|
let _len = buf.get_u32();
|
||||||
|
let payload = buf.split_to(payload_len).freeze();
|
||||||
|
|
||||||
|
Ok(Some(Self {
|
||||||
|
status,
|
||||||
|
request_id,
|
||||||
|
payload,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convert the status byte to an `RpcStatus`.
|
||||||
|
pub fn rpc_status(&self) -> Option<RpcStatus> {
|
||||||
|
RpcStatus::from_u8(self.status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Push ─────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// A decoded server-push event frame (sent on QUIC uni-streams).
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct PushFrame {
|
||||||
|
pub event_type: u16,
|
||||||
|
pub payload: Bytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PushFrame {
|
||||||
|
/// Encode this push frame into a byte buffer.
|
||||||
|
pub fn encode(&self) -> Bytes {
|
||||||
|
let mut buf = BytesMut::with_capacity(PUSH_HEADER_SIZE + self.payload.len());
|
||||||
|
buf.put_u16(self.event_type);
|
||||||
|
buf.put_u32(self.payload.len() as u32);
|
||||||
|
buf.put(self.payload.clone());
|
||||||
|
buf.freeze()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Decode a push frame from a byte buffer.
|
||||||
|
pub fn decode(buf: &mut BytesMut) -> Result<Option<Self>, RpcError> {
|
||||||
|
if buf.len() < PUSH_HEADER_SIZE {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
let payload_len =
|
||||||
|
u32::from_be_bytes([buf[2], buf[3], buf[4], buf[5]]) as usize;
|
||||||
|
|
||||||
|
if payload_len > MAX_PAYLOAD_SIZE {
|
||||||
|
return Err(RpcError::PayloadTooLarge {
|
||||||
|
size: payload_len,
|
||||||
|
max: MAX_PAYLOAD_SIZE,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let total = PUSH_HEADER_SIZE + payload_len;
|
||||||
|
if buf.len() < total {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
let event_type = buf.get_u16();
|
||||||
|
let _len = buf.get_u32();
|
||||||
|
let payload = buf.split_to(payload_len).freeze();
|
||||||
|
|
||||||
|
Ok(Some(Self {
|
||||||
|
event_type,
|
||||||
|
payload,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    /// Encode → decode round-trips a request and consumes exactly one frame.
    #[test]
    fn request_roundtrip() {
        let frame = RequestFrame {
            method_id: 42,
            request_id: 1001,
            payload: Bytes::from_static(b"hello"),
        };
        let encoded = frame.encode();
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = RequestFrame::decode(&mut buf).expect("decode").expect("complete");
        assert_eq!(decoded.method_id, 42);
        assert_eq!(decoded.request_id, 1001);
        assert_eq!(decoded.payload, Bytes::from_static(b"hello"));
        // The decoder consumed the whole frame and nothing remains.
        assert!(buf.is_empty());
    }

    /// Encode → decode round-trips a response, including the status byte.
    #[test]
    fn response_roundtrip() {
        let frame = ResponseFrame {
            status: RpcStatus::Ok as u8,
            request_id: 2002,
            payload: Bytes::from_static(b"world"),
        };
        let encoded = frame.encode();
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = ResponseFrame::decode(&mut buf).expect("decode").expect("complete");
        // RpcStatus::Ok is discriminant 0.
        assert_eq!(decoded.status, 0);
        assert_eq!(decoded.request_id, 2002);
        assert_eq!(decoded.payload, Bytes::from_static(b"world"));
    }

    /// Encode → decode round-trips a push frame.
    #[test]
    fn push_roundtrip() {
        let frame = PushFrame {
            event_type: 7,
            payload: Bytes::from_static(b"event-data"),
        };
        let encoded = frame.encode();
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = PushFrame::decode(&mut buf).expect("decode").expect("complete");
        assert_eq!(decoded.event_type, 7);
        assert_eq!(decoded.payload, Bytes::from_static(b"event-data"));
    }

    /// A buffer shorter than the request header is "not yet complete",
    /// not an error.
    #[test]
    fn incomplete_request_returns_none() {
        let mut buf = BytesMut::from(&[0u8; 5][..]);
        assert!(RequestFrame::decode(&mut buf).expect("no error").is_none());
    }

    /// A declared payload length above MAX_PAYLOAD_SIZE is rejected as soon
    /// as the header is available, before any payload bytes arrive.
    #[test]
    fn payload_too_large_rejected() {
        // Craft a request header with payload_len = MAX + 1.
        let mut buf = BytesMut::new();
        buf.put_u16(1);
        buf.put_u32(1);
        buf.put_u32((MAX_PAYLOAD_SIZE + 1) as u32);
        let result = RequestFrame::decode(&mut buf);
        assert!(matches!(result, Err(RpcError::PayloadTooLarge { .. })));
    }

    /// An empty payload encodes to exactly the header and decodes back empty.
    #[test]
    fn empty_payload_request() {
        let frame = RequestFrame {
            method_id: 0,
            request_id: 0,
            payload: Bytes::new(),
        };
        let encoded = frame.encode();
        assert_eq!(encoded.len(), REQUEST_HEADER_SIZE);
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = RequestFrame::decode(&mut buf).expect("decode").expect("complete");
        assert!(decoded.payload.is_empty());
    }
}
|
||||||
13
crates/quicproquo-rpc/src/lib.rs
Normal file
13
crates/quicproquo-rpc/src/lib.rs
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
//! QUIC RPC framework for quicproquo v2.
|
||||||
|
//!
|
||||||
|
//! Wire format per QUIC stream:
|
||||||
|
//! - Request: `[method_id: u16][request_id: u32][payload_len: u32][protobuf bytes]`
|
||||||
|
//! - Response: `[status: u8][request_id: u32][payload_len: u32][protobuf bytes]`
|
||||||
|
//! - Push: `[event_type: u16][payload_len: u32][protobuf bytes]` (uni-stream)
|
||||||
|
|
||||||
|
pub mod framing;
|
||||||
|
pub mod method;
|
||||||
|
pub mod server;
|
||||||
|
pub mod client;
|
||||||
|
pub mod middleware;
|
||||||
|
pub mod error;
|
||||||
102
crates/quicproquo-rpc/src/method.rs
Normal file
102
crates/quicproquo-rpc/src/method.rs
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
//! Method registry — maps method IDs to handler functions.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::future::Future;
|
||||||
|
use std::pin::Pin;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use bytes::Bytes;
|
||||||
|
|
||||||
|
use crate::error::RpcStatus;
|
||||||
|
|
||||||
|
/// The result of handling an RPC request.
|
||||||
|
pub struct HandlerResult {
|
||||||
|
pub status: RpcStatus,
|
||||||
|
pub payload: Bytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HandlerResult {
|
||||||
|
/// Shorthand for a successful response.
|
||||||
|
pub fn ok(payload: Bytes) -> Self {
|
||||||
|
Self {
|
||||||
|
status: RpcStatus::Ok,
|
||||||
|
payload,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Shorthand for an error response.
|
||||||
|
pub fn err(status: RpcStatus, message: &str) -> Self {
|
||||||
|
Self {
|
||||||
|
status,
|
||||||
|
payload: Bytes::copy_from_slice(message.as_bytes()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Context passed to every RPC handler.
pub struct RequestContext {
    /// The authenticated identity key of the caller, if any.
    /// Populated by the auth middleware; `None` for unauthenticated calls.
    pub identity_key: Option<Vec<u8>>,
    /// The session token, if provided.
    pub session_token: Option<Vec<u8>>,
    /// The raw request payload (protobuf-encoded).
    pub payload: Bytes,
}

/// Type-erased async handler function.
///
/// Takes the shared server state and the per-request context, and returns a
/// boxed future resolving to the handler's status + payload. Wrapped in an
/// `Arc` so registry lookups can be cloned cheaply across tasks.
pub type HandlerFn<S> = Arc<
    dyn Fn(Arc<S>, RequestContext) -> Pin<Box<dyn Future<Output = HandlerResult> + Send>>
        + Send
        + Sync,
>;
|
||||||
|
|
||||||
|
/// Registry mapping method IDs to handler functions.
|
||||||
|
pub struct MethodRegistry<S> {
|
||||||
|
handlers: HashMap<u16, (HandlerFn<S>, &'static str)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: Send + Sync + 'static> MethodRegistry<S> {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
handlers: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Register a handler for a method ID.
|
||||||
|
pub fn register<F, Fut>(&mut self, method_id: u16, name: &'static str, handler: F)
|
||||||
|
where
|
||||||
|
F: Fn(Arc<S>, RequestContext) -> Fut + Send + Sync + 'static,
|
||||||
|
Fut: Future<Output = HandlerResult> + Send + 'static,
|
||||||
|
{
|
||||||
|
let handler = Arc::new(move |state: Arc<S>, ctx: RequestContext| {
|
||||||
|
Box::pin(handler(state, ctx)) as Pin<Box<dyn Future<Output = HandlerResult> + Send>>
|
||||||
|
});
|
||||||
|
self.handlers.insert(method_id, (handler, name));
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Look up a handler by method ID.
|
||||||
|
pub fn get(&self, method_id: u16) -> Option<&(HandlerFn<S>, &'static str)> {
|
||||||
|
self.handlers.get(&method_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the number of registered methods.
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.handlers.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Whether the registry is empty.
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.handlers.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterate over all registered (method_id, name) pairs.
|
||||||
|
pub fn methods(&self) -> impl Iterator<Item = (u16, &'static str)> + '_ {
|
||||||
|
self.handlers.iter().map(|(&id, (_, name))| (id, *name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: Send + Sync + 'static> Default for MethodRegistry<S> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
96
crates/quicproquo-rpc/src/middleware.rs
Normal file
96
crates/quicproquo-rpc/src/middleware.rs
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
//! Tower-based middleware layers for the RPC server.
|
||||||
|
//!
|
||||||
|
//! - `AuthLayer`: validates session tokens and attaches identity to context.
|
||||||
|
//! - `RateLimitLayer`: per-IP request rate limiting.
|
||||||
|
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use dashmap::DashMap;
|
||||||
|
|
||||||
|
// ── Auth middleware ──────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Validates bearer tokens and resolves identity keys.
pub trait SessionValidator: Send + Sync + 'static {
    /// Validate a session token, returning the caller's identity key when
    /// the token is valid, or `None` to reject it.
    fn validate(&self, token: &[u8]) -> Option<Vec<u8>>;
}

/// Auth context extracted from a validated session.
#[derive(Debug, Clone)]
pub struct AuthContext {
    /// The Ed25519 identity key of the authenticated caller.
    pub identity_key: Vec<u8>,
}
|
||||||
|
|
||||||
|
// ── Rate limiter ─────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Simple per-key sliding-window rate limiter.
|
||||||
|
pub struct RateLimiter {
|
||||||
|
/// Max requests per window.
|
||||||
|
max_requests: u32,
|
||||||
|
/// Window duration.
|
||||||
|
window: Duration,
|
||||||
|
/// Map from key → (count, window_start).
|
||||||
|
state: DashMap<Vec<u8>, (u32, Instant)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RateLimiter {
|
||||||
|
/// Create a new rate limiter.
|
||||||
|
pub fn new(max_requests: u32, window: Duration) -> Self {
|
||||||
|
Self {
|
||||||
|
max_requests,
|
||||||
|
window,
|
||||||
|
state: DashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if a request from `key` is allowed. Returns `true` if allowed.
|
||||||
|
pub fn check(&self, key: &[u8]) -> bool {
|
||||||
|
let now = Instant::now();
|
||||||
|
let mut entry = self.state.entry(key.to_vec()).or_insert((0, now));
|
||||||
|
let (count, window_start) = entry.value_mut();
|
||||||
|
|
||||||
|
if now.duration_since(*window_start) >= self.window {
|
||||||
|
// Reset window.
|
||||||
|
*count = 1;
|
||||||
|
*window_start = now;
|
||||||
|
true
|
||||||
|
} else if *count < self.max_requests {
|
||||||
|
*count += 1;
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove expired entries (call periodically for memory hygiene).
|
||||||
|
pub fn gc(&self) {
|
||||||
|
let now = Instant::now();
|
||||||
|
self.state.retain(|_, (_, start)| now.duration_since(*start) < self.window * 2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    /// The limiter admits exactly `max_requests` calls within one window.
    #[test]
    fn rate_limiter_allows_within_limit() {
        let rl = RateLimiter::new(3, Duration::from_secs(60));
        let key = b"test-key";
        assert!(rl.check(key));
        assert!(rl.check(key));
        assert!(rl.check(key));
        assert!(!rl.check(key)); // 4th request denied
    }

    /// After the window elapses, the count resets and requests are admitted
    /// again. Sleeps 5 ms against a 1 ms window to stay robust under load.
    #[test]
    fn rate_limiter_resets_after_window() {
        let rl = RateLimiter::new(1, Duration::from_millis(1));
        let key = b"test-key";
        assert!(rl.check(key));
        assert!(!rl.check(key));
        std::thread::sleep(Duration::from_millis(5));
        assert!(rl.check(key)); // window expired
    }
}
|
||||||
198
crates/quicproquo-rpc/src/server.rs
Normal file
198
crates/quicproquo-rpc/src/server.rs
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
//! QUIC RPC server — accepts connections, dispatches requests to handlers.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use bytes::BytesMut;
|
||||||
|
use quinn::{Endpoint, Incoming, RecvStream, SendStream};
|
||||||
|
use tracing::{debug, info, warn};
|
||||||
|
|
||||||
|
use crate::error::{RpcError, RpcStatus};
|
||||||
|
use crate::framing::{RequestFrame, ResponseFrame, PushFrame};
|
||||||
|
use crate::method::{HandlerResult, MethodRegistry, RequestContext};
|
||||||
|
|
||||||
|
/// Configuration for the RPC server.
pub struct RpcServerConfig {
    /// QUIC listen address.
    pub listen_addr: std::net::SocketAddr,
    /// TLS server config (rustls). Cloned at bind time so the server can
    /// set its own ALPN without mutating the caller's shared copy.
    pub tls_config: Arc<rustls::ServerConfig>,
    /// ALPN protocol identifier for the RPC service.
    pub alpn: Vec<u8>,
}

/// The QUIC RPC server.
///
/// Holds the bound endpoint, the shared application state handed to every
/// handler, and the method registry used for dispatch.
pub struct RpcServer<S: Send + Sync + 'static> {
    endpoint: Endpoint,
    state: Arc<S>,
    registry: Arc<MethodRegistry<S>>,
}
|
||||||
|
|
||||||
|
impl<S: Send + Sync + 'static> RpcServer<S> {
    /// Create and bind the QUIC endpoint. Does not start accepting yet.
    ///
    /// Clones the supplied rustls config so the configured ALPN can be
    /// applied without mutating the caller's shared `Arc`.
    pub fn bind(
        config: RpcServerConfig,
        state: Arc<S>,
        registry: MethodRegistry<S>,
    ) -> Result<Self, RpcError> {
        // Apply our ALPN to a private copy of the TLS config.
        let mut tls = (*config.tls_config).clone();
        tls.alpn_protocols = vec![config.alpn];
        let quic_tls = quinn::crypto::rustls::QuicServerConfig::try_from(tls)
            .map_err(|e| RpcError::Connection(format!("TLS config: {e}")))?;
        let server_config = quinn::ServerConfig::with_crypto(Arc::new(quic_tls));

        let endpoint = Endpoint::server(server_config, config.listen_addr)
            .map_err(|e| RpcError::Connection(format!("bind {}: {e}", config.listen_addr)))?;

        info!(addr = %config.listen_addr, "RPC server bound");

        Ok(Self {
            endpoint,
            state,
            registry: Arc::new(registry),
        })
    }

    /// Accept connections in a loop. Spawns a task per connection.
    ///
    /// Returns `Ok(())` once the endpoint stops yielding connections
    /// (i.e. it has been closed). Per-connection errors are logged,
    /// not propagated.
    pub async fn serve(self) -> Result<(), RpcError> {
        info!("RPC server accepting connections");
        while let Some(incoming) = self.endpoint.accept().await {
            let state = Arc::clone(&self.state);
            let registry = Arc::clone(&self.registry);
            tokio::spawn(async move {
                if let Err(e) = handle_connection(incoming, state, registry).await {
                    warn!("connection error: {e}");
                }
            });
        }
        Ok(())
    }

    /// Get the local address the server is listening on
    /// (useful when bound to port 0).
    pub fn local_addr(&self) -> Result<std::net::SocketAddr, RpcError> {
        self.endpoint
            .local_addr()
            .map_err(|e| RpcError::Connection(e.to_string()))
    }
}
|
||||||
|
|
||||||
|
/// Handle a single QUIC connection: accept bi-directional streams for RPCs.
|
||||||
|
async fn handle_connection<S: Send + Sync + 'static>(
|
||||||
|
incoming: Incoming,
|
||||||
|
state: Arc<S>,
|
||||||
|
registry: Arc<MethodRegistry<S>>,
|
||||||
|
) -> Result<(), RpcError> {
|
||||||
|
let connection = incoming
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
|
||||||
|
let remote = connection.remote_address();
|
||||||
|
debug!(remote = %remote, "new connection");
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let stream = connection.accept_bi().await;
|
||||||
|
match stream {
|
||||||
|
Ok((send, recv)) => {
|
||||||
|
let state = Arc::clone(&state);
|
||||||
|
let registry = Arc::clone(®istry);
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if let Err(e) = handle_stream(send, recv, state, registry).await {
|
||||||
|
debug!("stream error: {e}");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Err(quinn::ConnectionError::ApplicationClosed(_)) => {
|
||||||
|
debug!(remote = %remote, "connection closed by peer");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
debug!(remote = %remote, "accept_bi error: {e}");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handle a single bi-directional stream: read request, dispatch, write response.
async fn handle_stream<S: Send + Sync + 'static>(
    mut send: SendStream,
    mut recv: RecvStream,
    state: Arc<S>,
    registry: Arc<MethodRegistry<S>>,
) -> Result<(), RpcError> {
    // Read the complete request from the stream. `read_chunk(.., true)`
    // yields data in order; `None` means the peer finished the stream.
    let mut buf = BytesMut::new();
    while let Some(chunk) = recv
        .read_chunk(65536, true)
        .await
        .map_err(|e| RpcError::Connection(e.to_string()))?
    {
        buf.extend_from_slice(&chunk.bytes);
        // Bail out as soon as the peer streams more than one max-size frame
        // could occupy, rather than buffering without bound.
        if buf.len() > crate::framing::MAX_PAYLOAD_SIZE + crate::framing::REQUEST_HEADER_SIZE {
            return Err(RpcError::PayloadTooLarge {
                size: buf.len(),
                max: crate::framing::MAX_PAYLOAD_SIZE,
            });
        }
    }

    // The stream is finished, so a partial frame here is a protocol error,
    // not a "wait for more data" situation.
    let frame = match RequestFrame::decode(&mut buf)? {
        Some(f) => f,
        None => return Err(RpcError::Decode("incomplete request frame".into())),
    };

    // Dispatch to the registered handler. Unknown method IDs still get a
    // well-formed error response instead of a dropped stream.
    let result = match registry.get(frame.method_id) {
        Some((handler, name)) => {
            debug!(method_id = frame.method_id, method = name, req_id = frame.request_id, "dispatching");
            let ctx = RequestContext {
                identity_key: None, // populated by auth middleware
                session_token: None,
                payload: frame.payload,
            };
            handler(Arc::clone(&state), ctx).await
        }
        None => {
            warn!(method_id = frame.method_id, "unknown method");
            HandlerResult::err(RpcStatus::UnknownMethod, "unknown method")
        }
    };

    // Echo the request_id so the client can correlate the response.
    let response = ResponseFrame {
        status: result.status as u8,
        request_id: frame.request_id,
        payload: result.payload,
    };

    let encoded = response.encode();
    send.write_all(&encoded)
        .await
        .map_err(|e| RpcError::Connection(e.to_string()))?;
    // Finish the send side so the client sees a complete response frame.
    send.finish().map_err(|e| RpcError::Connection(e.to_string()))?;

    Ok(())
}
|
||||||
|
|
||||||
|
/// Send a push event to a client via a QUIC uni-stream.
|
||||||
|
pub async fn send_push(
|
||||||
|
connection: &quinn::Connection,
|
||||||
|
event_type: u16,
|
||||||
|
payload: bytes::Bytes,
|
||||||
|
) -> Result<(), RpcError> {
|
||||||
|
let mut send = connection
|
||||||
|
.open_uni()
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
|
||||||
|
let frame = PushFrame {
|
||||||
|
event_type,
|
||||||
|
payload,
|
||||||
|
};
|
||||||
|
let encoded = frame.encode();
|
||||||
|
send.write_all(&encoded)
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
send.finish().map_err(|e| RpcError::Connection(e.to_string()))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
32
crates/quicproquo-sdk/Cargo.toml
Normal file
32
crates/quicproquo-sdk/Cargo.toml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
[package]
|
||||||
|
name = "quicproquo-sdk"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "Client SDK for quicproquo v2 — connect, auth, send, receive, subscribe"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
quicproquo-core = { path = "../quicproquo-core" }
|
||||||
|
quicproquo-proto = { path = "../quicproquo-proto" }
|
||||||
|
quicproquo-rpc = { path = "../quicproquo-rpc" }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
futures = { workspace = true }
|
||||||
|
tracing = { workspace = true }
|
||||||
|
thiserror = { workspace = true }
|
||||||
|
anyhow = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
bincode = { workspace = true }
|
||||||
|
hex = { workspace = true }
|
||||||
|
zeroize = { workspace = true }
|
||||||
|
rusqlite = { workspace = true }
|
||||||
|
argon2 = { workspace = true }
|
||||||
|
rand = { workspace = true }
|
||||||
|
sha2 = { workspace = true }
|
||||||
|
rustls = { workspace = true }
|
||||||
|
quinn = { workspace = true }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
tokio = { workspace = true, features = ["test-util"] }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
193
crates/quicproquo-sdk/src/client.rs
Normal file
193
crates/quicproquo-sdk/src/client.rs
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
//! `QpqClient` — the main entry point for the quicproquo SDK.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use crate::config::ClientConfig;
|
||||||
|
use crate::conversation::ConversationStore;
|
||||||
|
use crate::error::SdkError;
|
||||||
|
use crate::events::ClientEvent;
|
||||||
|
|
||||||
|
/// The main SDK client. All state is contained within this struct — no globals.
pub struct QpqClient {
    config: ClientConfig,
    /// Underlying RPC connection; `None` until `connect` succeeds.
    rpc: Option<quicproquo_rpc::client::RpcClient>,
    /// Broadcast channel fanning out `ClientEvent`s to subscribers.
    event_tx: broadcast::Sender<ClientEvent>,
    /// The authenticated username, if logged in.
    username: Option<String>,
    /// The local identity key (Ed25519 public key, 32 bytes).
    identity_key: Option<Vec<u8>>,
    /// Session token from OPAQUE login; `Some` means authenticated.
    session_token: Option<Vec<u8>>,
    /// Local conversation store (SQLCipher); opened during `connect`.
    conv_store: Option<ConversationStore>,
}
|
||||||
|
|
||||||
|
impl QpqClient {
    /// Create a new client with the given configuration.
    ///
    /// The client starts disconnected; call `connect` before issuing RPCs.
    pub fn new(config: ClientConfig) -> Self {
        // 256-event buffer; slow subscribers may observe `Lagged` errors.
        let (event_tx, _) = broadcast::channel(256);
        Self {
            config,
            rpc: None,
            event_tx,
            username: None,
            identity_key: None,
            session_token: None,
            conv_store: None,
        }
    }

    /// Connect to the server.
    ///
    /// Establishes the QUIC/RPC connection, opens the local conversation
    /// store, and emits `ClientEvent::Connected` on success.
    pub async fn connect(&mut self) -> Result<(), SdkError> {
        let tls_config = build_tls_config(self.config.accept_invalid_certs)?;

        let rpc_config = quicproquo_rpc::client::RpcClientConfig {
            server_addr: self.config.server_addr,
            server_name: self.config.server_name.clone(),
            tls_config: Arc::new(tls_config),
            alpn: self.config.alpn.clone(),
        };

        let client = quicproquo_rpc::client::RpcClient::connect(rpc_config).await?;
        self.rpc = Some(client);

        // Open local conversation store.
        let store = ConversationStore::open(
            &self.config.db_path,
            self.config.db_password.as_deref(),
        )?;
        self.conv_store = Some(store);

        self.emit(ClientEvent::Connected);
        info!(server = %self.config.server_addr, "connected");
        Ok(())
    }

    /// Subscribe to client events. Returns a broadcast receiver.
    pub fn subscribe(&self) -> broadcast::Receiver<ClientEvent> {
        self.event_tx.subscribe()
    }

    /// Get the authenticated username, if logged in.
    pub fn username(&self) -> Option<&str> {
        self.username.as_deref()
    }

    /// Get the local identity key.
    pub fn identity_key(&self) -> Option<&[u8]> {
        self.identity_key.as_deref()
    }

    /// Whether the client is connected (i.e. `connect` has succeeded).
    pub fn is_connected(&self) -> bool {
        self.rpc.is_some()
    }

    /// Whether the client is authenticated (holds a session token).
    pub fn is_authenticated(&self) -> bool {
        self.session_token.is_some()
    }

    /// Get a reference to the RPC client (for direct calls).
    ///
    /// Errors with `SdkError::NotConnected` before `connect` succeeds.
    pub fn rpc(&self) -> Result<&quicproquo_rpc::client::RpcClient, SdkError> {
        self.rpc.as_ref().ok_or(SdkError::NotConnected)
    }

    /// Get a reference to the conversation store.
    ///
    /// Errors with `SdkError::NotConnected` before the store is opened.
    pub fn conversations(&self) -> Result<&ConversationStore, SdkError> {
        self.conv_store
            .as_ref()
            .ok_or(SdkError::NotConnected)
    }

    /// Disconnect from the server. Safe to call when already disconnected.
    pub fn disconnect(&mut self) {
        if let Some(rpc) = self.rpc.take() {
            rpc.close();
            self.emit(ClientEvent::Disconnected {
                reason: "client closed".into(),
            });
        }
    }

    /// Broadcast an event to all subscribers.
    fn emit(&self, event: ClientEvent) {
        // Ignore send errors (no subscribers).
        let _ = self.event_tx.send(event);
    }
}

impl Drop for QpqClient {
    /// Ensure the connection is closed when the client is dropped.
    fn drop(&mut self) {
        self.disconnect();
    }
}
|
||||||
|
|
||||||
|
/// Build the rustls client config for the QUIC connection.
///
/// When `accept_invalid_certs` is true, certificate verification is
/// disabled entirely via `InsecureVerifier` — dev mode only.
///
/// NOTE(review): the non-dev path uses an EMPTY `RootCertStore`, so every
/// server certificate will fail verification — presumably real trust
/// anchors (platform or webpki roots) are meant to be loaded here.
/// Confirm before shipping the secure path.
fn build_tls_config(accept_invalid_certs: bool) -> Result<rustls::ClientConfig, SdkError> {
    let builder = rustls::ClientConfig::builder();

    if accept_invalid_certs {
        // Dev mode: trust any certificate chain.
        let config = builder
            .dangerous()
            .with_custom_certificate_verifier(Arc::new(InsecureVerifier))
            .with_no_client_auth();
        Ok(config)
    } else {
        let roots = rustls::RootCertStore::empty();
        let config = builder
            .with_root_certificates(roots)
            .with_no_client_auth();
        Ok(config)
    }
}
|
||||||
|
|
||||||
|
/// A TLS verifier that accepts any certificate (for dev mode only).
#[derive(Debug)]
struct InsecureVerifier;

impl rustls::client::danger::ServerCertVerifier for InsecureVerifier {
    /// Accept every certificate chain unconditionally.
    fn verify_server_cert(
        &self,
        _end_entity: &rustls::pki_types::CertificateDer<'_>,
        _intermediates: &[rustls::pki_types::CertificateDer<'_>],
        _server_name: &rustls::pki_types::ServerName<'_>,
        _ocsp_response: &[u8],
        _now: rustls::pki_types::UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }

    /// Accept every TLS 1.2 handshake signature unconditionally.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    /// Accept every TLS 1.3 handshake signature unconditionally.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    /// Advertise a broad set of signature schemes so any server cert works.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        vec![
            rustls::SignatureScheme::RSA_PKCS1_SHA256,
            rustls::SignatureScheme::RSA_PKCS1_SHA384,
            rustls::SignatureScheme::RSA_PKCS1_SHA512,
            rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
            rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
            rustls::SignatureScheme::ED25519,
            rustls::SignatureScheme::RSA_PSS_SHA256,
            rustls::SignatureScheme::RSA_PSS_SHA384,
            rustls::SignatureScheme::RSA_PSS_SHA512,
        ]
    }
}
|
||||||
44
crates/quicproquo-sdk/src/config.rs
Normal file
44
crates/quicproquo-sdk/src/config.rs
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
//! Client configuration.
|
||||||
|
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
/// Configuration for a `QpqClient` instance.
///
/// `Default` targets a local development server (`127.0.0.1:7000`, SNI name
/// `localhost`) with an unencrypted local database and strict TLS validation.
#[derive(Debug, Clone)]
pub struct ClientConfig {
    /// Server address (host:port).
    pub server_addr: SocketAddr,

    /// Server hostname used for TLS SNI.
    pub server_name: String,

    /// Path to the local conversation database.
    pub db_path: PathBuf,

    /// Password for encrypting the local database (SQLCipher).
    /// If `None`, the database is stored unencrypted.
    pub db_password: Option<String>,

    /// Path to the local state file (identity key, MLS state).
    pub state_path: PathBuf,

    /// Whether to accept self-signed TLS certificates (dev mode only).
    pub accept_invalid_certs: bool,

    /// ALPN protocol identifier for the RPC service.
    pub alpn: Vec<u8>,
}

impl Default for ClientConfig {
    fn default() -> Self {
        // Local-development defaults; production callers override these.
        Self {
            server_addr: "127.0.0.1:7000".parse().expect("valid addr"),
            server_name: "localhost".into(),
            db_path: "conversations.db".into(),
            db_password: None,
            state_path: "client-state.bin".into(),
            accept_invalid_certs: false,
            alpn: b"qpq/2".to_vec(),
        }
    }
}
|
||||||
481
crates/quicproquo-sdk/src/conversation.rs
Normal file
481
crates/quicproquo-sdk/src/conversation.rs
Normal file
@@ -0,0 +1,481 @@
|
|||||||
|
//! Conversation management — create DMs, groups, send and receive messages.
|
||||||
|
//!
|
||||||
|
//! This is the SDK-side conversation store (migrated from v1 client).
|
||||||
|
|
||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use rusqlite::{params, Connection, OptionalExtension};
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
|
||||||
|
// ── Types ────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// 16-byte conversation identifier.
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct ConversationId(pub [u8; 16]);
|
||||||
|
|
||||||
|
impl ConversationId {
|
||||||
|
pub fn from_slice(s: &[u8]) -> Option<Self> {
|
||||||
|
if s.len() == 16 {
|
||||||
|
let mut buf = [0u8; 16];
|
||||||
|
buf.copy_from_slice(s);
|
||||||
|
Some(Self(buf))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Derive a conversation ID from a group name via SHA-256 truncation.
|
||||||
|
pub fn from_group_name(name: &str) -> Self {
|
||||||
|
use sha2::{Digest, Sha256};
|
||||||
|
let hash = Sha256::digest(name.as_bytes());
|
||||||
|
let mut buf = [0u8; 16];
|
||||||
|
buf.copy_from_slice(&hash[..16]);
|
||||||
|
Self(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn hex(&self) -> String {
|
||||||
|
hex::encode(self.0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The kind of conversation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConversationKind {
    /// 1:1 DM channel with a specific peer.
    Dm {
        /// Peer's identity key bytes.
        peer_key: Vec<u8>,
        /// Peer's username, when known.
        peer_username: Option<String>,
    },
    /// Named group with N members.
    Group {
        /// Human-readable group name.
        name: String,
    },
}
|
||||||
|
|
||||||
|
/// A conversation with its metadata and MLS state.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct Conversation {
|
||||||
|
pub id: ConversationId,
|
||||||
|
pub kind: ConversationKind,
|
||||||
|
pub display_name: String,
|
||||||
|
/// Serialized MLS group (bincode).
|
||||||
|
pub mls_group_blob: Option<Vec<u8>>,
|
||||||
|
/// Serialized keystore (bincode HashMap).
|
||||||
|
pub keystore_blob: Option<Vec<u8>>,
|
||||||
|
/// Member identity keys.
|
||||||
|
pub member_keys: Vec<Vec<u8>>,
|
||||||
|
pub unread_count: u32,
|
||||||
|
pub last_activity_ms: u64,
|
||||||
|
pub created_at_ms: u64,
|
||||||
|
/// Whether this conversation uses hybrid (X25519 + ML-KEM-768) MLS keys.
|
||||||
|
pub is_hybrid: bool,
|
||||||
|
/// Highest server-side delivery sequence number seen.
|
||||||
|
pub last_seen_seq: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A stored message.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct StoredMessage {
|
||||||
|
pub conversation_id: ConversationId,
|
||||||
|
pub message_id: Option<[u8; 16]>,
|
||||||
|
pub sender_key: Vec<u8>,
|
||||||
|
pub sender_name: Option<String>,
|
||||||
|
pub body: String,
|
||||||
|
pub msg_type: String,
|
||||||
|
pub ref_msg_id: Option<[u8; 16]>,
|
||||||
|
pub timestamp_ms: u64,
|
||||||
|
pub is_outgoing: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An entry in the offline outbox queue.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct OutboxEntry {
|
||||||
|
pub id: i64,
|
||||||
|
pub conversation_id: ConversationId,
|
||||||
|
pub recipient_key: Vec<u8>,
|
||||||
|
pub payload: Vec<u8>,
|
||||||
|
pub retry_count: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── ConversationStore ────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// SQLCipher-backed conversation and message store.
|
||||||
|
pub struct ConversationStore {
|
||||||
|
conn: Connection,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ConversationStore {
|
||||||
|
/// Open or create the conversation database.
|
||||||
|
pub fn open(db_path: &Path, password: Option<&str>) -> anyhow::Result<Self> {
|
||||||
|
if let Some(parent) = db_path.parent() {
|
||||||
|
std::fs::create_dir_all(parent).ok();
|
||||||
|
}
|
||||||
|
|
||||||
|
let conn = Connection::open(db_path).context("open conversation db")?;
|
||||||
|
|
||||||
|
if let Some(pw) = password {
|
||||||
|
let key = derive_db_key(pw, db_path)?;
|
||||||
|
let hex_key = Zeroizing::new(hex::encode(&*key));
|
||||||
|
conn.pragma_update(None, "key", format!("x'{}'", &*hex_key))
|
||||||
|
.context("set SQLCipher key")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
|
||||||
|
.context("set pragmas")?;
|
||||||
|
Self::migrate(&conn)?;
|
||||||
|
Ok(Self { conn })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn migrate(conn: &Connection) -> anyhow::Result<()> {
|
||||||
|
conn.execute_batch(
|
||||||
|
"CREATE TABLE IF NOT EXISTS conversations (
|
||||||
|
id BLOB PRIMARY KEY,
|
||||||
|
kind TEXT NOT NULL,
|
||||||
|
display_name TEXT NOT NULL,
|
||||||
|
peer_key BLOB,
|
||||||
|
peer_username TEXT,
|
||||||
|
group_name TEXT,
|
||||||
|
mls_group_blob BLOB,
|
||||||
|
keystore_blob BLOB,
|
||||||
|
member_keys BLOB,
|
||||||
|
unread_count INTEGER NOT NULL DEFAULT 0,
|
||||||
|
last_activity_ms INTEGER NOT NULL DEFAULT 0,
|
||||||
|
created_at_ms INTEGER NOT NULL DEFAULT 0,
|
||||||
|
is_hybrid INTEGER NOT NULL DEFAULT 0,
|
||||||
|
last_seen_seq INTEGER NOT NULL DEFAULT 0
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS messages (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
conversation_id BLOB NOT NULL REFERENCES conversations(id),
|
||||||
|
message_id BLOB,
|
||||||
|
sender_key BLOB NOT NULL,
|
||||||
|
sender_name TEXT,
|
||||||
|
body TEXT NOT NULL,
|
||||||
|
msg_type TEXT NOT NULL,
|
||||||
|
ref_msg_id BLOB,
|
||||||
|
timestamp_ms INTEGER NOT NULL,
|
||||||
|
is_outgoing INTEGER NOT NULL DEFAULT 0
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_messages_conv
|
||||||
|
ON messages(conversation_id, timestamp_ms);
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS outbox (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
conversation_id BLOB NOT NULL,
|
||||||
|
recipient_key BLOB NOT NULL,
|
||||||
|
payload BLOB NOT NULL,
|
||||||
|
created_at_ms INTEGER NOT NULL,
|
||||||
|
retry_count INTEGER NOT NULL DEFAULT 0,
|
||||||
|
status TEXT NOT NULL DEFAULT 'pending'
|
||||||
|
);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_outbox_status
|
||||||
|
ON outbox(status, created_at_ms);",
|
||||||
|
)
|
||||||
|
.context("migrate conversation db")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Save or upsert a conversation.
|
||||||
|
pub fn save_conversation(&self, conv: &Conversation) -> anyhow::Result<()> {
|
||||||
|
let (kind_str, peer_key, peer_username, group_name) = match &conv.kind {
|
||||||
|
ConversationKind::Dm { peer_key, peer_username } => {
|
||||||
|
("dm", Some(peer_key.as_slice()), peer_username.as_deref(), None)
|
||||||
|
}
|
||||||
|
ConversationKind::Group { name } => ("group", None, None, Some(name.as_str())),
|
||||||
|
};
|
||||||
|
let member_keys_blob =
|
||||||
|
bincode::serialize(&conv.member_keys).context("serialize member_keys")?;
|
||||||
|
|
||||||
|
self.conn.execute(
|
||||||
|
"INSERT INTO conversations
|
||||||
|
(id, kind, display_name, peer_key, peer_username, group_name,
|
||||||
|
mls_group_blob, keystore_blob, member_keys, unread_count,
|
||||||
|
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq)
|
||||||
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
|
||||||
|
ON CONFLICT(id) DO UPDATE SET
|
||||||
|
display_name = excluded.display_name,
|
||||||
|
mls_group_blob = excluded.mls_group_blob,
|
||||||
|
keystore_blob = excluded.keystore_blob,
|
||||||
|
member_keys = excluded.member_keys,
|
||||||
|
unread_count = excluded.unread_count,
|
||||||
|
last_activity_ms = excluded.last_activity_ms,
|
||||||
|
is_hybrid = excluded.is_hybrid,
|
||||||
|
last_seen_seq = excluded.last_seen_seq",
|
||||||
|
params![
|
||||||
|
conv.id.0.as_slice(),
|
||||||
|
kind_str,
|
||||||
|
conv.display_name,
|
||||||
|
peer_key,
|
||||||
|
peer_username,
|
||||||
|
group_name,
|
||||||
|
conv.mls_group_blob,
|
||||||
|
conv.keystore_blob,
|
||||||
|
member_keys_blob,
|
||||||
|
conv.unread_count,
|
||||||
|
conv.last_activity_ms,
|
||||||
|
conv.created_at_ms,
|
||||||
|
conv.is_hybrid as i32,
|
||||||
|
conv.last_seen_seq as i64,
|
||||||
|
],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Load a conversation by ID.
|
||||||
|
pub fn load_conversation(&self, id: &ConversationId) -> anyhow::Result<Option<Conversation>> {
|
||||||
|
self.conn
|
||||||
|
.query_row(
|
||||||
|
"SELECT kind, display_name, peer_key, peer_username, group_name,
|
||||||
|
mls_group_blob, keystore_blob, member_keys, unread_count,
|
||||||
|
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
|
||||||
|
FROM conversations WHERE id = ?1",
|
||||||
|
params![id.0.as_slice()],
|
||||||
|
|row| row_to_conversation(id, row),
|
||||||
|
)
|
||||||
|
.optional()
|
||||||
|
.context("load conversation")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// List all conversations, most recent first.
|
||||||
|
pub fn list_conversations(&self) -> anyhow::Result<Vec<Conversation>> {
|
||||||
|
let mut stmt = self.conn.prepare(
|
||||||
|
"SELECT id, kind, display_name, peer_key, peer_username, group_name,
|
||||||
|
mls_group_blob, keystore_blob, member_keys, unread_count,
|
||||||
|
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
|
||||||
|
FROM conversations ORDER BY last_activity_ms DESC",
|
||||||
|
)?;
|
||||||
|
let rows = stmt.query_map([], |row| {
|
||||||
|
let id_blob: Vec<u8> = row.get(0)?;
|
||||||
|
let id = ConversationId::from_slice(&id_blob).unwrap_or(ConversationId([0; 16]));
|
||||||
|
row_to_conversation_full(&id, row)
|
||||||
|
})?;
|
||||||
|
let mut convs = Vec::new();
|
||||||
|
for row in rows {
|
||||||
|
convs.push(row?);
|
||||||
|
}
|
||||||
|
Ok(convs)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Find a DM by peer identity key.
|
||||||
|
pub fn find_dm_by_peer(&self, peer_key: &[u8]) -> anyhow::Result<Option<Conversation>> {
|
||||||
|
let id_blob: Option<Vec<u8>> = self
|
||||||
|
.conn
|
||||||
|
.query_row(
|
||||||
|
"SELECT id FROM conversations WHERE kind = 'dm' AND peer_key = ?1",
|
||||||
|
params![peer_key],
|
||||||
|
|row| row.get(0),
|
||||||
|
)
|
||||||
|
.optional()?;
|
||||||
|
match id_blob {
|
||||||
|
Some(blob) => {
|
||||||
|
let id = ConversationId::from_slice(&blob)
|
||||||
|
.context("invalid conversation id")?;
|
||||||
|
self.load_conversation(&id)
|
||||||
|
}
|
||||||
|
None => Ok(None),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Save a message.
|
||||||
|
pub fn save_message(&self, msg: &StoredMessage) -> anyhow::Result<()> {
|
||||||
|
self.conn.execute(
|
||||||
|
"INSERT INTO messages
|
||||||
|
(conversation_id, message_id, sender_key, sender_name, body,
|
||||||
|
msg_type, ref_msg_id, timestamp_ms, is_outgoing)
|
||||||
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
|
||||||
|
params![
|
||||||
|
msg.conversation_id.0.as_slice(),
|
||||||
|
msg.message_id.as_ref().map(|id| id.as_slice()),
|
||||||
|
msg.sender_key,
|
||||||
|
msg.sender_name,
|
||||||
|
msg.body,
|
||||||
|
msg.msg_type,
|
||||||
|
msg.ref_msg_id.as_ref().map(|id| id.as_slice()),
|
||||||
|
msg.timestamp_ms,
|
||||||
|
msg.is_outgoing as i32,
|
||||||
|
],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Load recent messages (newest first, then reversed to chronological).
|
||||||
|
pub fn load_recent_messages(
|
||||||
|
&self,
|
||||||
|
conv_id: &ConversationId,
|
||||||
|
limit: usize,
|
||||||
|
) -> anyhow::Result<Vec<StoredMessage>> {
|
||||||
|
let mut stmt = self.conn.prepare(
|
||||||
|
"SELECT message_id, sender_key, sender_name, body, msg_type,
|
||||||
|
ref_msg_id, timestamp_ms, is_outgoing
|
||||||
|
FROM messages WHERE conversation_id = ?1
|
||||||
|
ORDER BY timestamp_ms DESC LIMIT ?2",
|
||||||
|
)?;
|
||||||
|
let rows = stmt.query_map(
|
||||||
|
params![conv_id.0.as_slice(), limit.min(u32::MAX as usize) as u32],
|
||||||
|
|row| row_to_message(conv_id, row),
|
||||||
|
)?;
|
||||||
|
let mut msgs: Vec<StoredMessage> = rows.collect::<Result<_, _>>()?;
|
||||||
|
msgs.reverse();
|
||||||
|
Ok(msgs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Helpers ──────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
fn derive_db_key(password: &str, db_path: &Path) -> anyhow::Result<Zeroizing<[u8; 32]>> {
|
||||||
|
use argon2::{Algorithm, Argon2, Params, Version};
|
||||||
|
|
||||||
|
let salt_path = db_path.with_extension("db-salt");
|
||||||
|
let salt = if salt_path.exists() {
|
||||||
|
std::fs::read(&salt_path).context("read db salt")?
|
||||||
|
} else {
|
||||||
|
let mut salt = vec![0u8; 16];
|
||||||
|
rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut salt);
|
||||||
|
std::fs::write(&salt_path, &salt).context("write db salt")?;
|
||||||
|
#[cfg(unix)]
|
||||||
|
{
|
||||||
|
use std::os::unix::fs::PermissionsExt;
|
||||||
|
std::fs::set_permissions(&salt_path, std::fs::Permissions::from_mode(0o600)).ok();
|
||||||
|
}
|
||||||
|
salt
|
||||||
|
};
|
||||||
|
|
||||||
|
let params = Params::new(19 * 1024, 2, 1, Some(32))
|
||||||
|
.map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
|
||||||
|
let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
|
||||||
|
let mut key = Zeroizing::new([0u8; 32]);
|
||||||
|
argon2
|
||||||
|
.hash_password_into(password.as_bytes(), &salt, &mut *key)
|
||||||
|
.map_err(|e| anyhow::anyhow!("db key derivation: {e}"))?;
|
||||||
|
Ok(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn row_to_conversation(
|
||||||
|
id: &ConversationId,
|
||||||
|
row: &rusqlite::Row<'_>,
|
||||||
|
) -> rusqlite::Result<Conversation> {
|
||||||
|
let kind_str: String = row.get(0)?;
|
||||||
|
let display_name: String = row.get(1)?;
|
||||||
|
let peer_key: Option<Vec<u8>> = row.get(2)?;
|
||||||
|
let peer_username: Option<String> = row.get(3)?;
|
||||||
|
let group_name: Option<String> = row.get(4)?;
|
||||||
|
let mls_group_blob: Option<Vec<u8>> = row.get(5)?;
|
||||||
|
let keystore_blob: Option<Vec<u8>> = row.get(6)?;
|
||||||
|
let member_keys_blob: Option<Vec<u8>> = row.get(7)?;
|
||||||
|
let unread_count: u32 = row.get(8)?;
|
||||||
|
let last_activity_ms: u64 = row.get(9)?;
|
||||||
|
let created_at_ms: u64 = row.get(10)?;
|
||||||
|
let is_hybrid_int: i32 = row.get(11)?;
|
||||||
|
let last_seen_seq: i64 = row.get(12)?;
|
||||||
|
|
||||||
|
let kind = if kind_str == "dm" {
|
||||||
|
ConversationKind::Dm {
|
||||||
|
peer_key: peer_key.unwrap_or_default(),
|
||||||
|
peer_username,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ConversationKind::Group {
|
||||||
|
name: group_name.unwrap_or_default(),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let member_keys: Vec<Vec<u8>> = member_keys_blob
|
||||||
|
.and_then(|b| bincode::deserialize(&b).ok())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
Ok(Conversation {
|
||||||
|
id: id.clone(),
|
||||||
|
kind,
|
||||||
|
display_name,
|
||||||
|
mls_group_blob,
|
||||||
|
keystore_blob,
|
||||||
|
member_keys,
|
||||||
|
unread_count,
|
||||||
|
last_activity_ms,
|
||||||
|
created_at_ms,
|
||||||
|
is_hybrid: is_hybrid_int != 0,
|
||||||
|
last_seen_seq: last_seen_seq as u64,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn row_to_conversation_full(
|
||||||
|
id: &ConversationId,
|
||||||
|
row: &rusqlite::Row<'_>,
|
||||||
|
) -> rusqlite::Result<Conversation> {
|
||||||
|
let kind_str: String = row.get(1)?;
|
||||||
|
let display_name: String = row.get(2)?;
|
||||||
|
let peer_key: Option<Vec<u8>> = row.get(3)?;
|
||||||
|
let peer_username: Option<String> = row.get(4)?;
|
||||||
|
let group_name: Option<String> = row.get(5)?;
|
||||||
|
let mls_group_blob: Option<Vec<u8>> = row.get(6)?;
|
||||||
|
let keystore_blob: Option<Vec<u8>> = row.get(7)?;
|
||||||
|
let member_keys_blob: Option<Vec<u8>> = row.get(8)?;
|
||||||
|
let unread_count: u32 = row.get(9)?;
|
||||||
|
let last_activity_ms: u64 = row.get(10)?;
|
||||||
|
let created_at_ms: u64 = row.get(11)?;
|
||||||
|
let is_hybrid_int: i32 = row.get(12)?;
|
||||||
|
let last_seen_seq: i64 = row.get(13)?;
|
||||||
|
|
||||||
|
let kind = if kind_str == "dm" {
|
||||||
|
ConversationKind::Dm {
|
||||||
|
peer_key: peer_key.unwrap_or_default(),
|
||||||
|
peer_username,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ConversationKind::Group {
|
||||||
|
name: group_name.unwrap_or_default(),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let member_keys: Vec<Vec<u8>> = member_keys_blob
|
||||||
|
.and_then(|b| bincode::deserialize(&b).ok())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
Ok(Conversation {
|
||||||
|
id: id.clone(),
|
||||||
|
kind,
|
||||||
|
display_name,
|
||||||
|
mls_group_blob,
|
||||||
|
keystore_blob,
|
||||||
|
member_keys,
|
||||||
|
unread_count,
|
||||||
|
last_activity_ms,
|
||||||
|
created_at_ms,
|
||||||
|
is_hybrid: is_hybrid_int != 0,
|
||||||
|
last_seen_seq: last_seen_seq as u64,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convert a byte slice into a fixed 16-byte array; `None` on length mismatch.
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
    <[u8; 16]>::try_from(v).ok()
}
|
||||||
|
|
||||||
|
fn row_to_message(
|
||||||
|
conv_id: &ConversationId,
|
||||||
|
row: &rusqlite::Row<'_>,
|
||||||
|
) -> rusqlite::Result<StoredMessage> {
|
||||||
|
let message_id: Option<Vec<u8>> = row.get(0)?;
|
||||||
|
let sender_key: Vec<u8> = row.get(1)?;
|
||||||
|
let sender_name: Option<String> = row.get(2)?;
|
||||||
|
let body: String = row.get(3)?;
|
||||||
|
let msg_type: String = row.get(4)?;
|
||||||
|
let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
|
||||||
|
let timestamp_ms: u64 = row.get(6)?;
|
||||||
|
let is_outgoing: i32 = row.get(7)?;
|
||||||
|
|
||||||
|
Ok(StoredMessage {
|
||||||
|
conversation_id: conv_id.clone(),
|
||||||
|
message_id: message_id.as_deref().and_then(to_16),
|
||||||
|
sender_key,
|
||||||
|
sender_name,
|
||||||
|
body,
|
||||||
|
msg_type,
|
||||||
|
ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
|
||||||
|
timestamp_ms,
|
||||||
|
is_outgoing: is_outgoing != 0,
|
||||||
|
})
|
||||||
|
}
|
||||||
29
crates/quicproquo-sdk/src/error.rs
Normal file
29
crates/quicproquo-sdk/src/error.rs
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
//! SDK error types.
|
||||||
|
|
||||||
|
/// Errors returned by SDK operations.
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum SdkError {
|
||||||
|
#[error("not connected to server")]
|
||||||
|
NotConnected,
|
||||||
|
|
||||||
|
#[error("not authenticated — call login() first")]
|
||||||
|
NotAuthenticated,
|
||||||
|
|
||||||
|
#[error("authentication failed: {0}")]
|
||||||
|
AuthFailed(String),
|
||||||
|
|
||||||
|
#[error("conversation not found: {0}")]
|
||||||
|
ConversationNotFound(String),
|
||||||
|
|
||||||
|
#[error("crypto error: {0}")]
|
||||||
|
Crypto(String),
|
||||||
|
|
||||||
|
#[error("RPC error: {0}")]
|
||||||
|
Rpc(#[from] quicproquo_rpc::error::RpcError),
|
||||||
|
|
||||||
|
#[error("storage error: {0}")]
|
||||||
|
Storage(String),
|
||||||
|
|
||||||
|
#[error("{0}")]
|
||||||
|
Other(#[from] anyhow::Error),
|
||||||
|
}
|
||||||
56
crates/quicproquo-sdk/src/events.rs
Normal file
56
crates/quicproquo-sdk/src/events.rs
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
//! Client event system — real-time notifications from the SDK.
|
||||||
|
|
||||||
|
/// Events emitted by the SDK to the UI layer.
#[derive(Debug, Clone)]
pub enum ClientEvent {
    /// Successfully connected to the server.
    Connected,

    /// Disconnected from the server.
    Disconnected {
        /// Human-readable reason for the disconnect.
        reason: String,
    },

    /// Authentication succeeded.
    Authenticated {
        /// Username of the authenticated account.
        username: String,
    },

    /// A new message was received in a conversation.
    MessageReceived {
        /// Conversation the message arrived in.
        conversation_id: [u8; 16],
        /// Sender identity key bytes.
        sender_key: Vec<u8>,
        /// Sender display name, when known.
        sender_name: Option<String>,
        /// Message text.
        body: String,
        /// Message timestamp (ms).
        timestamp_ms: u64,
    },

    /// A message was sent successfully.
    MessageSent {
        /// Conversation the message was sent in.
        conversation_id: [u8; 16],
        /// Delivery sequence number.
        seq: u64,
    },

    /// A new conversation was created or discovered.
    ConversationCreated {
        /// Identifier of the new conversation.
        conversation_id: [u8; 16],
        /// Name for display in conversation lists.
        display_name: String,
    },

    /// A member was added to a group conversation.
    MemberAdded {
        /// Affected conversation.
        conversation_id: [u8; 16],
        /// Identity key of the added member.
        member_key: Vec<u8>,
    },

    /// A member was removed from a group conversation.
    MemberRemoved {
        /// Affected conversation.
        conversation_id: [u8; 16],
        /// Identity key of the removed member.
        member_key: Vec<u8>,
    },

    /// Server-push event received.
    PushEvent {
        /// Numeric event discriminator.
        event_type: u16,
        /// Raw event payload bytes.
        payload: Vec<u8>,
    },

    /// An error occurred in the background.
    Error {
        /// Description of the failure.
        message: String,
    },
}
|
||||||
10
crates/quicproquo-sdk/src/lib.rs
Normal file
10
crates/quicproquo-sdk/src/lib.rs
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
//! Client SDK for quicproquo v2.
|
||||||
|
//!
|
||||||
|
//! Provides `QpqClient` — a single entry point for connecting, authenticating,
|
||||||
|
//! sending/receiving messages, and subscribing to real-time events.
|
||||||
|
|
||||||
|
pub mod client;
|
||||||
|
pub mod config;
|
||||||
|
pub mod conversation;
|
||||||
|
pub mod events;
|
||||||
|
pub mod error;
|
||||||
72
crates/quicproquo-server/src/domain/auth.rs
Normal file
72
crates/quicproquo-server/src/domain/auth.rs
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
//! Authentication domain logic — OPAQUE registration and login.
|
||||||
|
//!
|
||||||
|
//! This module contains the pure business logic for OPAQUE auth,
|
||||||
|
//! extracted from `node_service/auth_ops.rs`. It operates on domain
|
||||||
|
//! types and the `Store` trait, with no dependency on Cap'n Proto or Protobuf.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use opaque_ke::ServerSetup;
|
||||||
|
use quicproquo_core::opaque_auth::OpaqueSuite;
|
||||||
|
|
||||||
|
use crate::auth::{AuthConfig, PendingLogin, SessionInfo};
|
||||||
|
use crate::storage::{Store, StorageError};
|
||||||
|
|
||||||
|
use super::types::*;
|
||||||
|
|
||||||
|
/// Shared state needed by auth operations.
|
||||||
|
pub struct AuthService {
|
||||||
|
pub store: Arc<dyn Store>,
|
||||||
|
pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
|
||||||
|
pub pending_logins: Arc<DashMap<String, PendingLogin>>,
|
||||||
|
pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
|
||||||
|
pub auth_cfg: Arc<AuthConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AuthService {
|
||||||
|
/// Validate a session token and return the caller's auth context.
|
||||||
|
pub fn validate_session(&self, token: &[u8]) -> Option<CallerAuth> {
|
||||||
|
let info = self.sessions.get(token)?;
|
||||||
|
if info.expires_at <= crate::auth::current_timestamp() {
|
||||||
|
self.sessions.remove(token);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some(CallerAuth {
|
||||||
|
identity_key: info.identity_key.clone(),
|
||||||
|
token: token.to_vec(),
|
||||||
|
device_id: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Start OPAQUE registration.
|
||||||
|
pub fn register_start(&self, req: RegisterStartReq) -> Result<RegisterStartResp, StorageError> {
|
||||||
|
use opaque_ke::ServerRegistration;
|
||||||
|
|
||||||
|
let result = ServerRegistration::<OpaqueSuite>::start(
|
||||||
|
&self.opaque_setup,
|
||||||
|
opaque_ke::RegistrationRequest::deserialize(&req.request_bytes)
|
||||||
|
.map_err(|e| StorageError::Io(format!("bad registration request: {e}")))?,
|
||||||
|
req.username.as_bytes(),
|
||||||
|
)
|
||||||
|
.map_err(|e| StorageError::Io(format!("OPAQUE register start: {e}")))?;
|
||||||
|
|
||||||
|
let response_bytes = result.message.serialize().to_vec();
|
||||||
|
Ok(RegisterStartResp { response_bytes })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Finish OPAQUE registration — persist user record and identity key.
|
||||||
|
pub fn register_finish(&self, req: RegisterFinishReq) -> Result<RegisterFinishResp, StorageError> {
|
||||||
|
let upload = opaque_ke::RegistrationUpload::<OpaqueSuite>::deserialize(&req.upload_bytes)
|
||||||
|
.map_err(|e| StorageError::Io(format!("bad registration upload: {e}")))?;
|
||||||
|
|
||||||
|
let record = opaque_ke::ServerRegistration::<OpaqueSuite>::finish(upload);
|
||||||
|
let serialized = record.serialize().to_vec();
|
||||||
|
|
||||||
|
self.store.store_user_record(&req.username, serialized)?;
|
||||||
|
self.store
|
||||||
|
.store_user_identity_key(&req.username, req.identity_key)?;
|
||||||
|
|
||||||
|
Ok(RegisterFinishResp { success: true })
|
||||||
|
}
|
||||||
|
}
|
||||||
110
crates/quicproquo-server/src/domain/delivery.rs
Normal file
110
crates/quicproquo-server/src/domain/delivery.rs
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
//! Delivery domain logic — enqueue, fetch, peek, ack.
|
||||||
|
//!
|
||||||
|
//! Pure business logic operating on `Store` trait and domain types.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use tokio::sync::Notify;
|
||||||
|
|
||||||
|
use crate::storage::Store;
|
||||||
|
|
||||||
|
use super::types::*;
|
||||||
|
|
||||||
|
/// Shared state needed by delivery operations.
|
||||||
|
pub struct DeliveryService {
|
||||||
|
pub store: Arc<dyn Store>,
|
||||||
|
pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DeliveryService {
|
||||||
|
/// Enqueue a payload for delivery.
|
||||||
|
pub fn enqueue(&self, req: EnqueueReq) -> Result<EnqueueResp, crate::storage::StorageError> {
|
||||||
|
let ttl = if req.ttl_secs > 0 {
|
||||||
|
Some(req.ttl_secs)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let seq = self.store.enqueue(
|
||||||
|
&req.recipient_key,
|
||||||
|
&req.channel_id,
|
||||||
|
req.payload,
|
||||||
|
ttl,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// Wake any long-polling waiter for this recipient.
|
||||||
|
if let Some(notify) = self.waiters.get(&req.recipient_key) {
|
||||||
|
notify.notify_one();
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(EnqueueResp {
|
||||||
|
seq,
|
||||||
|
delivery_proof: Vec::new(), // TODO: sign in Phase 2
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch and drain queued messages.
|
||||||
|
pub fn fetch(&self, req: FetchReq) -> Result<FetchResp, crate::storage::StorageError> {
|
||||||
|
let messages = if req.limit > 0 {
|
||||||
|
self.store
|
||||||
|
.fetch_limited(&req.recipient_key, &req.channel_id, req.limit as usize)?
|
||||||
|
} else {
|
||||||
|
self.store.fetch(&req.recipient_key, &req.channel_id)?
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(FetchResp {
|
||||||
|
payloads: messages
|
||||||
|
.into_iter()
|
||||||
|
.map(|(seq, data)| Envelope { seq, data })
|
||||||
|
.collect(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Peek at messages without removing them.
|
||||||
|
pub fn peek(&self, req: PeekReq) -> Result<PeekResp, crate::storage::StorageError> {
|
||||||
|
let messages = self.store.peek(
|
||||||
|
&req.recipient_key,
|
||||||
|
&req.channel_id,
|
||||||
|
if req.limit > 0 { req.limit as usize } else { 0 },
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(PeekResp {
|
||||||
|
payloads: messages
|
||||||
|
.into_iter()
|
||||||
|
.map(|(seq, data)| Envelope { seq, data })
|
||||||
|
.collect(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Acknowledge messages up to a sequence number.
|
||||||
|
pub fn ack(&self, req: AckReq) -> Result<(), crate::storage::StorageError> {
|
||||||
|
self.store
|
||||||
|
.ack(&req.recipient_key, &req.channel_id, req.seq_up_to)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Batch enqueue to multiple recipients.
|
||||||
|
pub fn batch_enqueue(
|
||||||
|
&self,
|
||||||
|
req: BatchEnqueueReq,
|
||||||
|
) -> Result<BatchEnqueueResp, crate::storage::StorageError> {
|
||||||
|
let ttl = if req.ttl_secs > 0 {
|
||||||
|
Some(req.ttl_secs)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut seqs = Vec::with_capacity(req.recipient_keys.len());
|
||||||
|
for rk in &req.recipient_keys {
|
||||||
|
let seq = self.store.enqueue(rk, &req.channel_id, req.payload.clone(), ttl)?;
|
||||||
|
seqs.push(seq);
|
||||||
|
|
||||||
|
if let Some(notify) = self.waiters.get(rk) {
|
||||||
|
notify.notify_one();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(BatchEnqueueResp { seqs })
|
||||||
|
}
|
||||||
|
}
|
||||||
10
crates/quicproquo-server/src/domain/mod.rs
Normal file
10
crates/quicproquo-server/src/domain/mod.rs
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
//! Domain types and service logic — protocol-agnostic.
|
||||||
|
//!
|
||||||
|
//! These types define the server's business logic independently of any
|
||||||
|
//! serialization format (Cap'n Proto, Protobuf). RPC handlers translate
|
||||||
|
//! wire-format messages into these types, call service functions, and
|
||||||
|
//! translate the results back.
|
||||||
|
|
||||||
|
pub mod types;
|
||||||
|
pub mod auth;
|
||||||
|
pub mod delivery;
|
||||||
260
crates/quicproquo-server/src/domain/types.rs
Normal file
260
crates/quicproquo-server/src/domain/types.rs
Normal file
@@ -0,0 +1,260 @@
|
|||||||
|
//! Plain Rust request/response types for server domain logic.
|
||||||
|
//!
|
||||||
|
//! No proto, no capnp — just Rust structs.
|
||||||
|
|
||||||
|
// ── Auth ─────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Caller authentication context (resolved from a session token).
#[derive(Debug, Clone)]
pub struct CallerAuth {
    /// Ed25519 identity key of the authenticated caller (32 bytes).
    pub identity_key: Vec<u8>,
    /// Session token bytes.
    pub token: Vec<u8>,
    /// Device ID (optional, for auditing).
    pub device_id: Option<Vec<u8>>,
}

/// Request for the first OPAQUE registration round-trip.
pub struct RegisterStartReq {
    /// Account name being registered.
    pub username: String,
    /// Serialized OPAQUE `RegistrationRequest` from the client.
    pub request_bytes: Vec<u8>,
}

/// Server reply to `RegisterStartReq`.
pub struct RegisterStartResp {
    /// Serialized OPAQUE registration response.
    pub response_bytes: Vec<u8>,
}

/// Request completing OPAQUE registration.
pub struct RegisterFinishReq {
    /// Account name being registered.
    pub username: String,
    /// Serialized OPAQUE `RegistrationUpload` from the client.
    pub upload_bytes: Vec<u8>,
    /// Client identity key persisted alongside the OPAQUE record.
    pub identity_key: Vec<u8>,
}

/// Server reply to `RegisterFinishReq`.
pub struct RegisterFinishResp {
    /// True when the registration was persisted.
    pub success: bool,
}

/// Request for the first OPAQUE login round-trip.
pub struct LoginStartReq {
    /// Account name logging in.
    pub username: String,
    /// Serialized OPAQUE credential request from the client.
    pub request_bytes: Vec<u8>,
}

/// Server reply to `LoginStartReq`.
pub struct LoginStartResp {
    /// Serialized OPAQUE credential response.
    pub response_bytes: Vec<u8>,
}

/// Request completing OPAQUE login.
pub struct LoginFinishReq {
    /// Account name logging in.
    pub username: String,
    /// Serialized OPAQUE credential finalization from the client.
    pub finalization_bytes: Vec<u8>,
    /// Client identity key associated with this login.
    pub identity_key: Vec<u8>,
}

/// Server reply to `LoginFinishReq`.
pub struct LoginFinishResp {
    /// Opaque session token for subsequent authenticated calls.
    pub session_token: Vec<u8>,
}
|
||||||
|
|
||||||
|
// ── Delivery ─────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// An envelope pairing a sequence number with an opaque payload.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct Envelope {
|
||||||
|
pub seq: u64,
|
||||||
|
pub data: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct EnqueueReq {
|
||||||
|
pub recipient_key: Vec<u8>,
|
||||||
|
pub payload: Vec<u8>,
|
||||||
|
pub channel_id: Vec<u8>,
|
||||||
|
pub ttl_secs: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct EnqueueResp {
|
||||||
|
pub seq: u64,
|
||||||
|
pub delivery_proof: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchReq {
|
||||||
|
pub recipient_key: Vec<u8>,
|
||||||
|
pub channel_id: Vec<u8>,
|
||||||
|
pub limit: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchResp {
|
||||||
|
pub payloads: Vec<Envelope>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct PeekReq {
|
||||||
|
pub recipient_key: Vec<u8>,
|
||||||
|
pub channel_id: Vec<u8>,
|
||||||
|
pub limit: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct PeekResp {
|
||||||
|
pub payloads: Vec<Envelope>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct AckReq {
|
||||||
|
pub recipient_key: Vec<u8>,
|
||||||
|
pub channel_id: Vec<u8>,
|
||||||
|
pub seq_up_to: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct BatchEnqueueReq {
|
||||||
|
pub recipient_keys: Vec<Vec<u8>>,
|
||||||
|
pub payload: Vec<u8>,
|
||||||
|
pub channel_id: Vec<u8>,
|
||||||
|
pub ttl_secs: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct BatchEnqueueResp {
|
||||||
|
pub seqs: Vec<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Keys ─────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub struct UploadKeyPackageReq {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
pub package: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct UploadKeyPackageResp {
|
||||||
|
pub fingerprint: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchKeyPackageReq {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchKeyPackageResp {
|
||||||
|
pub package: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct UploadHybridKeyReq {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
pub hybrid_public_key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchHybridKeyReq {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchHybridKeyResp {
|
||||||
|
pub hybrid_public_key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchHybridKeysReq {
|
||||||
|
pub identity_keys: Vec<Vec<u8>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FetchHybridKeysResp {
|
||||||
|
pub keys: Vec<Vec<u8>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Channel ──────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub struct CreateChannelReq {
|
||||||
|
pub peer_key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct CreateChannelResp {
|
||||||
|
pub channel_id: Vec<u8>,
|
||||||
|
pub was_new: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── User ─────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub struct ResolveUserReq {
|
||||||
|
pub username: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ResolveUserResp {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
pub inclusion_proof: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ResolveIdentityReq {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ResolveIdentityResp {
|
||||||
|
pub username: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Blob ─────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub struct UploadBlobReq {
|
||||||
|
pub blob_hash: Vec<u8>,
|
||||||
|
pub chunk: Vec<u8>,
|
||||||
|
pub offset: u64,
|
||||||
|
pub total_size: u64,
|
||||||
|
pub mime_type: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct UploadBlobResp {
|
||||||
|
pub blob_id: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct DownloadBlobReq {
|
||||||
|
pub blob_id: Vec<u8>,
|
||||||
|
pub offset: u64,
|
||||||
|
pub length: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct DownloadBlobResp {
|
||||||
|
pub chunk: Vec<u8>,
|
||||||
|
pub total_size: u64,
|
||||||
|
pub mime_type: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Device ───────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub struct RegisterDeviceReq {
|
||||||
|
pub device_id: Vec<u8>,
|
||||||
|
pub device_name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct RegisterDeviceResp {
|
||||||
|
pub success: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct DeviceInfo {
|
||||||
|
pub device_id: Vec<u8>,
|
||||||
|
pub device_name: String,
|
||||||
|
pub registered_at: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ListDevicesResp {
|
||||||
|
pub devices: Vec<DeviceInfo>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct RevokeDeviceReq {
|
||||||
|
pub device_id: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct RevokeDeviceResp {
|
||||||
|
pub success: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── P2P ──────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
pub struct PublishEndpointReq {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
pub node_addr: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ResolveEndpointReq {
|
||||||
|
pub identity_key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ResolveEndpointResp {
|
||||||
|
pub node_addr: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct HealthResp {
|
||||||
|
pub status: String,
|
||||||
|
}
|
||||||
@@ -17,6 +17,7 @@ use tokio::task::LocalSet;
|
|||||||
|
|
||||||
mod auth;
|
mod auth;
|
||||||
mod config;
|
mod config;
|
||||||
|
pub mod domain;
|
||||||
mod error_codes;
|
mod error_codes;
|
||||||
mod federation;
|
mod federation;
|
||||||
pub mod hooks;
|
pub mod hooks;
|
||||||
|
|||||||
49
justfile
Normal file
49
justfile
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# quicproquo v2 — build commands
|
||||||
|
|
||||||
|
# Default: build all workspace crates
|
||||||
|
build:
|
||||||
|
cargo build --workspace
|
||||||
|
|
||||||
|
# Run all tests
|
||||||
|
test:
|
||||||
|
cargo test --workspace
|
||||||
|
|
||||||
|
# Run core crypto tests only
|
||||||
|
test-core:
|
||||||
|
cargo test -p quicproquo-core
|
||||||
|
|
||||||
|
# Build proto crate (triggers prost codegen)
|
||||||
|
proto:
|
||||||
|
cargo build -p quicproquo-proto
|
||||||
|
|
||||||
|
# Build RPC framework
|
||||||
|
rpc:
|
||||||
|
cargo build -p quicproquo-rpc
|
||||||
|
|
||||||
|
# Build SDK
|
||||||
|
sdk:
|
||||||
|
cargo build -p quicproquo-sdk
|
||||||
|
|
||||||
|
# Build server
|
||||||
|
server:
|
||||||
|
cargo build -p quicproquo-server
|
||||||
|
|
||||||
|
# Build client
|
||||||
|
client:
|
||||||
|
cargo build -p quicproquo-client
|
||||||
|
|
||||||
|
# Check all with clippy
|
||||||
|
lint:
|
||||||
|
cargo clippy --workspace -- -D warnings
|
||||||
|
|
||||||
|
# Format check
|
||||||
|
fmt:
|
||||||
|
cargo fmt --all -- --check
|
||||||
|
|
||||||
|
# Format fix
|
||||||
|
fmt-fix:
|
||||||
|
cargo fmt --all
|
||||||
|
|
||||||
|
# Clean build artifacts
|
||||||
|
clean:
|
||||||
|
cargo clean
|
||||||
43
proto/qpq/v1/auth.proto
Normal file
43
proto/qpq/v1/auth.proto
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// OPAQUE registration + login (4 methods).
|
||||||
|
// Method IDs: 100-103.
|
||||||
|
|
||||||
|
message OpaqueRegisterStartRequest {
|
||||||
|
string username = 1;
|
||||||
|
bytes request = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpaqueRegisterStartResponse {
|
||||||
|
bytes response = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpaqueRegisterFinishRequest {
|
||||||
|
string username = 1;
|
||||||
|
bytes upload = 2;
|
||||||
|
bytes identity_key = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpaqueRegisterFinishResponse {
|
||||||
|
bool success = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpaqueLoginStartRequest {
|
||||||
|
string username = 1;
|
||||||
|
bytes request = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpaqueLoginStartResponse {
|
||||||
|
bytes response = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpaqueLoginFinishRequest {
|
||||||
|
string username = 1;
|
||||||
|
bytes finalization = 2;
|
||||||
|
bytes identity_key = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpaqueLoginFinishResponse {
|
||||||
|
bytes session_token = 1;
|
||||||
|
}
|
||||||
29
proto/qpq/v1/blob.proto
Normal file
29
proto/qpq/v1/blob.proto
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Blob upload/download (2 methods).
|
||||||
|
// Method IDs: 600-601.
|
||||||
|
|
||||||
|
message UploadBlobRequest {
|
||||||
|
bytes blob_hash = 1;
|
||||||
|
bytes chunk = 2;
|
||||||
|
uint64 offset = 3;
|
||||||
|
uint64 total_size = 4;
|
||||||
|
string mime_type = 5;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UploadBlobResponse {
|
||||||
|
bytes blob_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message DownloadBlobRequest {
|
||||||
|
bytes blob_id = 1;
|
||||||
|
uint64 offset = 2;
|
||||||
|
uint32 length = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message DownloadBlobResponse {
|
||||||
|
bytes chunk = 1;
|
||||||
|
uint64 total_size = 2;
|
||||||
|
string mime_type = 3;
|
||||||
|
}
|
||||||
14
proto/qpq/v1/channel.proto
Normal file
14
proto/qpq/v1/channel.proto
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Channel create (1 method).
|
||||||
|
// Method ID: 400.
|
||||||
|
|
||||||
|
message CreateChannelRequest {
|
||||||
|
bytes peer_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CreateChannelResponse {
|
||||||
|
bytes channel_id = 1;
|
||||||
|
bool was_new = 2;
|
||||||
|
}
|
||||||
19
proto/qpq/v1/common.proto
Normal file
19
proto/qpq/v1/common.proto
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Common types shared across services.
|
||||||
|
|
||||||
|
// Auth context included in authenticated RPC requests.
|
||||||
|
// In v2, this is carried as QUIC connection-level state (session token),
|
||||||
|
// not per-message. Included here for federation and internal use.
|
||||||
|
message Auth {
|
||||||
|
bytes access_token = 1;
|
||||||
|
bytes device_id = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Account deletion.
|
||||||
|
message DeleteAccountRequest {}
|
||||||
|
|
||||||
|
message DeleteAccountResponse {
|
||||||
|
bool success = 1;
|
||||||
|
}
|
||||||
72
proto/qpq/v1/delivery.proto
Normal file
72
proto/qpq/v1/delivery.proto
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Delivery service: enqueue, fetch, peek, ack, batch (6 methods).
|
||||||
|
// Method IDs: 200-205.
|
||||||
|
|
||||||
|
message Envelope {
|
||||||
|
uint64 seq = 1;
|
||||||
|
bytes data = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EnqueueRequest {
|
||||||
|
bytes recipient_key = 1;
|
||||||
|
bytes payload = 2;
|
||||||
|
bytes channel_id = 3;
|
||||||
|
uint32 ttl_secs = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EnqueueResponse {
|
||||||
|
uint64 seq = 1;
|
||||||
|
bytes delivery_proof = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchRequest {
|
||||||
|
bytes recipient_key = 1;
|
||||||
|
bytes channel_id = 2;
|
||||||
|
uint32 limit = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchResponse {
|
||||||
|
repeated Envelope payloads = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchWaitRequest {
|
||||||
|
bytes recipient_key = 1;
|
||||||
|
bytes channel_id = 2;
|
||||||
|
uint64 timeout_ms = 3;
|
||||||
|
uint32 limit = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchWaitResponse {
|
||||||
|
repeated Envelope payloads = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message PeekRequest {
|
||||||
|
bytes recipient_key = 1;
|
||||||
|
bytes channel_id = 2;
|
||||||
|
uint32 limit = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message PeekResponse {
|
||||||
|
repeated Envelope payloads = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AckRequest {
|
||||||
|
bytes recipient_key = 1;
|
||||||
|
bytes channel_id = 2;
|
||||||
|
uint64 seq_up_to = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AckResponse {}
|
||||||
|
|
||||||
|
message BatchEnqueueRequest {
|
||||||
|
repeated bytes recipient_keys = 1;
|
||||||
|
bytes payload = 2;
|
||||||
|
bytes channel_id = 3;
|
||||||
|
uint32 ttl_secs = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message BatchEnqueueResponse {
|
||||||
|
repeated uint64 seqs = 1;
|
||||||
|
}
|
||||||
34
proto/qpq/v1/device.proto
Normal file
34
proto/qpq/v1/device.proto
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Device register/list/revoke (3 methods).
|
||||||
|
// Method IDs: 700-702.
|
||||||
|
|
||||||
|
message RegisterDeviceRequest {
|
||||||
|
bytes device_id = 1;
|
||||||
|
string device_name = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RegisterDeviceResponse {
|
||||||
|
bool success = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListDevicesRequest {}
|
||||||
|
|
||||||
|
message ListDevicesResponse {
|
||||||
|
repeated Device devices = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Device {
|
||||||
|
bytes device_id = 1;
|
||||||
|
string device_name = 2;
|
||||||
|
uint64 registered_at = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RevokeDeviceRequest {
|
||||||
|
bytes device_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RevokeDeviceResponse {
|
||||||
|
bool success = 1;
|
||||||
|
}
|
||||||
65
proto/qpq/v1/federation.proto
Normal file
65
proto/qpq/v1/federation.proto
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Federation relay + proxy (6 methods).
|
||||||
|
// Method IDs: 900-905.
|
||||||
|
|
||||||
|
message FederationAuth {
|
||||||
|
string origin = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RelayEnqueueRequest {
|
||||||
|
bytes recipient_key = 1;
|
||||||
|
bytes payload = 2;
|
||||||
|
bytes channel_id = 3;
|
||||||
|
FederationAuth auth = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RelayEnqueueResponse {
|
||||||
|
uint64 seq = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RelayBatchEnqueueRequest {
|
||||||
|
repeated bytes recipient_keys = 1;
|
||||||
|
bytes payload = 2;
|
||||||
|
bytes channel_id = 3;
|
||||||
|
FederationAuth auth = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RelayBatchEnqueueResponse {
|
||||||
|
repeated uint64 seqs = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProxyFetchKeyPackageRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
FederationAuth auth = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProxyFetchKeyPackageResponse {
|
||||||
|
bytes package = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProxyFetchHybridKeyRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
FederationAuth auth = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProxyFetchHybridKeyResponse {
|
||||||
|
bytes hybrid_public_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProxyResolveUserRequest {
|
||||||
|
string username = 1;
|
||||||
|
FederationAuth auth = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProxyResolveUserResponse {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FederationHealthRequest {}
|
||||||
|
|
||||||
|
message FederationHealthResponse {
|
||||||
|
string status = 1;
|
||||||
|
string server_domain = 2;
|
||||||
|
}
|
||||||
45
proto/qpq/v1/keys.proto
Normal file
45
proto/qpq/v1/keys.proto
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Key package + hybrid key CRUD (5 methods).
|
||||||
|
// Method IDs: 300-304.
|
||||||
|
|
||||||
|
message UploadKeyPackageRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
bytes package = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UploadKeyPackageResponse {
|
||||||
|
bytes fingerprint = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchKeyPackageRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchKeyPackageResponse {
|
||||||
|
bytes package = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UploadHybridKeyRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
bytes hybrid_public_key = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UploadHybridKeyResponse {}
|
||||||
|
|
||||||
|
message FetchHybridKeyRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchHybridKeyResponse {
|
||||||
|
bytes hybrid_public_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchHybridKeysRequest {
|
||||||
|
repeated bytes identity_keys = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message FetchHybridKeysResponse {
|
||||||
|
repeated bytes keys = 1;
|
||||||
|
}
|
||||||
26
proto/qpq/v1/p2p.proto
Normal file
26
proto/qpq/v1/p2p.proto
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// P2P endpoint publish/resolve + health (3 methods).
|
||||||
|
// Method IDs: 800-802.
|
||||||
|
|
||||||
|
message PublishEndpointRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
bytes node_addr = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message PublishEndpointResponse {}
|
||||||
|
|
||||||
|
message ResolveEndpointRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ResolveEndpointResponse {
|
||||||
|
bytes node_addr = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message HealthRequest {}
|
||||||
|
|
||||||
|
message HealthResponse {
|
||||||
|
string status = 1;
|
||||||
|
}
|
||||||
49
proto/qpq/v1/push.proto
Normal file
49
proto/qpq/v1/push.proto
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// Server-push event types (sent on QUIC uni-streams).
|
||||||
|
// Event type IDs: 1000+.
|
||||||
|
|
||||||
|
// Wrapper for a push event.
|
||||||
|
message PushEvent {
|
||||||
|
oneof event {
|
||||||
|
NewMessage new_message = 1;
|
||||||
|
TypingIndicator typing = 2;
|
||||||
|
PresenceUpdate presence = 3;
|
||||||
|
GroupMembershipChange membership = 4;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message NewMessage {
|
||||||
|
bytes channel_id = 1;
|
||||||
|
bytes sender_key = 2;
|
||||||
|
uint64 seq = 3;
|
||||||
|
bytes payload = 4;
|
||||||
|
uint64 timestamp_ms = 5;
|
||||||
|
}
|
||||||
|
|
||||||
|
message TypingIndicator {
|
||||||
|
bytes channel_id = 1;
|
||||||
|
bytes sender_key = 2;
|
||||||
|
bool is_typing = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message PresenceUpdate {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
bool online = 2;
|
||||||
|
uint64 last_seen_ms = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GroupMembershipChange {
|
||||||
|
bytes channel_id = 1;
|
||||||
|
bytes actor_key = 2;
|
||||||
|
bytes target_key = 3;
|
||||||
|
MembershipAction action = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum MembershipAction {
|
||||||
|
MEMBERSHIP_ACTION_UNSPECIFIED = 0;
|
||||||
|
MEMBERSHIP_ACTION_ADDED = 1;
|
||||||
|
MEMBERSHIP_ACTION_REMOVED = 2;
|
||||||
|
MEMBERSHIP_ACTION_LEFT = 3;
|
||||||
|
}
|
||||||
22
proto/qpq/v1/user.proto
Normal file
22
proto/qpq/v1/user.proto
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package qpq.v1;
|
||||||
|
|
||||||
|
// User resolve + identity (2 methods).
|
||||||
|
// Method IDs: 500-501.
|
||||||
|
|
||||||
|
message ResolveUserRequest {
|
||||||
|
string username = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ResolveUserResponse {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
bytes inclusion_proof = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ResolveIdentityRequest {
|
||||||
|
bytes identity_key = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ResolveIdentityResponse {
|
||||||
|
string username = 1;
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user