feat: v2 Phase 1 — foundation, proto schemas, RPC framework, SDK skeleton

New workspace structure with 9 crates. Adds:

- proto/qpq/v1/*.proto: 11 protobuf schemas covering all 33 RPC methods
- quicproquo-proto: dual codegen (capnp legacy + prost v2)
- quicproquo-rpc: QUIC RPC framework (framing, server, client, middleware)
- quicproquo-sdk: client SDK (QpqClient, events, conversation store)
- quicproquo-server/domain/: protocol-agnostic domain types and services
- justfile: build commands

Wire format: [method_id:u16][req_id:u32][len:u32][protobuf] per QUIC stream.
All 151 existing tests pass. Backward compatible with v1 capnp code.
This commit is contained in:
2026-03-04 12:02:07 +01:00
parent 394199b19b
commit a5864127d1
37 changed files with 3115 additions and 2778 deletions

View File

@@ -1,22 +1,28 @@
[package]
name = "quicproquo-proto"
version = "0.1.0"
edition = "2021"
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicproquo. No crypto, no I/O."
license = "MIT"
name = "quicproquo-proto"
version = "0.2.0"
edition = "2021"
description = "Protocol types for quicproquo — v1 Cap'n Proto (legacy) + v2 Protobuf (prost)"
# build.rs invokes capnpc to generate Rust source from .capnp schemas.
build = "build.rs"
build = "build.rs"
[dependencies]
# v1 legacy (Cap'n Proto) — used by existing server/client until rewrite
capnp = { workspace = true }
# v2 (Protobuf via prost) — new RPC types
prost = { workspace = true }
prost-types = { workspace = true }
bytes = { workspace = true }
[build-dependencies]
capnpc = { workspace = true }
prost-build = { workspace = true }
protobuf-src = "2"
[lints.rust]
unsafe_code = "warn"
[lints.clippy]
# Generated Cap'n Proto code uses patterns that trigger clippy lints.
unwrap_used = "allow"
[build-dependencies]
capnpc = { workspace = true }

View File

@@ -1,51 +1,30 @@
//! Build script for quicproquo-proto.
//!
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
//! located in the workspace-root `schemas/` directory.
//!
//! # Prerequisites
//!
//! The `capnp` CLI must be installed and on `PATH`.
//!
//! Debian/Ubuntu: apt-get install capnproto
//! macOS: brew install capnp
//! Docker: see docker/Dockerfile
//! Runs two code generators:
//! 1. Cap'n Proto (v1 legacy) — from `schemas/*.capnp`
//! 2. Protobuf/prost (v2) — from `proto/qpq/v1/*.proto`
use std::{env, path::PathBuf};
fn main() {
let manifest_dir =
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
// Vendor protoc from protobuf-src so the build doesn't require system protoc.
std::env::set_var("PROTOC", protobuf_src::protoc());
// Workspace root is two levels above this crate (quicproquo/crates/quicproquo-proto).
let manifest_dir =
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR"));
let workspace_root = manifest_dir
.join("../..")
.canonicalize()
.expect("could not canonicalize workspace root path");
.expect("canonicalize workspace root");
// ── v1: Cap'n Proto codegen ──────────────────────────────────────────────
let schemas_dir = workspace_root.join("schemas");
// Re-run this build script whenever any schema file changes.
println!(
"cargo:rerun-if-changed={}",
schemas_dir.join("auth.capnp").display()
);
println!(
"cargo:rerun-if-changed={}",
schemas_dir.join("delivery.capnp").display()
);
println!(
"cargo:rerun-if-changed={}",
schemas_dir.join("node.capnp").display()
);
println!(
"cargo:rerun-if-changed={}",
schemas_dir.join("federation.capnp").display()
);
for schema in &["auth.capnp", "delivery.capnp", "node.capnp", "federation.capnp"] {
println!("cargo:rerun-if-changed={}", schemas_dir.join(schema).display());
}
capnpc::CompilerCommand::new()
// Treat `schemas/` as the include root so that inter-schema imports
// resolve correctly.
.src_prefix(&schemas_dir)
.file(schemas_dir.join("auth.capnp"))
.file(schemas_dir.join("delivery.capnp"))
@@ -56,4 +35,32 @@ fn main() {
"Cap'n Proto schema compilation failed. \
Is `capnp` installed? (apt-get install capnproto / brew install capnp)",
);
// ── v2: Protobuf/prost codegen ───────────────────────────────────────────
let proto_dir = workspace_root.join("proto");
let proto_files = [
"qpq/v1/common.proto",
"qpq/v1/auth.proto",
"qpq/v1/delivery.proto",
"qpq/v1/keys.proto",
"qpq/v1/channel.proto",
"qpq/v1/user.proto",
"qpq/v1/blob.proto",
"qpq/v1/device.proto",
"qpq/v1/p2p.proto",
"qpq/v1/federation.proto",
"qpq/v1/push.proto",
];
let full_paths: Vec<PathBuf> = proto_files.iter().map(|f| proto_dir.join(f)).collect();
for path in &full_paths {
println!("cargo:rerun-if-changed={}", path.display());
}
prost_build::Config::new()
.out_dir(PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR")))
.compile_protos(&full_paths, &[&proto_dir])
.expect("prost compile_protos failed");
}

View File

@@ -1,56 +1,38 @@
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicproquo.
//! Protocol types for quicproquo.
//!
//! This crate contains both:
//! - **v1 (legacy)**: Cap'n Proto generated types from `schemas/*.capnp`
//! - **v2**: Protobuf generated types from `proto/qpq/v1/*.proto`
//!
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
#![allow(unused_parens)]
//! # Design constraints
//!
//! This crate is intentionally restricted:
//! - **No crypto** — key material never enters this crate.
//! - **No I/O** — callers own transport; this crate only converts bytes types.
//! - **No I/O** — callers own transport; this crate only converts bytes <-> types.
//! - **No async** — pure synchronous data-layer code.
//!
//! # Generated code
//!
//! `build.rs` invokes `capnpc` at compile time and writes generated Rust source
//! into `$OUT_DIR`. The `include!` macros below splice that code in as a module.
// ── Generated types ───────────────────────────────────────────────────────────
// ════════════════════════════════════════════════════════════════════════════
// v1 (legacy): Cap'n Proto generated types
// ════════════════════════════════════════════════════════════════════════════
#![allow(unused_parens)]
/// Cap'n Proto generated types for `schemas/auth.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
pub mod auth_capnp {
include!(concat!(env!("OUT_DIR"), "/auth_capnp.rs"));
}
/// Cap'n Proto generated types for `schemas/delivery.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
pub mod delivery_capnp {
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
}
/// Cap'n Proto generated types for `schemas/node.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
pub mod node_capnp {
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
}
/// Cap'n Proto generated types for `schemas/federation.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
pub mod federation_capnp {
include!(concat!(env!("OUT_DIR"), "/federation_capnp.rs"));
}
// ── Low-level byte ↔ message conversions ──────────────────────────────────────
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
///
/// The output includes the segment table header. For transport, the
/// `quicproquo-core` frame codec prepends a 4-byte little-endian length field.
pub fn to_bytes<A: capnp::message::Allocator>(
msg: &capnp::message::Builder<A>,
) -> Result<Vec<u8>, capnp::Error> {
@@ -59,25 +41,17 @@ pub fn to_bytes<A: capnp::message::Allocator>(
Ok(buf)
}
/// Deserialise unpacked wire bytes into a message with owned segments.
///
/// Uses a stricter default traversal limit of 1 Mi words (~8 MiB) instead
/// of the Cap'n Proto default of 64 MiB, reducing DoS amplification from
/// untrusted input. Use [`from_bytes_with_options`] if you need a custom limit.
/// Deserialise unpacked wire bytes into a Cap'n Proto message.
pub fn from_bytes(
bytes: &[u8],
) -> Result<capnp::message::Reader<capnp::serialize::OwnedSegments>, capnp::Error> {
let mut options = capnp::message::ReaderOptions::new();
options.traversal_limit_in_words(Some(1_048_576)); // 1 Mi words = ~8 MiB
options.traversal_limit_in_words(Some(1_048_576));
let mut cursor = std::io::Cursor::new(bytes);
capnp::serialize::read_message(&mut cursor, options)
}
/// Deserialise unpacked wire bytes with caller-specified [`ReaderOptions`].
///
/// Prefer [`from_bytes`] for typical use. Use this variant when you need to
/// raise the traversal limit for large messages (e.g. blob transfers) or
/// lower it further for tighter validation.
/// Deserialise with custom [`ReaderOptions`].
pub fn from_bytes_with_options(
bytes: &[u8],
options: capnp::message::ReaderOptions,
@@ -85,3 +59,79 @@ pub fn from_bytes_with_options(
let mut cursor = std::io::Cursor::new(bytes);
capnp::serialize::read_message(&mut cursor, options)
}
// ════════════════════════════════════════════════════════════════════════════
// v2: Protobuf (prost) generated types
// ════════════════════════════════════════════════════════════════════════════
/// Protobuf types for the v2 RPC protocol.
///
/// Generated by prost in `build.rs`; the nested module path mirrors the
/// protobuf package name `qpq.v1`. Do not edit the generated code by hand.
pub mod qpq {
    /// Version 1 of the `qpq` protobuf package.
    pub mod v1 {
        include!(concat!(env!("OUT_DIR"), "/qpq.v1.rs"));
    }
}
/// Method ID constants for the v2 RPC dispatch table.
///
/// IDs are grouped in blocks of 100 per service area, leaving room to add
/// methods without renumbering. Values 1000+ are server-push event types
/// carried on uni-streams, not request methods.
pub mod method_ids {
    // Auth (100-103)
    pub const OPAQUE_REGISTER_START: u16 = 100;
    pub const OPAQUE_REGISTER_FINISH: u16 = 101;
    pub const OPAQUE_LOGIN_START: u16 = 102;
    pub const OPAQUE_LOGIN_FINISH: u16 = 103;
    // Delivery (200-205)
    pub const ENQUEUE: u16 = 200;
    pub const FETCH: u16 = 201;
    pub const FETCH_WAIT: u16 = 202;
    pub const PEEK: u16 = 203;
    pub const ACK: u16 = 204;
    pub const BATCH_ENQUEUE: u16 = 205;
    // Keys (300-304)
    pub const UPLOAD_KEY_PACKAGE: u16 = 300;
    pub const FETCH_KEY_PACKAGE: u16 = 301;
    pub const UPLOAD_HYBRID_KEY: u16 = 302;
    pub const FETCH_HYBRID_KEY: u16 = 303;
    pub const FETCH_HYBRID_KEYS: u16 = 304;
    // Channel (400)
    pub const CREATE_CHANNEL: u16 = 400;
    // User (500-501)
    pub const RESOLVE_USER: u16 = 500;
    pub const RESOLVE_IDENTITY: u16 = 501;
    // Blob (600-601)
    pub const UPLOAD_BLOB: u16 = 600;
    pub const DOWNLOAD_BLOB: u16 = 601;
    // Device (700-702)
    pub const REGISTER_DEVICE: u16 = 700;
    pub const LIST_DEVICES: u16 = 701;
    pub const REVOKE_DEVICE: u16 = 702;
    // P2P (800-802)
    pub const PUBLISH_ENDPOINT: u16 = 800;
    pub const RESOLVE_ENDPOINT: u16 = 801;
    pub const HEALTH: u16 = 802;
    // Federation (900-905)
    pub const RELAY_ENQUEUE: u16 = 900;
    pub const RELAY_BATCH_ENQUEUE: u16 = 901;
    pub const PROXY_FETCH_KEY_PACKAGE: u16 = 902;
    pub const PROXY_FETCH_HYBRID_KEY: u16 = 903;
    pub const PROXY_RESOLVE_USER: u16 = 904;
    pub const FEDERATION_HEALTH: u16 = 905;
    // Account (950)
    pub const DELETE_ACCOUNT: u16 = 950;
    // Push event types (1000+) — server → client notifications, not RPCs.
    pub const PUSH_NEW_MESSAGE: u16 = 1000;
    pub const PUSH_TYPING: u16 = 1001;
    pub const PUSH_PRESENCE: u16 = 1002;
    pub const PUSH_MEMBERSHIP: u16 = 1003;
}
pub use prost;
pub use bytes;

View File

@@ -0,0 +1,25 @@
[package]
name = "quicproquo-rpc"
version = "0.1.0"
edition = "2021"
description = "QUIC RPC framework for quicproquo v2 — framing, dispatch, tower middleware"
[dependencies]
quicproquo-proto = { path = "../quicproquo-proto" }
prost = { workspace = true }
bytes = { workspace = true }
quinn = { workspace = true }
rustls = { workspace = true }
rcgen = { workspace = true }
tokio = { workspace = true }
futures = { workspace = true }
tower = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
dashmap = { workspace = true }
[dev-dependencies]
tokio = { workspace = true, features = ["test-util"] }
[lints]
workspace = true

View File

@@ -0,0 +1,175 @@
//! QUIC RPC client — connect to server, send requests, receive push events.
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use bytes::{Bytes, BytesMut};
use quinn::{Connection, Endpoint};
use tokio::sync::mpsc;
use tracing::{debug, warn};
use crate::error::{RpcError, RpcStatus};
use crate::framing::{PushFrame, RequestFrame, ResponseFrame};
/// Configuration for the RPC client.
pub struct RpcClientConfig {
    /// Server address to connect to.
    pub server_addr: std::net::SocketAddr,
    /// Server name for TLS certificate verification (SNI).
    pub server_name: String,
    /// TLS client config (rustls). Cloned internally so the ALPN protocol
    /// can be set without mutating the caller's shared config.
    pub tls_config: Arc<rustls::ClientConfig>,
    /// ALPN protocol identifier; must match the server's configured ALPN.
    pub alpn: Vec<u8>,
}
/// A QUIC RPC client connection.
///
/// Each `call` opens its own bi-directional QUIC stream, so concurrent
/// requests over one connection do not block each other.
pub struct RpcClient {
    // Cloneable handle to the underlying QUIC connection.
    connection: Connection,
    // Monotonically increasing per-connection request ID (wraps at u32::MAX).
    next_request_id: AtomicU32,
}

impl RpcClient {
    /// Connect to the RPC server.
    ///
    /// Binds an ephemeral local UDP socket, applies the caller's TLS config
    /// with the configured ALPN, and awaits the QUIC handshake.
    pub async fn connect(config: RpcClientConfig) -> Result<Self, RpcError> {
        // Clone the shared rustls config so we can pin the ALPN protocol
        // without mutating the Arc the caller may reuse elsewhere.
        let mut tls = (*config.tls_config).clone();
        tls.alpn_protocols = vec![config.alpn];
        let quic_tls = quinn::crypto::rustls::QuicClientConfig::try_from(tls)
            .map_err(|e| RpcError::Connection(format!("TLS config: {e}")))?;
        // 0.0.0.0:0 = any interface, OS-assigned port.
        let mut endpoint = Endpoint::client("0.0.0.0:0".parse().expect("valid addr"))
            .map_err(|e| RpcError::Connection(e.to_string()))?;
        endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new(quic_tls)));
        let connection = endpoint
            .connect(config.server_addr, &config.server_name)
            .map_err(|e| RpcError::Connection(e.to_string()))?
            .await
            .map_err(|e| RpcError::Connection(e.to_string()))?;
        debug!(remote = %connection.remote_address(), "connected to RPC server");
        Ok(Self {
            connection,
            next_request_id: AtomicU32::new(1),
        })
    }

    /// Send an RPC request and wait for the response.
    ///
    /// Writes one request frame on a fresh bi-stream, half-closes the send
    /// side, then reads the response to end-of-stream. Returns the response
    /// payload on `RpcStatus::Ok`; any other status becomes
    /// `RpcError::Server` with the payload interpreted as a UTF-8 message.
    pub async fn call(
        &self,
        method_id: u16,
        payload: Bytes,
    ) -> Result<Bytes, RpcError> {
        // Relaxed ordering suffices: IDs only need uniqueness, not any
        // happens-before relationship with other memory operations.
        let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed);
        let (mut send, mut recv) = self
            .connection
            .open_bi()
            .await
            .map_err(|e| RpcError::Connection(e.to_string()))?;
        // Send request.
        let frame = RequestFrame {
            method_id,
            request_id,
            payload,
        };
        let encoded = frame.encode();
        send.write_all(&encoded)
            .await
            .map_err(|e| RpcError::Connection(e.to_string()))?;
        // finish() half-closes the stream so the server sees EOF and knows
        // the request is complete.
        send.finish().map_err(|e| RpcError::Connection(e.to_string()))?;
        // Read response. `read_chunk(_, true)` delivers bytes in order.
        let mut buf = BytesMut::new();
        while let Some(chunk) = recv
            .read_chunk(65536, true)
            .await
            .map_err(|e| RpcError::Connection(e.to_string()))?
        {
            buf.extend_from_slice(&chunk.bytes);
            // Abort early rather than buffering without bound if the server
            // sends more than one maximal frame's worth of data.
            if buf.len() > crate::framing::MAX_PAYLOAD_SIZE + crate::framing::RESPONSE_HEADER_SIZE {
                return Err(RpcError::PayloadTooLarge {
                    size: buf.len(),
                    max: crate::framing::MAX_PAYLOAD_SIZE,
                });
            }
        }
        let response = ResponseFrame::decode(&mut buf)?
            .ok_or_else(|| RpcError::Decode("incomplete response frame".into()))?;
        // The response must echo our request ID; a mismatch indicates a
        // framing or server bug.
        if response.request_id != request_id {
            return Err(RpcError::Decode(format!(
                "request_id mismatch: sent {request_id}, got {}",
                response.request_id
            )));
        }
        match RpcStatus::from_u8(response.status) {
            Some(RpcStatus::Ok) => Ok(response.payload),
            Some(status) => Err(RpcError::Server {
                status,
                message: String::from_utf8_lossy(&response.payload).into_owned(),
            }),
            None => Err(RpcError::Decode(format!(
                "unknown status byte: {}",
                response.status
            ))),
        }
    }

    /// Subscribe to server-push events. Returns a receiver channel.
    /// Spawns a background task that reads uni-streams; the task exits when
    /// the connection closes or the receiver is dropped.
    pub fn subscribe_push(&self) -> mpsc::UnboundedReceiver<PushFrame> {
        let (tx, rx) = mpsc::unbounded_channel();
        let conn = self.connection.clone();
        tokio::spawn(async move {
            loop {
                match conn.accept_uni().await {
                    Ok(mut recv) => {
                        // Each uni-stream carries exactly one push frame;
                        // read it to end-of-stream before decoding.
                        let mut buf = BytesMut::new();
                        loop {
                            match recv.read_chunk(65536, true).await {
                                Ok(Some(chunk)) => buf.extend_from_slice(&chunk.bytes),
                                Ok(None) => break,
                                Err(e) => {
                                    debug!("push stream read error: {e}");
                                    break;
                                }
                            }
                        }
                        match PushFrame::decode(&mut buf) {
                            Ok(Some(frame)) => {
                                if tx.send(frame).is_err() {
                                    return; // receiver dropped
                                }
                            }
                            // Malformed pushes are logged and skipped so one
                            // bad frame doesn't kill the subscription.
                            Ok(None) => debug!("incomplete push frame"),
                            Err(e) => debug!("push decode error: {e}"),
                        }
                    }
                    Err(quinn::ConnectionError::ApplicationClosed(_)) => break,
                    Err(e) => {
                        warn!("accept_uni error: {e}");
                        break;
                    }
                }
            }
        });
        rx
    }

    /// Close the connection gracefully (error code 0, reason "bye").
    pub fn close(&self) {
        self.connection.close(0u32.into(), b"bye");
    }

    /// Get the underlying QUIC connection (for advanced use).
    pub fn connection(&self) -> &Connection {
        &self.connection
    }
}

View File

@@ -0,0 +1,68 @@
//! RPC error types.
/// Status codes carried in the first byte of every RPC response frame.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum RpcStatus {
    /// Request succeeded.
    Ok = 0,
    /// Client sent a malformed request.
    BadRequest = 1,
    /// Authentication required or token invalid.
    Unauthorized = 2,
    /// Caller lacks permission for this operation.
    Forbidden = 3,
    /// Requested resource not found.
    NotFound = 4,
    /// Rate limit exceeded.
    RateLimited = 5,
    /// Internal server error.
    Internal = 10,
    /// Method not recognized.
    UnknownMethod = 11,
}

impl RpcStatus {
    /// Decode a raw status byte from the wire.
    ///
    /// Returns `None` for bytes that do not correspond to any known status,
    /// letting the caller surface a decode error instead of guessing.
    pub fn from_u8(byte: u8) -> Option<Self> {
        let status = match byte {
            0 => Self::Ok,
            1 => Self::BadRequest,
            2 => Self::Unauthorized,
            3 => Self::Forbidden,
            4 => Self::NotFound,
            5 => Self::RateLimited,
            10 => Self::Internal,
            11 => Self::UnknownMethod,
            _ => return None,
        };
        Some(status)
    }
}
/// Errors that can occur in the RPC layer.
#[derive(Debug, thiserror::Error)]
pub enum RpcError {
    /// Transport-level failure (QUIC connect/accept, TLS config, stream I/O).
    #[error("connection error: {0}")]
    Connection(String),
    /// Failed to encode an outgoing frame or payload.
    #[error("encoding error: {0}")]
    Encode(String),
    /// Failed to decode an incoming frame or payload.
    #[error("decoding error: {0}")]
    Decode(String),
    /// The server answered with a non-OK status; `message` is the response
    /// payload interpreted as UTF-8.
    #[error("server returned error status {status:?}: {message}")]
    Server {
        status: RpcStatus,
        message: String,
    },
    /// The request did not complete within the deadline.
    #[error("request timed out")]
    Timeout,
    /// The peer closed the stream before a complete frame arrived.
    #[error("stream closed unexpectedly")]
    StreamClosed,
    /// A frame declared or accumulated a payload exceeding the size limit.
    #[error("payload too large: {size} bytes (max {max})")]
    PayloadTooLarge { size: usize, max: usize },
}

View File

@@ -0,0 +1,280 @@
//! Wire format encoding and decoding for the quicproquo v2 RPC protocol.
//!
//! ## Request frame
//! ```text
//! [method_id: u16 BE][request_id: u32 BE][payload_len: u32 BE][protobuf bytes]
//! ```
//!
//! ## Response frame
//! ```text
//! [status: u8][request_id: u32 BE][payload_len: u32 BE][protobuf bytes]
//! ```
//!
//! ## Push frame (server → client, uni-stream)
//! ```text
//! [event_type: u16 BE][payload_len: u32 BE][protobuf bytes]
//! ```
use bytes::{Buf, BufMut, Bytes, BytesMut};
use crate::error::{RpcError, RpcStatus};
/// Maximum payload size: 4 MiB. Applies to request, response, and push
/// frames alike; decoders reject larger declared lengths before buffering.
pub const MAX_PAYLOAD_SIZE: usize = 4 * 1024 * 1024;
/// Request header size: 2 (method) + 4 (req_id) + 4 (len) = 10 bytes.
pub const REQUEST_HEADER_SIZE: usize = 10;
/// Response header size: 1 (status) + 4 (req_id) + 4 (len) = 9 bytes.
pub const RESPONSE_HEADER_SIZE: usize = 9;
/// Push header size: 2 (event_type) + 4 (len) = 6 bytes.
pub const PUSH_HEADER_SIZE: usize = 6;
// ── Request ──────────────────────────────────────────────────────────────────
/// A decoded RPC request frame.
#[derive(Debug, Clone)]
pub struct RequestFrame {
    pub method_id: u16,
    pub request_id: u32,
    pub payload: Bytes,
}

impl RequestFrame {
    /// Serialise as `[method_id][request_id][payload_len][payload]`, with
    /// all integer fields big-endian.
    pub fn encode(&self) -> Bytes {
        let mut out = BytesMut::with_capacity(REQUEST_HEADER_SIZE + self.payload.len());
        out.put_u16(self.method_id);
        out.put_u32(self.request_id);
        out.put_u32(self.payload.len() as u32);
        // Cloning `Bytes` is a cheap refcount bump, not a data copy.
        out.put(self.payload.clone());
        out.freeze()
    }

    /// Try to decode one request frame from the front of `buf`, consuming
    /// its bytes on success.
    ///
    /// `Ok(None)` means more data is needed; an oversized declared payload
    /// is rejected up front so a peer cannot force unbounded buffering.
    pub fn decode(buf: &mut BytesMut) -> Result<Option<Self>, RpcError> {
        if buf.len() < REQUEST_HEADER_SIZE {
            return Ok(None);
        }
        // The length field occupies bytes 6..10; peek it without consuming.
        let declared_len = u32::from_be_bytes([buf[6], buf[7], buf[8], buf[9]]) as usize;
        if declared_len > MAX_PAYLOAD_SIZE {
            return Err(RpcError::PayloadTooLarge {
                size: declared_len,
                max: MAX_PAYLOAD_SIZE,
            });
        }
        if buf.len() < REQUEST_HEADER_SIZE + declared_len {
            return Ok(None);
        }
        let method_id = buf.get_u16();
        let request_id = buf.get_u32();
        buf.advance(4); // skip the length field — already validated above
        let payload = buf.split_to(declared_len).freeze();
        Ok(Some(Self {
            method_id,
            request_id,
            payload,
        }))
    }
}
// ── Response ─────────────────────────────────────────────────────────────────
/// A decoded RPC response frame.
#[derive(Debug, Clone)]
pub struct ResponseFrame {
    pub status: u8,
    pub request_id: u32,
    pub payload: Bytes,
}

impl ResponseFrame {
    /// Serialise as `[status][request_id][payload_len][payload]`, with all
    /// integer fields big-endian.
    pub fn encode(&self) -> Bytes {
        let mut out = BytesMut::with_capacity(RESPONSE_HEADER_SIZE + self.payload.len());
        out.put_u8(self.status);
        out.put_u32(self.request_id);
        out.put_u32(self.payload.len() as u32);
        // Cheap refcount clone of the payload handle.
        out.put(self.payload.clone());
        out.freeze()
    }

    /// Try to decode one response frame from the front of `buf`, consuming
    /// its bytes on success. `Ok(None)` means more data is needed.
    pub fn decode(buf: &mut BytesMut) -> Result<Option<Self>, RpcError> {
        if buf.len() < RESPONSE_HEADER_SIZE {
            return Ok(None);
        }
        // The length field occupies bytes 5..9; peek it without consuming.
        let declared_len = u32::from_be_bytes([buf[5], buf[6], buf[7], buf[8]]) as usize;
        if declared_len > MAX_PAYLOAD_SIZE {
            return Err(RpcError::PayloadTooLarge {
                size: declared_len,
                max: MAX_PAYLOAD_SIZE,
            });
        }
        if buf.len() < RESPONSE_HEADER_SIZE + declared_len {
            return Ok(None);
        }
        let status = buf.get_u8();
        let request_id = buf.get_u32();
        buf.advance(4); // skip the length field — already validated above
        let payload = buf.split_to(declared_len).freeze();
        Ok(Some(Self {
            status,
            request_id,
            payload,
        }))
    }

    /// Interpret the raw status byte, if it maps to a known [`RpcStatus`].
    pub fn rpc_status(&self) -> Option<RpcStatus> {
        RpcStatus::from_u8(self.status)
    }
}
// ── Push ─────────────────────────────────────────────────────────────────────
/// A decoded server-push event frame (sent on QUIC uni-streams).
#[derive(Debug, Clone)]
pub struct PushFrame {
    pub event_type: u16,
    pub payload: Bytes,
}

impl PushFrame {
    /// Serialise as `[event_type][payload_len][payload]`, with all integer
    /// fields big-endian.
    pub fn encode(&self) -> Bytes {
        let mut out = BytesMut::with_capacity(PUSH_HEADER_SIZE + self.payload.len());
        out.put_u16(self.event_type);
        out.put_u32(self.payload.len() as u32);
        // Cheap refcount clone of the payload handle.
        out.put(self.payload.clone());
        out.freeze()
    }

    /// Try to decode one push frame from the front of `buf`, consuming its
    /// bytes on success. `Ok(None)` means more data is needed.
    pub fn decode(buf: &mut BytesMut) -> Result<Option<Self>, RpcError> {
        if buf.len() < PUSH_HEADER_SIZE {
            return Ok(None);
        }
        // The length field occupies bytes 2..6; peek it without consuming.
        let declared_len = u32::from_be_bytes([buf[2], buf[3], buf[4], buf[5]]) as usize;
        if declared_len > MAX_PAYLOAD_SIZE {
            return Err(RpcError::PayloadTooLarge {
                size: declared_len,
                max: MAX_PAYLOAD_SIZE,
            });
        }
        if buf.len() < PUSH_HEADER_SIZE + declared_len {
            return Ok(None);
        }
        let event_type = buf.get_u16();
        buf.advance(4); // skip the length field — already validated above
        let payload = buf.split_to(declared_len).freeze();
        Ok(Some(Self {
            event_type,
            payload,
        }))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Encode a request and decode it back; the buffer must be fully drained.
    #[test]
    fn request_roundtrip() {
        let frame = RequestFrame {
            method_id: 42,
            request_id: 1001,
            payload: Bytes::from_static(b"hello"),
        };
        let encoded = frame.encode();
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = RequestFrame::decode(&mut buf).expect("decode").expect("complete");
        assert_eq!(decoded.method_id, 42);
        assert_eq!(decoded.request_id, 1001);
        assert_eq!(decoded.payload, Bytes::from_static(b"hello"));
        assert!(buf.is_empty());
    }

    // Encode a response and decode it back, checking all header fields.
    #[test]
    fn response_roundtrip() {
        let frame = ResponseFrame {
            status: RpcStatus::Ok as u8,
            request_id: 2002,
            payload: Bytes::from_static(b"world"),
        };
        let encoded = frame.encode();
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = ResponseFrame::decode(&mut buf).expect("decode").expect("complete");
        assert_eq!(decoded.status, 0);
        assert_eq!(decoded.request_id, 2002);
        assert_eq!(decoded.payload, Bytes::from_static(b"world"));
    }

    // Encode a push frame and decode it back.
    #[test]
    fn push_roundtrip() {
        let frame = PushFrame {
            event_type: 7,
            payload: Bytes::from_static(b"event-data"),
        };
        let encoded = frame.encode();
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = PushFrame::decode(&mut buf).expect("decode").expect("complete");
        assert_eq!(decoded.event_type, 7);
        assert_eq!(decoded.payload, Bytes::from_static(b"event-data"));
    }

    // Fewer bytes than a full header must yield Ok(None), not an error.
    #[test]
    fn incomplete_request_returns_none() {
        let mut buf = BytesMut::from(&[0u8; 5][..]);
        assert!(RequestFrame::decode(&mut buf).expect("no error").is_none());
    }

    // A declared payload length above MAX_PAYLOAD_SIZE must be rejected
    // from the header alone, before any payload bytes arrive.
    #[test]
    fn payload_too_large_rejected() {
        // Craft a request header with payload_len = MAX + 1.
        let mut buf = BytesMut::new();
        buf.put_u16(1);
        buf.put_u32(1);
        buf.put_u32((MAX_PAYLOAD_SIZE + 1) as u32);
        let result = RequestFrame::decode(&mut buf);
        assert!(matches!(result, Err(RpcError::PayloadTooLarge { .. })));
    }

    // Zero-length payload is valid: the frame is exactly one header.
    #[test]
    fn empty_payload_request() {
        let frame = RequestFrame {
            method_id: 0,
            request_id: 0,
            payload: Bytes::new(),
        };
        let encoded = frame.encode();
        assert_eq!(encoded.len(), REQUEST_HEADER_SIZE);
        let mut buf = BytesMut::from(encoded.as_ref());
        let decoded = RequestFrame::decode(&mut buf).expect("decode").expect("complete");
        assert!(decoded.payload.is_empty());
    }
}

View File

@@ -0,0 +1,13 @@
//! QUIC RPC framework for quicproquo v2.
//!
//! Wire format per QUIC stream (integer fields big-endian; see `framing`):
//! - Request: `[method_id: u16][request_id: u32][payload_len: u32][protobuf bytes]`
//! - Response: `[status: u8][request_id: u32][payload_len: u32][protobuf bytes]`
//! - Push: `[event_type: u16][payload_len: u32][protobuf bytes]` (uni-stream)
pub mod framing;
pub mod method;
pub mod server;
pub mod client;
pub mod middleware;
pub mod error;

View File

@@ -0,0 +1,102 @@
//! Method registry — maps method IDs to handler functions.
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use bytes::Bytes;
use crate::error::RpcStatus;
/// Outcome produced by an RPC handler: a status code plus an encoded
/// response payload.
pub struct HandlerResult {
    pub status: RpcStatus,
    pub payload: Bytes,
}

impl HandlerResult {
    /// Build a successful response carrying `payload`.
    pub fn ok(payload: Bytes) -> Self {
        Self {
            payload,
            status: RpcStatus::Ok,
        }
    }

    /// Build an error response; `message` is copied into the payload and
    /// delivered to the client as UTF-8 text.
    pub fn err(status: RpcStatus, message: &str) -> Self {
        let payload = Bytes::copy_from_slice(message.as_bytes());
        Self { status, payload }
    }
}
/// Context passed to every RPC handler.
pub struct RequestContext {
    /// The authenticated identity key of the caller, if any.
    /// `None` until auth middleware populates it.
    pub identity_key: Option<Vec<u8>>,
    /// The session token, if provided by the client.
    pub session_token: Option<Vec<u8>>,
    /// The raw request payload (protobuf-encoded); handlers decode it
    /// into the concrete request type themselves.
    pub payload: Bytes,
}

/// Type-erased async handler function.
///
/// Wrapped in `Arc` so one registry can be shared across connection tasks;
/// each invocation receives the shared server state and the per-request
/// context, and returns a boxed future resolving to a [`HandlerResult`].
pub type HandlerFn<S> = Arc<
    dyn Fn(Arc<S>, RequestContext) -> Pin<Box<dyn Future<Output = HandlerResult> + Send>>
        + Send
        + Sync,
>;
/// Dispatch table mapping method IDs to `(handler, human-readable name)`.
pub struct MethodRegistry<S> {
    handlers: HashMap<u16, (HandlerFn<S>, &'static str)>,
}

impl<S: Send + Sync + 'static> MethodRegistry<S> {
    /// Create an empty registry.
    pub fn new() -> Self {
        Self {
            handlers: HashMap::new(),
        }
    }

    /// Register `handler` under `method_id`; `name` is used for logging.
    /// Registering the same ID twice replaces the earlier handler.
    pub fn register<F, Fut>(&mut self, method_id: u16, name: &'static str, handler: F)
    where
        F: Fn(Arc<S>, RequestContext) -> Fut + Send + Sync + 'static,
        Fut: Future<Output = HandlerResult> + Send + 'static,
    {
        // Erase the concrete future type behind Pin<Box<dyn Future>> so
        // heterogeneous handlers fit in one map.
        let erased: HandlerFn<S> = Arc::new(move |state: Arc<S>, ctx: RequestContext| {
            Box::pin(handler(state, ctx)) as Pin<Box<dyn Future<Output = HandlerResult> + Send>>
        });
        self.handlers.insert(method_id, (erased, name));
    }

    /// Look up a handler and its name by method ID.
    pub fn get(&self, method_id: u16) -> Option<&(HandlerFn<S>, &'static str)> {
        self.handlers.get(&method_id)
    }

    /// Number of registered methods.
    pub fn len(&self) -> usize {
        self.handlers.len()
    }

    /// Whether no methods are registered.
    pub fn is_empty(&self) -> bool {
        self.handlers.is_empty()
    }

    /// Iterate over all registered `(method_id, name)` pairs (unordered).
    pub fn methods(&self) -> impl Iterator<Item = (u16, &'static str)> + '_ {
        self.handlers.iter().map(|(id, entry)| (*id, entry.1))
    }
}

impl<S: Send + Sync + 'static> Default for MethodRegistry<S> {
    fn default() -> Self {
        Self::new()
    }
}

View File

@@ -0,0 +1,96 @@
//! Tower-based middleware layers for the RPC server.
//!
//! - `AuthLayer`: validates session tokens and attaches identity to context.
//! - `RateLimitLayer`: per-IP request rate limiting.
use std::time::{Duration, Instant};
use dashmap::DashMap;
// ── Auth middleware ──────────────────────────────────────────────────────────
/// Validates bearer tokens and resolves identity keys.
pub trait SessionValidator: Send + Sync + 'static {
    /// Validate a session token, returning the identity key if valid,
    /// or `None` to reject the session.
    fn validate(&self, token: &[u8]) -> Option<Vec<u8>>;
}

/// Auth context extracted from a validated session.
#[derive(Debug, Clone)]
pub struct AuthContext {
    /// The Ed25519 identity key of the authenticated caller.
    pub identity_key: Vec<u8>,
}
// ── Rate limiter ─────────────────────────────────────────────────────────────
/// Simple per-key fixed-window rate limiter.
///
/// Each key has a counter that resets once `window` has elapsed since the
/// window began. Note this is a fixed window (not a true sliding window):
/// up to `2 * max_requests` calls can pass across a window boundary.
pub struct RateLimiter {
    /// Max requests allowed per window.
    max_requests: u32,
    /// Length of each counting window.
    window: Duration,
    /// Key → (requests seen this window, when the window started).
    state: DashMap<Vec<u8>, (u32, Instant)>,
}

impl RateLimiter {
    /// Build a limiter allowing `max_requests` per `window` per key.
    pub fn new(max_requests: u32, window: Duration) -> Self {
        Self {
            state: DashMap::new(),
            max_requests,
            window,
        }
    }

    /// Record one request from `key`; returns `true` if it is allowed.
    pub fn check(&self, key: &[u8]) -> bool {
        let now = Instant::now();
        let mut slot = self.state.entry(key.to_vec()).or_insert((0, now));
        let (hits, window_start) = slot.value_mut();
        if now.duration_since(*window_start) >= self.window {
            // Window elapsed: start a fresh one counting this request.
            *hits = 1;
            *window_start = now;
            return true;
        }
        if *hits < self.max_requests {
            *hits += 1;
            return true;
        }
        false
    }

    /// Drop entries whose window expired long ago (call periodically to
    /// bound memory for churning keys).
    pub fn gc(&self) {
        let now = Instant::now();
        self.state
            .retain(|_, (_, started)| now.duration_since(*started) < self.window * 2);
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Up to max_requests calls pass; the next one in the same window fails.
    #[test]
    fn rate_limiter_allows_within_limit() {
        let rl = RateLimiter::new(3, Duration::from_secs(60));
        let key = b"test-key";
        assert!(rl.check(key));
        assert!(rl.check(key));
        assert!(rl.check(key));
        assert!(!rl.check(key)); // 4th request denied
    }

    // After the window elapses, the counter resets and requests pass again.
    #[test]
    fn rate_limiter_resets_after_window() {
        let rl = RateLimiter::new(1, Duration::from_millis(1));
        let key = b"test-key";
        assert!(rl.check(key));
        assert!(!rl.check(key));
        // Sleep comfortably past the 1 ms window to avoid timing flakiness.
        std::thread::sleep(Duration::from_millis(5));
        assert!(rl.check(key)); // window expired
    }
}

View File

@@ -0,0 +1,198 @@
//! QUIC RPC server — accepts connections, dispatches requests to handlers.
use std::sync::Arc;
use bytes::BytesMut;
use quinn::{Endpoint, Incoming, RecvStream, SendStream};
use tracing::{debug, info, warn};
use crate::error::{RpcError, RpcStatus};
use crate::framing::{RequestFrame, ResponseFrame, PushFrame};
use crate::method::{HandlerResult, MethodRegistry, RequestContext};
/// Configuration for the RPC server.
pub struct RpcServerConfig {
    /// QUIC listen address.
    pub listen_addr: std::net::SocketAddr,
    /// TLS server config (rustls). Cloned internally so the ALPN protocol
    /// can be set without mutating the caller's shared config.
    pub tls_config: Arc<rustls::ServerConfig>,
    /// ALPN protocol for the RPC service; clients must offer the same value.
    pub alpn: Vec<u8>,
}
/// The QUIC RPC server.
pub struct RpcServer<S: Send + Sync + 'static> {
    endpoint: Endpoint,
    state: Arc<S>,
    registry: Arc<MethodRegistry<S>>,
}

impl<S: Send + Sync + 'static> RpcServer<S> {
    /// Create and bind the QUIC endpoint. Does not start accepting yet.
    pub fn bind(
        config: RpcServerConfig,
        state: Arc<S>,
        registry: MethodRegistry<S>,
    ) -> Result<Self, RpcError> {
        // Clone the rustls config so the RPC ALPN can be pinned without
        // mutating the caller's shared config.
        let mut tls_config = (*config.tls_config).clone();
        tls_config.alpn_protocols = vec![config.alpn];
        let crypto = quinn::crypto::rustls::QuicServerConfig::try_from(tls_config)
            .map_err(|e| RpcError::Connection(format!("TLS config: {e}")))?;
        let endpoint = Endpoint::server(
            quinn::ServerConfig::with_crypto(Arc::new(crypto)),
            config.listen_addr,
        )
        .map_err(|e| RpcError::Connection(format!("bind {}: {e}", config.listen_addr)))?;
        info!(addr = %config.listen_addr, "RPC server bound");
        Ok(Self {
            endpoint,
            state,
            registry: Arc::new(registry),
        })
    }

    /// Accept connections until the endpoint closes, spawning one task per
    /// connection.
    pub async fn serve(self) -> Result<(), RpcError> {
        info!("RPC server accepting connections");
        loop {
            // `accept()` yields `None` once the endpoint is shut down.
            let Some(incoming) = self.endpoint.accept().await else {
                return Ok(());
            };
            let state = Arc::clone(&self.state);
            let registry = Arc::clone(&self.registry);
            tokio::spawn(async move {
                if let Err(e) = handle_connection(incoming, state, registry).await {
                    warn!("connection error: {e}");
                }
            });
        }
    }

    /// Get the local address the server is listening on.
    pub fn local_addr(&self) -> Result<std::net::SocketAddr, RpcError> {
        self.endpoint
            .local_addr()
            .map_err(|e| RpcError::Connection(e.to_string()))
    }
}
/// Drive one QUIC connection: accept bi-directional streams and spawn a
/// task per RPC until the peer closes or the connection errors.
async fn handle_connection<S: Send + Sync + 'static>(
    incoming: Incoming,
    state: Arc<S>,
    registry: Arc<MethodRegistry<S>>,
) -> Result<(), RpcError> {
    let connection = incoming
        .await
        .map_err(|e| RpcError::Connection(e.to_string()))?;
    let remote = connection.remote_address();
    debug!(remote = %remote, "new connection");
    loop {
        match connection.accept_bi().await {
            Ok((send, recv)) => {
                let state = Arc::clone(&state);
                let registry = Arc::clone(&registry);
                tokio::spawn(async move {
                    if let Err(e) = handle_stream(send, recv, state, registry).await {
                        debug!("stream error: {e}");
                    }
                });
            }
            // A clean application-level close ends the loop quietly.
            Err(quinn::ConnectionError::ApplicationClosed(_)) => {
                debug!(remote = %remote, "connection closed by peer");
                return Ok(());
            }
            Err(e) => {
                debug!(remote = %remote, "accept_bi error: {e}");
                return Ok(());
            }
        }
    }
}
/// Handle a single bi-directional stream: read request, dispatch, write response.
async fn handle_stream<S: Send + Sync + 'static>(
    mut send: SendStream,
    mut recv: RecvStream,
    state: Arc<S>,
    registry: Arc<MethodRegistry<S>>,
) -> Result<(), RpcError> {
    // Drain the stream into a buffer; the client signals end-of-request by
    // finishing its send side, which makes `read_chunk` return `None`.
    let hard_cap = crate::framing::MAX_PAYLOAD_SIZE + crate::framing::REQUEST_HEADER_SIZE;
    let mut buf = BytesMut::new();
    loop {
        let chunk = recv
            .read_chunk(65536, true)
            .await
            .map_err(|e| RpcError::Connection(e.to_string()))?;
        let Some(chunk) = chunk else { break };
        buf.extend_from_slice(&chunk.bytes);
        // Reject oversized requests as soon as the cap is exceeded instead
        // of buffering without bound.
        if buf.len() > hard_cap {
            return Err(RpcError::PayloadTooLarge {
                size: buf.len(),
                max: crate::framing::MAX_PAYLOAD_SIZE,
            });
        }
    }

    let Some(frame) = RequestFrame::decode(&mut buf)? else {
        return Err(RpcError::Decode("incomplete request frame".into()));
    };

    // Dispatch to the registered handler, or report an unknown method.
    let result = if let Some((handler, name)) = registry.get(frame.method_id) {
        debug!(method_id = frame.method_id, method = name, req_id = frame.request_id, "dispatching");
        let ctx = RequestContext {
            identity_key: None, // populated by auth middleware
            session_token: None,
            payload: frame.payload,
        };
        handler(Arc::clone(&state), ctx).await
    } else {
        warn!(method_id = frame.method_id, "unknown method");
        HandlerResult::err(RpcStatus::UnknownMethod, "unknown method")
    };

    let response = ResponseFrame {
        status: result.status as u8,
        request_id: frame.request_id,
        payload: result.payload,
    };
    let encoded = response.encode();
    send.write_all(&encoded)
        .await
        .map_err(|e| RpcError::Connection(e.to_string()))?;
    send.finish().map_err(|e| RpcError::Connection(e.to_string()))?;
    Ok(())
}
/// Send a push event to a client via a QUIC uni-stream.
///
/// One uni-directional stream per event; the stream is finished right after
/// the frame is written.
pub async fn send_push(
    connection: &quinn::Connection,
    event_type: u16,
    payload: bytes::Bytes,
) -> Result<(), RpcError> {
    let mut stream = connection
        .open_uni()
        .await
        .map_err(|e| RpcError::Connection(e.to_string()))?;
    let encoded = PushFrame { event_type, payload }.encode();
    stream
        .write_all(&encoded)
        .await
        .map_err(|e| RpcError::Connection(e.to_string()))?;
    stream
        .finish()
        .map_err(|e| RpcError::Connection(e.to_string()))?;
    Ok(())
}

View File

@@ -0,0 +1,32 @@
[package]
name = "quicproquo-sdk"
version = "0.1.0"
edition = "2021"
description = "Client SDK for quicproquo v2 — connect, auth, send, receive, subscribe"

[dependencies]
# Sibling workspace crates: core primitives, wire types, QUIC RPC framework.
quicproquo-core = { path = "../quicproquo-core" }
quicproquo-proto = { path = "../quicproquo-proto" }
quicproquo-rpc = { path = "../quicproquo-rpc" }
# Async runtime, futures utilities, diagnostics, error types.
tokio = { workspace = true }
futures = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
anyhow = { workspace = true }
# Serialization for locally persisted blobs (MLS state, member lists).
serde = { workspace = true }
serde_json = { workspace = true }
bincode = { workspace = true }
hex = { workspace = true }
zeroize = { workspace = true }
# Local conversation store (SQLCipher via rusqlite) and DB key derivation.
rusqlite = { workspace = true }
argon2 = { workspace = true }
rand = { workspace = true }
sha2 = { workspace = true }
# Transport: rustls TLS configuration + quinn QUIC.
rustls = { workspace = true }
quinn = { workspace = true }

[dev-dependencies]
tokio = { workspace = true, features = ["test-util"] }

[lints]
workspace = true

View File

@@ -0,0 +1,193 @@
//! `QpqClient` — the main entry point for the quicproquo SDK.
use std::sync::Arc;
use tokio::sync::broadcast;
use tracing::info;
use crate::config::ClientConfig;
use crate::conversation::ConversationStore;
use crate::error::SdkError;
use crate::events::ClientEvent;
/// The main SDK client. All state is contained within this struct — no globals.
pub struct QpqClient {
    /// Connection/storage settings supplied at construction.
    config: ClientConfig,
    /// Active RPC connection; `None` while disconnected.
    rpc: Option<quicproquo_rpc::client::RpcClient>,
    /// Broadcast channel fanning out `ClientEvent`s to all subscribers.
    event_tx: broadcast::Sender<ClientEvent>,
    /// The authenticated username, if logged in.
    username: Option<String>,
    /// The local identity key (Ed25519 public key, 32 bytes).
    identity_key: Option<Vec<u8>>,
    /// Session token from OPAQUE login.
    session_token: Option<Vec<u8>>,
    /// Local conversation store (SQLCipher).
    conv_store: Option<ConversationStore>,
}
impl QpqClient {
    /// Create a new client with the given configuration.
    ///
    /// The client starts disconnected; call [`QpqClient::connect`] before
    /// issuing RPCs.
    pub fn new(config: ClientConfig) -> Self {
        // 256 buffered events; slow subscribers observe `Lagged` instead of
        // blocking the sender.
        let (event_tx, _) = broadcast::channel(256);
        Self {
            config,
            rpc: None,
            event_tx,
            username: None,
            identity_key: None,
            session_token: None,
            conv_store: None,
        }
    }

    /// Connect to the server and open the local conversation store.
    ///
    /// # Errors
    /// Fails if TLS setup, the QUIC connection, or opening the local
    /// database fails. On any failure the client keeps NO partial state
    /// (neither `rpc` nor `conv_store` is set), so `is_connected()` stays
    /// `false` and `connect` can simply be retried.
    pub async fn connect(&mut self) -> Result<(), SdkError> {
        let tls_config = build_tls_config(self.config.accept_invalid_certs)?;
        let rpc_config = quicproquo_rpc::client::RpcClientConfig {
            server_addr: self.config.server_addr,
            server_name: self.config.server_name.clone(),
            tls_config: Arc::new(tls_config),
            alpn: self.config.alpn.clone(),
        };
        let client = quicproquo_rpc::client::RpcClient::connect(rpc_config).await?;
        // Open the store BEFORE committing the RPC handle to `self`:
        // previously a store failure left `self.rpc` populated, so
        // `is_connected()` reported true even though `connect` had failed.
        let store = ConversationStore::open(
            &self.config.db_path,
            self.config.db_password.as_deref(),
        )?;
        self.rpc = Some(client);
        self.conv_store = Some(store);
        self.emit(ClientEvent::Connected);
        info!(server = %self.config.server_addr, "connected");
        Ok(())
    }

    /// Subscribe to client events. Returns a broadcast receiver.
    pub fn subscribe(&self) -> broadcast::Receiver<ClientEvent> {
        self.event_tx.subscribe()
    }

    /// Get the authenticated username, if logged in.
    pub fn username(&self) -> Option<&str> {
        self.username.as_deref()
    }

    /// Get the local identity key, if set.
    pub fn identity_key(&self) -> Option<&[u8]> {
        self.identity_key.as_deref()
    }

    /// Whether the client is connected.
    pub fn is_connected(&self) -> bool {
        self.rpc.is_some()
    }

    /// Whether the client is authenticated (holds a session token).
    pub fn is_authenticated(&self) -> bool {
        self.session_token.is_some()
    }

    /// Get a reference to the RPC client (for direct calls).
    ///
    /// # Errors
    /// Returns `SdkError::NotConnected` before a successful `connect`.
    pub fn rpc(&self) -> Result<&quicproquo_rpc::client::RpcClient, SdkError> {
        self.rpc.as_ref().ok_or(SdkError::NotConnected)
    }

    /// Get a reference to the conversation store.
    ///
    /// # Errors
    /// Returns `SdkError::NotConnected` before a successful `connect`.
    pub fn conversations(&self) -> Result<&ConversationStore, SdkError> {
        self.conv_store
            .as_ref()
            .ok_or(SdkError::NotConnected)
    }

    /// Disconnect from the server. Emits `Disconnected` when a connection
    /// was actually open; a no-op otherwise.
    pub fn disconnect(&mut self) {
        if let Some(rpc) = self.rpc.take() {
            rpc.close();
            self.emit(ClientEvent::Disconnected {
                reason: "client closed".into(),
            });
        }
    }

    /// Broadcast an event; send errors (no subscribers) are ignored.
    fn emit(&self, event: ClientEvent) {
        let _ = self.event_tx.send(event);
    }
}
impl Drop for QpqClient {
    /// Close the RPC connection (and emit `Disconnected`) if the client is
    /// dropped while still connected.
    fn drop(&mut self) {
        self.disconnect();
    }
}
/// Build the rustls client configuration for the QUIC connection.
///
/// NOTE(review): the "secure" path below installs an EMPTY root store, so
/// every certificate verification will fail until real trust anchors
/// (platform roots or webpki-roots) are wired in — confirm whether this is
/// intentional for Phase 1.
fn build_tls_config(accept_invalid_certs: bool) -> Result<rustls::ClientConfig, SdkError> {
    let builder = rustls::ClientConfig::builder();
    if accept_invalid_certs {
        // Dev mode: disable certificate verification entirely via
        // `InsecureVerifier`. Never enable in production.
        let config = builder
            .dangerous()
            .with_custom_certificate_verifier(Arc::new(InsecureVerifier))
            .with_no_client_auth();
        Ok(config)
    } else {
        // TODO: populate with real trust anchors; empty roots reject all servers.
        let roots = rustls::RootCertStore::empty();
        let config = builder
            .with_root_certificates(roots)
            .with_no_client_auth();
        Ok(config)
    }
}
/// A TLS verifier that accepts any certificate (for dev mode only).
///
/// SECURITY: installing this verifier disables all server authentication,
/// making the connection trivially interceptable. It is only reachable via
/// `accept_invalid_certs = true` in the client configuration.
#[derive(Debug)]
struct InsecureVerifier;
impl rustls::client::danger::ServerCertVerifier for InsecureVerifier {
    // Unconditionally report the server certificate as valid.
    fn verify_server_cert(
        &self,
        _end_entity: &rustls::pki_types::CertificateDer<'_>,
        _intermediates: &[rustls::pki_types::CertificateDer<'_>],
        _server_name: &rustls::pki_types::ServerName<'_>,
        _ocsp_response: &[u8],
        _now: rustls::pki_types::UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }
    // Accept any TLS 1.2 handshake signature without checking it.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }
    // Accept any TLS 1.3 handshake signature without checking it.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }
    // Advertise a broad scheme list so the handshake never fails on
    // signature-scheme negotiation.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        vec![
            rustls::SignatureScheme::RSA_PKCS1_SHA256,
            rustls::SignatureScheme::RSA_PKCS1_SHA384,
            rustls::SignatureScheme::RSA_PKCS1_SHA512,
            rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
            rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
            rustls::SignatureScheme::ED25519,
            rustls::SignatureScheme::RSA_PSS_SHA256,
            rustls::SignatureScheme::RSA_PSS_SHA384,
            rustls::SignatureScheme::RSA_PSS_SHA512,
        ]
    }
}

View File

@@ -0,0 +1,44 @@
//! Client configuration.
use std::net::SocketAddr;
use std::path::PathBuf;
/// Configuration for a `QpqClient` instance.
///
/// See the `Default` impl for local-development values.
#[derive(Debug, Clone)]
pub struct ClientConfig {
    /// Server address (host:port).
    pub server_addr: SocketAddr,
    /// Server hostname for TLS SNI.
    pub server_name: String,
    /// Path to the local conversation database.
    pub db_path: PathBuf,
    /// Password for encrypting the local database (SQLCipher).
    /// If `None`, the database is stored unencrypted.
    pub db_password: Option<String>,
    /// Path to the local state file (identity key, MLS state).
    pub state_path: PathBuf,
    /// Whether to accept self-signed TLS certificates (dev mode only).
    pub accept_invalid_certs: bool,
    /// ALPN protocol identifier for the RPC service (e.g. `qpq/2`).
    pub alpn: Vec<u8>,
}
impl Default for ClientConfig {
    /// Local-development defaults: loopback server on port 7000, plain
    /// (unencrypted) database files in the working directory, strict TLS,
    /// ALPN `qpq/2`.
    fn default() -> Self {
        let server_addr = "127.0.0.1:7000".parse().expect("valid addr");
        Self {
            server_addr,
            server_name: String::from("localhost"),
            db_path: PathBuf::from("conversations.db"),
            db_password: None,
            state_path: PathBuf::from("client-state.bin"),
            accept_invalid_certs: false,
            alpn: b"qpq/2".to_vec(),
        }
    }
}

View File

@@ -0,0 +1,481 @@
//! Conversation management — create DMs, groups, send and receive messages.
//!
//! This is the SDK-side conversation store (migrated from v1 client).
use std::path::Path;
use anyhow::Context;
use rusqlite::{params, Connection, OptionalExtension};
use zeroize::Zeroizing;
// ── Types ────────────────────────────────────────────────────────────────────
/// 16-byte conversation identifier.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ConversationId(pub [u8; 16]);
impl ConversationId {
    /// Parse an ID from a byte slice; `None` unless it is exactly 16 bytes.
    pub fn from_slice(s: &[u8]) -> Option<Self> {
        // TryFrom on slices performs the length check for us.
        let raw: [u8; 16] = s.try_into().ok()?;
        Some(Self(raw))
    }
    /// Derive a conversation ID from a group name via SHA-256 truncation.
    pub fn from_group_name(name: &str) -> Self {
        use sha2::{Digest, Sha256};
        let digest = Sha256::digest(name.as_bytes());
        let mut raw = [0u8; 16];
        raw.copy_from_slice(&digest[..16]);
        Self(raw)
    }
    /// Lowercase hex rendering of the raw 16 bytes.
    pub fn hex(&self) -> String {
        hex::encode(self.0)
    }
}
/// The kind of conversation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConversationKind {
    /// 1:1 DM channel with a specific peer.
    Dm {
        /// Peer's identity key bytes — presumably Ed25519 public key, as
        /// elsewhere in the SDK; confirm against the key-exchange code.
        peer_key: Vec<u8>,
        /// Peer's username, when resolved.
        peer_username: Option<String>,
    },
    /// Named group with N members.
    Group { name: String },
}
/// A conversation with its metadata and MLS state.
#[derive(Clone, Debug)]
pub struct Conversation {
    /// Stable 16-byte identifier (primary key in the local store).
    pub id: ConversationId,
    /// DM or group, with kind-specific metadata.
    pub kind: ConversationKind,
    /// Human-readable name shown in conversation lists.
    pub display_name: String,
    /// Serialized MLS group (bincode).
    pub mls_group_blob: Option<Vec<u8>>,
    /// Serialized keystore (bincode HashMap).
    pub keystore_blob: Option<Vec<u8>>,
    /// Member identity keys.
    pub member_keys: Vec<Vec<u8>>,
    /// Count of messages not yet read by the local user.
    pub unread_count: u32,
    /// Last activity time, milliseconds (epoch basis assumed — confirm).
    pub last_activity_ms: u64,
    /// Creation time, milliseconds.
    pub created_at_ms: u64,
    /// Whether this conversation uses hybrid (X25519 + ML-KEM-768) MLS keys.
    pub is_hybrid: bool,
    /// Highest server-side delivery sequence number seen.
    pub last_seen_seq: u64,
}
/// A stored message.
#[derive(Clone, Debug)]
pub struct StoredMessage {
    /// Conversation this message belongs to.
    pub conversation_id: ConversationId,
    /// 16-byte message ID, when one was assigned.
    pub message_id: Option<[u8; 16]>,
    /// Sender's identity key bytes.
    pub sender_key: Vec<u8>,
    /// Sender's display name, when known.
    pub sender_name: Option<String>,
    /// Message text body.
    pub body: String,
    /// Free-form message type tag (stored as TEXT in the DB).
    pub msg_type: String,
    /// ID of a referenced message (e.g. for replies) — semantics defined by
    /// `msg_type`; confirm against callers.
    pub ref_msg_id: Option<[u8; 16]>,
    /// Message timestamp in milliseconds.
    pub timestamp_ms: u64,
    /// True when the local user sent the message.
    pub is_outgoing: bool,
}
/// An entry in the offline outbox queue.
#[derive(Clone, Debug)]
pub struct OutboxEntry {
    /// SQLite rowid of the outbox row (AUTOINCREMENT primary key).
    pub id: i64,
    /// Conversation the queued payload belongs to.
    pub conversation_id: ConversationId,
    /// Identity key of the intended recipient.
    pub recipient_key: Vec<u8>,
    /// Opaque payload to deliver once online.
    pub payload: Vec<u8>,
    /// Number of delivery attempts made so far.
    pub retry_count: u32,
}
// ── ConversationStore ────────────────────────────────────────────────────────
/// SQLCipher-backed conversation and message store.
pub struct ConversationStore {
    /// Owned SQLite connection; all methods take `&self`, so access is
    /// single-threaded per store instance (rusqlite `Connection` is `!Sync`).
    conn: Connection,
}
impl ConversationStore {
    /// Open or create the conversation database.
    ///
    /// When `password` is `Some`, a 32-byte key is derived with Argon2id
    /// (see `derive_db_key`) and installed through the SQLCipher `key`
    /// pragma before any other statement touches the file. NOTE(review):
    /// if the linked SQLite lacks SQLCipher the pragma silently no-ops and
    /// the database ends up unencrypted — confirm the build configuration.
    pub fn open(db_path: &Path, password: Option<&str>) -> anyhow::Result<Self> {
        // Best-effort directory creation; a real failure surfaces from
        // Connection::open just below with a better error.
        if let Some(parent) = db_path.parent() {
            std::fs::create_dir_all(parent).ok();
        }
        let conn = Connection::open(db_path).context("open conversation db")?;
        if let Some(pw) = password {
            let key = derive_db_key(pw, db_path)?;
            // x'…' raw-key form makes SQLCipher use the 32 bytes directly
            // instead of running its own KDF over a passphrase. The hex
            // copy is wrapped in Zeroizing so it is wiped on drop.
            let hex_key = Zeroizing::new(hex::encode(&*key));
            conn.pragma_update(None, "key", format!("x'{}'", &*hex_key))
                .context("set SQLCipher key")?;
        }
        conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
            .context("set pragmas")?;
        Self::migrate(&conn)?;
        Ok(Self { conn })
    }

    /// Create tables and indexes if missing. Idempotent; runs on every open.
    fn migrate(conn: &Connection) -> anyhow::Result<()> {
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS conversations (
                id BLOB PRIMARY KEY,
                kind TEXT NOT NULL,
                display_name TEXT NOT NULL,
                peer_key BLOB,
                peer_username TEXT,
                group_name TEXT,
                mls_group_blob BLOB,
                keystore_blob BLOB,
                member_keys BLOB,
                unread_count INTEGER NOT NULL DEFAULT 0,
                last_activity_ms INTEGER NOT NULL DEFAULT 0,
                created_at_ms INTEGER NOT NULL DEFAULT 0,
                is_hybrid INTEGER NOT NULL DEFAULT 0,
                last_seen_seq INTEGER NOT NULL DEFAULT 0
            );
            CREATE TABLE IF NOT EXISTS messages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                conversation_id BLOB NOT NULL REFERENCES conversations(id),
                message_id BLOB,
                sender_key BLOB NOT NULL,
                sender_name TEXT,
                body TEXT NOT NULL,
                msg_type TEXT NOT NULL,
                ref_msg_id BLOB,
                timestamp_ms INTEGER NOT NULL,
                is_outgoing INTEGER NOT NULL DEFAULT 0
            );
            CREATE INDEX IF NOT EXISTS idx_messages_conv
                ON messages(conversation_id, timestamp_ms);
            CREATE TABLE IF NOT EXISTS outbox (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                conversation_id BLOB NOT NULL,
                recipient_key BLOB NOT NULL,
                payload BLOB NOT NULL,
                created_at_ms INTEGER NOT NULL,
                retry_count INTEGER NOT NULL DEFAULT 0,
                status TEXT NOT NULL DEFAULT 'pending'
            );
            CREATE INDEX IF NOT EXISTS idx_outbox_status
                ON outbox(status, created_at_ms);",
        )
        .context("migrate conversation db")
    }

    /// Save or upsert a conversation.
    ///
    /// On conflict (existing id), the row's `kind`, peer columns,
    /// `group_name` and `created_at_ms` are intentionally left unchanged —
    /// only mutable state (blobs, counters, activity) is updated.
    pub fn save_conversation(&self, conv: &Conversation) -> anyhow::Result<()> {
        // Flatten the enum into the nullable columns the table uses.
        let (kind_str, peer_key, peer_username, group_name) = match &conv.kind {
            ConversationKind::Dm { peer_key, peer_username } => {
                ("dm", Some(peer_key.as_slice()), peer_username.as_deref(), None)
            }
            ConversationKind::Group { name } => ("group", None, None, Some(name.as_str())),
        };
        let member_keys_blob =
            bincode::serialize(&conv.member_keys).context("serialize member_keys")?;
        self.conn.execute(
            "INSERT INTO conversations
                (id, kind, display_name, peer_key, peer_username, group_name,
                 mls_group_blob, keystore_blob, member_keys, unread_count,
                 last_activity_ms, created_at_ms, is_hybrid, last_seen_seq)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
             ON CONFLICT(id) DO UPDATE SET
                display_name = excluded.display_name,
                mls_group_blob = excluded.mls_group_blob,
                keystore_blob = excluded.keystore_blob,
                member_keys = excluded.member_keys,
                unread_count = excluded.unread_count,
                last_activity_ms = excluded.last_activity_ms,
                is_hybrid = excluded.is_hybrid,
                last_seen_seq = excluded.last_seen_seq",
            params![
                conv.id.0.as_slice(),
                kind_str,
                conv.display_name,
                peer_key,
                peer_username,
                group_name,
                conv.mls_group_blob,
                conv.keystore_blob,
                member_keys_blob,
                conv.unread_count,
                conv.last_activity_ms,
                conv.created_at_ms,
                conv.is_hybrid as i32,
                conv.last_seen_seq as i64,
            ],
        )?;
        Ok(())
    }

    /// Load a conversation by ID. Returns `Ok(None)` when it does not exist.
    pub fn load_conversation(&self, id: &ConversationId) -> anyhow::Result<Option<Conversation>> {
        self.conn
            .query_row(
                "SELECT kind, display_name, peer_key, peer_username, group_name,
                        mls_group_blob, keystore_blob, member_keys, unread_count,
                        last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
                 FROM conversations WHERE id = ?1",
                params![id.0.as_slice()],
                |row| row_to_conversation(id, row),
            )
            .optional()
            .context("load conversation")
    }

    /// List all conversations, most recent first.
    pub fn list_conversations(&self) -> anyhow::Result<Vec<Conversation>> {
        let mut stmt = self.conn.prepare(
            "SELECT id, kind, display_name, peer_key, peer_username, group_name,
                    mls_group_blob, keystore_blob, member_keys, unread_count,
                    last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
             FROM conversations ORDER BY last_activity_ms DESC",
        )?;
        let rows = stmt.query_map([], |row| {
            let id_blob: Vec<u8> = row.get(0)?;
            // A malformed stored id degrades to an all-zero ConversationId
            // rather than failing the whole listing.
            let id = ConversationId::from_slice(&id_blob).unwrap_or(ConversationId([0; 16]));
            row_to_conversation_full(&id, row)
        })?;
        let mut convs = Vec::new();
        for row in rows {
            convs.push(row?);
        }
        Ok(convs)
    }

    /// Find a DM by peer identity key. Returns `Ok(None)` when absent.
    pub fn find_dm_by_peer(&self, peer_key: &[u8]) -> anyhow::Result<Option<Conversation>> {
        // Two-step lookup: resolve the id first, then reuse the standard
        // load path so row mapping lives in one place.
        let id_blob: Option<Vec<u8>> = self
            .conn
            .query_row(
                "SELECT id FROM conversations WHERE kind = 'dm' AND peer_key = ?1",
                params![peer_key],
                |row| row.get(0),
            )
            .optional()?;
        match id_blob {
            Some(blob) => {
                let id = ConversationId::from_slice(&blob)
                    .context("invalid conversation id")?;
                self.load_conversation(&id)
            }
            None => Ok(None),
        }
    }

    /// Save a message (always an insert; messages are append-only).
    pub fn save_message(&self, msg: &StoredMessage) -> anyhow::Result<()> {
        self.conn.execute(
            "INSERT INTO messages
                (conversation_id, message_id, sender_key, sender_name, body,
                 msg_type, ref_msg_id, timestamp_ms, is_outgoing)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
            params![
                msg.conversation_id.0.as_slice(),
                msg.message_id.as_ref().map(|id| id.as_slice()),
                msg.sender_key,
                msg.sender_name,
                msg.body,
                msg.msg_type,
                msg.ref_msg_id.as_ref().map(|id| id.as_slice()),
                msg.timestamp_ms,
                msg.is_outgoing as i32,
            ],
        )?;
        Ok(())
    }

    /// Load recent messages (newest first, then reversed to chronological).
    pub fn load_recent_messages(
        &self,
        conv_id: &ConversationId,
        limit: usize,
    ) -> anyhow::Result<Vec<StoredMessage>> {
        let mut stmt = self.conn.prepare(
            "SELECT message_id, sender_key, sender_name, body, msg_type,
                    ref_msg_id, timestamp_ms, is_outgoing
             FROM messages WHERE conversation_id = ?1
             ORDER BY timestamp_ms DESC LIMIT ?2",
        )?;
        // Clamp to u32 so the bound parameter cannot overflow on 64-bit usize.
        let rows = stmt.query_map(
            params![conv_id.0.as_slice(), limit.min(u32::MAX as usize) as u32],
            |row| row_to_message(conv_id, row),
        )?;
        let mut msgs: Vec<StoredMessage> = rows.collect::<Result<_, _>>()?;
        msgs.reverse();
        Ok(msgs)
    }
}
// ── Helpers ──────────────────────────────────────────────────────────────────
/// Derive a 32-byte SQLCipher key from `password` using Argon2id.
///
/// A random 16-byte salt is persisted next to the database in a `.db-salt`
/// file (0600 on Unix, best-effort) and reused on later opens, so the same
/// password always derives the same key for a given database.
fn derive_db_key(password: &str, db_path: &Path) -> anyhow::Result<Zeroizing<[u8; 32]>> {
    use argon2::{Algorithm, Argon2, Params, Version};
    let salt_path = db_path.with_extension("db-salt");
    // NOTE(review): exists()+read is a TOCTOU window if two processes open
    // the same database concurrently — acceptable for a single-user client?
    let salt = if salt_path.exists() {
        std::fs::read(&salt_path).context("read db salt")?
    } else {
        let mut salt = vec![0u8; 16];
        rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut salt);
        std::fs::write(&salt_path, &salt).context("write db salt")?;
        #[cfg(unix)]
        {
            use std::os::unix::fs::PermissionsExt;
            // Restrict the salt file to the owner; failure is tolerated.
            std::fs::set_permissions(&salt_path, std::fs::Permissions::from_mode(0o600)).ok();
        }
        salt
    };
    // 19 MiB memory, t=2, p=1 — matches the commonly cited Argon2id minimum.
    let params = Params::new(19 * 1024, 2, 1, Some(32))
        .map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
    let argon2 = Argon2::new(Algorithm::Argon2id, Version::default(), params);
    // Zeroizing wipes the derived key from memory when dropped.
    let mut key = Zeroizing::new([0u8; 32]);
    argon2
        .hash_password_into(password.as_bytes(), &salt, &mut *key)
        .map_err(|e| anyhow::anyhow!("db key derivation: {e}"))?;
    Ok(key)
}
/// Build a `Conversation` from a row whose FIRST column is `kind` (the
/// caller already knows the ID and passes it in).
fn row_to_conversation(
    id: &ConversationId,
    row: &rusqlite::Row<'_>,
) -> rusqlite::Result<Conversation> {
    let kind_str: String = row.get(0)?;
    let display_name: String = row.get(1)?;
    let peer_key: Option<Vec<u8>> = row.get(2)?;
    let peer_username: Option<String> = row.get(3)?;
    let group_name: Option<String> = row.get(4)?;
    let mls_group_blob: Option<Vec<u8>> = row.get(5)?;
    let keystore_blob: Option<Vec<u8>> = row.get(6)?;
    let member_keys_blob: Option<Vec<u8>> = row.get(7)?;
    let unread_count: u32 = row.get(8)?;
    let last_activity_ms: u64 = row.get(9)?;
    let created_at_ms: u64 = row.get(10)?;
    let is_hybrid_int: i32 = row.get(11)?;
    let last_seen_seq: i64 = row.get(12)?;
    // Any kind other than "dm" is treated as a group.
    let kind = match kind_str.as_str() {
        "dm" => ConversationKind::Dm {
            peer_key: peer_key.unwrap_or_default(),
            peer_username,
        },
        _ => ConversationKind::Group {
            name: group_name.unwrap_or_default(),
        },
    };
    // Missing or corrupt member-key blobs degrade to an empty list.
    let member_keys: Vec<Vec<u8>> = match member_keys_blob {
        Some(blob) => bincode::deserialize(&blob).ok().unwrap_or_default(),
        None => Vec::new(),
    };
    Ok(Conversation {
        id: id.clone(),
        kind,
        display_name,
        mls_group_blob,
        keystore_blob,
        member_keys,
        unread_count,
        last_activity_ms,
        created_at_ms,
        is_hybrid: is_hybrid_int != 0,
        last_seen_seq: last_seen_seq as u64,
    })
}
/// Same mapping as `row_to_conversation`, but for rows that also SELECT the
/// `id` column first — every index is shifted by one; the decoded ID is
/// still supplied by the caller.
fn row_to_conversation_full(
    id: &ConversationId,
    row: &rusqlite::Row<'_>,
) -> rusqlite::Result<Conversation> {
    let kind_str: String = row.get(1)?;
    let display_name: String = row.get(2)?;
    let peer_key: Option<Vec<u8>> = row.get(3)?;
    let peer_username: Option<String> = row.get(4)?;
    let group_name: Option<String> = row.get(5)?;
    let mls_group_blob: Option<Vec<u8>> = row.get(6)?;
    let keystore_blob: Option<Vec<u8>> = row.get(7)?;
    let member_keys_blob: Option<Vec<u8>> = row.get(8)?;
    let unread_count: u32 = row.get(9)?;
    let last_activity_ms: u64 = row.get(10)?;
    let created_at_ms: u64 = row.get(11)?;
    let is_hybrid_int: i32 = row.get(12)?;
    let last_seen_seq: i64 = row.get(13)?;
    // Any kind other than "dm" is treated as a group.
    let kind = match kind_str.as_str() {
        "dm" => ConversationKind::Dm {
            peer_key: peer_key.unwrap_or_default(),
            peer_username,
        },
        _ => ConversationKind::Group {
            name: group_name.unwrap_or_default(),
        },
    };
    // Missing or corrupt member-key blobs degrade to an empty list.
    let member_keys: Vec<Vec<u8>> = match member_keys_blob {
        Some(blob) => bincode::deserialize(&blob).ok().unwrap_or_default(),
        None => Vec::new(),
    };
    Ok(Conversation {
        id: id.clone(),
        kind,
        display_name,
        mls_group_blob,
        keystore_blob,
        member_keys,
        unread_count,
        last_activity_ms,
        created_at_ms,
        is_hybrid: is_hybrid_int != 0,
        last_seen_seq: last_seen_seq as u64,
    })
}
/// Convert a byte slice to a fixed 16-byte array, or `None` when the
/// length is not exactly 16.
fn to_16(v: &[u8]) -> Option<[u8; 16]> {
    // TryFrom<&[u8]> for [u8; 16] performs the length check for us.
    v.try_into().ok()
}
/// Build a `StoredMessage` from a `messages` row (columns start at
/// `message_id`); the conversation ID comes from the caller's query filter.
fn row_to_message(
    conv_id: &ConversationId,
    row: &rusqlite::Row<'_>,
) -> rusqlite::Result<StoredMessage> {
    let raw_message_id: Option<Vec<u8>> = row.get(0)?;
    let sender_key: Vec<u8> = row.get(1)?;
    let sender_name: Option<String> = row.get(2)?;
    let body: String = row.get(3)?;
    let msg_type: String = row.get(4)?;
    let raw_ref_msg_id: Option<Vec<u8>> = row.get(5)?;
    let timestamp_ms: u64 = row.get(6)?;
    let is_outgoing: i32 = row.get(7)?;
    // Malformed (non-16-byte) stored IDs collapse to None rather than erroring.
    let message_id = raw_message_id.as_deref().and_then(to_16);
    let ref_msg_id = raw_ref_msg_id.as_deref().and_then(to_16);
    Ok(StoredMessage {
        conversation_id: conv_id.clone(),
        message_id,
        sender_key,
        sender_name,
        body,
        msg_type,
        ref_msg_id,
        timestamp_ms,
        is_outgoing: is_outgoing != 0,
    })
}

View File

@@ -0,0 +1,29 @@
//! SDK error types.
/// Errors returned by SDK operations.
#[derive(Debug, thiserror::Error)]
pub enum SdkError {
    /// An operation needed a live server connection but `connect()` has not
    /// succeeded (or the client has been disconnected).
    #[error("not connected to server")]
    NotConnected,
    /// An operation needed an authenticated session.
    #[error("not authenticated — call login() first")]
    NotAuthenticated,
    /// OPAQUE login/registration failed; the payload is a description.
    #[error("authentication failed: {0}")]
    AuthFailed(String),
    /// No conversation matched the given identifier/name.
    #[error("conversation not found: {0}")]
    ConversationNotFound(String),
    /// A cryptographic operation failed.
    #[error("crypto error: {0}")]
    Crypto(String),
    /// Propagated transport/framing error from the RPC layer.
    #[error("RPC error: {0}")]
    Rpc(#[from] quicproquo_rpc::error::RpcError),
    /// Local persistence failure.
    #[error("storage error: {0}")]
    Storage(String),
    /// Catch-all for `anyhow`-reported failures (e.g. conversation store).
    #[error("{0}")]
    Other(#[from] anyhow::Error),
}

View File

@@ -0,0 +1,56 @@
//! Client event system — real-time notifications from the SDK.
/// Events emitted by the SDK to the UI layer.
///
/// Events carry owned data and derive `Clone` so they can be fanned out to
/// every broadcast subscriber.
#[derive(Debug, Clone)]
pub enum ClientEvent {
    /// Successfully connected to the server.
    Connected,
    /// Disconnected from the server.
    Disconnected { reason: String },
    /// Authentication succeeded.
    Authenticated { username: String },
    /// A new message was received in a conversation.
    MessageReceived {
        conversation_id: [u8; 16],
        sender_key: Vec<u8>,
        sender_name: Option<String>,
        body: String,
        timestamp_ms: u64,
    },
    /// A message was sent successfully; `seq` is the server-assigned
    /// delivery sequence number.
    MessageSent {
        conversation_id: [u8; 16],
        seq: u64,
    },
    /// A new conversation was created or discovered.
    ConversationCreated {
        conversation_id: [u8; 16],
        display_name: String,
    },
    /// A member was added to a group conversation.
    MemberAdded {
        conversation_id: [u8; 16],
        member_key: Vec<u8>,
    },
    /// A member was removed from a group conversation.
    MemberRemoved {
        conversation_id: [u8; 16],
        member_key: Vec<u8>,
    },
    /// Server-push event received (raw, not yet decoded by the SDK).
    PushEvent {
        event_type: u16,
        payload: Vec<u8>,
    },
    /// An error occurred in the background.
    Error { message: String },
}

View File

@@ -0,0 +1,10 @@
//! Client SDK for quicproquo v2.
//!
//! Provides `QpqClient` — a single entry point for connecting, authenticating,
//! sending/receiving messages, and subscribing to real-time events.
// Public modules, alphabetical.
pub mod client;
pub mod config;
pub mod conversation;
pub mod error;
pub mod events;

View File

@@ -0,0 +1,72 @@
//! Authentication domain logic — OPAQUE registration and login.
//!
//! This module contains the pure business logic for OPAQUE auth,
//! extracted from `node_service/auth_ops.rs`. It operates on domain
//! types and the `Store` trait, with no dependency on Cap'n Proto or Protobuf.
use std::sync::Arc;
use dashmap::DashMap;
use opaque_ke::ServerSetup;
use quicproquo_core::opaque_auth::OpaqueSuite;
use crate::auth::{AuthConfig, PendingLogin, SessionInfo};
use crate::storage::{Store, StorageError};
use super::types::*;
/// Shared state needed by auth operations.
pub struct AuthService {
    /// Persistent backing store (user records, identity keys).
    pub store: Arc<dyn Store>,
    /// Server-side OPAQUE key material.
    pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    /// Logins that completed `start` but not yet `finish` — presumably
    /// keyed by username; confirm against the login flow.
    pub pending_logins: Arc<DashMap<String, PendingLogin>>,
    /// Live sessions, keyed by the raw session-token bytes.
    pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    /// Auth configuration parameters.
    pub auth_cfg: Arc<AuthConfig>,
}
impl AuthService {
    /// Validate a session token and return the caller's auth context.
    ///
    /// Expired sessions are evicted as a side effect and yield `None`.
    pub fn validate_session(&self, token: &[u8]) -> Option<CallerAuth> {
        // Copy out what we need and DROP the DashMap guard before touching
        // the map again: calling `remove` while a `Ref` from `get` on the
        // same map is still alive blocks on the shard's lock and deadlocks
        // (the previous code did exactly that on the expiry path).
        let (expires_at, identity_key) = {
            let info = self.sessions.get(token)?;
            (info.expires_at, info.identity_key.clone())
        };
        if expires_at <= crate::auth::current_timestamp() {
            self.sessions.remove(token);
            return None;
        }
        Some(CallerAuth {
            identity_key,
            token: token.to_vec(),
            device_id: None,
        })
    }

    /// Start OPAQUE registration.
    ///
    /// Deserializes the client's registration request and returns the
    /// serialized server registration response.
    pub fn register_start(&self, req: RegisterStartReq) -> Result<RegisterStartResp, StorageError> {
        use opaque_ke::ServerRegistration;
        let result = ServerRegistration::<OpaqueSuite>::start(
            &self.opaque_setup,
            opaque_ke::RegistrationRequest::deserialize(&req.request_bytes)
                .map_err(|e| StorageError::Io(format!("bad registration request: {e}")))?,
            req.username.as_bytes(),
        )
        .map_err(|e| StorageError::Io(format!("OPAQUE register start: {e}")))?;
        let response_bytes = result.message.serialize().to_vec();
        Ok(RegisterStartResp { response_bytes })
    }

    /// Finish OPAQUE registration — persist user record and identity key.
    ///
    /// NOTE(review): the record and the identity key are stored in two
    /// separate calls; a failure between them leaves a user record with no
    /// identity key — confirm the store tolerates or repairs that state.
    pub fn register_finish(&self, req: RegisterFinishReq) -> Result<RegisterFinishResp, StorageError> {
        let upload = opaque_ke::RegistrationUpload::<OpaqueSuite>::deserialize(&req.upload_bytes)
            .map_err(|e| StorageError::Io(format!("bad registration upload: {e}")))?;
        let record = opaque_ke::ServerRegistration::<OpaqueSuite>::finish(upload);
        let serialized = record.serialize().to_vec();
        self.store.store_user_record(&req.username, serialized)?;
        self.store
            .store_user_identity_key(&req.username, req.identity_key)?;
        Ok(RegisterFinishResp { success: true })
    }
}

View File

@@ -0,0 +1,110 @@
//! Delivery domain logic — enqueue, fetch, peek, ack.
//!
//! Pure business logic operating on `Store` trait and domain types.
use std::sync::Arc;
use dashmap::DashMap;
use tokio::sync::Notify;
use crate::storage::Store;
use super::types::*;
/// Shared state needed by delivery operations.
pub struct DeliveryService {
    /// Persistent message-queue store.
    pub store: Arc<dyn Store>,
    /// Long-poll waiters keyed by recipient identity key; the matching
    /// `Notify` is triggered whenever something is enqueued for them.
    pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
}
impl DeliveryService {
    /// Wake the long-poll waiter registered for `recipient`, if one exists.
    fn wake(&self, recipient: &[u8]) {
        if let Some(notify) = self.waiters.get(recipient) {
            notify.notify_one();
        }
    }

    /// Enqueue a payload for delivery and wake any long-polling waiter.
    pub fn enqueue(&self, req: EnqueueReq) -> Result<EnqueueResp, crate::storage::StorageError> {
        // A TTL of zero means "no expiry".
        let ttl = (req.ttl_secs > 0).then_some(req.ttl_secs);
        let seq = self
            .store
            .enqueue(&req.recipient_key, &req.channel_id, req.payload, ttl)?;
        self.wake(&req.recipient_key);
        Ok(EnqueueResp {
            seq,
            delivery_proof: Vec::new(), // TODO: sign in Phase 2
        })
    }

    /// Fetch and drain queued messages (all of them when `limit == 0`).
    pub fn fetch(&self, req: FetchReq) -> Result<FetchResp, crate::storage::StorageError> {
        let raw = match req.limit {
            0 => self.store.fetch(&req.recipient_key, &req.channel_id)?,
            n => self
                .store
                .fetch_limited(&req.recipient_key, &req.channel_id, n as usize)?,
        };
        let payloads = raw
            .into_iter()
            .map(|(seq, data)| Envelope { seq, data })
            .collect();
        Ok(FetchResp { payloads })
    }

    /// Peek at messages without removing them.
    ///
    /// NOTE(review): a `limit` of 0 is forwarded to the store unchanged —
    /// presumably meaning "no limit"; confirm against `Store::peek`.
    pub fn peek(&self, req: PeekReq) -> Result<PeekResp, crate::storage::StorageError> {
        let limit = if req.limit > 0 { req.limit as usize } else { 0 };
        let raw = self.store.peek(&req.recipient_key, &req.channel_id, limit)?;
        let payloads = raw
            .into_iter()
            .map(|(seq, data)| Envelope { seq, data })
            .collect();
        Ok(PeekResp { payloads })
    }

    /// Acknowledge messages up to a sequence number.
    pub fn ack(&self, req: AckReq) -> Result<(), crate::storage::StorageError> {
        self.store
            .ack(&req.recipient_key, &req.channel_id, req.seq_up_to)?;
        Ok(())
    }

    /// Enqueue the same payload for several recipients, waking each waiter.
    /// Returns one sequence number per recipient, in input order.
    pub fn batch_enqueue(
        &self,
        req: BatchEnqueueReq,
    ) -> Result<BatchEnqueueResp, crate::storage::StorageError> {
        let ttl = (req.ttl_secs > 0).then_some(req.ttl_secs);
        let mut seqs = Vec::with_capacity(req.recipient_keys.len());
        for recipient in &req.recipient_keys {
            let seq = self
                .store
                .enqueue(recipient, &req.channel_id, req.payload.clone(), ttl)?;
            seqs.push(seq);
            self.wake(recipient);
        }
        Ok(BatchEnqueueResp { seqs })
    }
}

View File

@@ -0,0 +1,10 @@
//! Domain types and service logic — protocol-agnostic.
//!
//! These types define the server's business logic independently of any
//! serialization format (Cap'n Proto, Protobuf). RPC handlers translate
//! wire-format messages into these types, call service functions, and
//! translate the results back.
// Domain submodules, alphabetical.
pub mod auth;
pub mod delivery;
pub mod types;

View File

@@ -0,0 +1,260 @@
//! Plain Rust request/response types for server domain logic.
//!
//! No proto, no capnp — just Rust structs.
// ── Auth ─────────────────────────────────────────────────────────────────────
/// Caller authentication context (resolved from session token).
#[derive(Debug, Clone)]
pub struct CallerAuth {
    /// Ed25519 identity key of the authenticated caller (32 bytes).
    pub identity_key: Vec<u8>,
    /// Session token bytes.
    pub token: Vec<u8>,
    /// Device ID (optional, for auditing).
    pub device_id: Option<Vec<u8>>,
}
/// OPAQUE registration start.
#[derive(Debug, Clone)]
pub struct RegisterStartReq {
    pub username: String,
    pub request_bytes: Vec<u8>,
}
#[derive(Debug, Clone)]
pub struct RegisterStartResp {
    pub response_bytes: Vec<u8>,
}
/// OPAQUE registration finish.
#[derive(Debug, Clone)]
pub struct RegisterFinishReq {
    pub username: String,
    pub upload_bytes: Vec<u8>,
    pub identity_key: Vec<u8>,
}
#[derive(Debug, Clone)]
pub struct RegisterFinishResp {
    pub success: bool,
}
/// OPAQUE login start.
#[derive(Debug, Clone)]
pub struct LoginStartReq {
    pub username: String,
    pub request_bytes: Vec<u8>,
}
#[derive(Debug, Clone)]
pub struct LoginStartResp {
    pub response_bytes: Vec<u8>,
}
/// OPAQUE login finish.
#[derive(Debug, Clone)]
pub struct LoginFinishReq {
    pub username: String,
    pub finalization_bytes: Vec<u8>,
    pub identity_key: Vec<u8>,
}
#[derive(Debug, Clone)]
pub struct LoginFinishResp {
    pub session_token: Vec<u8>,
}
// ── Delivery ─────────────────────────────────────────────────────────────────
/// An envelope pairing a sequence number with an opaque payload.
#[derive(Debug, Clone)]
pub struct Envelope {
    pub seq: u64,
    pub data: Vec<u8>,
}
/// Enqueue one payload for one recipient. `ttl_secs == 0` means no expiry.
#[derive(Debug, Clone)]
pub struct EnqueueReq {
    pub recipient_key: Vec<u8>,
    pub payload: Vec<u8>,
    pub channel_id: Vec<u8>,
    pub ttl_secs: u32,
}
#[derive(Debug, Clone)]
pub struct EnqueueResp {
    pub seq: u64,
    pub delivery_proof: Vec<u8>,
}
/// Fetch-and-drain request. `limit == 0` fetches everything queued.
#[derive(Debug, Clone)]
pub struct FetchReq {
    pub recipient_key: Vec<u8>,
    pub channel_id: Vec<u8>,
    pub limit: u32,
}
#[derive(Debug, Clone)]
pub struct FetchResp {
    pub payloads: Vec<Envelope>,
}
/// Non-destructive read of queued messages.
#[derive(Debug, Clone)]
pub struct PeekReq {
    pub recipient_key: Vec<u8>,
    pub channel_id: Vec<u8>,
    pub limit: u32,
}
#[derive(Debug, Clone)]
pub struct PeekResp {
    pub payloads: Vec<Envelope>,
}
/// Acknowledge all messages with sequence numbers up to `seq_up_to`.
#[derive(Debug, Clone)]
pub struct AckReq {
    pub recipient_key: Vec<u8>,
    pub channel_id: Vec<u8>,
    pub seq_up_to: u64,
}
/// Enqueue one payload for many recipients at once.
#[derive(Debug, Clone)]
pub struct BatchEnqueueReq {
    pub recipient_keys: Vec<Vec<u8>>,
    pub payload: Vec<u8>,
    pub channel_id: Vec<u8>,
    pub ttl_secs: u32,
}
/// One sequence number per recipient, in input order.
#[derive(Debug, Clone)]
pub struct BatchEnqueueResp {
    pub seqs: Vec<u64>,
}
// ── Keys ─────────────────────────────────────────────────────────────────────
/// Upload an MLS-style key package for `identity_key`.
#[derive(Debug, Clone)]
pub struct UploadKeyPackageReq {
    pub identity_key: Vec<u8>,
    /// Opaque key-package bytes; not inspected by the domain layer.
    pub package: Vec<u8>,
}
/// Server reply to [`UploadKeyPackageReq`].
#[derive(Debug, Clone)]
pub struct UploadKeyPackageResp {
    pub fingerprint: Vec<u8>,
}
/// Fetch the stored key package for one identity.
#[derive(Debug, Clone)]
pub struct FetchKeyPackageReq {
    pub identity_key: Vec<u8>,
}
/// Server reply to [`FetchKeyPackageReq`].
#[derive(Debug, Clone)]
pub struct FetchKeyPackageResp {
    pub package: Vec<u8>,
}
/// Publish a hybrid (classical + post-quantum — TODO confirm) public key.
#[derive(Debug, Clone)]
pub struct UploadHybridKeyReq {
    pub identity_key: Vec<u8>,
    pub hybrid_public_key: Vec<u8>,
}
/// Fetch the hybrid public key of one identity.
#[derive(Debug, Clone)]
pub struct FetchHybridKeyReq {
    pub identity_key: Vec<u8>,
}
/// Server reply to [`FetchHybridKeyReq`].
#[derive(Debug, Clone)]
pub struct FetchHybridKeyResp {
    pub hybrid_public_key: Vec<u8>,
}
/// Batch form of [`FetchHybridKeyReq`].
#[derive(Debug, Clone)]
pub struct FetchHybridKeysReq {
    pub identity_keys: Vec<Vec<u8>>,
}
/// Server reply to [`FetchHybridKeysReq`].
// NOTE(review): presumably one entry per requested identity, in request
// order — confirm against the service implementation.
#[derive(Debug, Clone)]
pub struct FetchHybridKeysResp {
    pub keys: Vec<Vec<u8>>,
}
// ── Channel ──────────────────────────────────────────────────────────────────
/// Create (or look up) the channel between the caller and `peer_key`.
#[derive(Debug, Clone)]
pub struct CreateChannelReq {
    pub peer_key: Vec<u8>,
}
/// Server reply to [`CreateChannelReq`].
#[derive(Debug, Clone)]
pub struct CreateChannelResp {
    pub channel_id: Vec<u8>,
    /// `true` if the channel was created by this call, `false` if it
    /// already existed.
    pub was_new: bool,
}
// ── User ─────────────────────────────────────────────────────────────────────
/// Resolve a username to its identity key.
#[derive(Debug, Clone)]
pub struct ResolveUserReq {
    pub username: String,
}
/// Server reply to [`ResolveUserReq`].
#[derive(Debug, Clone)]
pub struct ResolveUserResp {
    pub identity_key: Vec<u8>,
    /// Opaque proof that the mapping is included in the server's
    /// directory (format defined by the transparency layer).
    pub inclusion_proof: Vec<u8>,
}
/// Reverse lookup: resolve an identity key back to a username.
#[derive(Debug, Clone)]
pub struct ResolveIdentityReq {
    pub identity_key: Vec<u8>,
}
/// Server reply to [`ResolveIdentityReq`].
#[derive(Debug, Clone)]
pub struct ResolveIdentityResp {
    pub username: String,
}
// ── Blob ─────────────────────────────────────────────────────────────────────
/// Upload one chunk of a blob identified by its content hash.
#[derive(Debug, Clone)]
pub struct UploadBlobReq {
    pub blob_hash: Vec<u8>,
    pub chunk: Vec<u8>,
    /// Byte offset of `chunk` within the full blob.
    pub offset: u64,
    /// Total size of the blob in bytes, constant across chunks.
    pub total_size: u64,
    pub mime_type: String,
}
/// Server reply to [`UploadBlobReq`].
#[derive(Debug, Clone)]
pub struct UploadBlobResp {
    pub blob_id: Vec<u8>,
}
/// Download a byte range of a stored blob.
#[derive(Debug, Clone)]
pub struct DownloadBlobReq {
    pub blob_id: Vec<u8>,
    pub offset: u64,
    /// Maximum number of bytes to return starting at `offset`.
    pub length: u32,
}
/// Server reply to [`DownloadBlobReq`].
#[derive(Debug, Clone)]
pub struct DownloadBlobResp {
    pub chunk: Vec<u8>,
    pub total_size: u64,
    pub mime_type: String,
}
// ── Device ───────────────────────────────────────────────────────────────────
/// Register a new device under the caller's account.
#[derive(Debug, Clone)]
pub struct RegisterDeviceReq {
    pub device_id: Vec<u8>,
    pub device_name: String,
}
/// Server reply to [`RegisterDeviceReq`].
#[derive(Debug, Clone)]
pub struct RegisterDeviceResp {
    pub success: bool,
}
/// One registered device, as returned by [`ListDevicesResp`].
#[derive(Debug, Clone)]
pub struct DeviceInfo {
    pub device_id: Vec<u8>,
    pub device_name: String,
    /// Registration timestamp (unit/epoch defined by the service —
    /// presumably Unix seconds; confirm against the implementation).
    pub registered_at: u64,
}
/// Server reply listing the caller's registered devices.
#[derive(Debug, Clone)]
pub struct ListDevicesResp {
    pub devices: Vec<DeviceInfo>,
}
/// Revoke one of the caller's devices.
#[derive(Debug, Clone)]
pub struct RevokeDeviceReq {
    pub device_id: Vec<u8>,
}
/// Server reply to [`RevokeDeviceReq`].
#[derive(Debug, Clone)]
pub struct RevokeDeviceResp {
    pub success: bool,
}
// ── P2P ──────────────────────────────────────────────────────────────────────
/// Publish the caller's P2P node address so peers can dial directly.
#[derive(Debug, Clone)]
pub struct PublishEndpointReq {
    pub identity_key: Vec<u8>,
    /// Opaque serialized node address (format owned by the P2P layer).
    pub node_addr: Vec<u8>,
}
/// Resolve the published P2P endpoint of one identity.
#[derive(Debug, Clone)]
pub struct ResolveEndpointReq {
    pub identity_key: Vec<u8>,
}
/// Server reply to [`ResolveEndpointReq`].
#[derive(Debug, Clone)]
pub struct ResolveEndpointResp {
    pub node_addr: Vec<u8>,
}
/// Server health-check reply.
#[derive(Debug, Clone)]
pub struct HealthResp {
    pub status: String,
}

View File

@@ -17,6 +17,7 @@ use tokio::task::LocalSet;
mod auth;
mod config;
pub mod domain;
mod error_codes;
mod federation;
pub mod hooks;