feat: add post-quantum hybrid KEM + SQLCipher persistence

Feature 1 — Post-Quantum Hybrid KEM (X25519 + ML-KEM-768):
- Create hybrid_kem.rs with keygen, encrypt, decrypt + 11 unit tests
- Wire format: version(1) | x25519_eph_pk(32) | mlkem_ct(1088) | nonce(12) | ct
- Add uploadHybridKey/fetchHybridKey RPCs to node.capnp schema
- Server: hybrid key storage in FileBackedStore + RPC handlers
- Client: hybrid keypair in StoredState, auto-wrap/unwrap in send/recv/invite/join
- demo-group runs full hybrid PQ envelope round-trip

Feature 2 — SQLCipher Persistence:
- Extract Store trait from FileBackedStore API
- Create SqlStore (rusqlite + bundled-sqlcipher) with encrypted-at-rest SQLite
- Schema: key_packages, deliveries, hybrid_keys tables with indexes
- Server CLI: --store-backend=sql, --db-path, --db-key flags
- 5 unit tests for SqlStore (FIFO, round-trip, upsert, channel isolation)

Also includes: client lib.rs refactor, auth config, TOML config file support,
mdBook documentation, and assorted smaller cleanups.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-22 08:07:48 +01:00
parent d1ddef4cea
commit f334ed3d43
81 changed files with 14502 additions and 2289 deletions

View File

@@ -0,0 +1,315 @@
//! SQLCipher-backed persistent storage.
//!
//! Uses `rusqlite` with `bundled-sqlcipher` for encrypted-at-rest storage.
//! Implements the same [`Store`] trait as [`FileBackedStore`] but with proper
//! ACID transactions and indexed queries.
use std::path::Path;
use std::sync::Mutex;
use rusqlite::{params, Connection};
use crate::storage::{StorageError, Store};
/// SQLCipher-encrypted storage backend.
///
/// All data is stored in a single encrypted SQLite database. The encryption
/// key is set via `PRAGMA key` at open time.
///
/// Implements [`Store`]; see the trait for the semantics of each operation.
pub struct SqlStore {
    // Single shared connection, serialized behind a Mutex so the trait's
    // `&self` methods can use it; every operation takes the lock for its
    // whole duration, so statement sequences (e.g. select-then-delete)
    // are not interleaved with other callers.
    conn: Mutex<Connection>,
}
impl SqlStore {
    /// Open (or create) an encrypted database at `path`.
    ///
    /// `key` is the passphrase used by SQLCipher. Pass an empty string for an
    /// unencrypted database (useful for testing).
    ///
    /// # Errors
    ///
    /// Returns [`StorageError::Db`] if the file cannot be opened, if `key`
    /// does not match an existing encrypted database, or if schema migration
    /// fails.
    pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
        let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
        if !key.is_empty() {
            // The key pragma must run before any other statement touches the
            // database, otherwise SQLCipher treats the file as plaintext.
            conn.pragma_update(None, "key", key)
                .map_err(|e| StorageError::Db(format!("PRAGMA key failed: {e}")))?;
            // SQLCipher does not verify the passphrase at `PRAGMA key` time;
            // the first real read does. Probe sqlite_master now so a wrong
            // key fails here with a clear error instead of surfacing later as
            // "file is not a database" in an unrelated query.
            conn.query_row("SELECT count(*) FROM sqlite_master", [], |_| Ok(()))
                .map_err(|e| StorageError::Db(format!("key verification failed: {e}")))?;
        }
        // Performance pragmas — safe for a single-writer server.
        conn.execute_batch(
            "PRAGMA journal_mode = WAL;
            PRAGMA synchronous = NORMAL;
            PRAGMA foreign_keys = ON;",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
        let store = Self {
            conn: Mutex::new(conn),
        };
        store.migrate()?;
        Ok(store)
    }

    /// Create schema tables if they don't exist yet.
    ///
    /// Idempotent (everything is `IF NOT EXISTS`), so it runs unconditionally
    /// on every open.
    fn migrate(&self) -> Result<(), StorageError> {
        let conn = self.conn.lock().unwrap();
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS key_packages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                identity_key BLOB NOT NULL,
                package_data BLOB NOT NULL,
                created_at INTEGER DEFAULT (strftime('%s','now'))
            );
            CREATE TABLE IF NOT EXISTS deliveries (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                recipient_key BLOB NOT NULL,
                channel_id BLOB NOT NULL DEFAULT X'',
                payload BLOB NOT NULL,
                created_at INTEGER DEFAULT (strftime('%s','now'))
            );
            CREATE TABLE IF NOT EXISTS hybrid_keys (
                identity_key BLOB PRIMARY KEY,
                hybrid_public_key BLOB NOT NULL
            );
            CREATE INDEX IF NOT EXISTS idx_kp_identity
                ON key_packages(identity_key);
            CREATE INDEX IF NOT EXISTS idx_del_recipient_channel
                ON deliveries(recipient_key, channel_id);",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
        Ok(())
    }
}
impl Store for SqlStore {
    /// Store a one-time KeyPackage for `identity_key`. Packages accumulate
    /// and are consumed oldest-first by [`fetch_key_package`].
    fn upload_key_package(
        &self,
        identity_key: &[u8],
        package: Vec<u8>,
    ) -> Result<(), StorageError> {
        let conn = self.conn.lock().unwrap();
        conn.execute(
            "INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
            params![identity_key, package],
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
        Ok(())
    }

    /// Pop the oldest KeyPackage (FIFO) for `identity_key`, or `None` if the
    /// pool is empty.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let conn = self.conn.lock().unwrap();
        // Select then delete. The Mutex is held across both statements, so no
        // other caller can claim the same package in between.
        let mut stmt = conn
            .prepare(
                "SELECT id, package_data FROM key_packages
                WHERE identity_key = ?1
                ORDER BY id ASC
                LIMIT 1",
            )
            .map_err(|e| StorageError::Db(e.to_string()))?;
        let row = stmt
            .query_row(params![identity_key], |row| {
                Ok((row.get::<_, i64>(0)?, row.get::<_, Vec<u8>>(1)?))
            })
            .optional()
            .map_err(|e| StorageError::Db(e.to_string()))?;
        match row {
            Some((id, package)) => {
                conn.execute("DELETE FROM key_packages WHERE id = ?1", params![id])
                    .map_err(|e| StorageError::Db(e.to_string()))?;
                Ok(Some(package))
            }
            None => Ok(None),
        }
    }

    /// Append `payload` to the delivery queue for `(recipient_key, channel_id)`.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<(), StorageError> {
        let conn = self.conn.lock().unwrap();
        conn.execute(
            "INSERT INTO deliveries (recipient_key, channel_id, payload) VALUES (?1, ?2, ?3)",
            params![recipient_key, channel_id, payload],
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
        Ok(())
    }

    /// Drain all queued payloads for `(recipient_key, channel_id)` in enqueue
    /// order. Returns an empty `Vec` when the queue is empty.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<Vec<u8>>, StorageError> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn
            .prepare(
                "SELECT id, payload FROM deliveries
                WHERE recipient_key = ?1 AND channel_id = ?2
                ORDER BY id ASC",
            )
            .map_err(|e| StorageError::Db(e.to_string()))?;
        let rows: Vec<(i64, Vec<u8>)> = stmt
            .query_map(params![recipient_key, channel_id], |row| {
                Ok((row.get(0)?, row.get(1)?))
            })
            .map_err(|e| StorageError::Db(e.to_string()))?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| StorageError::Db(e.to_string()))?;
        // Rows were selected in ascending id order, so the last id bounds the
        // whole batch. Deleting by `(recipient, channel, id <= max)` removes
        // exactly the fetched rows with three bound parameters; the previous
        // `IN (?,?,…)` form built one parameter per row and would exceed
        // SQLite's SQLITE_MAX_VARIABLE_NUMBER limit (999 in default/older
        // builds) once a queue held a few hundred messages. The Mutex is held
        // for the whole call, so nothing is enqueued between SELECT and DELETE.
        if let Some(max_id) = rows.last().map(|(id, _)| *id) {
            conn.execute(
                "DELETE FROM deliveries
                WHERE recipient_key = ?1 AND channel_id = ?2 AND id <= ?3",
                params![recipient_key, channel_id, max_id],
            )
            .map_err(|e| StorageError::Db(e.to_string()))?;
        }
        Ok(rows.into_iter().map(|(_, payload)| payload).collect())
    }

    /// Publish (or replace) the hybrid post-quantum public key for
    /// `identity_key`. Last write wins.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError> {
        let conn = self.conn.lock().unwrap();
        // Upsert keyed on the table's PRIMARY KEY (identity_key).
        conn.execute(
            "INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
            params![identity_key, hybrid_pk],
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
        Ok(())
    }

    /// Look up the hybrid public key for `identity_key`, or `None` if the
    /// identity has never published one.
    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn
            .prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
            .map_err(|e| StorageError::Db(e.to_string()))?;
        stmt.query_row(params![identity_key], |row| row.get(0))
            .optional()
            .map_err(|e| StorageError::Db(e.to_string()))
    }
}
/// Convenience extension for `rusqlite::OptionalExtension`.
trait OptionalExt<T> {
    fn optional(self) -> Result<Option<T>, rusqlite::Error>;
}

impl<T> OptionalExt<T> for Result<T, rusqlite::Error> {
    /// Map "no rows matched" to `Ok(None)`; pass every other outcome through,
    /// wrapping a hit in `Some`.
    fn optional(self) -> Result<Option<T>, rusqlite::Error> {
        match self {
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            other => other.map(Some),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Fresh in-memory store with no SQLCipher passphrase.
    fn open_in_memory() -> SqlStore {
        SqlStore::open(":memory:", "").unwrap()
    }

    #[test]
    fn key_package_fifo() {
        let store = open_in_memory();
        // 32-byte identity key, matching real usage (the literal is exactly
        // 32 bytes long).
        let identity = *b"alice_identity_key__32bytes_long";
        for pkg in [&b"kp1"[..], b"kp2"] {
            store.upload_key_package(&identity, pkg.to_vec()).unwrap();
        }
        // Packages come back oldest-first, each exactly once.
        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp1".to_vec())
        );
        assert_eq!(
            store.fetch_key_package(&identity).unwrap(),
            Some(b"kp2".to_vec())
        );
        assert_eq!(store.fetch_key_package(&identity).unwrap(), None);
    }

    #[test]
    fn delivery_round_trip() {
        let store = open_in_memory();
        let recipient = [1u8; 32];
        let channel = b"channel-1";
        store.enqueue(&recipient, channel, b"msg1".to_vec()).unwrap();
        store.enqueue(&recipient, channel, b"msg2".to_vec()).unwrap();
        // A single fetch returns everything in enqueue order…
        assert_eq!(
            store.fetch(&recipient, channel).unwrap(),
            vec![b"msg1".to_vec(), b"msg2".to_vec()]
        );
        // …and drains the queue.
        assert!(store.fetch(&recipient, channel).unwrap().is_empty());
    }

    #[test]
    fn hybrid_key_round_trip() {
        let store = open_in_memory();
        let identity = [2u8; 32];
        let public_key = b"hybrid_public_key_data".to_vec();
        store.upload_hybrid_key(&identity, public_key.clone()).unwrap();
        assert_eq!(store.fetch_hybrid_key(&identity).unwrap(), Some(public_key));
    }

    #[test]
    fn hybrid_key_upsert() {
        let store = open_in_memory();
        let identity = [3u8; 32];
        // Second upload for the same identity replaces the first.
        store.upload_hybrid_key(&identity, b"v1".to_vec()).unwrap();
        store.upload_hybrid_key(&identity, b"v2".to_vec()).unwrap();
        assert_eq!(
            store.fetch_hybrid_key(&identity).unwrap(),
            Some(b"v2".to_vec())
        );
    }

    #[test]
    fn separate_channels_isolated() {
        let store = open_in_memory();
        let recipient = [4u8; 32];
        store.enqueue(&recipient, b"ch-a", b"a1".to_vec()).unwrap();
        store.enqueue(&recipient, b"ch-b", b"b1".to_vec()).unwrap();
        // Draining one channel must not disturb the other.
        assert_eq!(store.fetch(&recipient, b"ch-a").unwrap(), vec![b"a1".to_vec()]);
        assert_eq!(store.fetch(&recipient, b"ch-b").unwrap(), vec![b"b1".to_vec()]);
    }
}