feat: upgrade OpenMLS 0.5 → 0.8 for security patches and GREASE support

Migrates all MLS code in quicprochat-core from OpenMLS 0.5 to 0.8:
- StorageProvider replaces OpenMlsKeyStore (keystore.rs full rewrite)
- HybridCryptoProvider updated for new OpenMlsProvider trait
- Group operations updated for new API signatures
- MLS state persistence via MemoryStorage serialization
- tls_codec 0.3 → 0.4, openmls_traits/rust_crypto 0.2 → 0.5
This commit is contained in:
2026-03-08 17:50:15 +01:00
parent 077f48f19c
commit a05da9b751
20 changed files with 1433 additions and 657 deletions

815
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -26,12 +26,13 @@ categories = ["cryptography", "network-programming"]
[workspace.dependencies] [workspace.dependencies]
# ── Crypto ──────────────────────────────────────────────────────────────────── # ── Crypto ────────────────────────────────────────────────────────────────────
openmls = { version = "0.5", default-features = false, features = ["crypto-subtle"] } openmls = { version = "0.8" }
openmls_rust_crypto = { version = "0.2" } openmls_rust_crypto = { version = "0.5" }
openmls_traits = { version = "0.2" } openmls_traits = { version = "0.5" }
# tls_codec must match the version used by openmls 0.5 (which uses 0.3) to avoid openmls_memory_storage = { version = "0.5" }
# tls_codec must match the version used by openmls 0.8 (which uses 0.4) to avoid
# duplicate Serialize trait versions in the dependency graph. # duplicate Serialize trait versions in the dependency graph.
tls_codec = { version = "0.3", features = ["derive"] } tls_codec = { version = "0.4", features = ["derive"] }
# ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768). # ml-kem 0.2 is the current stable release (FIPS 203, ML-KEM-768).
ml-kem = { version = "0.2" } ml-kem = { version = "0.2" }
x25519-dalek = { version = "2", features = ["static_secrets"] } x25519-dalek = { version = "2", features = ["static_secrets"] }
@@ -87,7 +88,8 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
anyhow = { version = "1" } anyhow = { version = "1" }
thiserror = { version = "1" } thiserror = { version = "1" }
# ── CLI ─────────────────────────────────────────────────────────────────────── # ── Config / CLI ──────────────────────────────────────────────────────────────
toml = { version = "0.8" }
clap = { version = "4", features = ["derive", "env"] } clap = { version = "4", features = ["derive", "env"] }
rustyline = { version = "14" } rustyline = { version = "14" }

View File

@@ -9,6 +9,7 @@
</p> </p>
<p align="center"> <p align="center">
<a href="docs/src/design-rationale/messenger-comparison.md">Why quicprochat?</a> &middot;
<a href="ROADMAP.md">Roadmap</a> &middot; <a href="ROADMAP.md">Roadmap</a> &middot;
<a href="docs/sdk/index.md">SDK Docs</a> &middot; <a href="docs/sdk/index.md">SDK Docs</a> &middot;
<a href="docs/operations/monitoring.md">Operations</a> &middot; <a href="docs/operations/monitoring.md">Operations</a> &middot;

View File

@@ -50,8 +50,9 @@ rustls = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
tracing-subscriber = { workspace = true } tracing-subscriber = { workspace = true }
# CLI # CLI + config
clap = { workspace = true } clap = { workspace = true }
toml = { workspace = true }
# Local message/conversation storage # Local message/conversation storage
rusqlite = { workspace = true } rusqlite = { workspace = true }

View File

@@ -1449,10 +1449,8 @@ pub(crate) async fn cmd_dm(
}, },
display_name: format!("@{username}"), display_name: format!("@{username}"),
mls_group_blob: member mls_group_blob: member
.group_ref() .serialize_mls_state()
.map(bincode::serialize) .context("serialize MLS state")?,
.transpose()
.context("serialize group")?,
keystore_blob: None, keystore_blob: None,
member_keys, member_keys,
unread_count: 0, unread_count: 0,
@@ -1493,10 +1491,8 @@ pub(crate) fn cmd_create_group(session: &mut SessionState, name: &str) -> anyhow
kind: ConversationKind::Group { name: name.to_string() }, kind: ConversationKind::Group { name: name.to_string() },
display_name: format!("#{name}"), display_name: format!("#{name}"),
mls_group_blob: member mls_group_blob: member
.group_ref() .serialize_mls_state()
.map(bincode::serialize) .context("serialize MLS state")?,
.transpose()
.context("serialize group")?,
keystore_blob: None, keystore_blob: None,
member_keys, member_keys,
unread_count: 0, unread_count: 0,
@@ -1780,9 +1776,7 @@ pub(crate) async fn cmd_join(
kind: ConversationKind::Group { name: display.clone() }, kind: ConversationKind::Group { name: display.clone() },
display_name: format!("#{display}"), display_name: format!("#{display}"),
mls_group_blob: new_member mls_group_blob: new_member
.group_ref() .serialize_mls_state()
.map(bincode::serialize)
.transpose()
.context("serialize joined group")?, .context("serialize joined group")?,
keystore_blob: None, keystore_blob: None,
member_keys, member_keys,
@@ -3186,8 +3180,9 @@ async fn try_auto_join(
}; };
let mls_blob = member let mls_blob = member
.group_ref() .serialize_mls_state()
.and_then(|g| bincode::serialize(g).ok()); .ok()
.flatten();
let conv = Conversation { let conv = Conversation {
id: conv_id.clone(), id: conv_id.clone(),

View File

@@ -16,7 +16,7 @@ use quicprochat_core::{DiskKeyStore, GroupMember, HybridKeypair, IdentityKeypair
use super::conversation::{ use super::conversation::{
now_ms, Conversation, ConversationId, ConversationKind, ConversationStore, now_ms, Conversation, ConversationId, ConversationKind, ConversationStore,
}; };
use super::state::{load_or_init_state, keystore_path}; use super::state::load_or_init_state;
/// Runtime state for an interactive REPL session. /// Runtime state for an interactive REPL session.
pub struct SessionState { pub struct SessionState {
@@ -109,7 +109,7 @@ impl SessionState {
/// Migrate the legacy single-group from StoredState into the conversation DB. /// Migrate the legacy single-group from StoredState into the conversation DB.
fn migrate_legacy_group( fn migrate_legacy_group(
&mut self, &mut self,
state_path: &Path, _state_path: &Path,
group_blob: &Option<Vec<u8>>, group_blob: &Option<Vec<u8>>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let blob = match group_blob { let blob = match group_blob {
@@ -117,16 +117,22 @@ impl SessionState {
None => return Ok(()), None => return Ok(()),
}; };
// Reconstruct GroupMember using the legacy keystore and group blob. // Legacy group blobs used openmls 0.5 serde format. After the 0.8
let ks_path = keystore_path(state_path); // upgrade the blob format changed to storage-provider state. Attempt
let ks = DiskKeyStore::persistent(&ks_path)?; // to load from the new format; if that fails, skip the legacy group.
let group = bincode::deserialize(blob).context("decode legacy group")?; let group_id_guess = &blob[..blob.len().min(16)];
let member = GroupMember::new_with_state( let member = match GroupMember::new_from_storage_bytes(
Arc::clone(&self.identity), Arc::clone(&self.identity),
ks, blob,
Some(group), group_id_guess,
false, // legacy groups are classical false, // legacy groups are classical
); ) {
Ok(m) => m,
Err(e) => {
tracing::warn!(error = %e, "skipping incompatible legacy group blob (openmls version mismatch)");
return Ok(());
}
};
let group_id_bytes = member.group_id().unwrap_or_default(); let group_id_bytes = member.group_id().unwrap_or_default();
@@ -182,27 +188,32 @@ impl SessionState {
/// Create a GroupMember from a stored conversation. /// Create a GroupMember from a stored conversation.
fn create_member_from_conv(&self, conv: &Conversation) -> anyhow::Result<GroupMember> { fn create_member_from_conv(&self, conv: &Conversation) -> anyhow::Result<GroupMember> {
if let Some(blob) = conv.mls_group_blob.as_ref() {
let group_id = conv.id.0.as_slice();
let member = GroupMember::new_from_storage_bytes(
Arc::clone(&self.identity),
blob,
group_id,
conv.is_hybrid,
)
.context("restore MLS state from conversation db")?;
Ok(member)
} else {
// No MLS state — create an empty member.
let ks_path = self.keystore_path_for(&conv.id); let ks_path = self.keystore_path_for(&conv.id);
let ks = DiskKeyStore::persistent(&ks_path) let ks = DiskKeyStore::persistent(&ks_path)
.unwrap_or_else(|e| { .unwrap_or_else(|e| {
tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral"); tracing::warn!(path = %ks_path.display(), error = %e, "DiskKeyStore open failed, falling back to ephemeral");
DiskKeyStore::ephemeral() DiskKeyStore::ephemeral()
}); });
let group = conv
.mls_group_blob
.as_ref()
.map(|b| bincode::deserialize(b))
.transpose()
.context("decode MLS group from conversation db")?;
Ok(GroupMember::new_with_state( Ok(GroupMember::new_with_state(
Arc::clone(&self.identity), Arc::clone(&self.identity),
ks, ks,
group, None,
conv.is_hybrid, conv.is_hybrid,
)) ))
} }
}
/// Path for a per-conversation keystore file. /// Path for a per-conversation keystore file.
fn keystore_path_for(&self, conv_id: &ConversationId) -> PathBuf { fn keystore_path_for(&self, conv_id: &ConversationId) -> PathBuf {
@@ -214,10 +225,8 @@ impl SessionState {
pub fn save_member(&self, conv_id: &ConversationId) -> anyhow::Result<()> { pub fn save_member(&self, conv_id: &ConversationId) -> anyhow::Result<()> {
let member = self.members.get(conv_id).context("no such conversation")?; let member = self.members.get(conv_id).context("no such conversation")?;
let blob = member let blob = member
.group_ref() .serialize_mls_state()
.map(bincode::serialize) .context("serialize MLS state")?;
.transpose()
.context("serialize MLS group")?;
let member_keys = member.member_identities(); let member_keys = member.member_identities();

View File

@@ -27,18 +27,31 @@ pub struct StoredState {
/// Cached member public keys for group participants. /// Cached member public keys for group participants.
#[serde(default)] #[serde(default)]
pub member_keys: Vec<Vec<u8>>, pub member_keys: Vec<Vec<u8>>,
/// MLS group ID bytes, needed to reload the group from StorageProvider state.
#[serde(default)]
pub group_id: Option<Vec<u8>>,
} }
impl StoredState { impl StoredState {
pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> { pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> {
let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed)); let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
let group = self
.group
.map(|bytes| bincode::deserialize(&bytes).context("decode group"))
.transpose()?;
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
let hybrid = self.hybrid_key.is_some(); let hybrid = self.hybrid_key.is_some();
let member = GroupMember::new_with_state(identity, key_store, group, hybrid);
let member = match (self.group.as_ref(), self.group_id.as_ref()) {
(Some(storage_bytes), Some(gid)) => {
GroupMember::new_from_storage_bytes(
identity,
storage_bytes,
gid,
hybrid,
)
.context("restore MLS state from stored state")?
}
_ => {
let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
GroupMember::new_with_state(identity, key_store, None, hybrid)
}
};
let hybrid_kp = self let hybrid_kp = self
.hybrid_key .hybrid_key
@@ -50,15 +63,15 @@ impl StoredState {
pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> { pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
let group = member let group = member
.group_ref() .serialize_mls_state()
.map(|g| bincode::serialize(g).context("serialize group")) .context("serialize MLS state")?;
.transpose()?;
Ok(Self { Ok(Self {
identity_seed: *member.identity_seed(), identity_seed: *member.identity_seed(),
group, group,
hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()), hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
member_keys: Vec::new(), member_keys: Vec::new(),
group_id: member.group_id(),
}) })
} }
} }
@@ -245,6 +258,7 @@ mod tests {
hybrid_key: None, hybrid_key: None,
group: None, group: None,
member_keys: Vec::new(), member_keys: Vec::new(),
group_id: None,
}; };
let password = "test-password"; let password = "test-password";
let plaintext = bincode::serialize(&state).unwrap(); let plaintext = bincode::serialize(&state).unwrap();
@@ -268,6 +282,7 @@ mod tests {
}), }),
group: None, group: None,
member_keys: Vec::new(), member_keys: Vec::new(),
group_id: None,
}; };
let password = "another-password"; let password = "another-password";
let plaintext = bincode::serialize(&state).unwrap(); let plaintext = bincode::serialize(&state).unwrap();
@@ -285,6 +300,7 @@ mod tests {
hybrid_key: None, hybrid_key: None,
group: None, group: None,
member_keys: Vec::new(), member_keys: Vec::new(),
group_id: None,
}; };
let plaintext = bincode::serialize(&state).unwrap(); let plaintext = bincode::serialize(&state).unwrap();
let encrypted = encrypt_state("correct", &plaintext).unwrap(); let encrypted = encrypt_state("correct", &plaintext).unwrap();

View File

@@ -28,12 +28,159 @@ use quicprochat_client::{
#[cfg(all(feature = "tui", not(feature = "v2")))] #[cfg(all(feature = "tui", not(feature = "v2")))]
use quicprochat_client::client::tui::run_tui; use quicprochat_client::client::tui::run_tui;
// ── Config file loading ──────────────────────────────────────────────────────
//
// Loads a TOML config file and sets QPQ_* environment variables for values
// not already set. This runs BEFORE clap parses, so the natural precedence is:
// CLI flags > environment variables > config file > compiled defaults.
//
// Config file search order:
// 1. --config <path> (parsed manually from argv)
// 2. $QPC_CONFIG env var
// 3. $XDG_CONFIG_HOME/qpc/config.toml (usually ~/.config/qpc/config.toml)
// 4. ~/.qpc.toml
#[cfg(not(feature = "v2"))]
mod client_config {
use serde::Deserialize;
use std::path::PathBuf;
#[derive(Debug, Default, Deserialize)]
pub struct ClientFileConfig {
pub server: Option<String>,
pub server_name: Option<String>,
pub ca_cert: Option<String>,
pub username: Option<String>,
pub password: Option<String>,
pub access_token: Option<String>,
pub device_id: Option<String>,
pub state_password: Option<String>,
pub state: Option<String>,
pub danger_accept_invalid_certs: Option<bool>,
pub no_server: Option<bool>,
}
/// Find and load the config file. Returns the parsed config (or default if
/// no file is found).
pub fn load_client_config() -> ClientFileConfig {
let path = find_config_path();
let path = match path {
Some(p) if p.exists() => p,
_ => return ClientFileConfig::default(),
};
match std::fs::read_to_string(&path) {
Ok(contents) => match toml::from_str(&contents) {
Ok(cfg) => {
eprintln!("Loaded config: {}", path.display());
cfg
}
Err(e) => {
eprintln!("Warning: failed to parse {}: {e}", path.display());
ClientFileConfig::default()
}
},
Err(e) => {
eprintln!("Warning: failed to read {}: {e}", path.display());
ClientFileConfig::default()
}
}
}
fn find_config_path() -> Option<PathBuf> {
// 1. --config <path> from argv (before clap parses).
let args: Vec<String> = std::env::args().collect();
for i in 0..args.len().saturating_sub(1) {
if args[i] == "--config" || args[i] == "-c" {
return Some(PathBuf::from(&args[i + 1]));
}
}
// 2. $QPC_CONFIG env var.
if let Ok(p) = std::env::var("QPC_CONFIG") {
return Some(PathBuf::from(p));
}
// 3. $XDG_CONFIG_HOME/qpc/config.toml
let xdg = std::env::var("XDG_CONFIG_HOME")
.map(PathBuf::from)
.unwrap_or_else(|_| {
let home = std::env::var("HOME").unwrap_or_else(|_| ".".to_string());
PathBuf::from(home).join(".config")
});
let xdg_path = xdg.join("qpc").join("config.toml");
if xdg_path.exists() {
return Some(xdg_path);
}
// 4. ~/.qpc.toml
if let Ok(home) = std::env::var("HOME") {
let home_path = PathBuf::from(home).join(".qpc.toml");
if home_path.exists() {
return Some(home_path);
}
}
None
}
/// Set QPQ_* env vars from config values, but only if they're not already set.
pub fn apply_config_to_env(cfg: &ClientFileConfig) {
fn set_if_empty(key: &str, val: &str) {
if std::env::var(key).is_err() {
std::env::set_var(key, val);
}
}
if let Some(ref v) = cfg.server {
set_if_empty("QPQ_SERVER", v);
}
if let Some(ref v) = cfg.server_name {
set_if_empty("QPQ_SERVER_NAME", v);
}
if let Some(ref v) = cfg.ca_cert {
set_if_empty("QPQ_CA_CERT", v);
}
if let Some(ref v) = cfg.username {
set_if_empty("QPQ_USERNAME", v);
}
if let Some(ref v) = cfg.password {
set_if_empty("QPQ_PASSWORD", v);
}
if let Some(ref v) = cfg.access_token {
set_if_empty("QPQ_ACCESS_TOKEN", v);
}
if let Some(ref v) = cfg.device_id {
set_if_empty("QPQ_DEVICE_ID", v);
}
if let Some(ref v) = cfg.state_password {
set_if_empty("QPQ_STATE_PASSWORD", v);
}
if let Some(ref v) = cfg.state {
set_if_empty("QPQ_STATE", v);
}
if let Some(v) = cfg.danger_accept_invalid_certs {
if v {
set_if_empty("QPQ_DANGER_ACCEPT_INVALID_CERTS", "true");
}
}
if let Some(v) = cfg.no_server {
if v {
set_if_empty("QPQ_NO_SERVER", "true");
}
}
}
}
// ── CLI ─────────────────────────────────────────────────────────────────────── // ── CLI ───────────────────────────────────────────────────────────────────────
#[cfg(not(feature = "v2"))] #[cfg(not(feature = "v2"))]
#[derive(Debug, Parser)] #[derive(Debug, Parser)]
#[command(name = "qpc", about = "quicprochat CLI client", version)] #[command(name = "qpc", about = "quicprochat CLI client", version)]
struct Args { struct Args {
/// Path to a TOML config file (auto-detected from ~/.config/qpc/config.toml or ~/.qpc.toml).
#[arg(long, short = 'c', global = true, env = "QPC_CONFIG")]
config: Option<PathBuf>,
/// Path to the server's TLS certificate (self-signed by default). /// Path to the server's TLS certificate (self-signed by default).
#[arg( #[arg(
long, long,
@@ -540,6 +687,13 @@ async fn main() -> anyhow::Result<()> {
) )
.init(); .init();
// Load config file and apply to env BEFORE clap parses (so config values
// act as defaults that env vars and CLI flags can override).
{
let cfg = client_config::load_client_config();
client_config::apply_config_to_env(&cfg);
}
let args = Args::parse(); let args = Args::parse();
if args.danger_accept_invalid_certs { if args.danger_accept_invalid_certs {

View File

@@ -15,6 +15,7 @@ native = [
"dep:openmls", "dep:openmls",
"dep:openmls_rust_crypto", "dep:openmls_rust_crypto",
"dep:openmls_traits", "dep:openmls_traits",
"dep:openmls_memory_storage",
"dep:tls_codec", "dep:tls_codec",
"dep:opaque-ke", "dep:opaque-ke",
"dep:bincode", "dep:bincode",
@@ -49,6 +50,7 @@ opaque-ke = { workspace = true, optional = true }
openmls = { workspace = true, optional = true } openmls = { workspace = true, optional = true }
openmls_rust_crypto = { workspace = true, optional = true } openmls_rust_crypto = { workspace = true, optional = true }
openmls_traits = { workspace = true, optional = true } openmls_traits = { workspace = true, optional = true }
openmls_memory_storage = { workspace = true, optional = true }
tls_codec = { workspace = true, optional = true } tls_codec = { workspace = true, optional = true }
bincode = { workspace = true, optional = true } bincode = { workspace = true, optional = true }

View File

@@ -29,7 +29,7 @@
//! # Ratchet tree //! # Ratchet tree
//! //!
//! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded //! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`; //! in Welcome messages. `new_from_welcome` is called without a ratchet_tree;
//! openmls extracts the tree from the Welcome's `GroupInfo` extension. //! openmls extracts the tree from the Welcome's `GroupInfo` extension.
use std::{path::Path, sync::Arc}; use std::{path::Path, sync::Arc};
@@ -37,12 +37,13 @@ use std::{path::Path, sync::Arc};
use zeroize::Zeroizing; use zeroize::Zeroizing;
use openmls::prelude::{ use openmls::prelude::{
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage, BasicCredential, Ciphersuite, Credential, CredentialWithKey, GroupId, KeyPackage,
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut, KeyPackageIn, LeafNodeParameters, MlsGroup, MlsGroupCreateConfig, MlsGroupJoinConfig,
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait, MlsMessageBodyIn, MlsMessageOut, ProcessedMessageContent, ProtocolMessage,
TlsSerializeTrait, ProtocolVersion, StagedWelcome,
}; };
use openmls_traits::OpenMlsCryptoProvider; use openmls_traits::OpenMlsProvider;
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
use crate::{ use crate::{
error::CoreError, error::CoreError,
@@ -102,8 +103,10 @@ pub struct GroupMember {
identity: Arc<IdentityKeypair>, identity: Arc<IdentityKeypair>,
/// Active MLS group, if any. /// Active MLS group, if any.
group: Option<MlsGroup>, group: Option<MlsGroup>,
/// Shared group configuration (wire format, ratchet tree extension, etc.). /// Shared group creation configuration (wire format, ratchet tree extension, etc.).
config: MlsGroupConfig, create_config: MlsGroupCreateConfig,
/// Shared group join configuration (wire format, ratchet tree extension, etc.).
join_config: MlsGroupJoinConfig,
/// Whether this member uses hybrid (X25519 + ML-KEM-768) HPKE keys. /// Whether this member uses hybrid (X25519 + ML-KEM-768) HPKE keys.
hybrid: bool, hybrid: bool,
} }
@@ -139,7 +142,11 @@ impl GroupMember {
group: Option<MlsGroup>, group: Option<MlsGroup>,
hybrid: bool, hybrid: bool,
) -> Self { ) -> Self {
let config = MlsGroupConfig::builder() let create_config = MlsGroupCreateConfig::builder()
.use_ratchet_tree_extension(true)
.build();
let join_config = MlsGroupJoinConfig::builder()
.use_ratchet_tree_extension(true) .use_ratchet_tree_extension(true)
.build(); .build();
@@ -153,7 +160,8 @@ impl GroupMember {
backend, backend,
identity, identity,
group, group,
config, create_config,
join_config,
hybrid, hybrid,
} }
} }
@@ -175,18 +183,19 @@ impl GroupMember {
/// ///
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage. /// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage.
pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> { pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
let credential_with_key = self.make_credential_with_key()?; let credential_with_key = self.make_credential_with_key();
let key_package = KeyPackage::builder() let key_package_bundle = KeyPackage::builder()
.build( .build(
CryptoConfig::with_default_version(CIPHERSUITE), CIPHERSUITE,
&self.backend, &self.backend,
self.identity.as_ref(), self.identity.as_ref(),
credential_with_key, credential_with_key,
) )
.map_err(|e| CoreError::Mls(format!("{e:?}")))?; .map_err(|e| CoreError::Mls(format!("{e:?}")))?;
key_package key_package_bundle
.key_package()
.tls_serialize_detached() .tls_serialize_detached()
.map_err(|e| CoreError::Mls(format!("{e:?}"))) .map_err(|e| CoreError::Mls(format!("{e:?}")))
} }
@@ -205,13 +214,13 @@ impl GroupMember {
/// ///
/// Returns [`CoreError::Mls`] if the group already exists or openmls fails. /// Returns [`CoreError::Mls`] if the group already exists or openmls fails.
pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> { pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
let credential_with_key = self.make_credential_with_key()?; let credential_with_key = self.make_credential_with_key();
let mls_id = GroupId::from_slice(group_id); let mls_id = GroupId::from_slice(group_id);
let group = MlsGroup::new_with_group_id( let group = MlsGroup::new_with_group_id(
&self.backend, &self.backend,
self.identity.as_ref(), self.identity.as_ref(),
&self.config, &self.create_config,
mls_id, mls_id,
credential_with_key, credential_with_key,
) )
@@ -303,7 +312,7 @@ impl GroupMember {
let leaf_index = group let leaf_index = group
.members() .members()
.find(|m| m.credential.identity() == member_identity) .find(|m| m.credential.serialized_content() == member_identity)
.map(|m| m.index) .map(|m| m.index)
.ok_or_else(|| CoreError::Mls("member not found in group".into()))?; .ok_or_else(|| CoreError::Mls("member not found in group".into()))?;
@@ -384,7 +393,11 @@ impl GroupMember {
.ok_or_else(|| CoreError::Mls("no active group".into()))?; .ok_or_else(|| CoreError::Mls("no active group".into()))?;
let (proposal_out, _ref) = group let (proposal_out, _ref) = group
.propose_self_update(&self.backend, self.identity.as_ref(), None) .propose_self_update(
&self.backend,
self.identity.as_ref(),
LeafNodeParameters::default(),
)
.map_err(|e| CoreError::Mls(format!("propose_self_update: {e:?}")))?; .map_err(|e| CoreError::Mls(format!("propose_self_update: {e:?}")))?;
proposal_out proposal_out
@@ -396,7 +409,7 @@ impl GroupMember {
pub fn has_pending_proposals(&self) -> bool { pub fn has_pending_proposals(&self) -> bool {
self.group self.group
.as_ref() .as_ref()
.map(|g| g.pending_proposals().next().is_some()) .map(|g| g.has_pending_proposals())
.unwrap_or(false) .unwrap_or(false)
} }
@@ -417,17 +430,23 @@ impl GroupMember {
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes) let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?; .map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
let welcome = match msg_in.extract() { let welcome = match msg_in.extract() {
MlsMessageInBody::Welcome(w) => w, MlsMessageBodyIn::Welcome(w) => w,
_ => return Err(CoreError::Mls("expected a Welcome message".into())), _ => return Err(CoreError::Mls("expected a Welcome message".into())),
}; };
// ratchet_tree = None because use_ratchet_tree_extension = true embeds let staged = StagedWelcome::new_from_welcome(
// the tree inside the Welcome's GroupInfo extension. &self.backend,
let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None) &self.join_config,
welcome,
None, // ratchet tree extracted from the Welcome's GroupInfo extension
)
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?; .map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
let group = staged
.into_group(&self.backend)
.map_err(|e| CoreError::Mls(format!("into_group: {e:?}")))?;
self.group = Some(group); self.group = Some(group);
Ok(()) Ok(())
} }
@@ -508,10 +527,9 @@ impl GroupMember {
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes) let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?; .map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
// into_protocol_message() is feature-gated; extract() + manual construction is not. let protocol_message: ProtocolMessage = match msg_in.extract() {
let protocol_message = match msg_in.extract() { MlsMessageBodyIn::PrivateMessage(m) => m.into(),
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m), MlsMessageBodyIn::PublicMessage(m) => m.into(),
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
_ => return Err(CoreError::Mls("not a protocol message".into())), _ => return Err(CoreError::Mls("not a protocol message".into())),
}; };
@@ -519,7 +537,7 @@ impl GroupMember {
.process_message(&self.backend, protocol_message) .process_message(&self.backend, protocol_message)
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?; .map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
let sender_identity = processed.credential().identity().to_vec(); let sender_identity = processed.credential().serialized_content().to_vec();
match processed.into_content() { match processed.into_content() {
ProcessedMessageContent::ApplicationMessage(app) => { ProcessedMessageContent::ApplicationMessage(app) => {
@@ -545,11 +563,15 @@ impl GroupMember {
} }
// Proposals are stored for a later Commit; nothing to return yet. // Proposals are stored for a later Commit; nothing to return yet.
ProcessedMessageContent::ProposalMessage(proposal) => { ProcessedMessageContent::ProposalMessage(proposal) => {
group.store_pending_proposal(*proposal); group
.store_pending_proposal(self.backend.storage(), *proposal)
.map_err(|e| CoreError::Mls(format!("store_pending_proposal: {e:?}")))?;
Ok((sender_identity, ReceivedMessage::StateChanged)) Ok((sender_identity, ReceivedMessage::StateChanged))
} }
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => { ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
group.store_pending_proposal(*proposal); group
.store_pending_proposal(self.backend.storage(), *proposal)
.map_err(|e| CoreError::Mls(format!("store_pending_proposal: {e:?}")))?;
Ok((sender_identity, ReceivedMessage::StateChanged)) Ok((sender_identity, ReceivedMessage::StateChanged))
} }
} }
@@ -597,6 +619,69 @@ impl GroupMember {
self.group.as_ref() self.group.as_ref()
} }
/// Serialize the MLS group state (via the backing `StorageProvider`).
///
/// In openmls 0.8 the `MlsGroup` is no longer `Serialize`; its state is
/// held inside the `StorageProvider`. This method serializes the full
/// provider storage to bytes, which can later be restored with
/// [`new_from_storage_bytes`].
///
/// Returns `None` if no active group exists.
///
/// [`new_from_storage_bytes`]: Self::new_from_storage_bytes
pub fn serialize_mls_state(&self) -> Result<Option<Vec<u8>>, CoreError> {
if self.group.is_none() {
return Ok(None);
}
let bytes = self
.backend
.storage()
.to_bytes()
.map_err(|e| CoreError::Mls(format!("serialize storage: {e}")))?;
Ok(Some(bytes))
}
/// Create a `GroupMember` from previously serialized storage bytes.
///
/// Reconstructs the `DiskKeyStore` from the blob, then loads the
/// `MlsGroup` from the storage provider using the given `group_id`.
pub fn new_from_storage_bytes(
identity: Arc<IdentityKeypair>,
storage_bytes: &[u8],
group_id: &[u8],
hybrid: bool,
) -> Result<Self, CoreError> {
let key_store = DiskKeyStore::from_bytes(storage_bytes)
.map_err(|e| CoreError::Mls(format!("deserialize storage: {e}")))?;
let create_config = MlsGroupCreateConfig::builder()
.use_ratchet_tree_extension(true)
.build();
let join_config = MlsGroupJoinConfig::builder()
.use_ratchet_tree_extension(true)
.build();
let backend = if hybrid {
HybridCryptoProvider::new_hybrid(key_store)
} else {
HybridCryptoProvider::new_classical(key_store)
};
let mls_group_id = GroupId::from_slice(group_id);
let group = MlsGroup::load(backend.storage(), &mls_group_id)
.map_err(|e| CoreError::Mls(format!("load group from storage: {e}")))?;
Ok(Self {
backend,
identity,
group,
create_config,
join_config,
hybrid,
})
}
/// Return the identity (credential) bytes of all current group members. /// Return the identity (credential) bytes of all current group members.
/// ///
/// Each entry is the raw credential payload (Ed25519 public key bytes) /// Each entry is the raw credential payload (Ed25519 public key bytes)
@@ -608,23 +693,20 @@ impl GroupMember {
}; };
group group
.members() .members()
.map(|m| m.credential.identity().to_vec()) .map(|m| m.credential.serialized_content().to_vec())
.collect() .collect()
} }
// ── Private helpers ─────────────────────────────────────────────────────── // ── Private helpers ───────────────────────────────────────────────────────
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> { fn make_credential_with_key(&self) -> CredentialWithKey {
let credential = Credential::new( let credential: Credential =
self.identity.public_key_bytes().to_vec(), BasicCredential::new(self.identity.public_key_bytes().to_vec()).into();
CredentialType::Basic,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
Ok(CredentialWithKey { CredentialWithKey {
credential, credential,
signature_key: self.identity.public_key_bytes().to_vec().into(), signature_key: self.identity.public_key_bytes().to_vec().into(),
}) }
} }
} }
@@ -758,11 +840,6 @@ mod tests {
let (_commit_a, welcome_a) = creator.add_member(&a_kp).expect("add A"); let (_commit_a, welcome_a) = creator.add_member(&a_kp).expect("add A");
a.join_group(&welcome_a).expect("A join"); a.join_group(&welcome_a).expect("A join");
// A must process the commit that added them (it's a StateChanged for A since
// the commit itself is what brought them in — but actually A joined via Welcome,
// so A doesn't process the add-commit). The creator already merged the pending
// commit in add_member, so creator is at epoch 2.
// Add B — at this point creator is at epoch 2 (after adding A). // Add B — at this point creator is at epoch 2 (after adding A).
let (commit_b, welcome_b) = creator.add_member(&b_kp).expect("add B"); let (commit_b, welcome_b) = creator.add_member(&b_kp).expect("add B");
b.join_group(&welcome_b).expect("B join"); b.join_group(&welcome_b).expect("B join");
@@ -958,7 +1035,7 @@ mod tests {
); );
} }
/// 10 messages alternating AliceBob and BobAlice all decrypt successfully. /// 10 messages alternating Alice->Bob and Bob->Alice all decrypt successfully.
/// Verifies that epoch state stays in sync across multiple application messages. /// Verifies that epoch state stays in sync across multiple application messages.
#[test] #[test]
fn multi_message_roundtrip_epoch_stays_in_sync() { fn multi_message_roundtrip_epoch_stays_in_sync() {

View File

@@ -27,8 +27,9 @@ use openmls_traits::{
crypto::OpenMlsCrypto, crypto::OpenMlsCrypto,
types::{ types::{
CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType, CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType,
KemOutput,
}, },
OpenMlsCryptoProvider, OpenMlsProvider,
}; };
use tls_codec::SecretVLBytes; use tls_codec::SecretVLBytes;
@@ -128,6 +129,15 @@ impl OpenMlsCrypto for HybridCrypto {
self.rust_crypto.hkdf_extract(hash_type, salt, ikm) self.rust_crypto.hkdf_extract(hash_type, salt, ikm)
} }
/// Compute an HMAC over `message` using `key` with the given hash function.
///
/// Pure pass-through to the inner `RustCrypto` provider — there is no
/// hybrid-specific handling here (the hybrid logic only intercepts HPKE
/// operations elsewhere in this impl).
fn hmac(
    &self,
    hash_type: HashType,
    key: &[u8],
    message: &[u8],
) -> Result<SecretVLBytes, CryptoError> {
    self.rust_crypto.hmac(hash_type, key, message)
}
fn hkdf_expand( fn hkdf_expand(
&self, &self,
hash_type: HashType, hash_type: HashType,
@@ -189,25 +199,18 @@ impl OpenMlsCrypto for HybridCrypto {
info: &[u8], info: &[u8],
aad: &[u8], aad: &[u8],
ptxt: &[u8], ptxt: &[u8],
) -> HpkeCiphertext { ) -> Result<HpkeCiphertext, CryptoError> {
if Self::is_hybrid_public_key(pk_r) { if Self::is_hybrid_public_key(pk_r) {
// The trait `OpenMlsCrypto::hpke_seal` returns `HpkeCiphertext` (not
// `Result`), so we cannot propagate errors through the return type.
// Returning an empty ciphertext would silently cause data loss.
// Instead, panic on failure — a hybrid key that passes the length
// check but fails deserialization or encryption indicates a critical
// bug (corrupted key material), not a recoverable condition.
let recipient_pk = HybridPublicKey::from_bytes(pk_r) let recipient_pk = HybridPublicKey::from_bytes(pk_r)
.expect("hybrid public key deserialization failed — key material is corrupted"); .map_err(|_| CryptoError::CryptoLibraryError)?;
// Pass HPKE info and aad through for proper context binding (RFC 9180).
let envelope = hybrid_encrypt(&recipient_pk, ptxt, info, aad) let envelope = hybrid_encrypt(&recipient_pk, ptxt, info, aad)
.expect("hybrid HPKE encryption failed — critical crypto error"); .map_err(|_| CryptoError::CryptoLibraryError)?;
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec(); let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec(); let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
HpkeCiphertext { Ok(HpkeCiphertext {
kem_output: kem_output.into(), kem_output: kem_output.into(),
ciphertext: ciphertext.into(), ciphertext: ciphertext.into(),
} })
} else { } else {
self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt) self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
} }
@@ -245,7 +248,7 @@ impl OpenMlsCrypto for HybridCrypto {
info: &[u8], info: &[u8],
exporter_context: &[u8], exporter_context: &[u8],
exporter_length: usize, exporter_length: usize,
) -> Result<(Vec<u8>, ExporterSecret), CryptoError> { ) -> Result<(KemOutput, ExporterSecret), CryptoError> {
if Self::is_hybrid_public_key(pk_r) { if Self::is_hybrid_public_key(pk_r) {
// A key that passes the hybrid length check but fails deserialization // A key that passes the hybrid length check but fails deserialization
// is corrupted — return an error instead of silently downgrading to // is corrupted — return an error instead of silently downgrading to
@@ -286,14 +289,14 @@ impl OpenMlsCrypto for HybridCrypto {
} }
} }
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> HpkeKeyPair { fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> Result<HpkeKeyPair, CryptoError> {
if self.hybrid_enabled && config.0 == HpkeKemType::DhKem25519 { if self.hybrid_enabled && config.0 == HpkeKemType::DhKem25519 {
let kp = HybridKeypair::derive_from_ikm(ikm); let kp = HybridKeypair::derive_from_ikm(ikm);
let private_bytes = kp.private_to_bytes(); let private_bytes = kp.private_to_bytes();
HpkeKeyPair { Ok(HpkeKeyPair {
private: private_bytes.as_slice().into(), private: private_bytes.as_slice().into(),
public: kp.public_key().to_bytes(), public: kp.public_key().to_bytes(),
} })
} else { } else {
self.rust_crypto.derive_hpke_keypair(config, ikm) self.rust_crypto.derive_hpke_keypair(config, ikm)
} }
@@ -343,10 +346,10 @@ impl Default for HybridCryptoProvider {
} }
} }
impl OpenMlsCryptoProvider for HybridCryptoProvider { impl OpenMlsProvider for HybridCryptoProvider {
type CryptoProvider = HybridCrypto; type CryptoProvider = HybridCrypto;
type RandProvider = RustCrypto; type RandProvider = RustCrypto;
type KeyStoreProvider = DiskKeyStore; type StorageProvider = DiskKeyStore;
fn crypto(&self) -> &Self::CryptoProvider { fn crypto(&self) -> &Self::CryptoProvider {
&self.crypto &self.crypto
@@ -356,7 +359,7 @@ impl OpenMlsCryptoProvider for HybridCryptoProvider {
self.crypto.rust_crypto() self.crypto.rust_crypto()
} }
fn key_store(&self) -> &Self::KeyStoreProvider { fn storage(&self) -> &Self::StorageProvider {
&self.key_store &self.key_store
} }
} }
@@ -383,7 +386,7 @@ mod tests {
let crypto = HybridCrypto::new(); let crypto = HybridCrypto::new();
let ikm = b"test-ikm-for-hybrid-hpke-keypair"; let ikm = b"test-ikm-for-hybrid-hpke-keypair";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm); let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
assert_eq!(keypair.public.len(), HYBRID_PUBLIC_KEY_LEN); assert_eq!(keypair.public.len(), HYBRID_PUBLIC_KEY_LEN);
assert_eq!(keypair.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN); assert_eq!(keypair.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN);
@@ -397,7 +400,7 @@ mod tests {
info, info,
aad, aad,
plaintext, plaintext,
); ).unwrap();
assert!(!ct.kem_output.as_slice().is_empty()); assert!(!ct.kem_output.as_slice().is_empty());
assert!(!ct.ciphertext.as_slice().is_empty()); assert!(!ct.ciphertext.as_slice().is_empty());
@@ -419,7 +422,7 @@ mod tests {
let crypto = HybridCrypto::new(); let crypto = HybridCrypto::new();
let ikm = b"exporter-ikm"; let ikm = b"exporter-ikm";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm); let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
let info = b""; let info = b"";
let exporter_context = b"MLS 1.0 external init"; let exporter_context = b"MLS 1.0 external init";
let exporter_length = 32; let exporter_length = 32;
@@ -457,7 +460,7 @@ mod tests {
let crypto = HybridCrypto::new_classical(); let crypto = HybridCrypto::new_classical();
let ikm = b"test-ikm-for-classical-hpke"; let ikm = b"test-ikm-for-classical-hpke";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm); let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
// Classical X25519 keys are 32 bytes // Classical X25519 keys are 32 bytes
assert_eq!(keypair.public.len(), 32); assert_eq!(keypair.public.len(), 32);
assert_eq!(keypair.private.as_ref().len(), 32); assert_eq!(keypair.private.as_ref().len(), 32);
@@ -469,7 +472,7 @@ mod tests {
let crypto = HybridCrypto::new_classical(); let crypto = HybridCrypto::new_classical();
let ikm = b"test-ikm-for-classical-round-trip"; let ikm = b"test-ikm-for-classical-round-trip";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm); let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
assert_eq!(keypair.public.len(), 32); // classical key assert_eq!(keypair.public.len(), 32); // classical key
let plaintext = b"hello classical MLS"; let plaintext = b"hello classical MLS";
@@ -482,7 +485,7 @@ mod tests {
info, info,
aad, aad,
plaintext, plaintext,
); ).unwrap();
assert!(!ct.kem_output.as_slice().is_empty()); assert!(!ct.kem_output.as_slice().is_empty());
let decrypted = crypto let decrypted = crypto
@@ -501,7 +504,7 @@ mod tests {
#[test] #[test]
fn key_package_generation_with_hybrid_provider() { fn key_package_generation_with_hybrid_provider() {
use openmls::prelude::{ use openmls::prelude::{
Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage, BasicCredential, CredentialWithKey, KeyPackage,
}; };
use std::sync::Arc; use std::sync::Arc;
use tls_codec::Serialize; use tls_codec::Serialize;
@@ -514,26 +517,24 @@ mod tests {
let provider = HybridCryptoProvider::default(); let provider = HybridCryptoProvider::default();
let identity = Arc::new(IdentityKeypair::generate()); let identity = Arc::new(IdentityKeypair::generate());
let credential = Credential::new( let credential: openmls::prelude::Credential =
identity.public_key_bytes().to_vec(), BasicCredential::new(identity.public_key_bytes().to_vec()).into();
CredentialType::Basic,
)
.unwrap();
let credential_with_key = CredentialWithKey { let credential_with_key = CredentialWithKey {
credential, credential,
signature_key: identity.public_key_bytes().to_vec().into(), signature_key: identity.public_key_bytes().to_vec().into(),
}; };
let key_package = KeyPackage::builder() let key_package_bundle = KeyPackage::builder()
.build( .build(
CryptoConfig::with_default_version(CIPHERSUITE), CIPHERSUITE,
&provider, &provider,
identity.as_ref(), identity.as_ref(),
credential_with_key, credential_with_key,
) )
.expect("KeyPackage with hybrid HPKE"); .expect("KeyPackage with hybrid HPKE");
let bytes = key_package let bytes = key_package_bundle
.key_package()
.tls_serialize_detached() .tls_serialize_detached()
.expect("serialize KeyPackage"); .expect("serialize KeyPackage");
assert!(!bytes.is_empty()); assert!(!bytes.is_empty());

View File

@@ -90,7 +90,7 @@ impl IdentityKeypair {
/// `openmls_basic_credential` crate. /// `openmls_basic_credential` crate.
#[cfg(feature = "native")] #[cfg(feature = "native")]
impl openmls_traits::signatures::Signer for IdentityKeypair { impl openmls_traits::signatures::Signer for IdentityKeypair {
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::types::Error> { fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::signatures::SignerError> {
let sk = self.signing_key(); let sk = self.signing_key();
let sig: ed25519_dalek::Signature = sk.sign(payload); let sig: ed25519_dalek::Signature = sk.sign(payload);
Ok(sig.to_bytes().to_vec()) Ok(sig.to_bytes().to_vec())

View File

@@ -17,10 +17,10 @@
//! The resulting bytes are opaque to the quicprochat transport layer. //! The resulting bytes are opaque to the quicprochat transport layer.
use openmls::prelude::{ use openmls::prelude::{
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage, BasicCredential, Ciphersuite, CredentialWithKey, KeyPackage, KeyPackageIn,
KeyPackageIn, TlsDeserializeTrait, TlsSerializeTrait,
}; };
use openmls_rust_crypto::OpenMlsRustCrypto; use openmls_rust_crypto::OpenMlsRustCrypto;
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use crate::{error::CoreError, identity::IdentityKeypair}; use crate::{error::CoreError, identity::IdentityKeypair};
@@ -74,8 +74,8 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
// Build a BasicCredential using the raw Ed25519 public key bytes as the // Build a BasicCredential using the raw Ed25519 public key bytes as the
// MLS identity. Per RFC 9420, any byte string may serve as the identity. // MLS identity. Per RFC 9420, any byte string may serve as the identity.
let credential = Credential::new(identity.public_key_bytes().to_vec(), CredentialType::Basic) let credential: openmls::prelude::Credential =
.map_err(|e| CoreError::Mls(format!("{e:?}")))?; BasicCredential::new(identity.public_key_bytes().to_vec()).into();
// The `signature_key` in CredentialWithKey is the Ed25519 public key that // The `signature_key` in CredentialWithKey is the Ed25519 public key that
// will be used to verify the KeyPackage's leaf node signature. // will be used to verify the KeyPackage's leaf node signature.
@@ -87,19 +87,13 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
// `IdentityKeypair` implements `openmls_traits::signatures::Signer` // `IdentityKeypair` implements `openmls_traits::signatures::Signer`
// so it can be passed directly to the builder. // so it can be passed directly to the builder.
let key_package = KeyPackage::builder() let key_package_bundle = KeyPackage::builder()
.build( .build(CIPHERSUITE, &backend, identity, credential_with_key)
CryptoConfig::with_default_version(CIPHERSUITE),
&backend,
identity,
credential_with_key,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?; .map_err(|e| CoreError::Mls(format!("{e:?}")))?;
// TLS-encode the KeyPackage using the trait from the openmls prelude. // TLS-encode the KeyPackage.
// This uses tls_codec 0.3 (the same version openmls uses internally), let tls_bytes = key_package_bundle
// avoiding a duplicate-trait conflict with tls_codec 0.4. .key_package()
let tls_bytes = key_package
.tls_serialize_detached() .tls_serialize_detached()
.map_err(|e| CoreError::Mls(format!("{e:?}")))?; .map_err(|e| CoreError::Mls(format!("{e:?}")))?;

View File

@@ -1,23 +1,21 @@
use std::{ use std::{
collections::HashMap,
fs, fs,
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::RwLock,
}; };
use openmls_traits::key_store::{MlsEntity, OpenMlsKeyStore}; use openmls_memory_storage::MemoryStorage;
use openmls_traits::storage::{traits, StorageProvider, CURRENT_VERSION};
/// A disk-backed key store implementing `OpenMlsKeyStore`. /// A disk-backed storage provider implementing `StorageProvider`.
/// ///
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on /// Wraps `openmls_memory_storage::MemoryStorage` and flushes to disk on every
/// every store/delete so HPKE init keys survive process restarts. /// write so that HPKE init keys and group state survive process restarts.
/// ///
/// # Serialization /// # Serialization
/// ///
/// Uses bincode for both individual MLS entity values and the outer HashMap /// Uses bincode for the outer `HashMap<Vec<u8>, Vec<u8>>` container when
/// container. This is required because OpenMLS types use bincode-compatible /// persisting to disk. The inner values use serde_json (matching
/// serialization, and `HashMap<Vec<u8>, Vec<u8>>` requires a binary format /// `MemoryStorage`'s serialization format).
/// (JSON mandates string keys).
/// ///
/// # Persistence security /// # Persistence security
/// ///
@@ -26,15 +24,17 @@ use openmls_traits::key_store::{MlsEntity, OpenMlsKeyStore};
#[derive(Debug)] #[derive(Debug)]
pub struct DiskKeyStore { pub struct DiskKeyStore {
path: Option<PathBuf>, path: Option<PathBuf>,
values: RwLock<HashMap<Vec<u8>, Vec<u8>>>, storage: MemoryStorage,
} }
#[derive(thiserror::Error, Debug, PartialEq, Eq)] #[derive(thiserror::Error, Debug)]
pub enum DiskKeyStoreError { pub enum DiskKeyStoreError {
#[error("serialization error")] #[error("serialization error")]
Serialization, Serialization,
#[error("io error: {0}")] #[error("io error: {0}")]
Io(String), Io(String),
#[error("memory storage error: {0}")]
MemoryStorage(#[from] openmls_memory_storage::MemoryStorageError),
} }
impl DiskKeyStore { impl DiskKeyStore {
@@ -42,28 +42,35 @@ impl DiskKeyStore {
pub fn ephemeral() -> Self { pub fn ephemeral() -> Self {
Self { Self {
path: None, path: None,
values: RwLock::new(HashMap::new()), storage: MemoryStorage::default(),
} }
} }
/// Persistent keystore backed by `path`. Creates an empty store if missing. /// Persistent keystore backed by `path`. Creates an empty store if missing.
pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> { pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
let path = path.as_ref().to_path_buf(); let path = path.as_ref().to_path_buf();
let values = if path.exists() { let storage = if path.exists() {
let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?; let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
if bytes.is_empty() { if bytes.is_empty() {
HashMap::new() MemoryStorage::default()
} else { } else {
let map: std::collections::HashMap<Vec<u8>, Vec<u8>> =
bincode::deserialize(&bytes) bincode::deserialize(&bytes)
.map_err(|_| DiskKeyStoreError::Serialization)? .map_err(|_| DiskKeyStoreError::Serialization)?;
let storage = MemoryStorage::default();
let mut values = storage.values.write()
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
*values = map;
drop(values);
storage
} }
} else { } else {
HashMap::new() MemoryStorage::default()
}; };
let store = Self { let store = Self {
path: Some(path), path: Some(path),
values: RwLock::new(values), storage,
}; };
// Set restrictive file permissions on the keystore file. // Set restrictive file permissions on the keystore file.
@@ -76,8 +83,10 @@ impl DiskKeyStore {
let Some(path) = &self.path else { let Some(path) = &self.path else {
return Ok(()); return Ok(());
}; };
let values = self.values.read().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?; let values = self.storage.values.read()
let bytes = bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?; .map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
let bytes = bincode::serialize(&*values)
.map_err(|_| DiskKeyStoreError::Serialization)?;
if let Some(parent) = path.parent() { if let Some(parent) = path.parent() {
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?; fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
} }
@@ -86,6 +95,32 @@ impl DiskKeyStore {
Ok(()) Ok(())
} }
/// Serialize the backing storage to bytes (bincode).
///
/// This captures all key material *and* MLS group state held by the
/// `StorageProvider`, allowing the caller to persist it in a database
/// column instead of (or in addition to) on-disk files.
pub fn to_bytes(&self) -> Result<Vec<u8>, DiskKeyStoreError> {
    // Take a read guard on the inner map; a poisoned lock is surfaced as
    // an I/O-style error rather than a panic.
    let guard = self
        .storage
        .values
        .read()
        .map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
    bincode::serialize(&*guard).map_err(|_| DiskKeyStoreError::Serialization)
}
/// Restore a `DiskKeyStore` from bytes previously produced by [`to_bytes`].
///
/// The restored store is in-memory only (`path: None`), so it performs no
/// disk flushes until re-attached to a path by other means.
pub fn from_bytes(bytes: &[u8]) -> Result<Self, DiskKeyStoreError> {
    // Decode the serialized key/value map first so a malformed payload
    // fails before any storage state is constructed.
    let map: std::collections::HashMap<Vec<u8>, Vec<u8>> =
        bincode::deserialize(bytes).map_err(|_| DiskKeyStoreError::Serialization)?;
    let storage = MemoryStorage::default();
    {
        // Scope the write guard so it is released before `storage` is moved
        // into the returned struct.
        let mut guard = storage
            .values
            .write()
            .map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
        *guard = map;
    }
    Ok(Self { path: None, storage })
}
/// Restrict file permissions to owner-only (0o600) on Unix. /// Restrict file permissions to owner-only (0o600) on Unix.
#[cfg(unix)] #[cfg(unix)]
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> { fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
@@ -112,31 +147,567 @@ impl Default for DiskKeyStore {
} }
} }
impl OpenMlsKeyStore for DiskKeyStore { /// Delegate all `StorageProvider` methods to the inner `MemoryStorage`,
/// flushing to disk after every write/delete operation.
///
/// The flush errors are mapped to `DiskKeyStoreError` via the
/// `MemoryStorageError` conversion. If a flush fails, the in-memory state
/// is still updated (matching the old DiskKeyStore behavior).
impl StorageProvider<CURRENT_VERSION> for DiskKeyStore {
type Error = DiskKeyStoreError; type Error = DiskKeyStoreError;
fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> { fn write_mls_join_config<
let value = bincode::serialize(v).map_err(|_| DiskKeyStoreError::Serialization)?; GroupId: traits::GroupId<CURRENT_VERSION>,
let mut values = self.values.write().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?; MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
values.insert(k.to_vec(), value); >(
drop(values); &self,
group_id: &GroupId,
config: &MlsGroupJoinConfig,
) -> Result<(), Self::Error> {
self.storage.write_mls_join_config(group_id, config)?;
self.flush() self.flush()
} }
fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> { fn append_own_leaf_node<
let values = match self.values.read() { GroupId: traits::GroupId<CURRENT_VERSION>,
Ok(v) => v, LeafNode: traits::LeafNode<CURRENT_VERSION>,
Err(_) => return None, >(
}; &self,
values group_id: &GroupId,
.get(k) leaf_node: &LeafNode,
.and_then(|bytes| bincode::deserialize(bytes).ok()) ) -> Result<(), Self::Error> {
self.storage.append_own_leaf_node(group_id, leaf_node)?;
self.flush()
} }
fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> { fn queue_proposal<
let mut values = self.values.write().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?; GroupId: traits::GroupId<CURRENT_VERSION>,
values.remove(k); ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
drop(values); QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
proposal_ref: &ProposalRef,
proposal: &QueuedProposal,
) -> Result<(), Self::Error> {
self.storage.queue_proposal(group_id, proposal_ref, proposal)?;
self.flush()
}
// ── Write operations ──────────────────────────────────────────────────
// Each writer delegates to the in-memory `MemoryStorage` and then calls
// `flush()`, which serializes the *entire* map to disk. If the flush
// fails, the in-memory write has already taken effect, so memory and
// disk may briefly diverge until the next successful flush.
fn write_tree<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    TreeSync: traits::TreeSync<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    tree: &TreeSync,
) -> Result<(), Self::Error> {
    self.storage.write_tree(group_id, tree)?;
    self.flush()
}
fn write_interim_transcript_hash<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    interim_transcript_hash: &InterimTranscriptHash,
) -> Result<(), Self::Error> {
    self.storage.write_interim_transcript_hash(group_id, interim_transcript_hash)?;
    self.flush()
}
fn write_context<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    GroupContext: traits::GroupContext<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    group_context: &GroupContext,
) -> Result<(), Self::Error> {
    self.storage.write_context(group_id, group_context)?;
    self.flush()
}
fn write_confirmation_tag<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    confirmation_tag: &ConfirmationTag,
) -> Result<(), Self::Error> {
    self.storage.write_confirmation_tag(group_id, confirmation_tag)?;
    self.flush()
}
// NOTE: the trait declares `GroupState` before `GroupId` here — the
// generic order mirrors the upstream trait definition, not a typo.
fn write_group_state<
    GroupState: traits::GroupState<CURRENT_VERSION>,
    GroupId: traits::GroupId<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    group_state: &GroupState,
) -> Result<(), Self::Error> {
    self.storage.write_group_state(group_id, group_state)?;
    self.flush()
}
fn write_message_secrets<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    message_secrets: &MessageSecrets,
) -> Result<(), Self::Error> {
    self.storage.write_message_secrets(group_id, message_secrets)?;
    self.flush()
}
fn write_resumption_psk_store<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    resumption_psk_store: &ResumptionPskStore,
) -> Result<(), Self::Error> {
    self.storage.write_resumption_psk_store(group_id, resumption_psk_store)?;
    self.flush()
}
fn write_own_leaf_index<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    own_leaf_index: &LeafNodeIndex,
) -> Result<(), Self::Error> {
    self.storage.write_own_leaf_index(group_id, own_leaf_index)?;
    self.flush()
}
fn write_group_epoch_secrets<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    group_epoch_secrets: &GroupEpochSecrets,
) -> Result<(), Self::Error> {
    self.storage.write_group_epoch_secrets(group_id, group_epoch_secrets)?;
    self.flush()
}
fn write_signature_key_pair<
    SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
    SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
>(
    &self,
    public_key: &SignaturePublicKey,
    signature_key_pair: &SignatureKeyPair,
) -> Result<(), Self::Error> {
    self.storage.write_signature_key_pair(public_key, signature_key_pair)?;
    self.flush()
}
fn write_encryption_key_pair<
    EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
    HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
>(
    &self,
    public_key: &EncryptionKey,
    key_pair: &HpkeKeyPair,
) -> Result<(), Self::Error> {
    self.storage.write_encryption_key_pair(public_key, key_pair)?;
    self.flush()
}
fn write_encryption_epoch_key_pairs<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    EpochKey: traits::EpochKey<CURRENT_VERSION>,
    HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    epoch: &EpochKey,
    leaf_index: u32,
    key_pairs: &[HpkeKeyPair],
) -> Result<(), Self::Error> {
    self.storage.write_encryption_epoch_key_pairs(group_id, epoch, leaf_index, key_pairs)?;
    self.flush()
}
fn write_key_package<
    HashReference: traits::HashReference<CURRENT_VERSION>,
    KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
>(
    &self,
    hash_ref: &HashReference,
    key_package: &KeyPackage,
) -> Result<(), Self::Error> {
    self.storage.write_key_package(hash_ref, key_package)?;
    self.flush()
}
fn write_psk<
    PskId: traits::PskId<CURRENT_VERSION>,
    PskBundle: traits::PskBundle<CURRENT_VERSION>,
>(
    &self,
    psk_id: &PskId,
    psk: &PskBundle,
) -> Result<(), Self::Error> {
    self.storage.write_psk(psk_id, psk)?;
    self.flush()
}
// ── Getters ───────────────────────────────────────────────────────────
// Read-only delegations to the inner `MemoryStorage`; no flush is needed
// because nothing is mutated. The `?` converts `MemoryStorageError` into
// `DiskKeyStoreError` via the `#[from]` impl on the error enum.
fn mls_group_join_config<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<MlsGroupJoinConfig>, Self::Error> {
    Ok(self.storage.mls_group_join_config(group_id)?)
}
fn own_leaf_nodes<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    LeafNode: traits::LeafNode<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Vec<LeafNode>, Self::Error> {
    Ok(self.storage.own_leaf_nodes(group_id)?)
}
fn queued_proposal_refs<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Vec<ProposalRef>, Self::Error> {
    Ok(self.storage.queued_proposal_refs(group_id)?)
}
fn queued_proposals<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
    QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Vec<(ProposalRef, QueuedProposal)>, Self::Error> {
    Ok(self.storage.queued_proposals(group_id)?)
}
fn tree<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    TreeSync: traits::TreeSync<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<TreeSync>, Self::Error> {
    Ok(self.storage.tree(group_id)?)
}
fn group_context<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    GroupContext: traits::GroupContext<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<GroupContext>, Self::Error> {
    Ok(self.storage.group_context(group_id)?)
}
fn interim_transcript_hash<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<InterimTranscriptHash>, Self::Error> {
    Ok(self.storage.interim_transcript_hash(group_id)?)
}
fn confirmation_tag<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<ConfirmationTag>, Self::Error> {
    Ok(self.storage.confirmation_tag(group_id)?)
}
// Generic order (`GroupState` before `GroupId`) mirrors the trait.
fn group_state<
    GroupState: traits::GroupState<CURRENT_VERSION>,
    GroupId: traits::GroupId<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<GroupState>, Self::Error> {
    Ok(self.storage.group_state(group_id)?)
}
fn message_secrets<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<MessageSecrets>, Self::Error> {
    Ok(self.storage.message_secrets(group_id)?)
}
fn resumption_psk_store<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<ResumptionPskStore>, Self::Error> {
    Ok(self.storage.resumption_psk_store(group_id)?)
}
fn own_leaf_index<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<LeafNodeIndex>, Self::Error> {
    Ok(self.storage.own_leaf_index(group_id)?)
}
fn group_epoch_secrets<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<Option<GroupEpochSecrets>, Self::Error> {
    Ok(self.storage.group_epoch_secrets(group_id)?)
}
fn signature_key_pair<
    SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
    SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
>(
    &self,
    public_key: &SignaturePublicKey,
) -> Result<Option<SignatureKeyPair>, Self::Error> {
    Ok(self.storage.signature_key_pair(public_key)?)
}
fn encryption_key_pair<
    HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
    EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
>(
    &self,
    public_key: &EncryptionKey,
) -> Result<Option<HpkeKeyPair>, Self::Error> {
    Ok(self.storage.encryption_key_pair(public_key)?)
}
fn encryption_epoch_key_pairs<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    EpochKey: traits::EpochKey<CURRENT_VERSION>,
    HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    epoch: &EpochKey,
    leaf_index: u32,
) -> Result<Vec<HpkeKeyPair>, Self::Error> {
    Ok(self.storage.encryption_epoch_key_pairs(group_id, epoch, leaf_index)?)
}
fn key_package<
    KeyPackageRef: traits::HashReference<CURRENT_VERSION>,
    KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
>(
    &self,
    hash_ref: &KeyPackageRef,
) -> Result<Option<KeyPackage>, Self::Error> {
    Ok(self.storage.key_package(hash_ref)?)
}
fn psk<
    PskBundle: traits::PskBundle<CURRENT_VERSION>,
    PskId: traits::PskId<CURRENT_VERSION>,
>(
    &self,
    psk_id: &PskId,
) -> Result<Option<PskBundle>, Self::Error> {
    Ok(self.storage.psk(psk_id)?)
}
// --- deleters (flush needed) ---
fn remove_proposal<
GroupId: traits::GroupId<CURRENT_VERSION>,
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
proposal_ref: &ProposalRef,
) -> Result<(), Self::Error> {
self.storage.remove_proposal(group_id, proposal_ref)?;
self.flush()
}
fn delete_own_leaf_nodes<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_own_leaf_nodes(group_id)?;
self.flush()
}
fn delete_group_config<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_group_config(group_id)?;
self.flush()
}
fn delete_tree<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop the ratchet tree for this group, then persist.
    self.storage
        .delete_tree(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_confirmation_tag<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop the stored confirmation tag, then persist.
    self.storage
        .delete_confirmation_tag(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_group_state<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop the persisted group state, then persist the store.
    self.storage
        .delete_group_state(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_context<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop the stored group context, then persist.
    self.storage
        .delete_context(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_interim_transcript_hash<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop the interim transcript hash, then persist.
    self.storage
        .delete_interim_transcript_hash(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_message_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop the message secrets for this group, then persist.
    self.storage
        .delete_message_secrets(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_all_resumption_psk_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: clear every resumption PSK secret for the group, then persist.
    self.storage
        .delete_all_resumption_psk_secrets(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_own_leaf_index<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop our stored leaf index, then persist.
    self.storage
        .delete_own_leaf_index(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_group_epoch_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: drop the group's epoch secrets, then persist.
    self.storage
        .delete_group_epoch_secrets(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn clear_proposal_queue<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
) -> Result<(), Self::Error> {
    // Mutation: clear all queued proposals, then persist. The turbofish is
    // required because ProposalRef does not appear in the argument list.
    self.storage
        .clear_proposal_queue::<GroupId, ProposalRef>(group_id)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_signature_key_pair<
    SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
>(
    &self,
    public_key: &SignaturePublicKey,
) -> Result<(), Self::Error> {
    // Mutation: drop the signature key pair for this public key, then persist.
    self.storage
        .delete_signature_key_pair(public_key)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_encryption_key_pair<EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>>(
    &self,
    public_key: &EncryptionKey,
) -> Result<(), Self::Error> {
    // Mutation: drop the HPKE key pair for this public key, then persist.
    self.storage
        .delete_encryption_key_pair(public_key)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_encryption_epoch_key_pairs<
    GroupId: traits::GroupId<CURRENT_VERSION>,
    EpochKey: traits::EpochKey<CURRENT_VERSION>,
>(
    &self,
    group_id: &GroupId,
    epoch: &EpochKey,
    leaf_index: u32,
) -> Result<(), Self::Error> {
    // Mutation: drop the per-epoch HPKE key pairs for this leaf, then persist.
    self.storage
        .delete_encryption_epoch_key_pairs(group_id, epoch, leaf_index)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
fn delete_key_package<KeyPackageRef: traits::HashReference<CURRENT_VERSION>>(
    &self,
    hash_ref: &KeyPackageRef,
) -> Result<(), Self::Error> {
    // Mutation: drop the key package addressed by this hash ref, then persist.
    self.storage
        .delete_key_package(hash_ref)
        .map_err(Into::into)
        .and_then(|()| self.flush())
}
// Deletes the PSK stored under `psk_id` and persists the change.
//
// NOTE(review): the extracted text had duplicated tokens on the closing lines
// ("self.flush() self.flush()" / "} }") — an artifact of side-by-side diff
// rendering; reconstructed here following the pattern of every other deleter
// in this impl (delegate, then flush). The final brace closes the impl block.
fn delete_psk<PskKey: traits::PskId<CURRENT_VERSION>>(
    &self,
    psk_id: &PskKey,
) -> Result<(), Self::Error> {
    self.storage.delete_psk(psk_id)?;
    self.flush()
}
}

View File

@@ -77,10 +77,8 @@ pub async fn create_dm(
// Save conversation with MLS state. // Save conversation with MLS state.
let member_keys = member.member_identities(); let member_keys = member.member_identities();
let mls_blob = member let mls_blob = member
.group_ref() .serialize_mls_state()
.map(bincode::serialize) .map_err(|e| SdkError::Storage(format!("serialize MLS state: {e}")))?;
.transpose()
.map_err(|e| SdkError::Storage(format!("serialize MLS group: {e}")))?;
let conv = Conversation { let conv = Conversation {
id: conv_id.clone(), id: conv_id.clone(),
@@ -147,10 +145,8 @@ pub fn create_group(
let member_keys = member.member_identities(); let member_keys = member.member_identities();
let mls_blob = member let mls_blob = member
.group_ref() .serialize_mls_state()
.map(bincode::serialize) .map_err(|e| SdkError::Storage(format!("serialize MLS state: {e}")))?;
.transpose()
.map_err(|e| SdkError::Storage(format!("serialize MLS group: {e}")))?;
let conv = Conversation { let conv = Conversation {
id: conv_id.clone(), id: conv_id.clone(),
@@ -252,10 +248,8 @@ pub fn join_from_welcome(
let member_keys = member.member_identities(); let member_keys = member.member_identities();
let mls_blob = member let mls_blob = member
.group_ref() .serialize_mls_state()
.map(bincode::serialize) .map_err(|e| SdkError::Storage(format!("serialize MLS state: {e}")))?;
.transpose()
.map_err(|e| SdkError::Storage(format!("serialize MLS group: {e}")))?;
// Upsert conversation — the stub may already exist from create_dm. // Upsert conversation — the stub may already exist from create_dm.
let existing = conv_store let existing = conv_store
@@ -509,10 +503,8 @@ pub fn save_mls_state(
.ok_or_else(|| SdkError::ConversationNotFound(conv_id.hex()))?; .ok_or_else(|| SdkError::ConversationNotFound(conv_id.hex()))?;
conv.mls_group_blob = member conv.mls_group_blob = member
.group_ref() .serialize_mls_state()
.map(bincode::serialize) .map_err(|e| SdkError::Storage(format!("serialize MLS state: {e}")))?;
.transpose()
.map_err(|e| SdkError::Storage(format!("serialize MLS group: {e}")))?;
conv.member_keys = member.member_identities(); conv.member_keys = member.member_identities();
conv.is_hybrid = member.is_hybrid(); conv.is_hybrid = member.is_hybrid();
@@ -529,16 +521,19 @@ pub fn restore_mls_state(
conv: &Conversation, conv: &Conversation,
identity: &Arc<IdentityKeypair>, identity: &Arc<IdentityKeypair>,
) -> Result<GroupMember, SdkError> { ) -> Result<GroupMember, SdkError> {
let group_blob = conv let storage_blob = conv
.mls_group_blob .mls_group_blob
.as_ref() .as_ref()
.ok_or_else(|| SdkError::Crypto("no MLS group blob in conversation".into()))?; .ok_or_else(|| SdkError::Crypto("no MLS group blob in conversation".into()))?;
let mls_group = bincode::deserialize(group_blob) let group_id = conv.id.0.as_slice();
.map_err(|e| SdkError::Crypto(format!("deserialize MLS group: {e}")))?; let member = GroupMember::new_from_storage_bytes(
Arc::clone(identity),
let ks = quicprochat_core::DiskKeyStore::ephemeral(); storage_blob,
let member = GroupMember::new_with_state(Arc::clone(identity), ks, Some(mls_group), conv.is_hybrid); group_id,
conv.is_hybrid,
)
.map_err(|e| SdkError::Crypto(format!("restore MLS state: {e}")))?;
Ok(member) Ok(member)
} }

View File

@@ -65,7 +65,7 @@ serde_json = { workspace = true }
# CLI # CLI
clap = { workspace = true } clap = { workspace = true }
toml = { version = "0.8" } toml = { workspace = true }
# WebSocket JSON-RPC bridge for browser clients # WebSocket JSON-RPC bridge for browser clients
tokio-tungstenite = "0.26" tokio-tungstenite = "0.26"

View File

@@ -6,6 +6,7 @@
# Why quicprochat? # Why quicprochat?
- [How quicprochat Compares to WhatsApp, Telegram, and Signal](design-rationale/messenger-comparison.md)
- [Comparison with Classical Chat Protocols](design-rationale/protocol-comparison.md) - [Comparison with Classical Chat Protocols](design-rationale/protocol-comparison.md)
- [Why This Design, Not Signal/Matrix/...](design-rationale/why-not-signal.md) - [Why This Design, Not Signal/Matrix/...](design-rationale/why-not-signal.md)

View File

@@ -6,9 +6,9 @@ version = 4
name = "logging_plugin" name = "logging_plugin"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"quicproquo-plugin-api", "quicprochat-plugin-api",
] ]
[[package]] [[package]]
name = "quicproquo-plugin-api" name = "quicprochat-plugin-api"
version = "0.1.0" version = "0.1.0"

View File

@@ -3,12 +3,12 @@
version = 4 version = 4
[[package]] [[package]]
name = "quicproquo-plugin-api" name = "quicprochat-plugin-api"
version = "0.1.0" version = "0.1.0"
[[package]] [[package]]
name = "rate_limit_plugin" name = "rate_limit_plugin"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"quicproquo-plugin-api", "quicprochat-plugin-api",
] ]

View File

@@ -425,7 +425,7 @@ dependencies = [
] ]
[[package]] [[package]]
name = "qpq-wasm-crypto" name = "qpc-wasm-crypto"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"js-sys", "js-sys",