feat: upgrade OpenMLS 0.5 → 0.8 for security patches and GREASE support

Migrates all MLS code in quicprochat-core from OpenMLS 0.5 to 0.8:
- StorageProvider replaces OpenMlsKeyStore (keystore.rs full rewrite)
- HybridCryptoProvider updated for new OpenMlsProvider trait
- Group operations updated for new API signatures
- MLS state persistence via MemoryStorage serialization
- tls_codec 0.3 → 0.4, openmls_traits/rust_crypto 0.2 → 0.5
This commit is contained in:
2026-03-08 17:50:15 +01:00
parent 077f48f19c
commit a05da9b751
20 changed files with 1433 additions and 657 deletions

View File

@@ -15,6 +15,7 @@ native = [
"dep:openmls",
"dep:openmls_rust_crypto",
"dep:openmls_traits",
"dep:openmls_memory_storage",
"dep:tls_codec",
"dep:opaque-ke",
"dep:bincode",
@@ -49,6 +50,7 @@ opaque-ke = { workspace = true, optional = true }
openmls = { workspace = true, optional = true }
openmls_rust_crypto = { workspace = true, optional = true }
openmls_traits = { workspace = true, optional = true }
openmls_memory_storage = { workspace = true, optional = true }
tls_codec = { workspace = true, optional = true }
bincode = { workspace = true, optional = true }

View File

@@ -29,7 +29,7 @@
//! # Ratchet tree
//!
//! `use_ratchet_tree_extension = true` so that the ratchet tree is embedded
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
//! in Welcome messages. `new_from_welcome` is called without a ratchet_tree;
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
use std::{path::Path, sync::Arc};
@@ -37,12 +37,13 @@ use std::{path::Path, sync::Arc};
use zeroize::Zeroizing;
use openmls::prelude::{
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
TlsSerializeTrait,
BasicCredential, Ciphersuite, Credential, CredentialWithKey, GroupId, KeyPackage,
KeyPackageIn, LeafNodeParameters, MlsGroup, MlsGroupCreateConfig, MlsGroupJoinConfig,
MlsMessageBodyIn, MlsMessageOut, ProcessedMessageContent, ProtocolMessage,
ProtocolVersion, StagedWelcome,
};
use openmls_traits::OpenMlsCryptoProvider;
use openmls_traits::OpenMlsProvider;
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
use crate::{
error::CoreError,
@@ -102,8 +103,10 @@ pub struct GroupMember {
identity: Arc<IdentityKeypair>,
/// Active MLS group, if any.
group: Option<MlsGroup>,
/// Shared group configuration (wire format, ratchet tree extension, etc.).
config: MlsGroupConfig,
/// Shared group creation configuration (wire format, ratchet tree extension, etc.).
create_config: MlsGroupCreateConfig,
/// Shared group join configuration (wire format, ratchet tree extension, etc.).
join_config: MlsGroupJoinConfig,
/// Whether this member uses hybrid (X25519 + ML-KEM-768) HPKE keys.
hybrid: bool,
}
@@ -139,7 +142,11 @@ impl GroupMember {
group: Option<MlsGroup>,
hybrid: bool,
) -> Self {
let config = MlsGroupConfig::builder()
let create_config = MlsGroupCreateConfig::builder()
.use_ratchet_tree_extension(true)
.build();
let join_config = MlsGroupJoinConfig::builder()
.use_ratchet_tree_extension(true)
.build();
@@ -153,7 +160,8 @@ impl GroupMember {
backend,
identity,
group,
config,
create_config,
join_config,
hybrid,
}
}
@@ -175,18 +183,19 @@ impl GroupMember {
///
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage.
pub fn generate_key_package(&mut self) -> Result<Vec<u8>, CoreError> {
let credential_with_key = self.make_credential_with_key()?;
let credential_with_key = self.make_credential_with_key();
let key_package = KeyPackage::builder()
let key_package_bundle = KeyPackage::builder()
.build(
CryptoConfig::with_default_version(CIPHERSUITE),
CIPHERSUITE,
&self.backend,
self.identity.as_ref(),
credential_with_key,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
key_package
key_package_bundle
.key_package()
.tls_serialize_detached()
.map_err(|e| CoreError::Mls(format!("{e:?}")))
}
@@ -205,13 +214,13 @@ impl GroupMember {
///
/// Returns [`CoreError::Mls`] if the group already exists or openmls fails.
pub fn create_group(&mut self, group_id: &[u8]) -> Result<(), CoreError> {
let credential_with_key = self.make_credential_with_key()?;
let credential_with_key = self.make_credential_with_key();
let mls_id = GroupId::from_slice(group_id);
let group = MlsGroup::new_with_group_id(
&self.backend,
self.identity.as_ref(),
&self.config,
&self.create_config,
mls_id,
credential_with_key,
)
@@ -303,7 +312,7 @@ impl GroupMember {
let leaf_index = group
.members()
.find(|m| m.credential.identity() == member_identity)
.find(|m| m.credential.serialized_content() == member_identity)
.map(|m| m.index)
.ok_or_else(|| CoreError::Mls("member not found in group".into()))?;
@@ -384,7 +393,11 @@ impl GroupMember {
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
let (proposal_out, _ref) = group
.propose_self_update(&self.backend, self.identity.as_ref(), None)
.propose_self_update(
&self.backend,
self.identity.as_ref(),
LeafNodeParameters::default(),
)
.map_err(|e| CoreError::Mls(format!("propose_self_update: {e:?}")))?;
proposal_out
@@ -396,7 +409,7 @@ impl GroupMember {
pub fn has_pending_proposals(&self) -> bool {
self.group
.as_ref()
.map(|g| g.pending_proposals().next().is_some())
.map(|g| g.has_pending_proposals())
.unwrap_or(false)
}
@@ -417,16 +430,22 @@ impl GroupMember {
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
// extract() exposes the message body so we can match on the Welcome variant.
let welcome = match msg_in.extract() {
MlsMessageInBody::Welcome(w) => w,
MlsMessageBodyIn::Welcome(w) => w,
_ => return Err(CoreError::Mls("expected a Welcome message".into())),
};
// ratchet_tree = None because use_ratchet_tree_extension = true embeds
// the tree inside the Welcome's GroupInfo extension.
let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
let staged = StagedWelcome::new_from_welcome(
&self.backend,
&self.join_config,
welcome,
None, // ratchet tree extracted from the Welcome's GroupInfo extension
)
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
let group = staged
.into_group(&self.backend)
.map_err(|e| CoreError::Mls(format!("into_group: {e:?}")))?;
self.group = Some(group);
Ok(())
@@ -508,10 +527,9 @@ impl GroupMember {
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
// extract() exposes the message body; Private/Public messages convert into ProtocolMessage via From.
let protocol_message = match msg_in.extract() {
MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
let protocol_message: ProtocolMessage = match msg_in.extract() {
MlsMessageBodyIn::PrivateMessage(m) => m.into(),
MlsMessageBodyIn::PublicMessage(m) => m.into(),
_ => return Err(CoreError::Mls("not a protocol message".into())),
};
@@ -519,7 +537,7 @@ impl GroupMember {
.process_message(&self.backend, protocol_message)
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
let sender_identity = processed.credential().identity().to_vec();
let sender_identity = processed.credential().serialized_content().to_vec();
match processed.into_content() {
ProcessedMessageContent::ApplicationMessage(app) => {
@@ -545,11 +563,15 @@ impl GroupMember {
}
// Proposals are stored for a later Commit; nothing to return yet.
ProcessedMessageContent::ProposalMessage(proposal) => {
group.store_pending_proposal(*proposal);
group
.store_pending_proposal(self.backend.storage(), *proposal)
.map_err(|e| CoreError::Mls(format!("store_pending_proposal: {e:?}")))?;
Ok((sender_identity, ReceivedMessage::StateChanged))
}
ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
group.store_pending_proposal(*proposal);
group
.store_pending_proposal(self.backend.storage(), *proposal)
.map_err(|e| CoreError::Mls(format!("store_pending_proposal: {e:?}")))?;
Ok((sender_identity, ReceivedMessage::StateChanged))
}
}
@@ -597,6 +619,69 @@ impl GroupMember {
self.group.as_ref()
}
/// Serialize the MLS group state (via the backing `StorageProvider`).
///
/// In openmls 0.8 the `MlsGroup` is no longer `Serialize`; its state is
/// held inside the `StorageProvider`. This method serializes the full
/// provider storage to bytes, which can later be restored with
/// [`new_from_storage_bytes`].
///
/// Returns `None` if no active group exists.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the storage provider fails to serialize.
///
/// [`new_from_storage_bytes`]: Self::new_from_storage_bytes
pub fn serialize_mls_state(&self) -> Result<Option<Vec<u8>>, CoreError> {
    match &self.group {
        // No active group: nothing to capture.
        None => Ok(None),
        Some(_) => self
            .backend
            .storage()
            .to_bytes()
            .map(Some)
            .map_err(|e| CoreError::Mls(format!("serialize storage: {e}"))),
    }
}
/// Create a `GroupMember` from previously serialized storage bytes.
///
/// Reconstructs the `DiskKeyStore` from the blob, then loads the
/// `MlsGroup` from the storage provider using the given `group_id`.
///
/// # Errors
///
/// Returns [`CoreError::Mls`] if the blob cannot be deserialized or the
/// group cannot be loaded from the reconstructed storage.
pub fn new_from_storage_bytes(
    identity: Arc<IdentityKeypair>,
    storage_bytes: &[u8],
    group_id: &[u8],
    hybrid: bool,
) -> Result<Self, CoreError> {
    // Rebuild the storage provider from the serialized blob.
    let store = DiskKeyStore::from_bytes(storage_bytes)
        .map_err(|e| CoreError::Mls(format!("deserialize storage: {e}")))?;

    // Pick the crypto-provider flavor matching how the member was created.
    let backend = if hybrid {
        HybridCryptoProvider::new_hybrid(store)
    } else {
        HybridCryptoProvider::new_classical(store)
    };

    // Same configs as the regular constructor: ratchet tree embedded in Welcomes.
    let create_config = MlsGroupCreateConfig::builder()
        .use_ratchet_tree_extension(true)
        .build();
    let join_config = MlsGroupJoinConfig::builder()
        .use_ratchet_tree_extension(true)
        .build();

    let group = MlsGroup::load(backend.storage(), &GroupId::from_slice(group_id))
        .map_err(|e| CoreError::Mls(format!("load group from storage: {e}")))?;

    Ok(Self {
        backend,
        identity,
        group,
        create_config,
        join_config,
        hybrid,
    })
}
/// Return the identity (credential) bytes of all current group members.
///
/// Each entry is the raw credential payload (Ed25519 public key bytes)
@@ -608,23 +693,20 @@ impl GroupMember {
};
group
.members()
.map(|m| m.credential.identity().to_vec())
.map(|m| m.credential.serialized_content().to_vec())
.collect()
}
// ── Private helpers ───────────────────────────────────────────────────────
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
let credential = Credential::new(
self.identity.public_key_bytes().to_vec(),
CredentialType::Basic,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
fn make_credential_with_key(&self) -> CredentialWithKey {
let credential: Credential =
BasicCredential::new(self.identity.public_key_bytes().to_vec()).into();
Ok(CredentialWithKey {
CredentialWithKey {
credential,
signature_key: self.identity.public_key_bytes().to_vec().into(),
})
}
}
}
@@ -758,11 +840,6 @@ mod tests {
let (_commit_a, welcome_a) = creator.add_member(&a_kp).expect("add A");
a.join_group(&welcome_a).expect("A join");
// A must process the commit that added them (it's a StateChanged for A since
// the commit itself is what brought them in — but actually A joined via Welcome,
// so A doesn't process the add-commit). The creator already merged the pending
// commit in add_member, so creator is at epoch 2.
// Add B — at this point creator is at epoch 2 (after adding A).
let (commit_b, welcome_b) = creator.add_member(&b_kp).expect("add B");
b.join_group(&welcome_b).expect("B join");
@@ -958,7 +1035,7 @@ mod tests {
);
}
/// 10 messages alternating AliceBob and BobAlice all decrypt successfully.
/// 10 messages alternating Alice->Bob and Bob->Alice all decrypt successfully.
/// Verifies that epoch state stays in sync across multiple application messages.
#[test]
fn multi_message_roundtrip_epoch_stays_in_sync() {

View File

@@ -27,8 +27,9 @@ use openmls_traits::{
crypto::OpenMlsCrypto,
types::{
CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType,
KemOutput,
},
OpenMlsCryptoProvider,
OpenMlsProvider,
};
use tls_codec::SecretVLBytes;
@@ -128,6 +129,15 @@ impl OpenMlsCrypto for HybridCrypto {
self.rust_crypto.hkdf_extract(hash_type, salt, ikm)
}
/// Compute an HMAC over `message` with `key` using the given hash.
///
/// Plain delegation to the RustCrypto provider; no hybrid-specific
/// handling is involved for HMAC.
fn hmac(
&self,
hash_type: HashType,
key: &[u8],
message: &[u8],
) -> Result<SecretVLBytes, CryptoError> {
self.rust_crypto.hmac(hash_type, key, message)
}
fn hkdf_expand(
&self,
hash_type: HashType,
@@ -189,25 +199,18 @@ impl OpenMlsCrypto for HybridCrypto {
info: &[u8],
aad: &[u8],
ptxt: &[u8],
) -> HpkeCiphertext {
) -> Result<HpkeCiphertext, CryptoError> {
if Self::is_hybrid_public_key(pk_r) {
// The trait `OpenMlsCrypto::hpke_seal` returns `HpkeCiphertext` (not
// `Result`), so we cannot propagate errors through the return type.
// Returning an empty ciphertext would silently cause data loss.
// Instead, panic on failure — a hybrid key that passes the length
// check but fails deserialization or encryption indicates a critical
// bug (corrupted key material), not a recoverable condition.
let recipient_pk = HybridPublicKey::from_bytes(pk_r)
.expect("hybrid public key deserialization failed — key material is corrupted");
// Pass HPKE info and aad through for proper context binding (RFC 9180).
.map_err(|_| CryptoError::CryptoLibraryError)?;
let envelope = hybrid_encrypt(&recipient_pk, ptxt, info, aad)
.expect("hybrid HPKE encryption failed — critical crypto error");
.map_err(|_| CryptoError::CryptoLibraryError)?;
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
HpkeCiphertext {
Ok(HpkeCiphertext {
kem_output: kem_output.into(),
ciphertext: ciphertext.into(),
}
})
} else {
self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
}
@@ -245,7 +248,7 @@ impl OpenMlsCrypto for HybridCrypto {
info: &[u8],
exporter_context: &[u8],
exporter_length: usize,
) -> Result<(Vec<u8>, ExporterSecret), CryptoError> {
) -> Result<(KemOutput, ExporterSecret), CryptoError> {
if Self::is_hybrid_public_key(pk_r) {
// A key that passes the hybrid length check but fails deserialization
// is corrupted — return an error instead of silently downgrading to
@@ -286,14 +289,14 @@ impl OpenMlsCrypto for HybridCrypto {
}
}
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> HpkeKeyPair {
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> Result<HpkeKeyPair, CryptoError> {
if self.hybrid_enabled && config.0 == HpkeKemType::DhKem25519 {
let kp = HybridKeypair::derive_from_ikm(ikm);
let private_bytes = kp.private_to_bytes();
HpkeKeyPair {
Ok(HpkeKeyPair {
private: private_bytes.as_slice().into(),
public: kp.public_key().to_bytes(),
}
})
} else {
self.rust_crypto.derive_hpke_keypair(config, ikm)
}
@@ -343,10 +346,10 @@ impl Default for HybridCryptoProvider {
}
}
impl OpenMlsCryptoProvider for HybridCryptoProvider {
impl OpenMlsProvider for HybridCryptoProvider {
type CryptoProvider = HybridCrypto;
type RandProvider = RustCrypto;
type KeyStoreProvider = DiskKeyStore;
type StorageProvider = DiskKeyStore;
fn crypto(&self) -> &Self::CryptoProvider {
&self.crypto
@@ -356,7 +359,7 @@ impl OpenMlsCryptoProvider for HybridCryptoProvider {
self.crypto.rust_crypto()
}
fn key_store(&self) -> &Self::KeyStoreProvider {
fn storage(&self) -> &Self::StorageProvider {
&self.key_store
}
}
@@ -383,7 +386,7 @@ mod tests {
let crypto = HybridCrypto::new();
let ikm = b"test-ikm-for-hybrid-hpke-keypair";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
assert_eq!(keypair.public.len(), HYBRID_PUBLIC_KEY_LEN);
assert_eq!(keypair.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN);
@@ -397,7 +400,7 @@ mod tests {
info,
aad,
plaintext,
);
).unwrap();
assert!(!ct.kem_output.as_slice().is_empty());
assert!(!ct.ciphertext.as_slice().is_empty());
@@ -419,7 +422,7 @@ mod tests {
let crypto = HybridCrypto::new();
let ikm = b"exporter-ikm";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
let info = b"";
let exporter_context = b"MLS 1.0 external init";
let exporter_length = 32;
@@ -457,7 +460,7 @@ mod tests {
let crypto = HybridCrypto::new_classical();
let ikm = b"test-ikm-for-classical-hpke";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
// Classical X25519 keys are 32 bytes
assert_eq!(keypair.public.len(), 32);
assert_eq!(keypair.private.as_ref().len(), 32);
@@ -469,7 +472,7 @@ mod tests {
let crypto = HybridCrypto::new_classical();
let ikm = b"test-ikm-for-classical-round-trip";
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm);
let keypair = crypto.derive_hpke_keypair(hpke_config_dhkem_x25519(), ikm).unwrap();
assert_eq!(keypair.public.len(), 32); // classical key
let plaintext = b"hello classical MLS";
@@ -482,7 +485,7 @@ mod tests {
info,
aad,
plaintext,
);
).unwrap();
assert!(!ct.kem_output.as_slice().is_empty());
let decrypted = crypto
@@ -501,7 +504,7 @@ mod tests {
#[test]
fn key_package_generation_with_hybrid_provider() {
use openmls::prelude::{
Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
BasicCredential, CredentialWithKey, KeyPackage,
};
use std::sync::Arc;
use tls_codec::Serialize;
@@ -514,26 +517,24 @@ mod tests {
let provider = HybridCryptoProvider::default();
let identity = Arc::new(IdentityKeypair::generate());
let credential = Credential::new(
identity.public_key_bytes().to_vec(),
CredentialType::Basic,
)
.unwrap();
let credential: openmls::prelude::Credential =
BasicCredential::new(identity.public_key_bytes().to_vec()).into();
let credential_with_key = CredentialWithKey {
credential,
signature_key: identity.public_key_bytes().to_vec().into(),
};
let key_package = KeyPackage::builder()
let key_package_bundle = KeyPackage::builder()
.build(
CryptoConfig::with_default_version(CIPHERSUITE),
CIPHERSUITE,
&provider,
identity.as_ref(),
credential_with_key,
)
.expect("KeyPackage with hybrid HPKE");
let bytes = key_package
let bytes = key_package_bundle
.key_package()
.tls_serialize_detached()
.expect("serialize KeyPackage");
assert!(!bytes.is_empty());

View File

@@ -90,7 +90,7 @@ impl IdentityKeypair {
/// `openmls_basic_credential` crate.
#[cfg(feature = "native")]
impl openmls_traits::signatures::Signer for IdentityKeypair {
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::types::Error> {
fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, openmls_traits::signatures::SignerError> {
let sk = self.signing_key();
let sig: ed25519_dalek::Signature = sk.sign(payload);
Ok(sig.to_bytes().to_vec())

View File

@@ -17,10 +17,10 @@
//! The resulting bytes are opaque to the quicprochat transport layer.
use openmls::prelude::{
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
KeyPackageIn, TlsDeserializeTrait, TlsSerializeTrait,
BasicCredential, Ciphersuite, CredentialWithKey, KeyPackage, KeyPackageIn,
};
use openmls_rust_crypto::OpenMlsRustCrypto;
use tls_codec::{Deserialize as TlsDeserializeTrait, Serialize as TlsSerializeTrait};
use sha2::{Digest, Sha256};
use crate::{error::CoreError, identity::IdentityKeypair};
@@ -74,8 +74,8 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
// Build a BasicCredential using the raw Ed25519 public key bytes as the
// MLS identity. Per RFC 9420, any byte string may serve as the identity.
let credential = Credential::new(identity.public_key_bytes().to_vec(), CredentialType::Basic)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
let credential: openmls::prelude::Credential =
BasicCredential::new(identity.public_key_bytes().to_vec()).into();
// The `signature_key` in CredentialWithKey is the Ed25519 public key that
// will be used to verify the KeyPackage's leaf node signature.
@@ -87,19 +87,13 @@ pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<
// `IdentityKeypair` implements `openmls_traits::signatures::Signer`
// so it can be passed directly to the builder.
let key_package = KeyPackage::builder()
.build(
CryptoConfig::with_default_version(CIPHERSUITE),
&backend,
identity,
credential_with_key,
)
let key_package_bundle = KeyPackage::builder()
.build(CIPHERSUITE, &backend, identity, credential_with_key)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
// TLS-encode the KeyPackage using the trait from the openmls prelude.
// This uses tls_codec 0.3 (the same version openmls uses internally),
// avoiding a duplicate-trait conflict with tls_codec 0.4.
let tls_bytes = key_package
// TLS-encode the KeyPackage.
let tls_bytes = key_package_bundle
.key_package()
.tls_serialize_detached()
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;

View File

@@ -1,23 +1,21 @@
use std::{
collections::HashMap,
fs,
path::{Path, PathBuf},
sync::RwLock,
};
use openmls_traits::key_store::{MlsEntity, OpenMlsKeyStore};
use openmls_memory_storage::MemoryStorage;
use openmls_traits::storage::{traits, StorageProvider, CURRENT_VERSION};
/// A disk-backed key store implementing `OpenMlsKeyStore`.
/// A disk-backed storage provider implementing `StorageProvider`.
///
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on
/// every store/delete so HPKE init keys survive process restarts.
/// Wraps `openmls_memory_storage::MemoryStorage` and flushes to disk on every
/// write so that HPKE init keys and group state survive process restarts.
///
/// # Serialization
///
/// Uses bincode for both individual MLS entity values and the outer HashMap
/// container. This is required because OpenMLS types use bincode-compatible
/// serialization, and `HashMap<Vec<u8>, Vec<u8>>` requires a binary format
/// (JSON mandates string keys).
/// Uses bincode for the outer `HashMap<Vec<u8>, Vec<u8>>` container when
/// persisting to disk. The inner values use serde_json (matching
/// `MemoryStorage`'s serialization format).
///
/// # Persistence security
///
@@ -26,15 +24,17 @@ use openmls_traits::key_store::{MlsEntity, OpenMlsKeyStore};
#[derive(Debug)]
pub struct DiskKeyStore {
path: Option<PathBuf>,
values: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
storage: MemoryStorage,
}
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
#[derive(thiserror::Error, Debug)]
pub enum DiskKeyStoreError {
#[error("serialization error")]
Serialization,
#[error("io error: {0}")]
Io(String),
#[error("memory storage error: {0}")]
MemoryStorage(#[from] openmls_memory_storage::MemoryStorageError),
}
impl DiskKeyStore {
@@ -42,28 +42,35 @@ impl DiskKeyStore {
pub fn ephemeral() -> Self {
Self {
path: None,
values: RwLock::new(HashMap::new()),
storage: MemoryStorage::default(),
}
}
/// Persistent keystore backed by `path`. Creates an empty store if missing.
pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
let path = path.as_ref().to_path_buf();
let values = if path.exists() {
let storage = if path.exists() {
let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
if bytes.is_empty() {
HashMap::new()
MemoryStorage::default()
} else {
bincode::deserialize(&bytes)
.map_err(|_| DiskKeyStoreError::Serialization)?
let map: std::collections::HashMap<Vec<u8>, Vec<u8>> =
bincode::deserialize(&bytes)
.map_err(|_| DiskKeyStoreError::Serialization)?;
let storage = MemoryStorage::default();
let mut values = storage.values.write()
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
*values = map;
drop(values);
storage
}
} else {
HashMap::new()
MemoryStorage::default()
};
let store = Self {
path: Some(path),
values: RwLock::new(values),
storage,
};
// Set restrictive file permissions on the keystore file.
@@ -76,8 +83,10 @@ impl DiskKeyStore {
let Some(path) = &self.path else {
return Ok(());
};
let values = self.values.read().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
let bytes = bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?;
let values = self.storage.values.read()
.map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
let bytes = bincode::serialize(&*values)
.map_err(|_| DiskKeyStoreError::Serialization)?;
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
}
@@ -86,6 +95,32 @@ impl DiskKeyStore {
Ok(())
}
/// Serialize the backing storage to bytes (bincode).
///
/// This captures all key material *and* MLS group state held by the
/// `StorageProvider`, allowing the caller to persist it in a database
/// column instead of (or in addition to) on-disk files.
///
/// # Errors
///
/// Returns `Io` if the internal lock is poisoned, or `Serialization`
/// if bincode encoding fails.
pub fn to_bytes(&self) -> Result<Vec<u8>, DiskKeyStoreError> {
    let guard = self
        .storage
        .values
        .read()
        .map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
    let encoded =
        bincode::serialize(&*guard).map_err(|_| DiskKeyStoreError::Serialization)?;
    Ok(encoded)
}
/// Restore a `DiskKeyStore` from bytes previously produced by [`to_bytes`].
///
/// The returned store has no backing file (`path: None`); use
/// [`Self::persistent`] when on-disk persistence is wanted.
///
/// # Errors
///
/// Returns `Serialization` if the blob is not a valid bincode-encoded map,
/// or `Io` if the fresh storage lock is poisoned.
///
/// [`to_bytes`]: Self::to_bytes
pub fn from_bytes(bytes: &[u8]) -> Result<Self, DiskKeyStoreError> {
    let decoded: std::collections::HashMap<Vec<u8>, Vec<u8>> =
        bincode::deserialize(bytes).map_err(|_| DiskKeyStoreError::Serialization)?;
    let storage = MemoryStorage::default();
    {
        // Scope the write guard so `storage` can be moved into `Self` below.
        let mut slot = storage
            .values
            .write()
            .map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
        *slot = decoded;
    }
    Ok(Self { path: None, storage })
}
/// Restrict file permissions to owner-only (0o600) on Unix.
#[cfg(unix)]
fn set_file_permissions(&self) -> Result<(), DiskKeyStoreError> {
@@ -112,31 +147,567 @@ impl Default for DiskKeyStore {
}
}
impl OpenMlsKeyStore for DiskKeyStore {
/// Delegate all `StorageProvider` methods to the inner `MemoryStorage`,
/// flushing to disk after every write/delete operation.
///
/// The flush errors are mapped to `DiskKeyStoreError` via the
/// `MemoryStorageError` conversion. If a flush fails, the in-memory state
/// is still updated (matching the old DiskKeyStore behavior).
impl StorageProvider<CURRENT_VERSION> for DiskKeyStore {
type Error = DiskKeyStoreError;
fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> {
let value = bincode::serialize(v).map_err(|_| DiskKeyStoreError::Serialization)?;
let mut values = self.values.write().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
values.insert(k.to_vec(), value);
drop(values);
/// Persist the join config for `group_id`, then flush the full store to disk.
fn write_mls_join_config<
GroupId: traits::GroupId<CURRENT_VERSION>,
MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
config: &MlsGroupJoinConfig,
) -> Result<(), Self::Error> {
// Delegate to MemoryStorage, then write the map out (flush is a no-op
// for ephemeral stores with no path).
self.storage.write_mls_join_config(group_id, config)?;
self.flush()
}
fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> {
let values = match self.values.read() {
Ok(v) => v,
Err(_) => return None,
};
values
.get(k)
.and_then(|bytes| bincode::deserialize(bytes).ok())
fn append_own_leaf_node<
GroupId: traits::GroupId<CURRENT_VERSION>,
LeafNode: traits::LeafNode<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
leaf_node: &LeafNode,
) -> Result<(), Self::Error> {
self.storage.append_own_leaf_node(group_id, leaf_node)?;
self.flush()
}
fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> {
let mut values = self.values.write().map_err(|_| DiskKeyStoreError::Io("lock poisoned".into()))?;
values.remove(k);
drop(values);
/// Queue a pending proposal for `group_id`, then flush the store to disk.
fn queue_proposal<
GroupId: traits::GroupId<CURRENT_VERSION>,
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
proposal_ref: &ProposalRef,
proposal: &QueuedProposal,
) -> Result<(), Self::Error> {
// Delegate to MemoryStorage, then persist the updated map.
self.storage.queue_proposal(group_id, proposal_ref, proposal)?;
self.flush()
}
fn write_tree<
GroupId: traits::GroupId<CURRENT_VERSION>,
TreeSync: traits::TreeSync<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
tree: &TreeSync,
) -> Result<(), Self::Error> {
self.storage.write_tree(group_id, tree)?;
self.flush()
}
fn write_interim_transcript_hash<
GroupId: traits::GroupId<CURRENT_VERSION>,
InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
interim_transcript_hash: &InterimTranscriptHash,
) -> Result<(), Self::Error> {
self.storage.write_interim_transcript_hash(group_id, interim_transcript_hash)?;
self.flush()
}
fn write_context<
GroupId: traits::GroupId<CURRENT_VERSION>,
GroupContext: traits::GroupContext<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
group_context: &GroupContext,
) -> Result<(), Self::Error> {
self.storage.write_context(group_id, group_context)?;
self.flush()
}
fn write_confirmation_tag<
GroupId: traits::GroupId<CURRENT_VERSION>,
ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
confirmation_tag: &ConfirmationTag,
) -> Result<(), Self::Error> {
self.storage.write_confirmation_tag(group_id, confirmation_tag)?;
self.flush()
}
fn write_group_state<
GroupState: traits::GroupState<CURRENT_VERSION>,
GroupId: traits::GroupId<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
group_state: &GroupState,
) -> Result<(), Self::Error> {
self.storage.write_group_state(group_id, group_state)?;
self.flush()
}
fn write_message_secrets<
GroupId: traits::GroupId<CURRENT_VERSION>,
MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
message_secrets: &MessageSecrets,
) -> Result<(), Self::Error> {
self.storage.write_message_secrets(group_id, message_secrets)?;
self.flush()
}
fn write_resumption_psk_store<
GroupId: traits::GroupId<CURRENT_VERSION>,
ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
resumption_psk_store: &ResumptionPskStore,
) -> Result<(), Self::Error> {
self.storage.write_resumption_psk_store(group_id, resumption_psk_store)?;
self.flush()
}
fn write_own_leaf_index<
GroupId: traits::GroupId<CURRENT_VERSION>,
LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
own_leaf_index: &LeafNodeIndex,
) -> Result<(), Self::Error> {
self.storage.write_own_leaf_index(group_id, own_leaf_index)?;
self.flush()
}
fn write_group_epoch_secrets<
GroupId: traits::GroupId<CURRENT_VERSION>,
GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
group_epoch_secrets: &GroupEpochSecrets,
) -> Result<(), Self::Error> {
self.storage.write_group_epoch_secrets(group_id, group_epoch_secrets)?;
self.flush()
}
fn write_signature_key_pair<
SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
>(
&self,
public_key: &SignaturePublicKey,
signature_key_pair: &SignatureKeyPair,
) -> Result<(), Self::Error> {
self.storage.write_signature_key_pair(public_key, signature_key_pair)?;
self.flush()
}
fn write_encryption_key_pair<
EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
>(
&self,
public_key: &EncryptionKey,
key_pair: &HpkeKeyPair,
) -> Result<(), Self::Error> {
self.storage.write_encryption_key_pair(public_key, key_pair)?;
self.flush()
}
fn write_encryption_epoch_key_pairs<
GroupId: traits::GroupId<CURRENT_VERSION>,
EpochKey: traits::EpochKey<CURRENT_VERSION>,
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
epoch: &EpochKey,
leaf_index: u32,
key_pairs: &[HpkeKeyPair],
) -> Result<(), Self::Error> {
self.storage.write_encryption_epoch_key_pairs(group_id, epoch, leaf_index, key_pairs)?;
self.flush()
}
fn write_key_package<
HashReference: traits::HashReference<CURRENT_VERSION>,
KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
>(
&self,
hash_ref: &HashReference,
key_package: &KeyPackage,
) -> Result<(), Self::Error> {
self.storage.write_key_package(hash_ref, key_package)?;
self.flush()
}
fn write_psk<
PskId: traits::PskId<CURRENT_VERSION>,
PskBundle: traits::PskBundle<CURRENT_VERSION>,
>(
&self,
psk_id: &PskId,
psk: &PskBundle,
) -> Result<(), Self::Error> {
self.storage.write_psk(psk_id, psk)?;
self.flush()
}
// --- getters (no flush needed) ---
fn mls_group_join_config<
GroupId: traits::GroupId<CURRENT_VERSION>,
MlsGroupJoinConfig: traits::MlsGroupJoinConfig<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<MlsGroupJoinConfig>, Self::Error> {
Ok(self.storage.mls_group_join_config(group_id)?)
}
fn own_leaf_nodes<
GroupId: traits::GroupId<CURRENT_VERSION>,
LeafNode: traits::LeafNode<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Vec<LeafNode>, Self::Error> {
Ok(self.storage.own_leaf_nodes(group_id)?)
}
fn queued_proposal_refs<
GroupId: traits::GroupId<CURRENT_VERSION>,
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Vec<ProposalRef>, Self::Error> {
Ok(self.storage.queued_proposal_refs(group_id)?)
}
fn queued_proposals<
GroupId: traits::GroupId<CURRENT_VERSION>,
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
QueuedProposal: traits::QueuedProposal<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Vec<(ProposalRef, QueuedProposal)>, Self::Error> {
Ok(self.storage.queued_proposals(group_id)?)
}
fn tree<
GroupId: traits::GroupId<CURRENT_VERSION>,
TreeSync: traits::TreeSync<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<TreeSync>, Self::Error> {
Ok(self.storage.tree(group_id)?)
}
fn group_context<
GroupId: traits::GroupId<CURRENT_VERSION>,
GroupContext: traits::GroupContext<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<GroupContext>, Self::Error> {
Ok(self.storage.group_context(group_id)?)
}
fn interim_transcript_hash<
GroupId: traits::GroupId<CURRENT_VERSION>,
InterimTranscriptHash: traits::InterimTranscriptHash<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<InterimTranscriptHash>, Self::Error> {
Ok(self.storage.interim_transcript_hash(group_id)?)
}
fn confirmation_tag<
GroupId: traits::GroupId<CURRENT_VERSION>,
ConfirmationTag: traits::ConfirmationTag<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<ConfirmationTag>, Self::Error> {
Ok(self.storage.confirmation_tag(group_id)?)
}
fn group_state<
GroupState: traits::GroupState<CURRENT_VERSION>,
GroupId: traits::GroupId<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<GroupState>, Self::Error> {
Ok(self.storage.group_state(group_id)?)
}
fn message_secrets<
GroupId: traits::GroupId<CURRENT_VERSION>,
MessageSecrets: traits::MessageSecrets<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<MessageSecrets>, Self::Error> {
Ok(self.storage.message_secrets(group_id)?)
}
fn resumption_psk_store<
GroupId: traits::GroupId<CURRENT_VERSION>,
ResumptionPskStore: traits::ResumptionPskStore<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<ResumptionPskStore>, Self::Error> {
Ok(self.storage.resumption_psk_store(group_id)?)
}
fn own_leaf_index<
GroupId: traits::GroupId<CURRENT_VERSION>,
LeafNodeIndex: traits::LeafNodeIndex<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<LeafNodeIndex>, Self::Error> {
Ok(self.storage.own_leaf_index(group_id)?)
}
fn group_epoch_secrets<
GroupId: traits::GroupId<CURRENT_VERSION>,
GroupEpochSecrets: traits::GroupEpochSecrets<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<Option<GroupEpochSecrets>, Self::Error> {
Ok(self.storage.group_epoch_secrets(group_id)?)
}
fn signature_key_pair<
SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
SignatureKeyPair: traits::SignatureKeyPair<CURRENT_VERSION>,
>(
&self,
public_key: &SignaturePublicKey,
) -> Result<Option<SignatureKeyPair>, Self::Error> {
Ok(self.storage.signature_key_pair(public_key)?)
}
fn encryption_key_pair<
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>,
>(
&self,
public_key: &EncryptionKey,
) -> Result<Option<HpkeKeyPair>, Self::Error> {
Ok(self.storage.encryption_key_pair(public_key)?)
}
fn encryption_epoch_key_pairs<
GroupId: traits::GroupId<CURRENT_VERSION>,
EpochKey: traits::EpochKey<CURRENT_VERSION>,
HpkeKeyPair: traits::HpkeKeyPair<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
epoch: &EpochKey,
leaf_index: u32,
) -> Result<Vec<HpkeKeyPair>, Self::Error> {
Ok(self.storage.encryption_epoch_key_pairs(group_id, epoch, leaf_index)?)
}
fn key_package<
KeyPackageRef: traits::HashReference<CURRENT_VERSION>,
KeyPackage: traits::KeyPackage<CURRENT_VERSION>,
>(
&self,
hash_ref: &KeyPackageRef,
) -> Result<Option<KeyPackage>, Self::Error> {
Ok(self.storage.key_package(hash_ref)?)
}
fn psk<
PskBundle: traits::PskBundle<CURRENT_VERSION>,
PskId: traits::PskId<CURRENT_VERSION>,
>(
&self,
psk_id: &PskId,
) -> Result<Option<PskBundle>, Self::Error> {
Ok(self.storage.psk(psk_id)?)
}
// --- deleters (flush needed) ---
fn remove_proposal<
GroupId: traits::GroupId<CURRENT_VERSION>,
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
proposal_ref: &ProposalRef,
) -> Result<(), Self::Error> {
self.storage.remove_proposal(group_id, proposal_ref)?;
self.flush()
}
fn delete_own_leaf_nodes<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_own_leaf_nodes(group_id)?;
self.flush()
}
fn delete_group_config<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_group_config(group_id)?;
self.flush()
}
fn delete_tree<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_tree(group_id)?;
self.flush()
}
fn delete_confirmation_tag<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_confirmation_tag(group_id)?;
self.flush()
}
fn delete_group_state<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_group_state(group_id)?;
self.flush()
}
fn delete_context<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_context(group_id)?;
self.flush()
}
fn delete_interim_transcript_hash<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_interim_transcript_hash(group_id)?;
self.flush()
}
fn delete_message_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_message_secrets(group_id)?;
self.flush()
}
fn delete_all_resumption_psk_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_all_resumption_psk_secrets(group_id)?;
self.flush()
}
fn delete_own_leaf_index<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_own_leaf_index(group_id)?;
self.flush()
}
fn delete_group_epoch_secrets<GroupId: traits::GroupId<CURRENT_VERSION>>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.delete_group_epoch_secrets(group_id)?;
self.flush()
}
fn clear_proposal_queue<
GroupId: traits::GroupId<CURRENT_VERSION>,
ProposalRef: traits::ProposalRef<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
) -> Result<(), Self::Error> {
self.storage.clear_proposal_queue::<GroupId, ProposalRef>(group_id)?;
self.flush()
}
fn delete_signature_key_pair<
SignaturePublicKey: traits::SignaturePublicKey<CURRENT_VERSION>,
>(
&self,
public_key: &SignaturePublicKey,
) -> Result<(), Self::Error> {
self.storage.delete_signature_key_pair(public_key)?;
self.flush()
}
fn delete_encryption_key_pair<EncryptionKey: traits::EncryptionKey<CURRENT_VERSION>>(
&self,
public_key: &EncryptionKey,
) -> Result<(), Self::Error> {
self.storage.delete_encryption_key_pair(public_key)?;
self.flush()
}
fn delete_encryption_epoch_key_pairs<
GroupId: traits::GroupId<CURRENT_VERSION>,
EpochKey: traits::EpochKey<CURRENT_VERSION>,
>(
&self,
group_id: &GroupId,
epoch: &EpochKey,
leaf_index: u32,
) -> Result<(), Self::Error> {
self.storage.delete_encryption_epoch_key_pairs(group_id, epoch, leaf_index)?;
self.flush()
}
fn delete_key_package<KeyPackageRef: traits::HashReference<CURRENT_VERSION>>(
&self,
hash_ref: &KeyPackageRef,
) -> Result<(), Self::Error> {
self.storage.delete_key_package(hash_ref)?;
self.flush()
}
fn delete_psk<PskKey: traits::PskId<CURRENT_VERSION>>(
&self,
psk_id: &PskKey,
) -> Result<(), Self::Error> {
self.storage.delete_psk(psk_id)?;
self.flush()
}
}