chore: rename quicproquo → quicprochat in Rust workspace

Rename all crate directories, package names, binary names, proto
package/module paths, ALPN strings, env var prefixes, config filenames,
mDNS service names, and plugin ABI symbols from quicproquo/qpq to
quicprochat/qpc.
This commit is contained in:
2026-03-07 18:24:52 +01:00
parent d8c1392587
commit a710037dde
212 changed files with 609 additions and 609 deletions

View File

@@ -0,0 +1,150 @@
#![allow(clippy::unwrap_used)]
//! Benchmark: Identity keypair operations, sealed sender, and message padding.
//!
//! Covers:
//! - [`IdentityKeypair`] generation, signing, and signature verification
//! - Sealed sender `seal` / `unseal` (Ed25519 sign + verify overhead)
//! - Message padding `pad` / `unpad` at various payload sizes
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use quicprochat_core::{compute_safety_number, IdentityKeypair, padding};
// ── Identity keypair benchmarks ──────────────────────────────────────────────
/// Benchmark generating a fresh identity keypair.
fn bench_identity_keygen(c: &mut Criterion) {
    c.bench_function("identity_keygen", |bencher| {
        bencher.iter(|| {
            let keypair = IdentityKeypair::generate();
            black_box(keypair)
        });
    });
}
/// Benchmark signing a fixed payload with a pre-generated identity.
fn bench_identity_sign(c: &mut Criterion) {
    let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";
    let identity = IdentityKeypair::generate();
    c.bench_function("identity_sign", |bencher| {
        bencher.iter(|| {
            let sig = identity.sign_raw(black_box(payload));
            black_box(sig)
        });
    });
}
/// Benchmark verifying a known-good signature over a fixed payload.
fn bench_identity_verify(c: &mut Criterion) {
    let payload = b"benchmark signing payload -- 32+ bytes of realistic data here";
    let identity = IdentityKeypair::generate();
    let pk = identity.public_key_bytes();
    let sig = identity.sign_raw(payload);
    c.bench_function("identity_verify", |bencher| {
        bencher.iter(|| {
            // The signature is valid by construction, so unwrap keeps the
            // error path out of the timed loop.
            IdentityKeypair::verify_raw(black_box(&pk), black_box(payload), black_box(&sig))
                .unwrap();
        });
    });
}
// ── Sealed sender benchmarks ─────────────────────────────────────────────────
/// Benchmark sealed-sender `seal` and `unseal` across several payload sizes.
fn bench_sealed_sender(c: &mut Criterion) {
    use quicprochat_core::sealed_sender::{seal, unseal};
    let identity = IdentityKeypair::generate();
    let sizes: &[(&str, usize)] = &[("32B", 32), ("256B", 256), ("1KB", 1024), ("4KB", 4096)];

    // seal: sign + wrap the plaintext.
    let mut seal_group = c.benchmark_group("sealed_sender_seal");
    for &(label, size) in sizes {
        let payload = vec![0xABu8; size];
        seal_group.bench_with_input(BenchmarkId::from_parameter(label), &payload, |b, payload| {
            b.iter(|| black_box(seal(black_box(&identity), black_box(payload))));
        });
    }
    seal_group.finish();

    // unseal: verify + unwrap a pre-sealed envelope (sealing is untimed).
    let mut unseal_group = c.benchmark_group("sealed_sender_unseal");
    for &(label, size) in sizes {
        let payload = vec![0xABu8; size];
        let sealed = seal(&identity, &payload);
        unseal_group.bench_with_input(BenchmarkId::from_parameter(label), &sealed, |b, sealed| {
            b.iter(|| black_box(unseal(black_box(sealed)).unwrap()));
        });
    }
    unseal_group.finish();
}
// ── Message padding benchmarks ────────────────────────────────────────────────
/// Benchmark `padding::pad` / `padding::unpad` at one size per bucket.
fn bench_padding(c: &mut Criterion) {
    // Representative sizes: one per bucket + oversized
    let sizes: &[(&str, usize)] = &[
        ("50B", 50),     // → 256 bucket
        ("512B", 512),   // → 1024 bucket
        ("2KB", 2048),   // → 4096 bucket
        ("8KB", 8192),   // → 16384 bucket
        ("20KB", 20480), // → 32768 (oversized)
    ];

    let mut pad_group = c.benchmark_group("padding_pad");
    for &(label, size) in sizes {
        let payload = vec![0xABu8; size];
        pad_group.bench_with_input(BenchmarkId::from_parameter(label), &payload, |b, payload| {
            b.iter(|| black_box(padding::pad(black_box(payload))));
        });
    }
    pad_group.finish();

    // unpad consumes a pre-padded buffer; padding itself is untimed.
    let mut unpad_group = c.benchmark_group("padding_unpad");
    for &(label, size) in sizes {
        let payload = vec![0xABu8; size];
        let padded = padding::pad(&payload);
        unpad_group.bench_with_input(BenchmarkId::from_parameter(label), &padded, |b, padded| {
            b.iter(|| black_box(padding::unpad(black_box(padded)).unwrap()));
        });
    }
    unpad_group.finish();
}
// ── Safety number benchmarks ─────────────────────────────────────────────────
/// Benchmark safety-number computation for a fixed pair of 32-byte keys.
fn bench_safety_number(c: &mut Criterion) {
    let (key_a, key_b) = ([0x1au8; 32], [0x2bu8; 32]);
    c.bench_function("safety_number", |bencher| {
        bencher.iter(|| black_box(compute_safety_number(black_box(&key_a), black_box(&key_b))));
    });
}
// Register every benchmark above and emit the criterion-generated `main`.
criterion_group!(
    benches,
    bench_identity_keygen,
    bench_identity_sign,
    bench_identity_verify,
    bench_sealed_sender,
    bench_padding,
    bench_safety_number,
);
criterion_main!(benches);

View File

@@ -0,0 +1,153 @@
#![allow(clippy::unwrap_used)]
//! Benchmark: Hybrid KEM (X25519 + ML-KEM-768) vs classical-only encryption.
//!
//! Compares keypair generation, encryption, and decryption times for the
//! hybrid post-quantum scheme against classical X25519 + ChaCha20-Poly1305.
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use quicprochat_core::{hybrid_encrypt, hybrid_decrypt, HybridKeypair};
// ── Classical baseline (X25519 + ChaCha20-Poly1305) ─────────────────────────
use chacha20poly1305::{
aead::{Aead, KeyInit},
ChaCha20Poly1305, Key, Nonce,
};
use hkdf::Hkdf;
use rand::{rngs::OsRng, RngCore};
use sha2::Sha256;
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
/// Classical (non-post-quantum) X25519 keypair used as the baseline
/// against the hybrid scheme.
struct ClassicalKeypair {
    // Long-term recipient secret; senders use an ephemeral key instead.
    secret: StaticSecret,
    public: X25519Public,
}
impl ClassicalKeypair {
    /// Generate a fresh random X25519 keypair from the OS RNG.
    fn generate() -> Self {
        let secret = StaticSecret::random_from_rng(OsRng);
        let public = X25519Public::from(&secret);
        Self { secret, public }
    }
}
/// Classical ECIES-style encryption: ephemeral X25519 DH with the
/// recipient's static key, HKDF-SHA256 key derivation, then
/// ChaCha20-Poly1305 AEAD.
///
/// Wire layout: `eph_pk(32) || nonce(12) || ciphertext`.
fn classical_encrypt(recipient_pk: &X25519Public, plaintext: &[u8]) -> Vec<u8> {
    // Fresh ephemeral key per message.
    let eph_secret = EphemeralSecret::random_from_rng(OsRng);
    let eph_public = X25519Public::from(&eph_secret);
    let shared = eph_secret.diffie_hellman(recipient_pk);

    // Derive a 32-byte AEAD key from the raw shared secret.
    let mut key_bytes = [0u8; 32];
    Hkdf::<Sha256>::new(None, shared.as_bytes())
        .expand(b"classical-bench", &mut key_bytes)
        .unwrap();

    // Random 96-bit nonce; unique per message since the key is ephemeral.
    let mut nonce_bytes = [0u8; 12];
    OsRng.fill_bytes(&mut nonce_bytes);

    let ct = ChaCha20Poly1305::new(Key::from_slice(&key_bytes))
        .encrypt(Nonce::from_slice(&nonce_bytes), plaintext)
        .unwrap();

    // Wire: eph_pk(32) || nonce(12) || ciphertext
    let mut out = Vec::with_capacity(32 + 12 + ct.len());
    out.extend_from_slice(eph_public.as_bytes());
    out.extend_from_slice(&nonce_bytes);
    out.extend_from_slice(&ct);
    out
}
/// Inverse of [`classical_encrypt`]: parse `eph_pk || nonce || ct`, redo
/// the DH + HKDF, and open the AEAD.
///
/// Panics on envelopes shorter than 44 bytes or on a failed decryption
/// (acceptable in a benchmark where inputs are self-produced).
fn classical_decrypt(keypair: &ClassicalKeypair, envelope: &[u8]) -> Vec<u8> {
    // Fixed-offset parse of the wire format written by classical_encrypt.
    let (header, ct) = envelope.split_at(44);
    let eph_pk = X25519Public::from(<[u8; 32]>::try_from(&header[..32]).unwrap());
    let nonce_bytes: [u8; 12] = header[32..44].try_into().unwrap();

    let shared = keypair.secret.diffie_hellman(&eph_pk);
    let mut key_bytes = [0u8; 32];
    Hkdf::<Sha256>::new(None, shared.as_bytes())
        .expand(b"classical-bench", &mut key_bytes)
        .unwrap();

    ChaCha20Poly1305::new(Key::from_slice(&key_bytes))
        .decrypt(Nonce::from_slice(&nonce_bytes), ct)
        .unwrap()
}
// ── Benchmarks ──────────────────────────────────────────────────────────────
/// Compare keypair generation: hybrid (X25519 + ML-KEM-768) vs classical.
fn bench_keygen(c: &mut Criterion) {
    let mut group = c.benchmark_group("kem_keygen");
    group.bench_function("hybrid", |bencher| {
        bencher.iter(|| black_box(HybridKeypair::generate()))
    });
    group.bench_function("classical", |bencher| {
        bencher.iter(|| black_box(ClassicalKeypair::generate()))
    });
    group.finish();
}
/// Compare encryption throughput, hybrid vs classical, at several sizes.
fn bench_encrypt(c: &mut Criterion) {
    let hybrid_kp = HybridKeypair::generate();
    let hybrid_pk = hybrid_kp.public_key();
    let classical_kp = ClassicalKeypair::generate();
    let mut group = c.benchmark_group("kem_encrypt");
    for &(label, size) in &[("100B", 100usize), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)] {
        let payload = vec![0xABu8; size];
        group.bench_with_input(BenchmarkId::new("hybrid", label), &payload, |b, payload| {
            b.iter(|| hybrid_encrypt(&hybrid_pk, black_box(payload), b"", b"").unwrap());
        });
        group.bench_with_input(BenchmarkId::new("classical", label), &payload, |b, payload| {
            b.iter(|| classical_encrypt(&classical_kp.public, black_box(payload)));
        });
    }
    group.finish();
}
/// Compare decryption throughput, hybrid vs classical, at several sizes.
fn bench_decrypt(c: &mut Criterion) {
    let hybrid_kp = HybridKeypair::generate();
    let hybrid_pk = hybrid_kp.public_key();
    let classical_kp = ClassicalKeypair::generate();
    let mut group = c.benchmark_group("kem_decrypt");
    for &(label, size) in &[("100B", 100usize), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)] {
        // Pre-encrypt once per size; only decryption is timed.
        let payload = vec![0xABu8; size];
        let hybrid_ct = hybrid_encrypt(&hybrid_pk, &payload, b"", b"").unwrap();
        let classical_ct = classical_encrypt(&classical_kp.public, &payload);
        group.bench_with_input(BenchmarkId::new("hybrid", label), &hybrid_ct, |b, ct| {
            b.iter(|| hybrid_decrypt(&hybrid_kp, black_box(ct), b"", b"").unwrap());
        });
        group.bench_with_input(BenchmarkId::new("classical", label), &classical_ct, |b, ct| {
            b.iter(|| classical_decrypt(&classical_kp, black_box(ct)));
        });
    }
    group.finish();
}
// Register every benchmark above and emit the criterion-generated `main`.
criterion_group!(benches, bench_keygen, bench_encrypt, bench_decrypt);
criterion_main!(benches);

View File

@@ -0,0 +1,157 @@
#![allow(clippy::unwrap_used)]
//! Benchmark: MLS group operations at various group sizes.
//!
//! Measures KeyPackage generation, group creation, member addition,
//! message encryption, and message decryption.
use std::sync::Arc;
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use quicprochat_core::{GroupMember, IdentityKeypair};
/// Build an MLS group with `size` total members.
///
/// The creator makes the group, then each joiner in turn generates a
/// KeyPackage, is added by the creator, and processes its Welcome.
/// Returns `(creator, joiners)` where `joiners` has `size - 1` entries.
fn setup_group(size: usize) -> (GroupMember, Vec<GroupMember>) {
    let mut creator = GroupMember::new(Arc::new(IdentityKeypair::generate()));
    creator.create_group(b"bench-group").unwrap();
    let mut members = Vec::with_capacity(size.saturating_sub(1));
    for _ in 1..size {
        let mut joiner = GroupMember::new(Arc::new(IdentityKeypair::generate()));
        // Order matters: KeyPackage -> creator's Add/Commit -> joiner's Welcome.
        let kp = joiner.generate_key_package().unwrap();
        let (_commit, welcome) = creator.add_member(&kp).unwrap();
        joiner.join_group(&welcome).unwrap();
        members.push(joiner);
    }
    (creator, members)
}
/// Benchmark KeyPackage generation by a fresh member.
fn bench_keygen(c: &mut Criterion) {
    c.bench_function("mls_keygen", |bencher| {
        bencher.iter_batched(
            // Setup (untimed): a brand-new member with a fresh identity.
            || GroupMember::new(Arc::new(IdentityKeypair::generate())),
            // Timed: generating one KeyPackage.
            |mut member| {
                member.generate_key_package().unwrap();
            },
            BatchSize::SmallInput,
        );
    });
}
/// Benchmark creating a brand-new group.
fn bench_group_create(c: &mut Criterion) {
    c.bench_function("mls_group_create", |bencher| {
        bencher.iter_batched(
            // Setup (untimed): a fresh member not yet in any group.
            || GroupMember::new(Arc::new(IdentityKeypair::generate())),
            // Timed: group creation itself.
            |mut member| {
                member.create_group(b"bench-group").unwrap();
            },
            BatchSize::SmallInput,
        );
    });
}
/// Benchmark adding one member to groups of various existing sizes.
fn bench_add_member(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_add_member");
    // Group setup is expensive, so keep the sample count low.
    group.sample_size(10);
    for size in [2, 10, 50, 100] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |bencher, &size| {
            bencher.iter_batched(
                // Setup (untimed): a populated group plus one prospective
                // joiner's KeyPackage.
                || {
                    let (creator, members) = setup_group(size);
                    let mut joiner = GroupMember::new(Arc::new(IdentityKeypair::generate()));
                    let kp = joiner.generate_key_package().unwrap();
                    (creator, members, joiner, kp)
                },
                // Timed: the creator's add operation.
                |(mut creator, _members, _joiner, kp)| {
                    creator.add_member(&kp).unwrap();
                },
                BatchSize::SmallInput,
            );
        });
    }
    group.finish();
}
/// Benchmark committing a self-update (epoch rotation) at several sizes.
fn bench_epoch_rotation(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_epoch_rotation");
    // Each sample rebuilds a whole group; keep the sample count low.
    group.sample_size(10);
    for size in [2, 10, 50] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |bencher, &size| {
            bencher.iter_batched(
                // Setup (untimed): build the group and queue a self-update
                // proposal so the commit below actually rotates the epoch.
                || {
                    let (mut creator, members) = setup_group(size);
                    // Propose a self-update to simulate epoch rotation
                    let proposal = creator.propose_self_update().unwrap();
                    (creator, members, proposal)
                },
                // Timed: committing the pending proposal advances the epoch.
                |(mut creator, _members, _proposal)| {
                    creator.commit_pending_proposals().unwrap();
                },
                BatchSize::SmallInput,
            );
        });
    }
    group.finish();
}
/// Benchmark encrypting an application message in groups of various sizes.
fn bench_send_message(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_send_message");
    for size in [2, 10, 50] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |bencher, &size| {
            // One group per benchmark; sending is non-destructive enough to
            // repeat on the same creator state each iteration.
            let (mut creator, _members) = setup_group(size);
            let payload = b"hello benchmark message";
            bencher.iter(|| {
                creator.send_message(payload).unwrap();
            });
        });
    }
    group.finish();
}
/// Benchmark decrypting an application message on a non-creator member.
fn bench_receive_message(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_receive_message");
    for size in [2, 10, 50] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            // For receive, we need a fresh ciphertext each iteration since
            // MLS message processing is destructive (epoch state changes).
            // We pre-generate a batch and consume them.
            let (mut creator, mut members) = setup_group(size);
            if members.is_empty() {
                // Defensive: unreachable for the sizes above (all >= 2), but
                // guards the members[0] index if the size list ever changes.
                return;
            }
            let payload = b"hello benchmark message";
            b.iter_batched(
                // Setup (untimed): creator encrypts a fresh message.
                || creator.send_message(payload).unwrap(),
                |ct| {
                    // Receive on the first joiner
                    // NOTE(review): the Result is discarded — presumably to
                    // tolerate ratchet-state edge cases across batched
                    // receives; confirm failures here can't silently skew
                    // the measured times.
                    let _ = members[0].receive_message(&ct);
                },
                BatchSize::SmallInput,
            );
        });
    }
    group.finish();
}
// Register every benchmark above and emit the criterion-generated `main`.
criterion_group!(
    benches,
    bench_keygen,
    bench_group_create,
    bench_add_member,
    bench_epoch_rotation,
    bench_send_message,
    bench_receive_message,
);
criterion_main!(benches);

View File

@@ -0,0 +1,171 @@
#![allow(clippy::unwrap_used)]
//! Benchmark: Cap'n Proto vs Protobuf serialization for chat message envelopes.
//!
//! Compares serialization/deserialization speed and encoded size at three
//! payload sizes (100 B, 1 KB, 4 KB) for a typical Envelope{seq, data} message.
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
// ── Cap'n Proto path ────────────────────────────────────────────────────────
/// Serialize an Envelope{seq, data} via Cap'n Proto and return the bytes.
fn capnp_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
    let mut builder = capnp::message::Builder::new_default();
    // Scope the root borrow so `builder` can be serialized afterwards.
    {
        let mut root = builder.init_root::<quicprochat_proto::node_capnp::envelope::Builder>();
        root.set_seq(seq);
        root.set_data(data);
    }
    quicprochat_proto::to_bytes(&builder).unwrap()
}
/// Deserialize an Envelope{seq, data} from Cap'n Proto bytes.
fn capnp_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
    let reader = quicprochat_proto::from_bytes(bytes).unwrap();
    let root = reader
        .get_root::<quicprochat_proto::node_capnp::envelope::Reader>()
        .unwrap();
    let seq = root.get_seq();
    let data = root.get_data().unwrap().to_vec();
    (seq, data)
}
// ── Protobuf path (hand-coded prost encoding to avoid build-dep) ────────────
//
// Envelope { seq: uint64 (field 1), data: bytes (field 2) }
// Wire format: varint tag + varint seq + len-delimited data
/// Serialize an Envelope{seq, data} in protobuf wire format.
///
/// Produces bytes identical to a prost-generated
/// `Envelope { seq: uint64 = 1, data: bytes = 2 }`, but writes the bytes
/// field directly from the borrowed slice. The previous version called
/// `bytes::encode(2, &data.to_vec(), ...)`, allocating and copying a fresh
/// Vec on every call purely to satisfy the encoder's type — overhead a real
/// prost message would not pay, which skewed the serialize benchmark.
fn protobuf_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
    // Worst case: 1 tag + 10 varint (seq) + 1 tag + 5 varint (len) + data.
    let mut buf = Vec::with_capacity(17 + data.len());
    // Field 1: uint64 seq, wire type 0 (varint), tag = (1 << 3) | 0 = 0x08
    prost::encoding::uint64::encode(1, &seq, &mut buf);
    // Field 2: bytes data, wire type 2 (length-delimited), tag = (2 << 3) | 2 = 0x12
    prost::encoding::encode_key(2, prost::encoding::WireType::LengthDelimited, &mut buf);
    prost::encoding::encode_varint(data.len() as u64, &mut buf);
    buf.extend_from_slice(data);
    buf
}
/// Deserialize an Envelope{seq, data} from protobuf wire format.
///
/// Hand-rolled decode loop over prost's low-level `encoding` helpers so the
/// benchmark avoids a prost-build dependency.
fn protobuf_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
    // Decode manually using prost wire format
    let mut seq: u64 = 0;
    let mut data: Vec<u8> = Vec::new();
    let mut buf = bytes;
    while !buf.is_empty() {
        // decode_key advances `buf` past the tag/wire-type varint.
        let (tag, wire_type) =
            prost::encoding::decode_key(&mut buf).expect("decode key");
        // Default::default() supplies prost's DecodeContext — presumably the
        // default recursion-depth tracking; confirm against the prost docs.
        match tag {
            1 => {
                prost::encoding::uint64::merge(wire_type, &mut seq, &mut buf, Default::default())
                    .expect("decode seq");
            }
            2 => {
                prost::encoding::bytes::merge(wire_type, &mut data, &mut buf, Default::default())
                    .expect("decode data");
            }
            _ => {
                // Unknown fields are skipped, per protobuf's
                // forward-compatibility convention.
                prost::encoding::skip_field(wire_type, tag, &mut buf, Default::default())
                    .expect("skip unknown field");
            }
        }
    }
    (seq, data)
}
// ── Benchmarks ──────────────────────────────────────────────────────────────
/// Benchmark envelope serialization: Cap'n Proto vs protobuf, three sizes.
fn bench_serialize(c: &mut Criterion) {
    let mut group = c.benchmark_group("serialize_envelope");
    for &(label, size) in &[("100B", 100usize), ("1KB", 1024), ("4KB", 4096)] {
        let payload = vec![0xABu8; size];
        let seq = 42u64;
        group.bench_with_input(
            BenchmarkId::new("capnp", label),
            &(&seq, &payload),
            |b, &(seq, payload)| {
                b.iter(|| capnp_serialize_envelope(black_box(*seq), black_box(payload)));
            },
        );
        group.bench_with_input(
            BenchmarkId::new("protobuf", label),
            &(&seq, &payload),
            |b, &(seq, payload)| {
                b.iter(|| protobuf_serialize_envelope(black_box(*seq), black_box(payload)));
            },
        );
    }
    group.finish();
}
/// Benchmark envelope deserialization: Cap'n Proto vs protobuf, three sizes.
fn bench_deserialize(c: &mut Criterion) {
    let mut group = c.benchmark_group("deserialize_envelope");
    for &(label, size) in &[("100B", 100usize), ("1KB", 1024), ("4KB", 4096)] {
        // Pre-encode once per size; only decoding is timed.
        let payload = vec![0xABu8; size];
        let seq = 42u64;
        let capnp_bytes = capnp_serialize_envelope(seq, &payload);
        let proto_bytes = protobuf_serialize_envelope(seq, &payload);
        group.bench_with_input(BenchmarkId::new("capnp", label), &capnp_bytes, |b, bytes| {
            b.iter(|| capnp_deserialize_envelope(black_box(bytes)));
        });
        group.bench_with_input(BenchmarkId::new("protobuf", label), &proto_bytes, |b, bytes| {
            b.iter(|| protobuf_deserialize_envelope(black_box(bytes)));
        });
    }
    group.finish();
}
/// Report encoded envelope sizes for both formats.
///
/// The registered "benchmarks" only return `len()`; the useful output is
/// the eprintln size comparison emitted while the group is registered.
fn bench_encoded_sizes(c: &mut Criterion) {
    let mut group = c.benchmark_group("encoded_size");
    for &(label, size) in &[("100B", 100usize), ("1KB", 1024), ("4KB", 4096)] {
        let payload = vec![0xABu8; size];
        let capnp_bytes = capnp_serialize_envelope(42, &payload);
        let proto_bytes = protobuf_serialize_envelope(42, &payload);
        // Use a trivial benchmark that just returns the size -- the point
        // is to get criterion to print the iteration count and allow
        // comparison. The real value is in the eprintln below.
        group.bench_with_input(BenchmarkId::new("capnp", label), &capnp_bytes, |b, bytes| {
            b.iter(|| black_box(bytes.len()));
        });
        group.bench_with_input(BenchmarkId::new("protobuf", label), &proto_bytes, |b, bytes| {
            b.iter(|| black_box(bytes.len()));
        });
        eprintln!(
            " {label}: capnp={} bytes, protobuf={} bytes, overhead={:+} bytes",
            capnp_bytes.len(),
            proto_bytes.len(),
            capnp_bytes.len() as isize - proto_bytes.len() as isize,
        );
    }
    group.finish();
}
// Register every benchmark above and emit the criterion-generated `main`.
criterion_group!(benches, bench_serialize, bench_deserialize, bench_encoded_sizes);
criterion_main!(benches);