chore: rename project quicnprotochat -> quicproquo (binaries: qpq)

Rename the entire workspace:
- Crate packages: quicnprotochat-{core,proto,server,client,gui,p2p,mobile} -> quicproquo-*
- Binary names: quicnprotochat -> qpq, quicnprotochat-server -> qpq-server,
  quicnprotochat-gui -> qpq-gui
- Default files: *-state.bin -> qpq-state.bin, *-server.toml -> qpq-server.toml,
  *.db -> qpq.db
- Environment variable prefix: QUICNPROTOCHAT_* -> QPQ_*
- App identifier: chat.quicnproto.gui -> chat.quicproquo.gui
- Proto package: quicnprotochat.bench -> quicproquo.bench
- All documentation, Docker, CI, and script references updated

HKDF domain-separation strings and P2P ALPN remain unchanged for
backward compatibility with existing encrypted state and wire protocol.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-01 20:11:51 +01:00
parent 553de3a2b7
commit 853ca4fec0
152 changed files with 4070 additions and 788 deletions

View File

@@ -0,0 +1,152 @@
//! Benchmark: Hybrid KEM (X25519 + ML-KEM-768) vs classical-only encryption.
//!
//! Compares keypair generation, encryption, and decryption times for the
//! hybrid post-quantum scheme against classical X25519 + ChaCha20-Poly1305.
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use quicproquo_core::{hybrid_encrypt, hybrid_decrypt, HybridKeypair};
// ── Classical baseline (X25519 + ChaCha20-Poly1305) ─────────────────────────
use chacha20poly1305::{
aead::{Aead, KeyInit},
ChaCha20Poly1305, Key, Nonce,
};
use hkdf::Hkdf;
use rand::{rngs::OsRng, RngCore};
use sha2::Sha256;
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
/// Classical-only baseline keypair: a long-term X25519 secret with its
/// corresponding public key, standing in for the recipient side of the
/// hybrid scheme being compared against.
struct ClassicalKeypair {
    // Long-term X25519 secret used for the DH in classical_decrypt.
    secret: StaticSecret,
    // Matching public key handed to classical_encrypt callers.
    public: X25519Public,
}

impl ClassicalKeypair {
    /// Generate a fresh random X25519 keypair from the OS RNG.
    fn generate() -> Self {
        let secret = StaticSecret::random_from_rng(OsRng);
        let public = X25519Public::from(&secret);
        Self { secret, public }
    }
}
/// Encrypt `plaintext` to `recipient_pk` with ephemeral X25519 ECDH,
/// HKDF-SHA256 key derivation, and ChaCha20-Poly1305.
///
/// Wire layout: eph_pk(32) || nonce(12) || ciphertext.
fn classical_encrypt(recipient_pk: &X25519Public, plaintext: &[u8]) -> Vec<u8> {
    // Fresh ephemeral key per message; the DH shared secret seeds HKDF.
    let ephemeral = EphemeralSecret::random_from_rng(OsRng);
    let ephemeral_pk = X25519Public::from(&ephemeral);
    let dh = ephemeral.diffie_hellman(recipient_pk);

    // Derive the symmetric key with the same domain-separation label the
    // decrypt path uses.
    let mut key = [0u8; 32];
    Hkdf::<Sha256>::new(None, dh.as_bytes())
        .expand(b"classical-bench", &mut key)
        .unwrap();

    // Random 96-bit nonce, prepended to the ciphertext on the wire.
    let mut nonce = [0u8; 12];
    OsRng.fill_bytes(&mut nonce);

    let sealed = ChaCha20Poly1305::new(Key::from_slice(&key))
        .encrypt(Nonce::from_slice(&nonce), plaintext)
        .unwrap();

    // Wire: eph_pk(32) || nonce(12) || ciphertext
    let mut envelope = Vec::with_capacity(32 + 12 + sealed.len());
    envelope.extend_from_slice(ephemeral_pk.as_bytes());
    envelope.extend_from_slice(&nonce);
    envelope.extend_from_slice(&sealed);
    envelope
}
/// Invert `classical_encrypt`: parse eph_pk || nonce || ct, re-derive the
/// key via DH + HKDF, and open the ciphertext.
///
/// Panics on malformed input — acceptable for a bench-only helper.
fn classical_decrypt(keypair: &ClassicalKeypair, envelope: &[u8]) -> Vec<u8> {
    // Carve the fixed-size header off the envelope.
    let (eph_bytes, rest) = envelope.split_at(32);
    let (nonce_bytes, ct) = rest.split_at(12);
    let eph_pk = X25519Public::from(<[u8; 32]>::try_from(eph_bytes).unwrap());

    // Same derivation (and domain-separation label) as the encrypt path.
    let dh = keypair.secret.diffie_hellman(&eph_pk);
    let mut key = [0u8; 32];
    Hkdf::<Sha256>::new(None, dh.as_bytes())
        .expand(b"classical-bench", &mut key)
        .unwrap();

    ChaCha20Poly1305::new(Key::from_slice(&key))
        .decrypt(Nonce::from_slice(nonce_bytes), ct)
        .unwrap()
}
// ── Benchmarks ──────────────────────────────────────────────────────────────
/// Keypair generation: hybrid (X25519 + ML-KEM-768) vs classical X25519.
fn bench_keygen(c: &mut Criterion) {
    let mut g = c.benchmark_group("kem_keygen");
    g.bench_function("hybrid", |b| b.iter(|| black_box(HybridKeypair::generate())));
    g.bench_function("classical", |b| b.iter(|| black_box(ClassicalKeypair::generate())));
    g.finish();
}
/// Encryption throughput for both schemes at several payload sizes.
fn bench_encrypt(c: &mut Criterion) {
    let mut g = c.benchmark_group("kem_encrypt");
    // Long-lived recipient keys shared across all payload sizes.
    let hybrid_kp = HybridKeypair::generate();
    let hybrid_pk = hybrid_kp.public_key();
    let classical_kp = ClassicalKeypair::generate();
    for (label, size) in [("100B", 100usize), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)] {
        let msg = vec![0xABu8; size];
        g.bench_with_input(BenchmarkId::new("hybrid", label), &msg, |b, m| {
            b.iter(|| hybrid_encrypt(&hybrid_pk, black_box(m), b"", b"").unwrap());
        });
        g.bench_with_input(BenchmarkId::new("classical", label), &msg, |b, m| {
            b.iter(|| classical_encrypt(&classical_kp.public, black_box(m)));
        });
    }
    g.finish();
}
/// Decryption throughput for both schemes, using ciphertexts built once
/// outside the measured loop (decryption is stateless).
fn bench_decrypt(c: &mut Criterion) {
    let mut g = c.benchmark_group("kem_decrypt");
    let hybrid_kp = HybridKeypair::generate();
    let hybrid_pk = hybrid_kp.public_key();
    let classical_kp = ClassicalKeypair::generate();
    for (label, size) in [("100B", 100usize), ("1KB", 1024), ("4KB", 4096), ("64KB", 65536)] {
        let msg = vec![0xABu8; size];
        let hybrid_ct = hybrid_encrypt(&hybrid_pk, &msg, b"", b"").unwrap();
        let classical_ct = classical_encrypt(&classical_kp.public, &msg);
        g.bench_with_input(BenchmarkId::new("hybrid", label), &hybrid_ct, |b, ct| {
            b.iter(|| hybrid_decrypt(&hybrid_kp, black_box(ct), b"", b"").unwrap());
        });
        g.bench_with_input(BenchmarkId::new("classical", label), &classical_ct, |b, ct| {
            b.iter(|| classical_decrypt(&classical_kp, black_box(ct)));
        });
    }
    g.finish();
}
// Register all KEM benchmarks; criterion_main! generates the binary's main().
criterion_group!(benches, bench_keygen, bench_encrypt, bench_decrypt);
criterion_main!(benches);

View File

@@ -0,0 +1,132 @@
//! Benchmark: MLS group operations at various group sizes.
//!
//! Measures KeyPackage generation, group creation, member addition,
//! message encryption, and message decryption.
use std::sync::Arc;
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use quicproquo_core::{GroupMember, IdentityKeypair};
/// Create identities and a group of the given size.
/// Returns (creator, Vec<members>).
/// Build an MLS group with `size` total participants.
///
/// The creator makes the group; each additional member generates a
/// KeyPackage, is added by the creator, and joins via the Welcome.
/// Returns (creator, joined members) — `size - 1` members.
fn setup_group(size: usize) -> (GroupMember, Vec<GroupMember>) {
    let mut creator = GroupMember::new(Arc::new(IdentityKeypair::generate()));
    creator.create_group(b"bench-group").unwrap();

    let mut joined = Vec::with_capacity(size.saturating_sub(1));
    for _ in 1..size {
        let mut member = GroupMember::new(Arc::new(IdentityKeypair::generate()));
        let key_package = member.generate_key_package().unwrap();
        let (_commit, welcome) = creator.add_member(&key_package).unwrap();
        member.join_group(&welcome).unwrap();
        joined.push(member);
    }
    (creator, joined)
}
/// KeyPackage generation for a fresh identity. Setup (identity + member
/// construction) is excluded from timing via iter_batched.
fn bench_keygen(c: &mut Criterion) {
    c.bench_function("mls_keygen", |b| {
        b.iter_batched(
            || GroupMember::new(Arc::new(IdentityKeypair::generate())),
            |mut member| {
                member.generate_key_package().unwrap();
            },
            BatchSize::SmallInput,
        );
    });
}
/// Creating a new single-member group; member setup is excluded from timing.
fn bench_group_create(c: &mut Criterion) {
    c.bench_function("mls_group_create", |b| {
        b.iter_batched(
            || GroupMember::new(Arc::new(IdentityKeypair::generate())),
            |mut member| {
                member.create_group(b"bench-group").unwrap();
            },
            BatchSize::SmallInput,
        );
    });
}
/// Adding one member to an existing group of `size` participants.
fn bench_add_member(c: &mut Criterion) {
    let mut g = c.benchmark_group("mls_add_member");
    // Smaller sizes to keep setup time reasonable
    for size in [2, 10, 50] {
        g.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            b.iter_batched(
                || {
                    // add_member mutates group state, so each iteration
                    // needs a fresh group plus a prepared joiner KeyPackage.
                    let (creator, members) = setup_group(size);
                    let mut joiner = GroupMember::new(Arc::new(IdentityKeypair::generate()));
                    let key_package = joiner.generate_key_package().unwrap();
                    (creator, members, joiner, key_package)
                },
                |(mut creator, _members, _joiner, key_package)| {
                    creator.add_member(&key_package).unwrap();
                },
                BatchSize::SmallInput,
            );
        });
    }
    g.finish();
}
/// Encrypting one application message at several group sizes.
fn bench_send_message(c: &mut Criterion) {
    let mut g = c.benchmark_group("mls_send_message");
    for size in [2, 10, 50] {
        g.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            let (mut creator, _members) = setup_group(size);
            b.iter(|| {
                creator.send_message(b"hello benchmark message").unwrap();
            });
        });
    }
    g.finish();
}
/// Decrypting one application message at several group sizes.
fn bench_receive_message(c: &mut Criterion) {
    let mut group = c.benchmark_group("mls_receive_message");
    for size in [2, 10, 50] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &size| {
            // For receive, we need a fresh ciphertext each iteration since
            // MLS message processing is destructive (epoch state changes).
            // We pre-generate a batch and consume them.
            let (mut creator, mut members) = setup_group(size);
            if members.is_empty() {
                // No receiver exists for size < 2; unreachable with the
                // sizes above, kept as a defensive guard.
                return;
            }
            let payload = b"hello benchmark message";
            b.iter_batched(
                || creator.send_message(payload).unwrap(),
                |ct| {
                    // Receive on the first joiner. The result is deliberately
                    // discarded: only the processing cost is being measured.
                    let _ = members[0].receive_message(&ct);
                },
                BatchSize::SmallInput,
            );
        });
    }
    group.finish();
}
// Register all MLS benchmarks; criterion_main! generates the binary's main().
criterion_group!(
    benches,
    bench_keygen,
    bench_group_create,
    bench_add_member,
    bench_send_message,
    bench_receive_message,
);
criterion_main!(benches);

View File

@@ -0,0 +1,170 @@
//! Benchmark: Cap'n Proto vs Protobuf serialization for chat message envelopes.
//!
//! Compares serialization/deserialization speed and encoded size at three
//! payload sizes (100 B, 1 KB, 4 KB) for a typical Envelope{seq, data} message.
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
// ── Cap'n Proto path ────────────────────────────────────────────────────────
/// Encode an Envelope{seq, data} with Cap'n Proto and return the framed bytes.
fn capnp_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
    let mut builder = capnp::message::Builder::new_default();
    {
        // Scope the root borrow so `builder` can be serialized afterwards.
        let mut root = builder.init_root::<quicproquo_proto::node_capnp::envelope::Builder>();
        root.set_seq(seq);
        root.set_data(data);
    }
    quicproquo_proto::to_bytes(&builder).unwrap()
}
/// Decode a Cap'n Proto Envelope, returning (seq, owned copy of data).
fn capnp_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
    let message = quicproquo_proto::from_bytes(bytes).unwrap();
    let root = message
        .get_root::<quicproquo_proto::node_capnp::envelope::Reader>()
        .unwrap();
    let seq = root.get_seq();
    let data = root.get_data().unwrap().to_vec();
    (seq, data)
}
// ── Protobuf path (hand-coded prost encoding to avoid build-dep) ────────────
//
// Envelope { seq: uint64 (field 1), data: bytes (field 2) }
// Wire format: varint tag + varint seq + len-delimited data
/// Encode Envelope{seq, data} in protobuf wire format via prost's encoding
/// primitives (no codegen/build-dep needed).
///
/// Field 1: uint64 seq, wire type 0 (varint), tag = (1 << 3) | 0 = 0x08
/// Field 2: bytes data, wire type 2 (length-delimited), tag = (2 << 3) | 2 = 0x12
fn protobuf_serialize_envelope(seq: u64, data: &[u8]) -> Vec<u8> {
    let mut buf = Vec::with_capacity(10 + data.len());
    // Encode field 1 (seq).
    prost::encoding::uint64::encode(1, &seq, &mut buf);
    // Encode field 2 (data) by hand: key, length prefix, then the raw bytes.
    // The previous `bytes::encode(2, &data.to_vec(), ..)` cloned the entire
    // payload on every call, charging the protobuf path an extra O(n)
    // allocation+copy the Cap'n Proto path does not pay — which skews the
    // benchmark. Emitting identical wire bytes directly avoids the clone.
    prost::encoding::encode_key(2, prost::encoding::WireType::LengthDelimited, &mut buf);
    prost::encoding::encode_varint(data.len() as u64, &mut buf);
    buf.extend_from_slice(data);
    buf
}
/// Decode a protobuf Envelope produced by `protobuf_serialize_envelope`,
/// skipping any unknown fields, and return (seq, data).
fn protobuf_deserialize_envelope(bytes: &[u8]) -> (u64, Vec<u8>) {
    let mut seq: u64 = 0;
    let mut data: Vec<u8> = Vec::new();
    let mut remaining = bytes;
    // Walk key/value pairs until the buffer is exhausted.
    while !remaining.is_empty() {
        let (field, wire_type) =
            prost::encoding::decode_key(&mut remaining).expect("decode key");
        match field {
            1 => {
                prost::encoding::uint64::merge(wire_type, &mut seq, &mut remaining, Default::default())
                    .expect("decode seq");
            }
            2 => {
                prost::encoding::bytes::merge(wire_type, &mut data, &mut remaining, Default::default())
                    .expect("decode data");
            }
            other => {
                prost::encoding::skip_field(wire_type, other, &mut remaining, Default::default())
                    .expect("skip unknown field");
            }
        }
    }
    (seq, data)
}
// ── Benchmarks ──────────────────────────────────────────────────────────────
/// Serialization throughput for both formats at three payload sizes.
fn bench_serialize(c: &mut Criterion) {
    let mut g = c.benchmark_group("serialize_envelope");
    for (label, size) in [("100B", 100usize), ("1KB", 1024), ("4KB", 4096)] {
        let payload = vec![0xABu8; size];
        let seq = 42u64;
        g.bench_with_input(
            BenchmarkId::new("capnp", label),
            &(&seq, &payload),
            |b, &(seq, payload)| {
                b.iter(|| capnp_serialize_envelope(black_box(*seq), black_box(payload)));
            },
        );
        g.bench_with_input(
            BenchmarkId::new("protobuf", label),
            &(&seq, &payload),
            |b, &(seq, payload)| {
                b.iter(|| protobuf_serialize_envelope(black_box(*seq), black_box(payload)));
            },
        );
    }
    g.finish();
}
/// Deserialization throughput, using envelopes encoded once outside timing.
fn bench_deserialize(c: &mut Criterion) {
    let mut g = c.benchmark_group("deserialize_envelope");
    for (label, size) in [("100B", 100usize), ("1KB", 1024), ("4KB", 4096)] {
        let payload = vec![0xABu8; size];
        let seq = 42u64;
        let capnp_bytes = capnp_serialize_envelope(seq, &payload);
        let proto_bytes = protobuf_serialize_envelope(seq, &payload);
        g.bench_with_input(BenchmarkId::new("capnp", label), &capnp_bytes, |b, bytes| {
            b.iter(|| capnp_deserialize_envelope(black_box(bytes)));
        });
        g.bench_with_input(BenchmarkId::new("protobuf", label), &proto_bytes, |b, bytes| {
            b.iter(|| protobuf_deserialize_envelope(black_box(bytes)));
        });
    }
    g.finish();
}
/// Report encoded sizes for both formats at three payload sizes.
///
/// The bench bodies are trivial (`len()`); they exist only so criterion
/// lists matching entries for comparison. The interesting numbers are the
/// eprintln size report emitted during setup.
fn bench_encoded_sizes(c: &mut Criterion) {
    let mut g = c.benchmark_group("encoded_size");
    for (label, size) in [("100B", 100usize), ("1KB", 1024), ("4KB", 4096)] {
        let payload = vec![0xABu8; size];
        let capnp_bytes = capnp_serialize_envelope(42, &payload);
        let proto_bytes = protobuf_serialize_envelope(42, &payload);
        g.bench_with_input(BenchmarkId::new("capnp", label), &capnp_bytes, |b, bytes| {
            b.iter(|| black_box(bytes.len()));
        });
        g.bench_with_input(BenchmarkId::new("protobuf", label), &proto_bytes, |b, bytes| {
            b.iter(|| black_box(bytes.len()));
        });
        eprintln!(
            " {label}: capnp={} bytes, protobuf={} bytes, overhead={:+} bytes",
            capnp_bytes.len(),
            proto_bytes.len(),
            capnp_bytes.len() as isize - proto_bytes.len() as isize,
        );
    }
    g.finish();
}
// Register all serialization benchmarks; criterion_main! generates main().
criterion_group!(benches, bench_serialize, bench_deserialize, bench_encoded_sizes);
criterion_main!(benches);