test: add unit tests for RPC framing, SDK state machine, and server domain services

Add comprehensive tests across three layers:
- RPC framing: empty payloads, max boundary, truncated frames, multi-frame buffers,
  all status codes, all method ID ranges, payload-too-large for response/push
- SDK: event broadcast send/receive, multiple subscribers, clone preservation,
  conversation upsert, missing conversation, message ID roundtrip, member keys
- Server domain: auth session validation/expiry, channel creation/symmetry/validation,
  delivery peek/ack/sequence ordering/fetch-limited, key package upload/fetch/validation,
  hybrid key batch fetch, size boundary tests
- CI: MSRV (1.75) check job, macOS cross-platform build check
This commit is contained in:
2026-03-08 18:07:43 +01:00
parent e4c5868b31
commit 872695e5f1
8 changed files with 1114 additions and 0 deletions

View File

@@ -113,6 +113,57 @@ jobs:
name: coverage-report
path: coverage/cobertura.xml
msrv:
name: MSRV Check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install MSRV Rust (1.75)
uses: dtolnay/rust-toolchain@1.75
with:
components: clippy
- name: Install capnp
run: sudo apt-get update && sudo apt-get install -y capnproto
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-msrv-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-msrv-
- name: Check MSRV
run: cargo check --workspace
macos:
name: macOS Build Check
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
- name: Check build
run: cargo check --workspace
docker:
name: Docker Build
runs-on: ubuntu-latest

View File

@@ -277,4 +277,241 @@ mod tests {
let decoded = RequestFrame::decode(&mut buf).expect("decode").expect("complete");
assert!(decoded.payload.is_empty());
}
// ── Additional framing tests ────────────────────────────────────────────
#[test]
fn empty_payload_response() {
    // A response carrying no payload encodes to exactly one header.
    let original = ResponseFrame {
        status: RpcStatus::NotFound as u8,
        request_id: 999,
        payload: Bytes::new(),
    };
    let wire = original.encode();
    assert_eq!(wire.len(), RESPONSE_HEADER_SIZE);

    // Decoding the header-only frame restores every field with an empty body.
    let mut cursor = BytesMut::from(wire.as_ref());
    let roundtripped = ResponseFrame::decode(&mut cursor)
        .expect("decode")
        .expect("complete");
    assert_eq!(roundtripped.request_id, 999);
    assert_eq!(roundtripped.status, RpcStatus::NotFound as u8);
    assert!(roundtripped.payload.is_empty());
}
#[test]
fn empty_payload_push() {
    // A push carrying no payload is exactly one header on the wire.
    let original = PushFrame {
        event_type: 0,
        payload: Bytes::new(),
    };
    let wire = original.encode();
    assert_eq!(wire.len(), PUSH_HEADER_SIZE);

    let mut cursor = BytesMut::from(wire.as_ref());
    let roundtripped = PushFrame::decode(&mut cursor)
        .expect("decode")
        .expect("complete");
    assert_eq!(roundtripped.event_type, 0);
    assert!(roundtripped.payload.is_empty());
}
#[test]
fn max_boundary_payload_request() {
    // Exactly MAX_PAYLOAD_SIZE must be accepted: the size limit is inclusive.
    let payload = vec![0xABu8; MAX_PAYLOAD_SIZE];
    let frame = RequestFrame {
        method_id: 1,
        request_id: 1,
        // Move the Vec straight into Bytes — the original cloned it here but
        // never used the copy again, so the clone was a wasted allocation.
        payload: Bytes::from(payload),
    };
    let encoded = frame.encode();
    assert_eq!(encoded.len(), REQUEST_HEADER_SIZE + MAX_PAYLOAD_SIZE);
    let mut buf = BytesMut::from(encoded.as_ref());
    let decoded = RequestFrame::decode(&mut buf).expect("decode").expect("complete");
    assert_eq!(decoded.payload.len(), MAX_PAYLOAD_SIZE);
    // Spot-check that the content survived the roundtrip at both ends.
    assert_eq!(decoded.payload[0], 0xAB);
    assert_eq!(decoded.payload[MAX_PAYLOAD_SIZE - 1], 0xAB);
}
#[test]
fn response_payload_too_large_rejected() {
    // Hand-craft a response header whose declared length is one over the cap.
    let oversize = (MAX_PAYLOAD_SIZE + 1) as u32;
    let mut wire = BytesMut::new();
    wire.put_u8(0); // status OK
    wire.put_u32(1); // request_id
    wire.put_u32(oversize); // declared payload length
    assert!(matches!(
        ResponseFrame::decode(&mut wire),
        Err(RpcError::PayloadTooLarge { .. })
    ));
}
#[test]
fn push_payload_too_large_rejected() {
    // A push header declaring one byte over the cap must be rejected.
    let mut wire = BytesMut::new();
    wire.put_u16(1); // event_type
    wire.put_u32((MAX_PAYLOAD_SIZE + 1) as u32); // declared payload length
    assert!(matches!(
        PushFrame::decode(&mut wire),
        Err(RpcError::PayloadTooLarge { .. })
    ));
}
#[test]
fn incomplete_response_returns_none() {
    // Fewer bytes than a full response header: decode must report "not yet"
    // (Ok(None)) rather than an error.
    let partial = [0u8; 4];
    let mut buf = BytesMut::from(&partial[..]);
    let outcome = ResponseFrame::decode(&mut buf).expect("no error");
    assert!(outcome.is_none());
}
#[test]
fn incomplete_push_returns_none() {
    // Fewer bytes than a push header: decode reports "need more data".
    let partial = [0u8; 3];
    let mut buf = BytesMut::from(&partial[..]);
    let outcome = PushFrame::decode(&mut buf).expect("no error");
    assert!(outcome.is_none());
}
#[test]
// A complete header with a short payload must yield Ok(None) and leave the
// buffer unconsumed, so the caller can retry decode once more bytes arrive.
fn request_header_present_but_payload_incomplete() {
// Full header but payload not yet received
let frame = RequestFrame {
method_id: 10,
request_id: 20,
payload: Bytes::from_static(b"abcdefgh"),
};
let encoded = frame.encode();
// Truncate to header + 3 bytes of payload (need 8)
let mut buf = BytesMut::from(&encoded[..REQUEST_HEADER_SIZE + 3]);
assert!(RequestFrame::decode(&mut buf).expect("no error").is_none());
// Buffer should be untouched (not consumed)
assert_eq!(buf.len(), REQUEST_HEADER_SIZE + 3);
}
#[test]
// Same contract as the request-frame variant: a truncated payload yields
// Ok(None) and must not consume any buffered bytes.
fn response_header_present_but_payload_incomplete() {
    let frame = ResponseFrame {
        status: 0,
        request_id: 1,
        payload: Bytes::from_static(b"abcdefgh"),
    };
    let encoded = frame.encode();
    let mut buf = BytesMut::from(&encoded[..RESPONSE_HEADER_SIZE + 2]);
    assert!(ResponseFrame::decode(&mut buf).expect("no error").is_none());
    // Consistency with the request test: the partial frame is left in place so
    // decoding can resume once the rest of the payload arrives.
    assert_eq!(buf.len(), RESPONSE_HEADER_SIZE + 2);
}
#[test]
// Same contract as the request-frame variant: a truncated payload yields
// Ok(None) and must not consume any buffered bytes.
fn push_header_present_but_payload_incomplete() {
    let frame = PushFrame {
        event_type: 1,
        payload: Bytes::from_static(b"abcdefgh"),
    };
    let encoded = frame.encode();
    let mut buf = BytesMut::from(&encoded[..PUSH_HEADER_SIZE + 2]);
    assert!(PushFrame::decode(&mut buf).expect("no error").is_none());
    // Consistency with the request test: the partial frame is left in place so
    // decoding can resume once the rest of the payload arrives.
    assert_eq!(buf.len(), PUSH_HEADER_SIZE + 2);
}
#[test]
fn request_zero_length_prefix() {
    // A header advertising payload_len == 0 is a valid, empty-payload frame.
    let mut wire = BytesMut::new();
    wire.put_u16(5); // method_id
    wire.put_u32(10); // request_id
    wire.put_u32(0); // payload_len = 0
    let frame = RequestFrame::decode(&mut wire)
        .expect("decode")
        .expect("complete");
    assert!(frame.payload.is_empty());
    assert_eq!((frame.method_id, frame.request_id), (5, 10));
}
#[test]
fn response_rpc_status_conversion() {
    // Small builder for a payload-less response with an arbitrary status byte.
    let respond = |status: u8| ResponseFrame {
        status,
        request_id: 1,
        payload: Bytes::new(),
    };
    // A known discriminant maps back to its enum variant…
    assert_eq!(
        respond(RpcStatus::Unauthorized as u8).rpc_status(),
        Some(RpcStatus::Unauthorized)
    );
    // …while an out-of-range byte maps to None instead of panicking.
    assert_eq!(respond(255).rpc_status(), None);
}
#[test]
fn all_method_ids_roundtrip() {
    // Sample method IDs across the whole u16 range, including both extremes.
    for mid in [0u16, 1, 100, 200, 300, 400, 500, 1000, u16::MAX] {
        let wire = RequestFrame {
            method_id: mid,
            request_id: mid as u32,
            payload: Bytes::from_static(b"x"),
        }
        .encode();
        let mut buf = BytesMut::from(wire.as_ref());
        let back = RequestFrame::decode(&mut buf).unwrap().unwrap();
        assert_eq!((back.method_id, back.request_id), (mid, mid as u32));
    }
}
#[test]
fn all_rpc_status_values_roundtrip() {
    // Every defined status code must survive encode → decode and convert back
    // to the same enum variant via rpc_status().
    let statuses = [
        RpcStatus::Ok,
        RpcStatus::BadRequest,
        RpcStatus::Unauthorized,
        RpcStatus::Forbidden,
        RpcStatus::NotFound,
        RpcStatus::RateLimited,
        RpcStatus::DeadlineExceeded,
        RpcStatus::Unavailable,
        RpcStatus::Internal,
        RpcStatus::UnknownMethod,
    ];
    for status in statuses {
        let wire = ResponseFrame {
            status: status as u8,
            request_id: 1,
            payload: Bytes::new(),
        }
        .encode();
        let mut buf = BytesMut::from(wire.as_ref());
        let decoded = ResponseFrame::decode(&mut buf).unwrap().unwrap();
        assert_eq!(decoded.rpc_status(), Some(status));
    }
}
#[test]
fn request_max_request_id() {
    // A request_id at the top of the u32 range must not be truncated on the wire.
    let wire = RequestFrame {
        method_id: 1,
        request_id: u32::MAX,
        payload: Bytes::from_static(b"max-id"),
    }
    .encode();
    let mut buf = BytesMut::from(wire.as_ref());
    let back = RequestFrame::decode(&mut buf).unwrap().unwrap();
    assert_eq!(back.request_id, u32::MAX);
}
#[test]
fn multiple_frames_in_buffer() {
    // Two back-to-back frames in one buffer decode in order, leaving the
    // buffer empty afterwards (the streaming / pipelining case).
    let specs: [(u16, u32, &[u8]); 2] = [(1, 10, b"first"), (2, 20, b"second")];
    let mut buf = BytesMut::new();
    for &(method_id, request_id, body) in &specs {
        let frame = RequestFrame {
            method_id,
            request_id,
            payload: Bytes::copy_from_slice(body),
        };
        buf.extend_from_slice(&frame.encode());
    }
    for &(method_id, _request_id, body) in &specs {
        let decoded = RequestFrame::decode(&mut buf).unwrap().unwrap();
        assert_eq!(decoded.method_id, method_id);
        assert_eq!(decoded.payload, Bytes::copy_from_slice(body));
    }
    assert!(buf.is_empty());
}
}

View File

@@ -1038,4 +1038,83 @@ mod tests {
// Returns 0 for unknown conversations.
assert_eq!(store.get_last_seen_seq(&missing).unwrap(), 0);
}
#[test]
// Saving under an existing conversation id is an upsert: mutable fields
// (display name, last activity, unread count, hybrid flag) are overwritten,
// not duplicated.
fn upsert_conversation_updates_fields() {
let (_dir, store) = open_test_store();
let mut conv = make_group_conv("updatable", 1000);
store.save_conversation(&conv).unwrap();
// Update display name and activity
conv.display_name = "#updated".to_string();
conv.last_activity_ms = 9000;
conv.unread_count = 5;
conv.is_hybrid = true;
store.save_conversation(&conv).unwrap();
let loaded = store.load_conversation(&conv.id).unwrap().unwrap();
assert_eq!(loaded.display_name, "#updated");
assert_eq!(loaded.last_activity_ms, 9000);
assert_eq!(loaded.unread_count, 5);
assert!(loaded.is_hybrid);
}
#[test]
fn load_missing_conversation_returns_none() {
    // Looking up an id that was never saved yields None, not an error.
    let (_dir, store) = open_test_store();
    let unknown = ConversationId([0xFFu8; 16]);
    let result = store.load_conversation(&unknown).unwrap();
    assert!(result.is_none());
}
#[test]
fn conversation_id_hex_encoding() {
    // 16 raw bytes encode to 32 lowercase hex characters.
    let id = ConversationId([0xAB; 16]);
    let hex = id.hex();
    assert_eq!(hex.len(), 32);
    assert_eq!(hex, "ab".repeat(16));
}
#[test]
// Optional message metadata (message_id, ref_msg_id for replies, outgoing
// flag) must survive a save → load roundtrip intact.
fn save_message_with_message_id_and_ref() {
let (_dir, store) = open_test_store();
let conv = make_group_conv("msg-ids", 1000);
store.save_conversation(&conv).unwrap();
let msg_id = [42u8; 16];
let ref_id = [99u8; 16];
store.save_message(&StoredMessage {
conversation_id: conv.id.clone(),
message_id: Some(msg_id),
sender_key: vec![1, 2, 3],
sender_name: Some("alice".to_string()),
body: "reply".to_string(),
msg_type: "chat".to_string(),
ref_msg_id: Some(ref_id),
timestamp_ms: 5000,
is_outgoing: true,
}).unwrap();
let msgs = store.load_recent_messages(&conv.id, 10).unwrap();
assert_eq!(msgs.len(), 1);
assert_eq!(msgs[0].message_id, Some(msg_id));
assert_eq!(msgs[0].ref_msg_id, Some(ref_id));
assert!(msgs[0].is_outgoing);
}
#[test]
fn member_keys_serialization_roundtrip() {
    // A conversation's member keys (Vec of 32-byte keys) must survive
    // save → load unchanged and in order.
    let (_dir, store) = open_test_store();
    let mut conv = make_group_conv("member-keys", 1000);
    conv.member_keys = (1u8..=3).map(|b| vec![b; 32]).collect();
    store.save_conversation(&conv).unwrap();
    let loaded = store.load_conversation(&conv.id).unwrap().unwrap();
    assert_eq!(loaded.member_keys.len(), 3);
    for (i, key) in loaded.member_keys.iter().enumerate() {
        assert_eq!(key, &vec![(i + 1) as u8; 32]);
    }
}
}

View File

@@ -85,3 +85,145 @@ pub enum ClientEvent {
/// An error occurred in the background.
Error { message: String },
}
#[cfg(test)]
// Tests for the ClientEvent enum: broadcast delivery, fan-out to multiple
// subscribers, Clone/Debug behavior, and variant coverage.
mod tests {
use super::*;
use tokio::sync::broadcast;
#[test]
// Events pushed into a broadcast channel arrive in send order with their
// payloads intact.
fn event_broadcast_send_receive() {
let (tx, mut rx) = broadcast::channel::<ClientEvent>(16);
tx.send(ClientEvent::Connected).unwrap();
tx.send(ClientEvent::Disconnected {
reason: "test".into(),
})
.unwrap();
let e1 = rx.try_recv().unwrap();
assert!(matches!(e1, ClientEvent::Connected));
let e2 = rx.try_recv().unwrap();
assert!(matches!(e2, ClientEvent::Disconnected { reason } if reason == "test"));
}
#[test]
// Every subscriber of the broadcast channel sees its own copy of each event.
fn event_broadcast_multiple_subscribers() {
let (tx, mut rx1) = broadcast::channel::<ClientEvent>(16);
let mut rx2 = tx.subscribe();
tx.send(ClientEvent::Registered {
username: "alice".into(),
})
.unwrap();
let e1 = rx1.try_recv().unwrap();
let e2 = rx2.try_recv().unwrap();
assert!(matches!(e1, ClientEvent::Registered { username } if username == "alice"));
assert!(matches!(e2, ClientEvent::Registered { username } if username == "alice"));
}
#[test]
// Sending into a channel whose only receiver was dropped returns Err rather
// than panicking — callers can safely ignore the result.
fn event_no_subscribers_does_not_panic() {
let (tx, _) = broadcast::channel::<ClientEvent>(16);
// Send with no active receiver — should return Err but not panic.
let result = tx.send(ClientEvent::Connected);
assert!(result.is_err()); // no receivers
}
#[test]
// Clone is a deep copy: every field of a data-carrying variant survives.
fn event_clone_preserves_data() {
let event = ClientEvent::MessageReceived {
conversation_id: [1u8; 16],
sender_key: vec![2u8; 32],
sender_name: Some("bob".into()),
body: "hello world".into(),
timestamp_ms: 12345,
};
let cloned = event.clone();
match cloned {
ClientEvent::MessageReceived {
conversation_id,
sender_key,
sender_name,
body,
timestamp_ms,
} => {
assert_eq!(conversation_id, [1u8; 16]);
assert_eq!(sender_key, vec![2u8; 32]);
assert_eq!(sender_name, Some("bob".to_string()));
assert_eq!(body, "hello world");
assert_eq!(timestamp_ms, 12345);
}
_ => panic!("wrong variant after clone"),
}
}
#[test]
// The Debug representation includes field contents (useful for logging).
fn event_debug_format() {
let event = ClientEvent::Error {
message: "something went wrong".into(),
};
let dbg = format!("{event:?}");
assert!(dbg.contains("something went wrong"));
}
#[test]
// Exhaustively constructs one value of every variant and clones each; the
// final count doubles as a reminder to extend this list when adding variants.
fn all_event_variants_are_clone() {
// Verify all variants can be cloned without issue.
let events: Vec<ClientEvent> = vec![
ClientEvent::Connected,
ClientEvent::Disconnected { reason: "r".into() },
ClientEvent::Reconnecting { attempt: 1 },
ClientEvent::Registered { username: "u".into() },
ClientEvent::LoggedIn { username: "u".into() },
ClientEvent::LoggedOut { username: "u".into() },
ClientEvent::Authenticated { username: "u".into() },
ClientEvent::MessageReceived {
conversation_id: [0; 16],
sender_key: vec![],
sender_name: None,
body: "b".into(),
timestamp_ms: 0,
},
ClientEvent::MessageSent {
conversation_id: [0; 16],
seq: 0,
},
ClientEvent::ConversationCreated {
conversation_id: [0; 16],
display_name: "d".into(),
},
ClientEvent::MemberAdded {
conversation_id: [0; 16],
member_key: vec![],
},
ClientEvent::MemberRemoved {
conversation_id: [0; 16],
member_key: vec![],
},
ClientEvent::PushEvent {
event_type: 0,
payload: vec![],
},
ClientEvent::MessageQueued {
outbox_id: 0,
conversation_id: [0; 16],
},
ClientEvent::OutboxFlushed { sent: 0, failed: 0 },
ClientEvent::MessageGap {
conversation_id: [0; 16],
expected_seq: 0,
received_seq: 1,
},
ClientEvent::Error { message: "e".into() },
];
for event in &events {
let _ = event.clone();
}
assert_eq!(events.len(), 17);
}
}

View File

@@ -70,3 +70,97 @@ impl AuthService {
Ok(RegisterFinishResp { success: true })
}
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Builds an AuthService over a throwaway on-disk store.
    ///
    /// Returns the TempDir alongside the service. The previous version dropped
    /// the TempDir inside this helper, deleting the backing directory while
    /// the FileBackedStore was still live — tests only passed because session
    /// checks happen to stay in memory. Every other test module in this change
    /// returns the guard; this now matches them.
    fn test_service() -> (tempfile::TempDir, AuthService) {
        let dir = tempfile::tempdir().unwrap();
        let store = Arc::new(crate::storage::FileBackedStore::open(dir.path()).unwrap());
        let mut rng = rand::rngs::OsRng;
        let opaque_setup = ServerSetup::<OpaqueSuite>::new(&mut rng);
        let svc = AuthService {
            store,
            opaque_setup: Arc::new(opaque_setup),
            pending_logins: Arc::new(DashMap::new()),
            sessions: Arc::new(DashMap::new()),
            auth_cfg: Arc::new(AuthConfig {
                required_token: None,
                allow_insecure_identity_from_request: false,
            }),
        };
        (dir, svc)
    }

    #[test]
    // A session whose expiry lies in the future validates and exposes the
    // caller's identity key and token; no device id was set.
    fn validate_session_valid_token() {
        let (_dir, svc) = test_service();
        let token = vec![1u8; 16];
        let ik = vec![2u8; 32];
        svc.sessions.insert(
            token.clone(),
            SessionInfo {
                username: "alice".to_string(),
                identity_key: ik.clone(),
                created_at: crate::auth::current_timestamp(),
                expires_at: crate::auth::current_timestamp() + 3600,
            },
        );
        let auth = svc.validate_session(&token).unwrap();
        assert_eq!(auth.identity_key, ik);
        assert_eq!(auth.token, token);
        assert!(auth.device_id.is_none());
    }

    #[test]
    // An expired session fails validation and is evicted from the map.
    fn validate_session_expired_token() {
        let (_dir, svc) = test_service();
        let token = vec![3u8; 16];
        svc.sessions.insert(
            token.clone(),
            SessionInfo {
                username: "bob".to_string(),
                identity_key: vec![4u8; 32],
                created_at: 0,
                expires_at: 0, // already expired
            },
        );
        assert!(svc.validate_session(&token).is_none());
        // Expired session should be removed from the map
        assert!(!svc.sessions.contains_key(&token));
    }

    #[test]
    // Tokens that were never issued simply fail validation.
    fn validate_session_missing_token() {
        let (_dir, svc) = test_service();
        assert!(svc.validate_session(&[0u8; 16]).is_none());
    }

    #[test]
    // Validation lazily garbage-collects expired sessions on access.
    fn validate_session_removes_expired_on_check() {
        let (_dir, svc) = test_service();
        let token = vec![5u8; 16];
        svc.sessions.insert(
            token.clone(),
            SessionInfo {
                username: "eve".to_string(),
                identity_key: vec![6u8; 32],
                created_at: 0,
                expires_at: 1, // expired long ago
            },
        );
        // First check: returns None and removes
        assert!(svc.validate_session(&token).is_none());
        // Session should be gone
        assert!(svc.sessions.is_empty());
    }
}

View File

@@ -36,3 +36,96 @@ impl ChannelService {
})
}
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use crate::storage::FileBackedStore;

    /// Spins up a ChannelService backed by a throwaway on-disk store.
    /// The TempDir is handed back so it outlives the test body.
    fn test_service() -> (tempfile::TempDir, ChannelService) {
        let dir = tempfile::tempdir().unwrap();
        let store = Arc::new(FileBackedStore::open(dir.path()).unwrap());
        (dir, ChannelService { store })
    }

    #[test]
    fn create_channel_success() {
        // First channel between two identities: freshly created, 16-byte id.
        let (_dir, svc) = test_service();
        let caller = vec![1u8; 32];
        let req = CreateChannelReq { peer_key: vec![2u8; 32] };
        let resp = svc.create_channel(req, &caller).unwrap();
        assert!(resp.was_new);
        assert_eq!(resp.channel_id.len(), 16);
    }

    #[test]
    fn create_channel_idempotent() {
        // Re-creating the same channel returns the same id, and only the
        // first call reports was_new.
        let (_dir, svc) = test_service();
        let caller = vec![1u8; 32];
        let peer = vec![2u8; 32];
        let first = svc
            .create_channel(CreateChannelReq { peer_key: peer.clone() }, &caller)
            .unwrap();
        let second = svc
            .create_channel(CreateChannelReq { peer_key: peer }, &caller)
            .unwrap();
        assert!(first.was_new);
        assert!(!second.was_new);
        assert_eq!(first.channel_id, second.channel_id);
    }

    #[test]
    fn create_channel_symmetric() {
        // The channel id is direction-independent: (a → b) == (b → a).
        let (_dir, svc) = test_service();
        let a = vec![1u8; 32];
        let b = vec![2u8; 32];
        let from_a = svc
            .create_channel(CreateChannelReq { peer_key: b.clone() }, &a)
            .unwrap();
        let from_b = svc
            .create_channel(CreateChannelReq { peer_key: a.clone() }, &b)
            .unwrap();
        assert_eq!(from_a.channel_id, from_b.channel_id);
    }

    #[test]
    fn create_channel_rejects_invalid_peer_key_length() {
        // Identity keys must be exactly 32 bytes; the error echoes the
        // offending length (31, 33, and 0 are all rejected).
        let (_dir, svc) = test_service();
        let caller = vec![1u8; 32];
        for bad_len in [31usize, 33, 0] {
            let req = CreateChannelReq { peer_key: vec![1u8; bad_len] };
            let err = svc.create_channel(req, &caller).unwrap_err();
            assert!(matches!(
                err,
                DomainError::InvalidIdentityKey(n) if (n as usize) == bad_len
            ));
        }
    }

    #[test]
    fn create_channel_rejects_self_channel() {
        // A caller may not open a channel with itself.
        let (_dir, svc) = test_service();
        let me = vec![5u8; 32];
        let req = CreateChannelReq { peer_key: me.clone() };
        let err = svc.create_channel(req, &me).unwrap_err();
        assert!(matches!(err, DomainError::BadParams(_)));
    }
}

View File

@@ -353,4 +353,184 @@ mod tests {
let bare = device_recipient_key(&ik, &[]);
assert_eq!(bare, ik);
}
#[test]
// peek() is non-destructive: repeated peeks observe the same queued message,
// and only fetch() actually drains the queue.
fn peek_does_not_drain() {
let (_dir, svc) = test_service();
let ik = vec![10u8; 32];
let ch = vec![0u8; 16];
svc.enqueue(EnqueueReq {
recipient_key: ik.clone(),
payload: b"peek-me".to_vec(),
channel_id: ch.clone(),
ttl_secs: 0,
})
.unwrap();
// Peek should return the message without removing it.
let peeked = svc
.peek(PeekReq {
recipient_key: ik.clone(),
channel_id: ch.clone(),
limit: 10,
})
.unwrap();
assert_eq!(peeked.payloads.len(), 1);
assert_eq!(peeked.payloads[0].data, b"peek-me");
// Peek again — still there.
let peeked2 = svc
.peek(PeekReq {
recipient_key: ik.clone(),
channel_id: ch.clone(),
limit: 10,
})
.unwrap();
assert_eq!(peeked2.payloads.len(), 1);
// Fetch drains it.
let fetched = svc
.fetch(FetchReq {
recipient_key: ik.clone(),
channel_id: ch.clone(),
limit: 10,
})
.unwrap();
assert_eq!(fetched.payloads.len(), 1);
// Now peek returns empty.
let peeked3 = svc
.peek(PeekReq {
recipient_key: ik,
channel_id: ch,
limit: 10,
})
.unwrap();
assert!(peeked3.payloads.is_empty());
}
#[test]
// Acking with seq_up_to is inclusive: every message whose sequence number is
// <= the acked value is removed, later ones are retained.
fn ack_removes_messages_up_to_seq() {
let (_dir, svc) = test_service();
let ik = vec![11u8; 32];
let ch = vec![0u8; 16];
// Enqueue 3 messages (use peek to verify without draining).
for i in 0..3 {
svc.enqueue(EnqueueReq {
recipient_key: ik.clone(),
payload: format!("msg-{i}").into_bytes(),
channel_id: ch.clone(),
ttl_secs: 0,
})
.unwrap();
}
let all = svc
.peek(PeekReq {
recipient_key: ik.clone(),
channel_id: ch.clone(),
limit: 10,
})
.unwrap();
assert_eq!(all.payloads.len(), 3);
// Ack up to seq of the second message.
let ack_seq = all.payloads[1].seq;
svc.ack(AckReq {
recipient_key: ik.clone(),
channel_id: ch.clone(),
seq_up_to: ack_seq,
})
.unwrap();
// Only the third message should remain.
let remaining = svc
.peek(PeekReq {
recipient_key: ik,
channel_id: ch,
limit: 10,
})
.unwrap();
assert_eq!(remaining.payloads.len(), 1);
assert_eq!(remaining.payloads[0].data, b"msg-2");
}
#[test]
fn fetch_empty_queue() {
    // Fetching from a recipient/channel that never had traffic yields an
    // empty result rather than an error.
    let (_dir, svc) = test_service();
    let req = FetchReq {
        recipient_key: vec![20u8; 32],
        channel_id: vec![0u8; 16],
        limit: 0,
    };
    let resp = svc.fetch(req).unwrap();
    assert!(resp.payloads.is_empty());
}
#[test]
fn enqueue_sequence_numbers_increase() {
    // Sequence numbers are strictly monotonic within a recipient/channel queue.
    let (_dir, svc) = test_service();
    let ik = vec![30u8; 32];
    let ch = vec![0u8; 16];
    let seqs: Vec<_> = [b"a".to_vec(), b"b".to_vec(), b"c".to_vec()]
        .into_iter()
        .map(|payload| {
            svc.enqueue(EnqueueReq {
                recipient_key: ik.clone(),
                payload,
                channel_id: ch.clone(),
                ttl_secs: 0,
            })
            .unwrap()
            .seq
        })
        .collect();
    // Each enqueue must be assigned a strictly greater sequence number.
    assert!(seqs.windows(2).all(|pair| pair[1] > pair[0]));
}
#[test]
fn fetch_limited_respects_limit() {
    // With 5 messages queued, a fetch with limit = 2 returns exactly 2.
    let (_dir, svc) = test_service();
    let ik = vec![40u8; 32];
    let ch = vec![0u8; 16];
    for i in 0..5 {
        let req = EnqueueReq {
            recipient_key: ik.clone(),
            payload: format!("msg-{i}").into_bytes(),
            channel_id: ch.clone(),
            ttl_secs: 0,
        };
        svc.enqueue(req).unwrap();
    }
    let fetched = svc
        .fetch(FetchReq {
            recipient_key: ik,
            channel_id: ch,
            limit: 2,
        })
        .unwrap();
    assert_eq!(fetched.payloads.len(), 2);
}
}

View File

@@ -95,3 +95,241 @@ impl KeyService {
Ok(FetchHybridKeysResp { keys })
}
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
// Tests for KeyService: key-package upload/fetch with validation (identity
// length, emptiness, size cap), hybrid-key upload/fetch, and batch fetch.
mod tests {
use super::*;
use crate::storage::FileBackedStore;
// Builds a KeyService over a throwaway store; the TempDir is returned so the
// backing directory stays alive for the duration of the test.
fn test_service() -> (tempfile::TempDir, KeyService) {
let dir = tempfile::tempdir().unwrap();
let store = Arc::new(FileBackedStore::open(dir.path()).unwrap());
let svc = KeyService { store };
(dir, svc)
}
// Minimal authenticated caller used by every test below.
fn test_auth() -> CallerAuth {
CallerAuth {
identity_key: vec![1u8; 32],
token: vec![0u8; 16],
device_id: None,
}
}
#[test]
// Happy path: upload returns a 32-byte fingerprint and the package can be
// fetched back verbatim.
fn upload_and_fetch_key_package() {
let (_dir, svc) = test_service();
let auth = test_auth();
let ik = vec![1u8; 32];
let package = vec![42u8; 128];
let resp = svc
.upload_key_package(
UploadKeyPackageReq {
identity_key: ik.clone(),
package: package.clone(),
},
&auth,
)
.unwrap();
// Fingerprint is SHA-256 of the package
assert_eq!(resp.fingerprint.len(), 32);
let fetched = svc
.fetch_key_package(FetchKeyPackageReq { identity_key: ik }, &auth)
.unwrap();
assert_eq!(fetched.package, package);
}
#[test]
// Fetching a package for an identity that never uploaded one succeeds with
// an empty package rather than erroring.
fn fetch_key_package_missing() {
let (_dir, svc) = test_service();
let auth = test_auth();
let resp = svc
.fetch_key_package(
FetchKeyPackageReq { identity_key: vec![99u8; 32] },
&auth,
)
.unwrap();
assert!(resp.package.is_empty());
}
#[test]
// Identity keys must be exactly 32 bytes; the error carries the bad length.
fn upload_key_package_rejects_invalid_identity_key() {
let (_dir, svc) = test_service();
let auth = test_auth();
let err = svc
.upload_key_package(
UploadKeyPackageReq {
identity_key: vec![1u8; 31],
package: vec![1u8; 10],
},
&auth,
)
.unwrap_err();
assert!(matches!(err, DomainError::InvalidIdentityKey(31)));
}
#[test]
// An empty package body is rejected outright.
fn upload_key_package_rejects_empty_package() {
let (_dir, svc) = test_service();
let auth = test_auth();
let err = svc
.upload_key_package(
UploadKeyPackageReq {
identity_key: vec![1u8; 32],
package: vec![],
},
&auth,
)
.unwrap_err();
assert!(matches!(err, DomainError::EmptyPackage));
}
#[test]
// One byte over MAX_KEYPACKAGE_BYTES is rejected (cf. the at-max test below).
fn upload_key_package_rejects_oversized() {
let (_dir, svc) = test_service();
let auth = test_auth();
let err = svc
.upload_key_package(
UploadKeyPackageReq {
identity_key: vec![1u8; 32],
package: vec![0u8; MAX_KEYPACKAGE_BYTES + 1],
},
&auth,
)
.unwrap_err();
assert!(matches!(err, DomainError::PackageTooLarge(_)));
}
#[test]
// Hybrid (post-quantum) public keys roundtrip through upload and fetch.
fn upload_and_fetch_hybrid_key() {
let (_dir, svc) = test_service();
let auth = test_auth();
let ik = vec![2u8; 32];
let hk = vec![0xABu8; 1184]; // ML-KEM-768 public key size
svc.upload_hybrid_key(
UploadHybridKeyReq {
identity_key: ik.clone(),
hybrid_public_key: hk.clone(),
},
&auth,
)
.unwrap();
let resp = svc
.fetch_hybrid_key(FetchHybridKeyReq { identity_key: ik }, &auth)
.unwrap();
assert_eq!(resp.hybrid_public_key, hk);
}
#[test]
// Missing hybrid key fetch succeeds with an empty key, mirroring
// fetch_key_package_missing above.
fn fetch_hybrid_key_missing() {
let (_dir, svc) = test_service();
let auth = test_auth();
let resp = svc
.fetch_hybrid_key(
FetchHybridKeyReq { identity_key: vec![99u8; 32] },
&auth,
)
.unwrap();
assert!(resp.hybrid_public_key.is_empty());
}
#[test]
// Identity validation applies to hybrid uploads as well (10 != 32 bytes).
fn upload_hybrid_key_rejects_invalid_identity() {
let (_dir, svc) = test_service();
let auth = test_auth();
let err = svc
.upload_hybrid_key(
UploadHybridKeyReq {
identity_key: vec![1u8; 10],
hybrid_public_key: vec![1u8; 100],
},
&auth,
)
.unwrap_err();
assert!(matches!(err, DomainError::InvalidIdentityKey(10)));
}
#[test]
// An empty hybrid public key is rejected with its own error variant.
fn upload_hybrid_key_rejects_empty() {
let (_dir, svc) = test_service();
let auth = test_auth();
let err = svc
.upload_hybrid_key(
UploadHybridKeyReq {
identity_key: vec![1u8; 32],
hybrid_public_key: vec![],
},
&auth,
)
.unwrap_err();
assert!(matches!(err, DomainError::EmptyHybridKey));
}
#[test]
// Batch fetch preserves request order and substitutes an empty entry for
// identities that have no hybrid key on file.
fn fetch_hybrid_keys_batch() {
let (_dir, svc) = test_service();
let auth = test_auth();
let ik1 = vec![1u8; 32];
let ik2 = vec![2u8; 32];
let ik3 = vec![3u8; 32]; // no hybrid key uploaded
svc.upload_hybrid_key(
UploadHybridKeyReq {
identity_key: ik1.clone(),
hybrid_public_key: vec![0xAAu8; 64],
},
&auth,
)
.unwrap();
svc.upload_hybrid_key(
UploadHybridKeyReq {
identity_key: ik2.clone(),
hybrid_public_key: vec![0xBBu8; 64],
},
&auth,
)
.unwrap();
let resp = svc
.fetch_hybrid_keys(
FetchHybridKeysReq {
identity_keys: vec![ik1, ik2, ik3],
},
&auth,
)
.unwrap();
assert_eq!(resp.keys.len(), 3);
assert_eq!(resp.keys[0], vec![0xAAu8; 64]);
assert_eq!(resp.keys[1], vec![0xBBu8; 64]);
assert!(resp.keys[2].is_empty()); // missing key returns empty
}
#[test]
// Boundary companion to the oversized test: exactly MAX_KEYPACKAGE_BYTES is
// accepted, proving the limit is inclusive.
fn upload_key_package_at_max_size() {
let (_dir, svc) = test_service();
let auth = test_auth();
// Exactly at max should succeed
let resp = svc
.upload_key_package(
UploadKeyPackageReq {
identity_key: vec![1u8; 32],
package: vec![0u8; MAX_KEYPACKAGE_BYTES],
},
&auth,
);
assert!(resp.is_ok());
}
}