feat: Sprint 5 — encrypted file transfer with chunked upload/download
- Add uploadBlob (@21) and downloadBlob (@22) RPCs to the Cap'n Proto schema, with SHA-256 content addressing and chunked transfer
- Server blob handler: 256 KB chunks, SHA-256 verification on finalize, .meta JSON sidecar, 50 MB size limit, content-addressable storage
- Add FileRef (0x08) AppMessage variant with blob_id, filename, file_size, mime_type
- /send-file command: read file, compute hash, upload in chunks with progress display, send FileRef via MLS, MIME auto-detection (client flow sketched below)
- /download command: fetch blob in chunks with progress, verify hash, save to disk with collision avoidance (fetch-and-verify loop sketched below)
- 2 new E2E tests: upload/download round-trip with partial reads, hash-mismatch rejection (14 E2E tests total)
- New error codes: E024-E027 for blob operations
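For orientation, here is a minimal sketch of the client-side upload loop implied by the server handler below: hash the whole file up front, then stream 256 KB chunks. It assumes the capnp-generated Rust client (`node_service::Client` with an `upload_blob_request()` builder) and omits the auth context; `send_file` and its exact signature are hypothetical, not the actual /send-file code.

use sha2::{Digest, Sha256};

const CHUNK: usize = 256 * 1024; // matches the server's 256 KB chunk size

// Hypothetical client-side upload loop; the request builder method names
// mirror the uploadBlob params used in the server handler below.
async fn send_file(
    node_client: &quicproquo_proto::node_capnp::node_service::Client,
    data: &[u8],
    mime_type: &str,
) -> Result<[u8; 32], capnp::Error> {
    // Content address: SHA-256 over the whole file, computed before upload.
    let blob_hash: [u8; 32] = Sha256::digest(data).into();

    let mut offset = 0u64;
    for chunk in data.chunks(CHUNK) {
        let mut req = node_client.upload_blob_request();
        {
            let mut p = req.get();
            p.set_blob_hash(&blob_hash);
            p.set_chunk(chunk);
            p.set_offset(offset);
            p.set_total_size(data.len() as u64);
            p.set_mime_type(mime_type.into());
            // auth context omitted for brevity
        }
        // The server verifies the full SHA-256 once the final chunk lands.
        req.send().promise.await?;
        offset += chunk.len() as u64;
    }
    Ok(blob_hash) // doubles as the FileRef blob_id sent over MLS
}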
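And the matching fetch-and-verify loop for /download, under the same assumptions (generated client API, hypothetical `fetch_blob` helper, auth omitted):

use sha2::{Digest, Sha256};

// Hypothetical client-side download loop: pull chunks until an empty chunk
// comes back, then verify the SHA-256 against the content address.
async fn fetch_blob(
    node_client: &quicproquo_proto::node_capnp::node_service::Client,
    blob_id: &[u8; 32],
) -> Result<Vec<u8>, capnp::Error> {
    let mut data = Vec::new();
    loop {
        let mut req = node_client.download_blob_request();
        {
            let mut p = req.get();
            p.set_blob_id(blob_id);
            p.set_offset(data.len() as u64);
            p.set_length(256 * 1024); // the server clamps to MAX_DOWNLOAD_CHUNK anyway
            // auth context omitted for brevity
        }
        let resp = req.send().promise.await?;
        let chunk = resp.get()?.get_chunk()?;
        if chunk.is_empty() {
            break; // offset past EOF: transfer complete
        }
        data.extend_from_slice(chunk);
    }
    // Verify integrity client-side, mirroring the server's finalize check.
    let computed: [u8; 32] = Sha256::digest(&data).into();
    if &computed != blob_id {
        return Err(capnp::Error::failed("blob hash mismatch".into()));
    }
    Ok(data)
}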
@@ -52,6 +52,7 @@ anyhow = { workspace = true }
thiserror = { workspace = true }
bincode = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

# CLI
clap = { workspace = true }

@@ -26,6 +26,10 @@ pub const E020_BAD_PARAMS: &str = "E020";
pub const E021_CIPHERSUITE_NOT_ALLOWED: &str = "E021";
pub const E022_CHANNEL_ACCESS_DENIED: &str = "E022";
pub const E023_CHANNEL_NOT_FOUND: &str = "E023";
pub const E024_BLOB_TOO_LARGE: &str = "E024";
pub const E025_BLOB_HASH_LENGTH: &str = "E025";
pub const E026_BLOB_HASH_MISMATCH: &str = "E026";
pub const E027_BLOB_NOT_FOUND: &str = "E027";

/// Build a `capnp::Error::failed()` with the structured code prefix.
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {

@@ -220,6 +220,10 @@ async fn main() -> anyhow::Result<()> {
        }
    };

    // Ensure blobs directory exists for file transfer support.
    std::fs::create_dir_all(PathBuf::from(&effective.data_dir).join("blobs"))
        .context("create blobs directory")?;

    let auth_cfg = Arc::new(AuthConfig::new(
        effective.auth_token.clone(),
        effective.allow_insecure_auth,
@@ -594,6 +598,7 @@ async fn main() -> anyhow::Result<()> {
        let sk = Arc::clone(&signing_key);
        let conn_hooks = Arc::clone(&hooks);
        let conn_kt_log = Arc::clone(&kt_log);
        let conn_data_dir = PathBuf::from(&effective.data_dir);

        tokio::task::spawn_local(async move {
            if let Err(e) = handle_node_connection(
@@ -611,6 +616,7 @@ async fn main() -> anyhow::Result<()> {
                sk,
                conn_hooks,
                conn_kt_log,
                conn_data_dir,
            )
            .await
            {
crates/quicproquo-server/src/node_service/blob_ops.rs (new file, 325 lines)
@@ -0,0 +1,325 @@
//! uploadBlob / downloadBlob RPCs: chunked file transfer with SHA-256 integrity verification.

use std::io::{Read, Seek, SeekFrom, Write};
use std::path::PathBuf;

use capnp::capability::Promise;
use quicproquo_proto::node_capnp::node_service;
use sha2::{Digest, Sha256};

use crate::auth::{coded_error, fmt_hex, validate_auth_context};
use crate::error_codes::*;

use super::NodeServiceImpl;

/// Maximum blob size: 50 MB.
const MAX_BLOB_SIZE: u64 = 50 * 1024 * 1024;

/// Maximum download chunk size: 256 KB.
const MAX_DOWNLOAD_CHUNK: u32 = 256 * 1024;

/// Metadata stored alongside each completed blob.
#[derive(serde::Serialize, serde::Deserialize)]
struct BlobMeta {
    mime_type: String,
    total_size: u64,
    uploaded_at: u64,
    uploader_key_prefix: String,
}

/// Resolve the blobs directory from the server's data_dir.
fn blobs_dir(data_dir: &std::path::Path) -> PathBuf {
    data_dir.join("blobs")
}

impl NodeServiceImpl {
    pub fn handle_upload_blob(
        &mut self,
        params: node_service::UploadBlobParams,
        mut results: node_service::UploadBlobResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };

        let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            Ok(ctx) => ctx,
            Err(e) => return Promise::err(e),
        };

        let blob_hash = match p.get_blob_hash() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let chunk = match p.get_chunk() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let offset = p.get_offset();
        let total_size = p.get_total_size();
        let mime_type = match p.get_mime_type() {
            Ok(v) => match v.to_str() {
                Ok(s) => s.to_string(),
                Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
            },
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };

        // Validate blobHash length.
        if blob_hash.len() != 32 {
            return Promise::err(coded_error(
                E025_BLOB_HASH_LENGTH,
                format!("blobHash must be exactly 32 bytes, got {}", blob_hash.len()),
            ));
        }

        // Validate totalSize.
        if total_size > MAX_BLOB_SIZE {
            return Promise::err(coded_error(
                E024_BLOB_TOO_LARGE,
                format!("totalSize {} exceeds max blob size ({} bytes)", total_size, MAX_BLOB_SIZE),
            ));
        }
        if total_size == 0 {
            return Promise::err(coded_error(E020_BAD_PARAMS, "totalSize must be > 0"));
        }

        // Validate chunk bounds.
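        // (checked_add also rejects an offset + length that would overflow u64.)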
        if offset.checked_add(chunk.len() as u64).map_or(true, |end| end > total_size) {
            return Promise::err(coded_error(
                E020_BAD_PARAMS,
                format!(
                    "chunk out of bounds: offset={} + chunk_len={} > totalSize={}",
                    offset,
                    chunk.len(),
                    total_size
                ),
            ));
        }

        let blob_hex = hex::encode(&blob_hash);
        let dir = blobs_dir(&self.data_dir);

        // Ensure blobs directory exists.
        if let Err(e) = std::fs::create_dir_all(&dir) {
            return Promise::err(coded_error(
                E009_STORAGE_ERROR,
                format!("failed to create blobs directory: {e}"),
            ));
        }

        let part_path = dir.join(format!("{blob_hex}.part"));
        let final_path = dir.join(&blob_hex);
        let meta_path = dir.join(format!("{blob_hex}.meta"));

        // If the blob already exists (fully uploaded), return immediately.
        if final_path.exists() {
            results.get().set_blob_id(&blob_hash);
            return Promise::ok(());
        }

        // Write chunk at the given offset.
        let write_result = (|| -> Result<(), String> {
            let mut file = std::fs::OpenOptions::new()
                .create(true)
                .write(true)
                .truncate(false)
                .open(&part_path)
                .map_err(|e| format!("open .part file: {e}"))?;
            file.seek(SeekFrom::Start(offset))
                .map_err(|e| format!("seek: {e}"))?;
            file.write_all(&chunk)
                .map_err(|e| format!("write chunk: {e}"))?;
            file.sync_all()
                .map_err(|e| format!("sync: {e}"))?;
            Ok(())
        })();

        if let Err(e) = write_result {
            return Promise::err(coded_error(E009_STORAGE_ERROR, e));
        }

        // Check if the blob is complete.
        let end = offset + chunk.len() as u64;
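        // Completion is detected when a chunk ends exactly at totalSize; this
        // assumes the uploader sends the final chunk last, as a sequential
        // client would.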
        if end == total_size {
            // Verify SHA-256 of the complete file.
            let verify_result = (|| -> Result<bool, String> {
                let mut file = std::fs::File::open(&part_path)
                    .map_err(|e| format!("open for verify: {e}"))?;
                let mut hasher = Sha256::new();
                let mut buf = [0u8; 64 * 1024];
                loop {
                    let n = file.read(&mut buf).map_err(|e| format!("read: {e}"))?;
                    if n == 0 {
                        break;
                    }
                    hasher.update(&buf[..n]);
                }
                let computed: [u8; 32] = hasher.finalize().into();
                Ok(computed == blob_hash.as_slice())
            })();

            match verify_result {
                Ok(true) => {
                    // Hash matches — finalize the blob.
                    if let Err(e) = std::fs::rename(&part_path, &final_path) {
                        return Promise::err(coded_error(
                            E009_STORAGE_ERROR,
                            format!("rename .part to final: {e}"),
                        ));
                    }

                    // Write metadata file.
                    let uploader_prefix = auth_ctx
                        .identity_key
                        .as_deref()
                        .filter(|k| k.len() >= 4)
                        .map(|k| hex::encode(&k[..4]))
                        .unwrap_or_default();

                    let now = std::time::SystemTime::now()
                        .duration_since(std::time::UNIX_EPOCH)
                        .unwrap_or_default()
                        .as_secs();

                    let meta = BlobMeta {
                        mime_type: mime_type.clone(),
                        total_size,
                        uploaded_at: now,
                        uploader_key_prefix: uploader_prefix.clone(),
                    };

                    if let Err(e) = (|| -> Result<(), String> {
                        let json = serde_json::to_string_pretty(&meta)
                            .map_err(|e| format!("serialize meta: {e}"))?;
                        std::fs::write(&meta_path, json.as_bytes())
                            .map_err(|e| format!("write meta: {e}"))?;
                        Ok(())
                    })() {
                        // Non-fatal: the blob is already stored; log and continue.
                        tracing::warn!(error = %e, "failed to write blob metadata");
                    }

                    tracing::info!(
                        blob_hash_prefix = %fmt_hex(&blob_hash[..4]),
                        total_size = total_size,
                        mime_type = %mime_type,
                        uploader_prefix = %uploader_prefix,
                        "audit: blob_upload_complete"
                    );
                }
                Ok(false) => {
                    // Hash mismatch — delete the .part file.
                    let _ = std::fs::remove_file(&part_path);
                    return Promise::err(coded_error(
                        E026_BLOB_HASH_MISMATCH,
                        "SHA-256 of uploaded data does not match blobHash",
                    ));
                }
                Err(e) => {
                    let _ = std::fs::remove_file(&part_path);
                    return Promise::err(coded_error(E009_STORAGE_ERROR, e));
                }
            }
        }

        results.get().set_blob_id(&blob_hash);
        Promise::ok(())
    }

    pub fn handle_download_blob(
        &mut self,
        params: node_service::DownloadBlobParams,
        mut results: node_service::DownloadBlobResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };

        if let Err(e) = validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            return Promise::err(e);
        }

        let blob_id = match p.get_blob_id() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let offset = p.get_offset();
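        // Clamp the requested length so a client cannot demand oversized reads.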
        let length = p.get_length().min(MAX_DOWNLOAD_CHUNK);

        if blob_id.len() != 32 {
            return Promise::err(coded_error(
                E025_BLOB_HASH_LENGTH,
                format!("blobId must be exactly 32 bytes, got {}", blob_id.len()),
            ));
        }

        let blob_hex = hex::encode(&blob_id);
        let dir = blobs_dir(&self.data_dir);
        let blob_path = dir.join(&blob_hex);
        let meta_path = dir.join(format!("{blob_hex}.meta"));

        // Check that the blob exists.
        if !blob_path.exists() {
            return Promise::err(coded_error(E027_BLOB_NOT_FOUND, "blob not found"));
        }

        // Read metadata.
        let meta: BlobMeta = match std::fs::read_to_string(&meta_path) {
            Ok(json) => match serde_json::from_str(&json) {
                Ok(m) => m,
                Err(e) => {
                    return Promise::err(coded_error(
                        E009_STORAGE_ERROR,
                        format!("corrupt blob metadata: {e}"),
                    ));
                }
            },
            Err(e) => {
                return Promise::err(coded_error(
                    E009_STORAGE_ERROR,
                    format!("read blob metadata: {e}"),
                ));
            }
        };

        // Read the requested chunk.
        let read_result = (|| -> Result<Vec<u8>, String> {
            let mut file = std::fs::File::open(&blob_path)
                .map_err(|e| format!("open blob: {e}"))?;
            let file_len = file
                .metadata()
                .map_err(|e| format!("file metadata: {e}"))?
                .len();

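            // Reads that start at or past EOF return an empty chunk, which a
            // client can treat as end-of-blob.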
            if offset >= file_len {
                return Ok(vec![]);
            }

            file.seek(SeekFrom::Start(offset))
                .map_err(|e| format!("seek: {e}"))?;
            let remaining = (file_len - offset) as usize;
            let to_read = remaining.min(length as usize);
            let mut buf = vec![0u8; to_read];
            file.read_exact(&mut buf)
                .map_err(|e| format!("read chunk: {e}"))?;
            Ok(buf)
        })();

        match read_result {
            Ok(chunk) => {
                let mut r = results.get();
                r.set_chunk(&chunk);
                r.set_total_size(meta.total_size);
                r.set_mime_type(&meta.mime_type);
            }
            Err(e) => {
                return Promise::err(coded_error(E009_STORAGE_ERROR, e));
            }
        }

        Promise::ok(())
    }
}
@@ -1,3 +1,4 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

@@ -20,6 +21,7 @@ use crate::storage::Store;
const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024;

mod auth_ops;
mod blob_ops;
mod channel_ops;
mod delivery;
mod key_ops;
@@ -194,6 +196,22 @@ impl node_service::Server for NodeServiceImpl {
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_resolve_identity(params, results)
    }

    fn upload_blob(
        &mut self,
        params: node_service::UploadBlobParams,
        results: node_service::UploadBlobResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_blob(params, results)
    }

    fn download_blob(
        &mut self,
        params: node_service::DownloadBlobParams,
        results: node_service::DownloadBlobResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_download_blob(params, results)
    }
}

pub const CURRENT_WIRE_VERSION: u16 = 1;
@@ -218,6 +236,8 @@ pub struct NodeServiceImpl {
    pub signing_key: Arc<quicproquo_core::IdentityKeypair>,
    /// Key Transparency Merkle log (shared across connections).
    pub kt_log: Arc<std::sync::Mutex<MerkleLog>>,
    /// Server data directory (used for blob storage).
    pub data_dir: PathBuf,
}

impl NodeServiceImpl {
@@ -236,6 +256,7 @@ impl NodeServiceImpl {
        signing_key: Arc<quicproquo_core::IdentityKeypair>,
        hooks: Arc<dyn crate::hooks::ServerHooks>,
        kt_log: Arc<std::sync::Mutex<MerkleLog>>,
        data_dir: PathBuf,
    ) -> Self {
        Self {
            store,
@@ -251,6 +272,7 @@ impl NodeServiceImpl {
            hooks,
            signing_key,
            kt_log,
            data_dir,
        }
    }
}
@@ -271,6 +293,7 @@ pub async fn handle_node_connection(
    signing_key: Arc<quicproquo_core::IdentityKeypair>,
    hooks: Arc<dyn crate::hooks::ServerHooks>,
    kt_log: Arc<std::sync::Mutex<MerkleLog>>,
    data_dir: PathBuf,
) -> Result<(), anyhow::Error> {
    let connection = connecting.await?;

@@ -305,6 +328,7 @@ pub async fn handle_node_connection(
        signing_key,
        hooks,
        kt_log,
        data_dir,
    ));

    RpcSystem::new(Box::new(network), Some(service.client))