feat: Sprint 1 — production hardening, TLS lifecycle, CI coverage, lint cleanup

- Fix 3 client panics: replace .unwrap()/.expect() with proper error
  handling in rpc.rs (AUTH_CONTEXT lock), repl.rs (pending_member),
  and retry.rs (last_err); see the error-handling sketch after this list
- Add --danger-accept-invalid-certs flag with InsecureServerCertVerifier
  for development TLS bypass (a verifier sketch follows the commit
  metadata below), plus mdBook TLS documentation
- Add CI coverage job (cargo-tarpaulin) and Docker build validation
  to GitHub Actions workflow, plus README CI badge
- Add [workspace.lints] config, fix 46 clippy warnings across 8 crates,
  zero warnings on all buildable crates
- Update Dockerfile for all 11 workspace members
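
All three panic fixes follow the same pattern: a lock or option that used to be `.unwrap()`ed now surfaces an error instead of aborting the client. A minimal sketch of that pattern for the AUTH_CONTEXT case, using std's `OnceLock`/`Mutex` and `anyhow` as stand-ins; the names and types here are assumptions, not the crate's actual code in rpc.rs.

```rust
use std::sync::{Mutex, OnceLock};

// Hypothetical stand-in for the client's AUTH_CONTEXT global; the real
// type and error plumbing in rpc.rs are not shown on this commit page.
static AUTH_CONTEXT: OnceLock<Mutex<Option<String>>> = OnceLock::new();

fn current_token() -> anyhow::Result<String> {
    let ctx = AUTH_CONTEXT.get_or_init(|| Mutex::new(None));
    // Before: ctx.lock().unwrap(), which panics if another thread poisoned the lock.
    // After: report the poisoned lock as an ordinary error and keep the client alive.
    let guard = ctx
        .lock()
        .map_err(|_| anyhow::anyhow!("auth context lock poisoned"))?;
    guard
        .clone()
        .ok_or_else(|| anyhow::anyhow!("no access token set; log in first"))
}
```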
2026-03-03 23:19:11 +01:00
parent dc4e4e49a0
commit 612b06aa8e
33 changed files with 388 additions and 67 deletions
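
The TLS bullet references an `InsecureServerCertVerifier`, but its file isn't among the hunks shown below. A minimal sketch of what such a verifier typically looks like, assuming rustls 0.23's `danger` verifier API with the `ring` provider; only the struct name comes from this commit, everything else is an assumption.

```rust
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::{DigitallySignedStruct, Error, SignatureScheme};

/// Accepts any server certificate. Only reachable behind
/// --danger-accept-invalid-certs; never enable this in production.
#[derive(Debug)]
struct InsecureServerCertVerifier;

impl ServerCertVerifier for InsecureServerCertVerifier {
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        _now: UnixTime,
    ) -> Result<ServerCertVerified, Error> {
        // Skip chain building, hostname checks, and expiry checks entirely.
        Ok(ServerCertVerified::assertion())
    }

    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, Error> {
        Ok(HandshakeSignatureValid::assertion())
    }

    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, Error> {
        Ok(HandshakeSignatureValid::assertion())
    }

    fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}
```

When the flag is set, the client would install this via rustls's `dangerous()` client-config builder; with the flag absent, the standard WebPKI verifier stays in place.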


@@ -64,5 +64,8 @@ metrics-exporter-prometheus = "0.15"
# mDNS service announcement for local mesh / Freifunk node discovery.
mdns-sd = "0.12"
+[lints]
+workspace = true
[dev-dependencies]
tempfile = "3"


@@ -178,7 +178,7 @@ pub fn validate_auth_context(
Err(crate::error_codes::coded_error(E003_INVALID_TOKEN, "invalid accessToken"))
}
-pub fn require_identity<'a>(auth_ctx: &'a AuthContext) -> Result<&'a [u8], capnp::Error> {
+pub fn require_identity(auth_ctx: &AuthContext) -> Result<&[u8], capnp::Error> {
match auth_ctx.identity_key.as_deref() {
Some(ik) => Ok(ik),
None => Err(crate::error_codes::coded_error(


@@ -121,7 +121,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
args.data_dir.clone()
};
-let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) {
+let tls_cert = if args.tls_cert == Path::new(DEFAULT_TLS_CERT) {
file.tls_cert
.clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
@@ -129,7 +129,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
args.tls_cert.clone()
};
-let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) {
+let tls_key = if args.tls_key == Path::new(DEFAULT_TLS_KEY) {
file.tls_key
.clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
@@ -159,7 +159,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
args.store_backend.clone()
};
-let db_path = if args.db_path == PathBuf::from(DEFAULT_DB_PATH) {
+let db_path = if args.db_path == Path::new(DEFAULT_DB_PATH) {
file.db_path
.clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_DB_PATH))


@@ -22,6 +22,7 @@ mod federation;
pub mod hooks;
mod metrics;
mod node_service;
+#[allow(unsafe_code)] // FFI: C-ABI plugin interaction requires unsafe blocks
mod plugin_loader;
mod sql_store;
mod tls;
@@ -213,7 +214,7 @@ async fn main() -> anyhow::Result<()> {
}
Arc::new(SqlStore::open(&effective.db_path, &effective.db_key)?)
}
"file" | _ => {
_ => {
tracing::info!(dir = %effective.data_dir, "opening file-backed store");
Arc::new(FileBackedStore::open(&effective.data_dir)?)
}
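
`plugin_loader` keeps an explicit `#[allow(unsafe_code)]` because a C-ABI plugin boundary cannot be checked by the compiler; the module itself isn't part of the hunks shown here. A self-contained sketch of the kind of call that forces the allowance, with a made-up entry-point signature standing in for a symbol that would really be resolved from a shared library at runtime.

```rust
/// Hypothetical C-ABI entry point a plugin might export; the real
/// plugin_loader symbols and signatures are not shown in this commit.
type PluginEntryFn = unsafe extern "C" fn(input: *const u8, len: usize) -> i32;

/// Stand-in for a function that would normally come from dlopen/libloading.
extern "C" fn demo_plugin_entry(input: *const u8, len: usize) -> i32 {
    // The plugin side rebuilds the byte slice from the raw pointer and length.
    let bytes = unsafe { std::slice::from_raw_parts(input, len) };
    bytes.len() as i32
}

fn main() {
    // A safe `extern "C" fn` coerces to the unsafe function-pointer type.
    let entry: PluginEntryFn = demo_plugin_entry;
    let payload = b"hello";
    // Calling through a C-ABI pointer is always `unsafe`: the compiler cannot
    // verify that the callee upholds the pointer/length contract.
    let result = unsafe { entry(payload.as_ptr(), payload.len()) };
    assert_eq!(result, 5);
}
```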


@@ -53,7 +53,7 @@ impl NodeServiceImpl {
));
}
-let (channel_id, was_new) = match self.store.create_channel(&identity, &peer_key) {
+let (channel_id, was_new) = match self.store.create_channel(identity, &peer_key) {
Ok(pair) => pair,
Err(e) => return Promise::err(storage_err(e)),
};


@@ -12,7 +12,7 @@ fn storage_err(err: StorageError) -> capnp::Error {
coded_error(E009_STORAGE_ERROR, err)
}
-const MAX_KEYPACKAGE_BYTES: usize = 1 * 1024 * 1024; // 1 MB cap per KeyPackage
+const MAX_KEYPACKAGE_BYTES: usize = 1024 * 1024; // 1 MB cap per KeyPackage
impl NodeServiceImpl {
pub fn handle_upload_key_package(


@@ -221,6 +221,7 @@ pub struct NodeServiceImpl {
}
impl NodeServiceImpl {
+#[allow(clippy::too_many_arguments)]
pub fn new(
store: Arc<dyn Store>,
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
@@ -254,6 +255,7 @@ impl NodeServiceImpl {
}
}
+#[allow(clippy::too_many_arguments)]
pub async fn handle_node_connection(
connecting: quinn::Connecting,
store: Arc<dyn Store>,


@@ -147,6 +147,7 @@ pub trait Store: Send + Sync {
fn create_channel(&self, member_a: &[u8], member_b: &[u8]) -> Result<(Vec<u8>, bool), StorageError>;
/// Get the two members of a channel by channel_id (16 bytes). Returns (member_a, member_b) in sorted order.
+#[allow(clippy::type_complexity)]
fn get_channel_members(&self, channel_id: &[u8]) -> Result<Option<(Vec<u8>, Vec<u8>)>, StorageError>;
// ── Federation ──────────────────────────────────────────────────────────
@@ -232,6 +233,7 @@ pub struct FileBackedStore {
channels_path: PathBuf,
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
deliveries: Mutex<QueueMapV3>,
+#[allow(clippy::type_complexity)]
channels: Mutex<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>>,
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
users: Mutex<HashMap<String, Vec<u8>>>,
@@ -282,6 +284,7 @@ impl FileBackedStore {
})
}
+#[allow(clippy::type_complexity)]
fn load_channels(
path: &Path,
) -> Result<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>, StorageError> {
@@ -435,13 +438,13 @@ impl Store for FileBackedStore {
map.entry(identity_key.to_vec())
.or_default()
.push_back(package);
-self.flush_kp_map(&self.kp_path, &*map)
+self.flush_kp_map(&self.kp_path, &map)
}
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let mut map = lock(&self.key_packages)?;
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
-self.flush_kp_map(&self.kp_path, &*map)?;
+self.flush_kp_map(&self.kp_path, &map)?;
Ok(package)
}
@@ -460,7 +463,7 @@ impl Store for FileBackedStore {
let seq = *entry;
*entry = seq + 1;
inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
-self.flush_delivery_map(&self.ds_path, &*inner)?;
+self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(seq)
}
@@ -479,7 +482,7 @@ impl Store for FileBackedStore {
.get_mut(&key)
.map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
.unwrap_or_default();
-self.flush_delivery_map(&self.ds_path, &*inner)?;
+self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(messages)
}
@@ -502,7 +505,7 @@ impl Store for FileBackedStore {
q.drain(..count).map(|e| (e.seq, e.data)).collect()
})
.unwrap_or_default();
-self.flush_delivery_map(&self.ds_path, &*inner)?;
+self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(messages)
}
@@ -527,7 +530,7 @@ impl Store for FileBackedStore {
) -> Result<(), StorageError> {
let mut map = lock(&self.hybrid_keys)?;
map.insert(identity_key.to_vec(), hybrid_pk);
-self.flush_hybrid_keys(&self.hk_path, &*map)
+self.flush_hybrid_keys(&self.hk_path, &map)
}
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
@@ -615,7 +618,7 @@ impl Store for FileBackedStore {
v.insert(record);
}
}
-self.flush_users(&self.users_path, &*map)
+self.flush_users(&self.users_path, &map)
}
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
@@ -635,7 +638,7 @@ impl Store for FileBackedStore {
) -> Result<(), StorageError> {
let mut map = lock(&self.identity_keys)?;
map.insert(username.to_string(), identity_key);
-self.flush_map_string_bytes(&self.identity_keys_path, &*map)
+self.flush_map_string_bytes(&self.identity_keys_path, &map)
}
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
@@ -697,7 +700,7 @@ impl Store for FileBackedStore {
} else {
0
};
-self.flush_delivery_map(&self.ds_path, &*inner)?;
+self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(removed)
}
@@ -730,7 +733,7 @@ impl Store for FileBackedStore {
rand::thread_rng().fill_bytes(&mut channel_id);
let channel_id = channel_id.to_vec();
map.insert(channel_id.clone(), (a, b));
-self.flush_channels(&self.channels_path, &*map)?;
+self.flush_channels(&self.channels_path, &map)?;
Ok((channel_id, true))
}
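
Every changed line in the store hunks above is the same clippy cleanup (explicit_auto_deref): `&*map` dereferences the `MutexGuard` by hand, but deref coercion already turns `&map` into a reference to the inner map wherever a `&HashMap` is expected, so the `*` is redundant. A standalone illustration, assuming the flush helpers take plain map references as the calls above suggest.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-in for a flush_* helper that only needs a read-only view of the map.
fn flush_map(map: &HashMap<String, Vec<u8>>) -> usize {
    map.len()
}

fn main() {
    let users: Mutex<HashMap<String, Vec<u8>>> = Mutex::new(HashMap::new());
    let mut map = users.lock().expect("lock poisoned");
    map.insert("alice".to_string(), vec![1, 2, 3]);

    // Both calls hand the helper a &HashMap: `&*map` derefs the guard explicitly,
    // while `&map` lets deref coercion do it, which is what clippy prefers.
    assert_eq!(flush_map(&*map), 1);
    assert_eq!(flush_map(&map), 1);
}
```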