Files
quicproquo/crates/quicproquo-client/src/client/conversation.rs
Chris Nennemann c8398d6cb7 feat: DM epoch fix, federation relay, and mDNS mesh discovery
- schema: createChannel returns wasNew :Bool to elect the MLS initiator
  unambiguously; prevents duplicate group creation on concurrent /dm calls
- core: group helpers for epoch tracking and key-package lifecycle
- server: federation subsystem — mTLS QUIC server-to-server relay with
  Cap'n Proto RPC; enqueue/batchEnqueue relay unknown recipients to their
  home domain via FederationClient
- server: mDNS _quicproquo._udp.local. service announcement on startup
- server: storage + sql_store — identity_exists, peek/ack, federation
  home-server lookup helpers
- client: /mesh peers REPL command (mDNS discovery, feature = "mesh")
- client: MeshDiscovery — background mDNS browse with ServiceDaemon
- client: was_new=false path in cmd_dm waits for peer Welcome instead of
  creating a duplicate initiator group
- p2p: fix ALPN from quicnprotochat/p2p/1 → quicproquo/p2p/1
- workspace: re-include quicproquo-p2p in members
2026-03-03 14:41:56 +01:00

694 lines
26 KiB
Rust

//! Multi-conversation state backed by SQLite (SQLCipher-encrypted when a
//! password is provided).
//!
//! Each conversation (DM or group) has its own MLS group blob, keystore blob,
//! member list, and message history.
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use anyhow::Context;
use argon2::{Algorithm, Argon2, Params, Version};
use rand::RngCore;
use rusqlite::{params, Connection, OptionalExtension};
use zeroize::Zeroizing;
// ── Types ────────────────────────────────────────────────────────────────────
/// 16-byte conversation identifier.
/// - DMs: the channel_id returned by `createChannel` (server-assigned UUID).
/// - Groups: SHA-256(group_name)[..16].
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ConversationId(pub [u8; 16]);

impl ConversationId {
    /// Build an ID from a byte slice; `None` unless it is exactly 16 bytes.
    pub fn from_slice(s: &[u8]) -> Option<Self> {
        <[u8; 16]>::try_from(s).ok().map(Self)
    }

    /// Derive a conversation ID from a group name via SHA-256 truncation.
    pub fn from_group_name(name: &str) -> Self {
        use sha2::{Digest, Sha256};
        let digest = Sha256::digest(name.as_bytes());
        let mut id = [0u8; 16];
        id.copy_from_slice(&digest[..16]);
        Self(id)
    }

    /// Lowercase hex rendering of the raw 16 bytes.
    pub fn hex(&self) -> String {
        hex::encode(self.0)
    }
}
/// What kind of conversation a row represents; determines which nullable
/// columns of the `conversations` table are populated.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConversationKind {
    /// 1:1 DM channel with a specific peer.
    Dm {
        // Peer's identity public key, raw bytes.
        peer_key: Vec<u8>,
        // Human-readable peer name, when known.
        peer_username: Option<String>,
    },
    /// Named group with N members.
    Group { name: String },
}
/// One persisted conversation: MLS state blobs plus bookkeeping metadata.
/// Mirrors a single row of the `conversations` table.
#[derive(Clone, Debug)]
pub struct Conversation {
    // 16-byte primary key (see `ConversationId` for how it is derived).
    pub id: ConversationId,
    // DM vs. group, with the kind-specific fields.
    pub kind: ConversationKind,
    // Name shown in the UI / REPL listing.
    pub display_name: String,
    /// Serialized MLS group (bincode).
    pub mls_group_blob: Option<Vec<u8>>,
    /// Serialized keystore (bincode HashMap).
    pub keystore_blob: Option<Vec<u8>>,
    /// Member identity keys (bincode Vec<Vec<u8>>).
    pub member_keys: Vec<Vec<u8>>,
    // Messages received since the user last viewed this conversation.
    pub unread_count: u32,
    // Millisecond Unix timestamp of the latest activity (kept monotonic by
    // `update_activity`).
    pub last_activity_ms: u64,
    // Millisecond Unix timestamp when the conversation was created.
    pub created_at_ms: u64,
    /// Whether this conversation uses hybrid (X25519 + ML-KEM-768) MLS keys.
    pub is_hybrid: bool,
    /// Highest server-side delivery sequence number seen.
    pub last_seen_seq: u64,
}
/// One decrypted message as persisted in the `messages` history table.
#[derive(Clone, Debug)]
pub struct StoredMessage {
    // Which conversation this message belongs to.
    pub conversation_id: ConversationId,
    // Optional 16-byte wire message id; used for dedup when present.
    pub message_id: Option<[u8; 16]>,
    // Sender's identity public key, raw bytes.
    pub sender_key: Vec<u8>,
    // Sender display name, when known.
    pub sender_name: Option<String>,
    // Message text (already decrypted).
    pub body: String,
    // Free-form type tag (stored verbatim in the `msg_type` column).
    pub msg_type: String,
    // Optional id of a referenced message (e.g. a reply target —
    // presumably; confirm against the sending side).
    pub ref_msg_id: Option<[u8; 16]>,
    // Millisecond Unix timestamp.
    pub timestamp_ms: u64,
    // True for messages we sent, false for received ones.
    pub is_outgoing: bool,
}
// ── Key derivation (Argon2id, matching state.rs parameters) ─────────────────
// Memory cost; the `argon2` crate's `Params` takes this in KiB, so this is 19 MiB.
const ARGON2_M_COST: u32 = 19 * 1024;
// Iteration count (time cost).
const ARGON2_T_COST: u32 = 2;
// Parallelism (lanes).
const ARGON2_P_COST: u32 = 1;
// Length in bytes of the random salt stored beside the database file.
const SALT_LEN: usize = 16;
/// Derive a 32-byte SQLCipher key from the user password and a random salt.
///
/// Uses Argon2id with the module-level cost parameters; the result is wrapped
/// in `Zeroizing` so the key material is wiped on drop.
fn derive_convdb_key(password: &str, salt: &[u8]) -> anyhow::Result<Zeroizing<[u8; 32]>> {
    let params = Params::new(ARGON2_M_COST, ARGON2_T_COST, ARGON2_P_COST, Some(32))
        .map_err(|e| anyhow::anyhow!("argon2 params: {e}"))?;
    let hasher = Argon2::new(Algorithm::Argon2id, Version::default(), params);
    let mut derived = Zeroizing::new([0u8; 32]);
    hasher
        .hash_password_into(password.as_bytes(), salt, &mut *derived)
        .map_err(|e| anyhow::anyhow!("convdb key derivation: {e}"))?;
    Ok(derived)
}
/// Read or create a 16-byte random salt at `salt_path` (mode 0o600).
fn get_or_create_salt(salt_path: &Path) -> anyhow::Result<Vec<u8>> {
if salt_path.exists() {
let bytes = std::fs::read(salt_path).context("read convdb salt")?;
anyhow::ensure!(bytes.len() == SALT_LEN, "invalid convdb salt length");
return Ok(bytes);
}
let mut salt = vec![0u8; SALT_LEN];
rand::rngs::OsRng.fill_bytes(&mut salt);
std::fs::write(salt_path, &salt).context("write convdb salt")?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
std::fs::set_permissions(salt_path, std::fs::Permissions::from_mode(0o600)).ok();
}
Ok(salt)
}
// ── ConversationStore ────────────────────────────────────────────────────────
/// SQLite-backed store for conversations, message history, and the offline
/// outbox. Wraps a single open `rusqlite` connection (SQLCipher-keyed when a
/// password was supplied at open time).
pub struct ConversationStore {
    conn: Connection,
}
impl ConversationStore {
/// Open or create the conversation database at `db_path`.
/// If `password` is `Some`, the database is encrypted with SQLCipher using
/// an Argon2id-derived key. Existing unencrypted databases are migrated
/// transparently.
pub fn open(db_path: &Path, password: Option<&str>) -> anyhow::Result<Self> {
    // Best-effort: make sure the parent directory exists before SQLite opens.
    if let Some(parent) = db_path.parent() {
        std::fs::create_dir_all(parent).ok();
    }
    if let Some(pw) = password {
        Self::open_encrypted(db_path, pw)
    } else {
        Self::open_plain(db_path)
    }
}
/// Open an unencrypted database, set the standard pragmas, and run migrations.
fn open_plain(db_path: &Path) -> anyhow::Result<Self> {
    let conn = Connection::open(db_path).context("open conversation db")?;
    conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
        .context("set pragmas")?;
    Self::migrate(&conn).map(|()| Self { conn })
}
/// Open (or create) the SQLCipher-encrypted database at `db_path`, migrating
/// an existing plaintext database first if needed.
fn open_encrypted(db_path: &Path, password: &str) -> anyhow::Result<Self> {
    // The sibling salt file doubles as the marker that the DB is already encrypted.
    let salt_path = PathBuf::from(format!("{}-salt", db_path.display()));
    let already_encrypted = salt_path.exists();
    // Migrate an existing unencrypted database before opening with encryption.
    if db_path.exists() && !already_encrypted {
        Self::migrate_plain_to_encrypted(db_path, &salt_path, password)?;
        // After migration, salt file exists and DB is encrypted — fall through.
    }
    let salt = get_or_create_salt(&salt_path)?;
    let key = derive_convdb_key(password, &salt)?;
    let hex_key = hex::encode(&*key);
    let conn = Connection::open(db_path).context("open conversation db")?;
    // Key must be supplied before any other statement touches the database;
    // x'…' is SQLCipher's raw-key form (bypasses its internal KDF).
    conn.pragma_update(None, "key", format!("x'{hex_key}'"))
        .context("set SQLCipher key")?;
    conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")
        .context("set pragmas")?;
    Self::migrate(&conn)?;
    Ok(Self { conn })
}
/// Migrate an unencrypted `.convdb` to an encrypted one in-place.
///
/// Exports the plaintext contents into a freshly keyed SQLCipher database
/// via `sqlcipher_export()`, then renames the encrypted file over the
/// original.
fn migrate_plain_to_encrypted(
    db_path: &Path,
    salt_path: &Path,
    password: &str,
) -> anyhow::Result<()> {
    let salt = get_or_create_salt(salt_path)?;
    let key = derive_convdb_key(password, &salt)?;
    let hex_key = hex::encode(&*key);
    let enc_path = db_path.with_extension("convdb-enc");
    // Open the existing plaintext database.
    let plain = Connection::open(db_path).context("open plain convdb for migration")?;
    plain.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;").ok();
    // Attach a new encrypted database and export into it. ATTACH cannot take
    // its filename as a bound parameter, so the path is interpolated into the
    // SQL text; double any embedded single quotes so a quote in the path
    // cannot break (or inject into) the statement.
    let enc_path_sql = enc_path.display().to_string().replace('\'', "''");
    plain
        .execute_batch(&format!(
            "ATTACH DATABASE '{enc_path_sql}' AS encrypted KEY \"x'{hex_key}'\";"
        ))
        .context("attach encrypted db for migration")?;
    plain
        .execute_batch("SELECT sqlcipher_export('encrypted');")
        .context("sqlcipher_export to encrypted db")?;
    plain
        .execute_batch("DETACH DATABASE encrypted;")
        .context("detach encrypted db")?;
    drop(plain);
    // Swap files: encrypted → original.
    std::fs::rename(&enc_path, db_path).context("replace convdb with encrypted version")?;
    // Clean up WAL/SHM left from the plaintext open.
    let wal = PathBuf::from(format!("{}-wal", db_path.display()));
    let shm = PathBuf::from(format!("{}-shm", db_path.display()));
    std::fs::remove_file(&wal).ok();
    std::fs::remove_file(&shm).ok();
    tracing::info!("migrated conversation database to encrypted storage");
    Ok(())
}
/// Create the schema if absent and apply additive column migrations.
/// Idempotent; called on every open.
fn migrate(conn: &Connection) -> anyhow::Result<()> {
    conn.execute_batch(
        "CREATE TABLE IF NOT EXISTS conversations (
id BLOB PRIMARY KEY,
kind TEXT NOT NULL,
display_name TEXT NOT NULL,
peer_key BLOB,
peer_username TEXT,
group_name TEXT,
mls_group_blob BLOB,
keystore_blob BLOB,
member_keys BLOB,
unread_count INTEGER NOT NULL DEFAULT 0,
last_activity_ms INTEGER NOT NULL DEFAULT 0,
created_at_ms INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
conversation_id BLOB NOT NULL REFERENCES conversations(id),
message_id BLOB,
sender_key BLOB NOT NULL,
sender_name TEXT,
body TEXT NOT NULL,
msg_type TEXT NOT NULL,
ref_msg_id BLOB,
timestamp_ms INTEGER NOT NULL,
is_outgoing INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX IF NOT EXISTS idx_messages_conv
ON messages(conversation_id, timestamp_ms);
CREATE TABLE IF NOT EXISTS outbox (
id INTEGER PRIMARY KEY AUTOINCREMENT,
conversation_id BLOB NOT NULL,
recipient_key BLOB NOT NULL,
payload BLOB NOT NULL,
created_at_ms INTEGER NOT NULL,
retry_count INTEGER NOT NULL DEFAULT 0,
status TEXT NOT NULL DEFAULT 'pending'
);
CREATE INDEX IF NOT EXISTS idx_outbox_status
ON outbox(status, created_at_ms);",
    )
    .context("migrate conversation db")?;
    // Additive migrations for new columns (safe to re-run; errors ignored if column already exists).
    conn.execute_batch("ALTER TABLE conversations ADD COLUMN is_hybrid INTEGER NOT NULL DEFAULT 0;").ok();
    conn.execute_batch("ALTER TABLE conversations ADD COLUMN last_seen_seq INTEGER NOT NULL DEFAULT 0;").ok();
    Ok(())
}
// ── Conversation CRUD ────────────────────────────────────────────────
/// Insert or update a conversation (upsert keyed on `id`).
///
/// Note: the ON CONFLICT clause deliberately does NOT rewrite the
/// identity-ish columns (kind, peer_key, peer_username, group_name,
/// created_at_ms) — those are fixed at first insert.
pub fn save_conversation(&self, conv: &Conversation) -> anyhow::Result<()> {
    // Flatten the kind enum into the table's nullable columns.
    let (kind_str, peer_key, peer_username, group_name) = match &conv.kind {
        ConversationKind::Dm {
            peer_key,
            peer_username,
        } => ("dm", Some(peer_key.as_slice()), peer_username.as_deref(), None),
        ConversationKind::Group { name } => ("group", None, None, Some(name.as_str())),
    };
    let member_keys_blob = bincode::serialize(&conv.member_keys)
        .context("serialize member_keys")?;
    self.conn.execute(
        "INSERT INTO conversations
(id, kind, display_name, peer_key, peer_username, group_name,
mls_group_blob, keystore_blob, member_keys, unread_count,
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
ON CONFLICT(id) DO UPDATE SET
display_name = excluded.display_name,
mls_group_blob = excluded.mls_group_blob,
keystore_blob = excluded.keystore_blob,
member_keys = excluded.member_keys,
unread_count = excluded.unread_count,
last_activity_ms = excluded.last_activity_ms,
is_hybrid = excluded.is_hybrid,
last_seen_seq = excluded.last_seen_seq",
        params![
            conv.id.0.as_slice(),
            kind_str,
            conv.display_name,
            peer_key,
            peer_username,
            group_name,
            conv.mls_group_blob,
            conv.keystore_blob,
            member_keys_blob,
            conv.unread_count,
            conv.last_activity_ms,
            conv.created_at_ms,
            conv.is_hybrid as i32,
            conv.last_seen_seq as i64,
        ],
    )?;
    Ok(())
}
/// Load a single conversation by id; `Ok(None)` when no row exists.
pub fn load_conversation(&self, id: &ConversationId) -> anyhow::Result<Option<Conversation>> {
    self.conn
        .query_row(
            "SELECT kind, display_name, peer_key, peer_username, group_name,
mls_group_blob, keystore_blob, member_keys, unread_count,
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
FROM conversations WHERE id = ?1",
            params![id.0.as_slice()],
            |row| {
                let kind_str: String = row.get(0)?;
                let display_name: String = row.get(1)?;
                let peer_key: Option<Vec<u8>> = row.get(2)?;
                let peer_username: Option<String> = row.get(3)?;
                let group_name: Option<String> = row.get(4)?;
                let mls_group_blob: Option<Vec<u8>> = row.get(5)?;
                let keystore_blob: Option<Vec<u8>> = row.get(6)?;
                let member_keys_blob: Option<Vec<u8>> = row.get(7)?;
                let unread_count: u32 = row.get(8)?;
                let last_activity_ms: u64 = row.get(9)?;
                let created_at_ms: u64 = row.get(10)?;
                let is_hybrid_int: i32 = row.get(11)?;
                let last_seen_seq: i64 = row.get(12)?;
                // Any kind other than "dm" is decoded as a group row.
                let kind = if kind_str == "dm" {
                    ConversationKind::Dm {
                        peer_key: peer_key.unwrap_or_default(),
                        peer_username,
                    }
                } else {
                    ConversationKind::Group {
                        name: group_name.unwrap_or_default(),
                    }
                };
                // An undecodable member list degrades to empty instead of
                // failing the whole load.
                let member_keys: Vec<Vec<u8>> = member_keys_blob
                    .and_then(|b| bincode::deserialize(&b).ok())
                    .unwrap_or_default();
                Ok(Conversation {
                    id: id.clone(),
                    kind,
                    display_name,
                    mls_group_blob,
                    keystore_blob,
                    member_keys,
                    unread_count,
                    last_activity_ms,
                    created_at_ms,
                    is_hybrid: is_hybrid_int != 0,
                    last_seen_seq: last_seen_seq as u64,
                })
            },
        )
        .optional()
        .context("load conversation")
}
/// All conversations, most recently active first.
pub fn list_conversations(&self) -> anyhow::Result<Vec<Conversation>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, kind, display_name, peer_key, peer_username, group_name,
mls_group_blob, keystore_blob, member_keys, unread_count,
last_activity_ms, created_at_ms, is_hybrid, last_seen_seq
FROM conversations ORDER BY last_activity_ms DESC",
    )?;
    let mapped = stmt.query_map([], |row| {
        let id_blob: Vec<u8> = row.get(0)?;
        let kind_str: String = row.get(1)?;
        let peer_key: Option<Vec<u8>> = row.get(3)?;
        let peer_username: Option<String> = row.get(4)?;
        let group_name: Option<String> = row.get(5)?;
        let member_keys_blob: Option<Vec<u8>> = row.get(8)?;
        let is_hybrid_int: i32 = row.get(12)?;
        let last_seen_seq: i64 = row.get(13)?;
        // Any kind other than "dm" is decoded as a group row.
        let kind = match kind_str.as_str() {
            "dm" => ConversationKind::Dm {
                peer_key: peer_key.unwrap_or_default(),
                peer_username,
            },
            _ => ConversationKind::Group {
                name: group_name.unwrap_or_default(),
            },
        };
        Ok(Conversation {
            // A malformed id degrades to the all-zero id rather than failing the listing.
            id: ConversationId::from_slice(&id_blob).unwrap_or(ConversationId([0; 16])),
            kind,
            display_name: row.get(2)?,
            mls_group_blob: row.get(6)?,
            keystore_blob: row.get(7)?,
            // Undecodable member lists degrade to empty.
            member_keys: member_keys_blob
                .and_then(|b| bincode::deserialize(&b).ok())
                .unwrap_or_default(),
            unread_count: row.get(9)?,
            last_activity_ms: row.get(10)?,
            created_at_ms: row.get(11)?,
            is_hybrid: is_hybrid_int != 0,
            last_seen_seq: last_seen_seq as u64,
        })
    })?;
    mapped.collect::<Result<Vec<_>, _>>().map_err(Into::into)
}
/// Find a DM conversation by the peer's identity key.
pub fn find_dm_by_peer(&self, peer_key: &[u8]) -> anyhow::Result<Option<Conversation>> {
    let id_blob: Option<Vec<u8>> = self
        .conn
        .query_row(
            "SELECT id FROM conversations WHERE kind = 'dm' AND peer_key = ?1",
            params![peer_key],
            |row| row.get(0),
        )
        .optional()?;
    let Some(blob) = id_blob else {
        return Ok(None);
    };
    let id = ConversationId::from_slice(&blob).context("invalid conversation id in db")?;
    self.load_conversation(&id)
}
/// Find a group conversation by name.
pub fn find_group_by_name(&self, name: &str) -> anyhow::Result<Option<Conversation>> {
    let id_blob: Option<Vec<u8>> = self
        .conn
        .query_row(
            "SELECT id FROM conversations WHERE kind = 'group' AND group_name = ?1",
            params![name],
            |row| row.get(0),
        )
        .optional()?;
    let Some(blob) = id_blob else {
        return Ok(None);
    };
    let id = ConversationId::from_slice(&blob).context("invalid conversation id in db")?;
    self.load_conversation(&id)
}
/// Bump a conversation's unread counter by one.
pub fn increment_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
    let sql = "UPDATE conversations SET unread_count = unread_count + 1 WHERE id = ?1";
    self.conn.execute(sql, params![id.0.as_slice()])?;
    Ok(())
}
/// Clear a conversation's unread counter (e.g. when the user opens it).
pub fn reset_unread(&self, id: &ConversationId) -> anyhow::Result<()> {
    let sql = "UPDATE conversations SET unread_count = 0 WHERE id = ?1";
    self.conn.execute(sql, params![id.0.as_slice()])?;
    Ok(())
}
/// Raise `last_activity_ms` to `ts_ms`; the `<` guard in the WHERE clause
/// keeps the timestamp monotonic, so stale updates are no-ops.
pub fn update_activity(&self, id: &ConversationId, ts_ms: u64) -> anyhow::Result<()> {
    let sql =
        "UPDATE conversations SET last_activity_ms = ?2 WHERE id = ?1 AND last_activity_ms < ?2";
    self.conn.execute(sql, params![id.0.as_slice(), ts_ms])?;
    Ok(())
}
// ── Message CRUD ─────────────────────────────────────────────────────
/// Append one message row to the history table (no dedup; see
/// `save_message_dedup` for the idempotent variant).
pub fn save_message(&self, msg: &StoredMessage) -> anyhow::Result<()> {
    let sql = "INSERT INTO messages
(conversation_id, message_id, sender_key, sender_name, body,
msg_type, ref_msg_id, timestamp_ms, is_outgoing)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)";
    self.conn.execute(
        sql,
        params![
            msg.conversation_id.0.as_slice(),
            msg.message_id.as_ref().map(|id| id.as_slice()),
            msg.sender_key,
            msg.sender_name,
            msg.body,
            msg.msg_type,
            msg.ref_msg_id.as_ref().map(|id| id.as_slice()),
            msg.timestamp_ms,
            msg.is_outgoing as i32,
        ],
    )?;
    Ok(())
}
/// Load up to `limit` of the most recent messages in a conversation,
/// returned oldest-first.
pub fn load_recent_messages(
    &self,
    conv_id: &ConversationId,
    limit: usize,
) -> anyhow::Result<Vec<StoredMessage>> {
    // Copy a 16-byte slice into a fixed array; `None` on length mismatch.
    fn to_16(v: &[u8]) -> Option<[u8; 16]> {
        <[u8; 16]>::try_from(v).ok()
    }
    let mut stmt = self.conn.prepare(
        "SELECT message_id, sender_key, sender_name, body, msg_type,
ref_msg_id, timestamp_ms, is_outgoing
FROM messages
WHERE conversation_id = ?1
ORDER BY timestamp_ms DESC
LIMIT ?2",
    )?;
    let mapped = stmt.query_map(params![conv_id.0.as_slice(), limit as u32], |row| {
        let message_id: Option<Vec<u8>> = row.get(0)?;
        let ref_msg_id: Option<Vec<u8>> = row.get(5)?;
        let is_outgoing: i32 = row.get(7)?;
        Ok(StoredMessage {
            conversation_id: conv_id.clone(),
            message_id: message_id.as_deref().and_then(to_16),
            sender_key: row.get(1)?,
            sender_name: row.get(2)?,
            body: row.get(3)?,
            msg_type: row.get(4)?,
            ref_msg_id: ref_msg_id.as_deref().and_then(to_16),
            timestamp_ms: row.get(6)?,
            is_outgoing: is_outgoing != 0,
        })
    })?;
    let mut out = mapped.collect::<Result<Vec<_>, _>>()?;
    // Query returns newest-first; present oldest-first.
    out.reverse();
    Ok(out)
}
/// Save a message, deduplicating by message_id within the same conversation.
/// Returns `true` if the message was saved (new), `false` if it was a duplicate.
pub fn save_message_dedup(&self, msg: &StoredMessage) -> anyhow::Result<bool> {
    // Messages without a wire id can never be detected as duplicates.
    let duplicate = match &msg.message_id {
        Some(mid) => self.conn.query_row(
            "SELECT EXISTS(SELECT 1 FROM messages WHERE message_id = ?1 AND conversation_id = ?2)",
            params![mid.as_slice(), msg.conversation_id.0.as_slice()],
            |row| row.get(0),
        )?,
        None => false,
    };
    if duplicate {
        Ok(false)
    } else {
        self.save_message(msg)?;
        Ok(true)
    }
}
// ── Sequence tracking ──────────────────────────────────────────────
/// Raise `last_seen_seq` to `seq`; the `<` guard makes the watermark
/// monotonic, so out-of-order acks never move it backwards.
pub fn update_last_seen_seq(&self, id: &ConversationId, seq: u64) -> anyhow::Result<()> {
    let sql = "UPDATE conversations SET last_seen_seq = ?2 WHERE id = ?1 AND last_seen_seq < ?2";
    self.conn.execute(sql, params![id.0.as_slice(), seq as i64])?;
    Ok(())
}
// ── Outbox (offline queue) ────────────────────────────────────────
/// Queue an encrypted payload for later delivery to `recipient_key`,
/// stamped with the current wall-clock time.
pub fn enqueue_outbox(
    &self,
    conv_id: &ConversationId,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<()> {
    let sql = "INSERT INTO outbox (conversation_id, recipient_key, payload, created_at_ms)
VALUES (?1, ?2, ?3, ?4)";
    self.conn.execute(
        sql,
        params![conv_id.0.as_slice(), recipient_key, payload, now_ms() as i64],
    )?;
    Ok(())
}
/// All outbox entries still awaiting delivery, oldest first.
pub fn load_pending_outbox(&self) -> anyhow::Result<Vec<OutboxEntry>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, conversation_id, recipient_key, payload, retry_count
FROM outbox WHERE status = 'pending' ORDER BY created_at_ms",
    )?;
    let mapped = stmt.query_map([], |row| {
        let conv_blob: Vec<u8> = row.get(1)?;
        Ok(OutboxEntry {
            id: row.get(0)?,
            // A malformed id degrades to the all-zero id rather than failing the load.
            conversation_id: ConversationId::from_slice(&conv_blob)
                .unwrap_or(ConversationId([0; 16])),
            recipient_key: row.get(2)?,
            payload: row.get(3)?,
            retry_count: row.get(4)?,
        })
    })?;
    mapped.collect::<Result<Vec<_>, _>>().map_err(Into::into)
}
/// Mark a delivered outbox entry as sent.
pub fn mark_outbox_sent(&self, id: i64) -> anyhow::Result<()> {
    let sql = "UPDATE outbox SET status = 'sent' WHERE id = ?1";
    self.conn.execute(sql, params![id])?;
    Ok(())
}
/// Record a failed delivery attempt with its new retry count. Once the
/// count exceeds the retry limit the entry is parked as 'failed';
/// otherwise it stays 'pending' for another attempt.
pub fn mark_outbox_failed(&self, id: i64, retry_count: u32) -> anyhow::Result<()> {
    const MAX_OUTBOX_RETRIES: u32 = 5;
    let new_status = if retry_count > MAX_OUTBOX_RETRIES {
        "failed"
    } else {
        "pending"
    };
    self.conn.execute(
        "UPDATE outbox SET retry_count = ?2, status = ?3 WHERE id = ?1",
        params![id, retry_count, new_status],
    )?;
    Ok(())
}
}
/// An entry in the offline outbox queue.
#[derive(Clone, Debug)]
pub struct OutboxEntry {
    // SQLite rowid of the outbox row (AUTOINCREMENT primary key).
    pub id: i64,
    // Conversation the payload belongs to.
    pub conversation_id: ConversationId,
    // Identity key of the intended recipient.
    pub recipient_key: Vec<u8>,
    // Opaque (already-encrypted) payload to deliver.
    pub payload: Vec<u8>,
    // Number of delivery attempts made so far.
    pub retry_count: u32,
}
/// Current wall-clock time in milliseconds since the Unix epoch
/// (0 if the system clock reads earlier than the epoch).
pub fn now_ms() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}