Skip to content
Closed
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions walletkit-core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ sha2 = { version = "0.10", optional = true }
strum = { version = "0.27", features = ["derive"] }
subtle = "2"
thiserror = "2"
parking_lot = "0.12"
tokio = { version = "1", features = ["sync"] }
tracing = "0.1"
tracing-log = "0.2"
Expand Down
5 changes: 2 additions & 3 deletions walletkit-core/src/authenticator/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -566,10 +566,9 @@ mod tests {
let provider = InMemoryStorageProvider::new(&root);
let store = CredentialStore::from_provider(&provider).expect("store");
store.init(42, 100).expect("init storage");
cache_embedded_groth16_material(store.storage_paths().expect("paths"))
.expect("cache material");
cache_embedded_groth16_material(store.storage_paths()).expect("cache material");

let paths = store.storage_paths().expect("paths");
let paths = store.storage_paths();
Authenticator::init(&seed, &config, paths, Arc::new(store))
.await
.unwrap();
Expand Down
182 changes: 117 additions & 65 deletions walletkit-core/src/storage/credential_storage.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
//! Storage facade implementing the credential storage API.

use std::sync::{Arc, Mutex};
use std::sync::Arc;

use parking_lot::Mutex;

use world_id_core::FieldElement as CoreFieldElement;

Expand Down Expand Up @@ -148,12 +150,8 @@ impl CredentialStore {
}

/// Returns the storage paths used by this handle.
///
/// # Errors
///
/// Returns an error if the storage mutex is poisoned.
pub fn storage_paths(&self) -> StorageResult<Arc<StoragePaths>> {
self.lock_inner().map(|inner| Arc::new(inner.paths.clone()))
pub fn storage_paths(&self) -> Arc<StoragePaths> {
Arc::new(self.lock_inner().paths.clone())
}

/// Initializes storage and validates the account leaf index.
Expand All @@ -162,7 +160,7 @@ impl CredentialStore {
///
/// Returns an error if initialization fails or the leaf index mismatches.
pub fn init(&self, leaf_index: u64, now: u64) -> StorageResult<()> {
let mut inner = self.lock_inner()?;
let mut inner = self.lock_inner();
inner.init(leaf_index, now)
}

Expand All @@ -179,7 +177,7 @@ impl CredentialStore {
issuer_schema_id: Option<u64>,
now: u64,
) -> StorageResult<Vec<CredentialRecord>> {
self.lock_inner()?.list_credentials(issuer_schema_id, now)
self.lock_inner().list_credentials(issuer_schema_id, now)
}

/// Deletes a credential by ID.
Expand All @@ -189,7 +187,7 @@ impl CredentialStore {
/// Returns an error if the delete operation fails or the credential ID does
/// not exist.
pub fn delete_credential(&self, credential_id: u64) -> StorageResult<()> {
self.lock_inner()?.delete_credential(credential_id)?;
self.lock_inner().delete_credential(credential_id)?;
self.notify_vault_changed();
Ok(())
}
Expand All @@ -207,7 +205,7 @@ impl CredentialStore {
associated_data: Option<Vec<u8>>,
now: u64,
) -> StorageResult<u64> {
let id = self.lock_inner()?.store_credential(
let id = self.lock_inner().store_credential(
credential,
blinding_factor,
expires_at,
Expand All @@ -224,7 +222,7 @@ impl CredentialStore {
///
/// Returns an error if the cache lookup fails.
pub fn merkle_cache_get(&self, valid_until: u64) -> StorageResult<Option<Vec<u8>>> {
self.lock_inner()?.merkle_cache_get(valid_until)
self.lock_inner().merkle_cache_get(valid_until)
}

/// Inserts a cached Merkle proof with a TTL.
Expand All @@ -238,7 +236,7 @@ impl CredentialStore {
now: u64,
ttl_seconds: u64,
) -> StorageResult<()> {
self.lock_inner()?
self.lock_inner()
.merkle_cache_put(proof_bytes, now, ttl_seconds)
}

Expand All @@ -260,7 +258,7 @@ impl CredentialStore {
reason = "non-owned strings cannot be lifted via UniFFI"
)]
pub fn import_vault_from_backup(&self, backup_path: String) -> StorageResult<()> {
self.lock_inner()?.import_vault_from_backup(&backup_path)
self.lock_inner().import_vault_from_backup(&backup_path)
}

/// **Development only.** Permanently deletes all stored credentials and their
Expand All @@ -280,7 +278,7 @@ impl CredentialStore {
///
/// Returns an error if the delete operation fails.
pub fn danger_delete_all_credentials(&self) -> StorageResult<u64> {
let mut inner = self.lock_inner()?;
let mut inner = self.lock_inner();
let count = inner.danger_delete_all_credentials()?;
drop(inner);
if count > 0 {
Expand All @@ -291,48 +289,49 @@ impl CredentialStore {

/// Registers a backup manager that will be notified after vault mutations
/// ([`store_credential`](Self::store_credential),
/// [`delete_credential`](Self::delete_credential),
/// [`danger_delete_all_credentials`](Self::danger_delete_all_credentials)).
/// Backup failures are logged but do not affect the mutation result.
///
pub fn set_backup_manager(&self, manager: Arc<dyn WalletKitBackupManager>) {
    // Replace the registered backup manager. `parking_lot::Mutex` cannot
    // be poisoned, so this assignment is infallible — no `# Errors`
    // section applies to this method.
    *self.backup.lock() = manager;
}

/// Triggers a one-off backup sync without mutating the vault.
///
/// Returns an error if the backup mutex is poisoned.
pub fn set_backup_manager(
&self,
manager: Arc<dyn WalletKitBackupManager>,
) -> StorageResult<()> {
*self.backup.lock().map_err(|_| {
StorageError::Lock("backup config mutex poisoned".to_string())
})? = manager;
Ok(())
/// Call this at initial backup creation to catch up any credentials
/// that were stored before the backup was set up. The registered
/// [`WalletKitBackupManager`] receives the same `on_vault_changed`
/// callback as after a normal vault mutation.
///
/// Errors from the vault export or backup callback are logged but not
/// propagated — the caller can schedule a retry via another
/// `sync_backup` call if needed.
pub fn sync_backup(&self) {
    // Reuses the post-mutation notification path: exports a vault
    // snapshot and invokes the backup manager's `on_vault_changed`
    // callback. Any export/callback error is logged there, not returned.
    self.notify_vault_changed();
}
}

/// Implementation not exposed to foreign bindings
impl CredentialStore {
fn lock_inner(
&self,
) -> StorageResult<std::sync::MutexGuard<'_, CredentialStoreInner>> {
self.inner
.lock()
.map_err(|_| StorageError::Lock("storage mutex poisoned".to_string()))
/// Acquires the inner storage mutex.
///
/// `parking_lot::Mutex` has no lock poisoning, so acquisition cannot
/// fail and no `StorageResult` is needed (unlike the old
/// `std::sync::Mutex` version this replaced).
fn lock_inner(&self) -> parking_lot::MutexGuard<'_, CredentialStoreInner> {
    self.inner.lock()
}

/// Best-effort export + notification to the backup manager, if one is set.
/// Exports a plaintext vault snapshot and notifies the registered backup
/// manager via [`on_vault_changed`](WalletKitBackupManager::on_vault_changed).
///
/// Called after vault mutations and by [`sync_backup`](Self::sync_backup).
/// Returns `Ok(())` if no backup manager is configured (noop).
///
/// Called after any vault mutation (store, delete) so the host app can
/// sync the updated vault to its backup. Failures are logged but never
/// propagated — the vault mutation has already succeeded and callers
/// should not see an error from a backup side-effect.
/// Backup callback errors are logged but **not** propagated, because the
/// vault mutation has already been committed by the time this runs.
/// Propagating would cause callers to see a failed result for a mutation
/// that actually succeeded, leading to incorrect retries.
fn notify_vault_changed(&self) {
// Hold the backup lock for the entire export+callback path. This
// serializes concurrent notifications so backups are delivered in
// mutation order. Recover the guard on poison — the manager is
// still valid after a prior panic.
let guard = self
.backup
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
// mutation order.
let guard = self.backup.lock();

let dest_dir = guard.dest_dir();
if dest_dir.is_empty() {
Expand All @@ -342,13 +341,10 @@ impl CredentialStore {
// Export a plaintext snapshot of the vault. The file is sensitive
// (unencrypted), so we wrap it in a guard that deletes it on drop —
// no matter how we exit (normal return, early return, or panic).
let vault_path = match self
.lock_inner()
.and_then(|inner| inner.export_vault_for_backup(&dest_dir))
{
let vault_path = match self.lock_inner().export_vault_for_backup(&dest_dir) {
Ok(path) => path,
Err(e) => {
tracing::error!("Failed to export vault for backup: {e}");
tracing::error!("Vault export for backup failed: {e}");
return;
}
};
Expand All @@ -372,7 +368,9 @@ impl CredentialStore {
// the vault to Bedrock. The host must finish with the file during
// this synchronous call — the guard deletes it on return.
if let Err(e) = guard.on_vault_changed(vault_path) {
tracing::error!("Backup manager on_vault_changed failed: {e}");
tracing::error!(
"Backup callback failed (vault mutation already committed): {e}"
);
}
}

Expand All @@ -388,7 +386,7 @@ impl CredentialStore {
issuer_schema_id: u64,
now: u64,
) -> StorageResult<Option<(Credential, FieldElement)>> {
self.lock_inner()?.get_credential(issuer_schema_id, now)
self.lock_inner().get_credential(issuer_schema_id, now)
}

/// Checks whether a replay guard entry exists for the given nullifier.
Expand All @@ -404,7 +402,7 @@ impl CredentialStore {
nullifier: CoreFieldElement,
now: u64,
) -> StorageResult<bool> {
self.lock_inner()?.is_nullifier_replay(nullifier, now)
self.lock_inner().is_nullifier_replay(nullifier, now)
}

/// After a proof has been successfully generated, creates a replay guard entry
Expand All @@ -418,7 +416,7 @@ impl CredentialStore {
nullifier: CoreFieldElement,
now: u64,
) -> StorageResult<()> {
self.lock_inner()?.replay_guard_set(nullifier, now)
self.lock_inner().replay_guard_set(nullifier, now)
}
}

Expand Down Expand Up @@ -645,12 +643,8 @@ impl CredentialStore {
}

/// Returns the storage paths used by this handle.
///
/// # Errors
///
/// Returns an error if the storage mutex is poisoned.
pub fn paths(&self) -> StorageResult<StoragePaths> {
self.lock_inner().map(|inner| inner.paths.clone())
pub fn paths(&self) -> StoragePaths {
self.lock_inner().paths.clone()
}
}

Expand Down Expand Up @@ -1329,15 +1323,21 @@ mod tests {
}

fn call_count(&self) -> usize {
self.calls.lock().unwrap().len()
self.calls.lock().len()
}

fn last_path(&self) -> Option<String> {
self.calls.lock().unwrap().last().map(|(p, _)| p.clone())
self.calls
.lock()
.last()
.map(|(p, _): &(String, bool)| p.clone())
}

fn last_file_existed(&self) -> bool {
self.calls.lock().unwrap().last().is_some_and(|(_, e)| *e)
self.calls
.lock()
.last()
.is_some_and(|(_, e): &(String, bool)| *e)
}
}

Expand All @@ -1348,7 +1348,7 @@ mod tests {

fn on_vault_changed(&self, vault_file_path: String) -> StorageResult<()> {
let existed = std::path::Path::new(&vault_file_path).exists();
self.calls.lock().unwrap().push((vault_file_path, existed));
self.calls.lock().push((vault_file_path, existed));
Ok(())
}
}
Expand All @@ -1366,9 +1366,7 @@ mod tests {
std::fs::create_dir_all(&export_dir).expect("create export dir");

let manager = MockBackupManager::new(export_dir.to_string_lossy().to_string());
store
.set_backup_manager(manager.clone())
.expect("set backup manager");
store.set_backup_manager(manager.clone());

(store, manager, root, export_dir)
}
Expand Down Expand Up @@ -1508,4 +1506,58 @@ mod tests {
cleanup_test_storage(&root);
cleanup_test_storage(&export_dir);
}

/// `sync_backup` must deliver the same `on_vault_changed` notification
/// as a real mutation, while leaving the vault contents untouched.
#[test]
fn test_sync_backup_triggers_notification_without_mutation() {
    use world_id_core::Credential as CoreCredential;

    let (store, manager, root, export_dir) = setup_store_with_backup();

    // Store a credential so the vault is non-empty. Storing itself fires
    // the first backup notification (call_count == 1).
    let cred: Credential = CoreCredential::new()
        .issuer_schema_id(100)
        .genesis_issued_at(1000)
        .into();
    store
        .store_credential(&cred, &FieldElement::from(7u64), 9999, None, 1000)
        .expect("store credential");
    assert_eq!(manager.call_count(), 1);

    // sync_backup triggers a notification without mutating the vault.
    store.sync_backup();
    assert_eq!(manager.call_count(), 2);
    // The mock records whether the exported file existed at callback
    // time; the export guard deletes it when the callback returns.
    assert!(
        manager.last_file_existed(),
        "exported vault file should exist during the callback"
    );
    let path = manager.last_path().unwrap();
    assert!(
        !std::path::Path::new(&path).exists(),
        "exported vault file should be cleaned up after the callback"
    );

    // Vault contents are unchanged — still the same credential.
    let credentials = store.list_credentials(None, 1000).expect("list");
    assert_eq!(credentials.len(), 1);
    assert_eq!(credentials[0].issuer_schema_id, 100);

    cleanup_test_storage(&root);
    cleanup_test_storage(&export_dir);
}

/// A manual sync on a freshly initialized (empty) vault must still
/// export a snapshot and invoke the backup callback exactly once.
#[test]
fn test_sync_backup_on_empty_vault() {
    let (store, backup, storage_root, backup_dir) = setup_store_with_backup();

    store.sync_backup();

    assert_eq!(1, backup.call_count());
    let existed_during_callback = backup.last_file_existed();
    assert!(
        existed_during_callback,
        "exported vault file should exist during the callback"
    );

    cleanup_test_storage(&storage_root);
    cleanup_test_storage(&backup_dir);
}
}
11 changes: 7 additions & 4 deletions walletkit-core/src/storage/traits.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,9 +121,12 @@ pub trait WalletKitBackupManager: Send + Sync {
///
/// # Errors
///
/// Returning `Err` is treated as best-effort — the error is logged but
/// does not affect the vault mutation that triggered this call. Returning
/// `Result` (rather than `()`) ensures that host-side exceptions are
/// translated into a Rust `Err` by `UniFFI` instead of panicking.
/// Errors returned here are **logged but not propagated** to the caller
/// of the vault mutation. The vault mutation has already been committed
/// by the time this callback runs, so a failure here only means the
/// backup sync failed — the credential data is safe. The host app can
/// schedule a retry via
/// [`sync_backup`](crate::storage::CredentialStore::sync_backup) if
/// needed.
fn on_vault_changed(&self, vault_file_path: String) -> StorageResult<()>;
}
Loading
Loading