Compare commits


1 commit

df701f21b9 feat(secrets-mcp): auto-migrate and redirect shared keys on deletion (v0.3.7)
All checks were successful
Secrets MCP — Build & Release / 检查 / 构建 / 发版 (push) Successful in 4m4s
Secrets MCP — Build & Release / 部署 secrets-mcp (push) Successful in 6s
When a key entry that is still referenced via metadata.key_ref is deleted, the ciphertext is copied to the first referrer within the same transaction,
and the remaining referrers' key_ref is redirected to the new owner; env_map no longer restricts key_ref resolution to type=key.
The Web delete API now returns migrated; the Dashboard shows a migration notice after a successful delete.

Bump secrets-mcp to 0.3.7; add unit tests for the deletion migration (require SECRETS_DATABASE_URL).

Made-with: Cursor
2026-04-03 09:27:20 +08:00
8 changed files with 487 additions and 29 deletions
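For orientation (not part of the diff itself): a minimal sketch of the response shape the Web delete API returns after this change when a shared key with two referrers is deleted. The entry names here are hypothetical examples, and the label format follows `ref_label()` (`folder/name (type)`).

```rust
// Illustrative only: expected response body of DELETE /api/entries/{id}
// after this change. Entry names are hypothetical examples.
use serde_json::json;

fn main() {
    let response = json!({
        "ok": true,
        "migrated": [
            "afolder/srv-a (server)", // first referrer: becomes the new owner of the ciphertext
            "bfolder/srv-b (server)"  // remaining referrer: key_ref redirected to afolder/srv-a
        ]
    });
    println!("{}", serde_json::to_string_pretty(&response).unwrap());
}
```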


@@ -118,7 +118,7 @@ oauth_accounts (
### PEM sharing (`key_ref`)
-Store the shared PEM as a **`type=key`** entry; other records point to that key's `name` via `metadata.key_ref` (the `folder/name` form is supported for disambiguation). After updating the key record, referrers pick up the new key through the service-layer resolve-and-merge logic (implemented in `secrets_core::service::env_map`).
+Storing the shared PEM as a **`type=key`** entry is recommended; other records point to the target entry's `name` via `metadata.key_ref` (the `folder/name` form is supported for disambiguation). When a referenced key is deleted, the service automatically migrates to a single copy plus redirect (the ciphertext is copied to the first referrer, and the remaining referrers are repointed to the new owner); resolution logic: `secrets_core::service::env_map`.
## Coding conventions
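A minimal sketch of the sharing pattern described above, with hypothetical entry names: the PEM lives in a single `type=key` entry, and each consumer stores only a pointer in `metadata.key_ref`.

```rust
// Hypothetical example of the key_ref pattern: one type=key entry owns the
// encrypted PEM; referencing entries carry only a pointer in their metadata.
use serde_json::json;

fn main() {
    // The shared key entry (folder "kfolder", name "shared-key", type "key")
    // holds the encrypted "pem" field. A referencing server entry stores just
    // the pointer; the "folder/name" form disambiguates duplicate names:
    let server_metadata = json!({ "key_ref": "kfolder/shared-key" });
    println!("{server_metadata}");
}
```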

Cargo.lock generated

@@ -1968,7 +1968,7 @@ dependencies = [
[[package]]
name = "secrets-mcp"
-version = "0.3.6"
+version = "0.3.7"
dependencies = [
"anyhow",
"askama",


@@ -57,6 +57,7 @@ SECRETS_ENV=production
- **`secrets_search`**: discover entries (filter by query / folder / type / name); does not require the encryption header.
- **`secrets_get` / `secrets_update` / `secrets_delete` (by name) / `secrets_history` / `secrets_rollback`**: if only `name` is given and it is globally unique, the entry is matched directly; if several entries share the name, a disambiguation error is returned and **`folder`** must be supplied as an extra parameter.
- **`secrets_delete`**: with `dry_run=true` the same disambiguation rules apply as for a real delete: a unique match previews that one entry; multiple matches raise an error and require `folder`.
- **Auto-migrating deletion of shared keys**: when deleting a key entry that is still referenced via `metadata.key_ref`, the system migrates automatically: the ciphertext is copied to the first referrer, the remaining referrers' `key_ref` is redirected to the new owner, and the deletion then proceeds.
## Encryption architecture (hybrid E2EE)
@@ -167,7 +168,8 @@ flowchart LR
### PEM sharing (`key_ref`)
-The same PEM can be referenced by multiple `server`-style records: store the PEM as a **`type=key`** entry and write that key entry's `name` into the other entries' `metadata.key_ref` (the `folder/name` form is supported for disambiguation); on rotation, only the key record needs updating.
+The same PEM can be referenced by multiple `server`-style records: storing the PEM as a **`type=key`** entry is recommended; write the target entry's `name` into the other entries' `metadata.key_ref` (the `folder/name` form is supported for disambiguation); on rotation, only that target record needs updating.
When a shared key is deleted, the system migrates the references automatically: the ciphertext is copied to the first referrer (single copy), the remaining referrers' `key_ref` is redirected to that new owner, and the original key record is then deleted.
## Audit log
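As a rough sketch of the `folder/name` disambiguation rule mentioned above (the helper name is hypothetical; the real resolution lives in `secrets_core::service::env_map`):

```rust
// Hypothetical helper mirroring the key_ref resolution rule: a value of
// "folder/name" pins the folder, a bare "name" is looked up globally.
fn split_key_ref(key_ref: &str) -> (Option<&str>, &str) {
    match key_ref.split_once('/') {
        Some((folder, name)) => (Some(folder), name),
        None => (None, key_ref),
    }
}

fn main() {
    assert_eq!(split_key_ref("kfolder/shared-key"), (Some("kfolder"), "shared-key"));
    assert_eq!(split_key_ref("shared-key"), (None, "shared-key"));
}
```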


@@ -17,6 +17,7 @@ pub struct DeletedEntry {
#[derive(Debug, serde::Serialize)]
pub struct DeleteResult {
pub deleted: Vec<DeletedEntry>,
pub migrated: Vec<String>,
pub dry_run: bool,
}
@@ -31,6 +32,174 @@ pub struct DeleteParams<'a> {
pub user_id: Option<Uuid>,
}
#[derive(Debug, sqlx::FromRow)]
struct KeyReferrer {
id: Uuid,
folder: String,
#[sqlx(rename = "type")]
entry_type: String,
name: String,
}
fn ref_label(r: &KeyReferrer) -> String {
format!("{}/{} ({})", r.folder, r.name, r.entry_type)
}
fn ref_path(r: &KeyReferrer) -> String {
format!("{}/{}", r.folder, r.name)
}
async fn fetch_key_referrers_pool(
pool: &PgPool,
key_entry_id: Uuid,
key_folder: &str,
key_name: &str,
user_id: Option<Uuid>,
) -> Result<Vec<KeyReferrer>> {
let qualified = format!("{}/{}", key_folder, key_name);
let refs: Vec<KeyReferrer> = if let Some(uid) = user_id {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id = $1 AND id <> $2 \
AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
ORDER BY folder, type, name",
)
.bind(uid)
.bind(key_entry_id)
.bind(key_name)
.bind(&qualified)
.fetch_all(pool)
.await?
} else {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id IS NULL AND id <> $1 \
AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
ORDER BY folder, type, name",
)
.bind(key_entry_id)
.bind(key_name)
.bind(&qualified)
.fetch_all(pool)
.await?
};
Ok(refs)
}
async fn migrate_key_refs_if_needed(
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
key_row: &EntryRow,
key_name: &str,
user_id: Option<Uuid>,
dry_run: bool,
) -> Result<Vec<String>> {
let qualified = format!("{}/{}", key_row.folder, key_name);
let refs: Vec<KeyReferrer> = if let Some(uid) = user_id {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id = $1 AND id <> $2 \
AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
ORDER BY folder, type, name",
)
.bind(uid)
.bind(key_row.id)
.bind(key_name)
.bind(&qualified)
.fetch_all(&mut **tx)
.await?
} else {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id IS NULL AND id <> $1 \
AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
ORDER BY folder, type, name",
)
.bind(key_row.id)
.bind(key_name)
.bind(&qualified)
.fetch_all(&mut **tx)
.await?
};
if refs.is_empty() {
return Ok(vec![]);
}
if dry_run {
return Ok(refs.iter().map(ref_label).collect());
}
let owner = &refs[0];
let owner_path = ref_path(owner);
let key_fields: Vec<SecretFieldRow> =
sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1")
.bind(key_row.id)
.fetch_all(&mut **tx)
.await?;
for f in &key_fields {
sqlx::query(
"INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3) \
ON CONFLICT (entry_id, field_name) DO NOTHING",
)
.bind(owner.id)
.bind(&f.field_name)
.bind(&f.encrypted)
.execute(&mut **tx)
.await?;
}
sqlx::query(
"UPDATE entries SET metadata = metadata - 'key_ref', \
version = version + 1, updated_at = NOW() WHERE id = $1",
)
.bind(owner.id)
.execute(&mut **tx)
.await?;
crate::audit::log_tx(
tx,
user_id,
"key_migrate",
&owner.folder,
&owner.entry_type,
&owner.name,
json!({
"from_key": format!("{}/{}", key_row.folder, key_name),
"role": "new_owner",
"redirect_target": owner_path,
}),
)
.await;
for r in refs.iter().skip(1) {
sqlx::query(
"UPDATE entries SET metadata = jsonb_set(metadata, '{key_ref}', to_jsonb($2::text), true), \
version = version + 1, updated_at = NOW() WHERE id = $1",
)
.bind(r.id)
.bind(&owner_path)
.execute(&mut **tx)
.await?;
crate::audit::log_tx(
tx,
user_id,
"key_migrate",
&r.folder,
&r.entry_type,
&r.name,
json!({
"from_key": format!("{}/{}", key_row.folder, key_name),
"role": "redirected_ref",
"redirect_to": owner_path,
}),
)
.await;
}
Ok(refs.iter().map(ref_label).collect())
}
/// Delete a single entry by id (multi-tenant: `user_id` must match). Cascades `secrets` via FK.
pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> {
let mut tx = pool.begin().await?;
@@ -55,6 +224,8 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
let entry_type = row.entry_type.clone();
let name = row.name.clone();
let entry_row: EntryRow = (&row).into();
let migrated =
migrate_key_refs_if_needed(&mut tx, &entry_row, &name, Some(user_id), false).await?;
snapshot_and_delete(
&mut tx,
@@ -83,6 +254,7 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
folder,
entry_type,
}],
migrated,
dry_run: false,
})
}
@@ -122,6 +294,7 @@ async fn delete_one(
// - 2+ matches → disambiguation error (same as non-dry-run)
#[derive(sqlx::FromRow)]
struct DryRunRow {
id: Uuid,
folder: String,
#[sqlx(rename = "type")]
entry_type: String,
@@ -130,7 +303,7 @@ async fn delete_one(
let rows: Vec<DryRunRow> = if let Some(uid) = user_id {
if let Some(f) = folder {
sqlx::query_as(
-"SELECT folder, type FROM entries WHERE user_id = $1 AND folder = $2 AND name = $3",
+"SELECT id, folder, type FROM entries WHERE user_id = $1 AND folder = $2 AND name = $3",
)
.bind(uid)
.bind(f)
@@ -138,40 +311,48 @@ async fn delete_one(
.fetch_all(pool)
.await?
} else {
-sqlx::query_as("SELECT folder, type FROM entries WHERE user_id = $1 AND name = $2")
+sqlx::query_as(
+"SELECT id, folder, type FROM entries WHERE user_id = $1 AND name = $2",
+)
.bind(uid)
.bind(name)
.fetch_all(pool)
.await?
}
} else if let Some(f) = folder {
sqlx::query_as(
-"SELECT folder, type FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2",
+"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2",
)
.bind(f)
.bind(name)
.fetch_all(pool)
.await?
} else {
-sqlx::query_as("SELECT folder, type FROM entries WHERE user_id IS NULL AND name = $1")
+sqlx::query_as(
+"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND name = $1",
+)
.bind(name)
.fetch_all(pool)
.await?
};
return match rows.len() {
0 => Ok(DeleteResult {
deleted: vec![],
migrated: vec![],
dry_run: true,
}),
1 => {
let row = rows.into_iter().next().unwrap();
let refs =
fetch_key_referrers_pool(pool, row.id, &row.folder, name, user_id).await?;
Ok(DeleteResult {
deleted: vec![DeletedEntry {
name: name.to_string(),
folder: row.folder,
entry_type: row.entry_type,
}],
migrated: refs.iter().map(ref_label).collect(),
dry_run: true,
})
}
@@ -236,6 +417,7 @@ async fn delete_one(
tx.rollback().await?;
return Ok(DeleteResult {
deleted: vec![],
migrated: vec![],
dry_run: false,
});
}
@@ -255,6 +437,7 @@ async fn delete_one(
let folder = row.folder.clone();
let entry_type = row.entry_type.clone();
let migrated = migrate_key_refs_if_needed(&mut tx, &row, name, user_id, false).await?;
snapshot_and_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?;
crate::audit::log_tx(
&mut tx,
@@ -274,6 +457,7 @@ async fn delete_one(
folder,
entry_type,
}],
migrated,
dry_run: false,
})
}
@@ -334,6 +518,12 @@ async fn delete_bulk(
let rows = q.fetch_all(pool).await?;
if dry_run {
let mut migrated: Vec<String> = Vec::new();
for row in &rows {
let refs =
fetch_key_referrers_pool(pool, row.id, &row.folder, &row.name, user_id).await?;
migrated.extend(refs.iter().map(ref_label));
}
let deleted = rows
.iter()
.map(|r| DeletedEntry {
@@ -344,11 +534,13 @@ async fn delete_bulk(
.collect();
return Ok(DeleteResult {
deleted,
migrated,
dry_run: true,
});
}
let mut deleted = Vec::with_capacity(rows.len());
let mut migrated: Vec<String> = Vec::new();
for row in &rows {
let entry_row = EntryRow {
id: row.id,
@@ -360,6 +552,8 @@ async fn delete_bulk(
notes: row.notes.clone(),
};
let mut tx = pool.begin().await?;
let m = migrate_key_refs_if_needed(&mut tx, &entry_row, &row.name, user_id, false).await?;
migrated.extend(m);
snapshot_and_delete(
&mut tx,
&row.folder,
@@ -389,6 +583,7 @@ async fn delete_bulk(
Ok(DeleteResult {
deleted,
migrated,
dry_run: false,
})
}
@@ -451,3 +646,264 @@ async fn snapshot_and_delete(
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
async fn maybe_test_pool() -> Option<PgPool> {
let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
eprintln!("skip delete migration tests: SECRETS_DATABASE_URL is not set");
return None;
};
let Ok(pool) = PgPool::connect(&url).await else {
eprintln!("skip delete migration tests: cannot connect to database");
return None;
};
if let Err(e) = crate::db::migrate(&pool).await {
eprintln!("skip delete migration tests: migrate failed: {e}");
return None;
}
Some(pool)
}
async fn insert_entry(
pool: &PgPool,
id: Uuid,
user_id: Uuid,
folder: &str,
entry_type: &str,
name: &str,
metadata: serde_json::Value,
) -> Result<()> {
sqlx::query(
"INSERT INTO entries (id, user_id, folder, type, name, notes, tags, metadata, version) \
VALUES ($1, $2, $3, $4, $5, '', ARRAY[]::text[], $6, 1)",
)
.bind(id)
.bind(user_id)
.bind(folder)
.bind(entry_type)
.bind(name)
.bind(metadata)
.execute(pool)
.await?;
Ok(())
}
#[tokio::test]
async fn delete_shared_key_dry_run_reports_migration_without_writes() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let user_id = Uuid::from_u128(rand::random());
let key_id = Uuid::from_u128(rand::random());
let ref_a = Uuid::from_u128(rand::random());
let ref_b = Uuid::from_u128(rand::random());
insert_entry(
&pool,
key_id,
user_id,
"kfolder",
"key",
"shared-key",
json!({}),
)
.await?;
sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)")
.bind(key_id)
.bind("pem")
.bind(vec![1_u8, 2, 3])
.execute(&pool)
.await?;
insert_entry(
&pool,
ref_a,
user_id,
"afolder",
"server",
"srv-a",
json!({"key_ref":"kfolder/shared-key"}),
)
.await?;
insert_entry(
&pool,
ref_b,
user_id,
"bfolder",
"server",
"srv-b",
json!({"key_ref":"shared-key"}),
)
.await?;
let result = run(
&pool,
DeleteParams {
name: Some("shared-key"),
folder: Some("kfolder"),
entry_type: None,
dry_run: true,
user_id: Some(user_id),
},
)
.await?;
assert!(result.dry_run);
assert_eq!(result.deleted.len(), 1);
assert_eq!(result.migrated.len(), 2);
let key_exists: bool = sqlx::query_scalar(
"SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
)
.bind(key_id)
.bind(user_id)
.fetch_one(&pool)
.await?;
assert!(key_exists);
let ref_a_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_a)
.fetch_one(&pool)
.await?;
let ref_b_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_b)
.fetch_one(&pool)
.await?;
assert_eq!(ref_a_key_ref.as_deref(), Some("kfolder/shared-key"));
assert_eq!(ref_b_key_ref.as_deref(), Some("shared-key"));
sqlx::query("DELETE FROM entries WHERE user_id = $1")
.bind(user_id)
.execute(&pool)
.await?;
Ok(())
}
#[tokio::test]
async fn delete_shared_key_auto_migrates_single_copy_and_redirects_refs() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let user_id = Uuid::from_u128(rand::random());
let key_id = Uuid::from_u128(rand::random());
let ref_a = Uuid::from_u128(rand::random());
let ref_b = Uuid::from_u128(rand::random());
let ref_c = Uuid::from_u128(rand::random());
insert_entry(
&pool,
key_id,
user_id,
"kfolder",
"key",
"shared-key",
json!({}),
)
.await?;
sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)")
.bind(key_id)
.bind("pem")
.bind(vec![7_u8, 8, 9])
.execute(&pool)
.await?;
// owner candidate (sorted first by folder)
insert_entry(
&pool,
ref_a,
user_id,
"afolder",
"server",
"srv-a",
json!({"key_ref":"kfolder/shared-key"}),
)
.await?;
insert_entry(
&pool,
ref_b,
user_id,
"bfolder",
"server",
"srv-b",
json!({"key_ref":"shared-key"}),
)
.await?;
insert_entry(
&pool,
ref_c,
user_id,
"cfolder",
"service",
"svc-c",
json!({"key_ref":"kfolder/shared-key"}),
)
.await?;
let result = run(
&pool,
DeleteParams {
name: Some("shared-key"),
folder: Some("kfolder"),
entry_type: None,
dry_run: false,
user_id: Some(user_id),
},
)
.await?;
assert!(!result.dry_run);
assert_eq!(result.deleted.len(), 1);
assert_eq!(result.migrated.len(), 3);
let key_exists: bool = sqlx::query_scalar(
"SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
)
.bind(key_id)
.bind(user_id)
.fetch_one(&pool)
.await?;
assert!(!key_exists);
let owner_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_a)
.fetch_one(&pool)
.await?;
let ref_b_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_b)
.fetch_one(&pool)
.await?;
let ref_c_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_c)
.fetch_one(&pool)
.await?;
assert_eq!(owner_key_ref, None);
assert_eq!(ref_b_key_ref.as_deref(), Some("afolder/srv-a"));
assert_eq!(ref_c_key_ref.as_deref(), Some("afolder/srv-a"));
let owner_has_copied: bool = sqlx::query_scalar(
"SELECT EXISTS(SELECT 1 FROM secrets WHERE entry_id = $1 AND field_name = 'pem')",
)
.bind(ref_a)
.fetch_one(&pool)
.await?;
assert!(owner_has_copied);
sqlx::query("DELETE FROM entries WHERE user_id = $1")
.bind(user_id)
.execute(&pool)
.await?;
Ok(())
}
}


@@ -75,16 +75,8 @@ async fn build_entry_env_map(
} else {
(None, key_ref)
};
-let key_entries = fetch_entries(
-pool,
-ref_folder,
-Some("key"),
-Some(ref_name),
-&[],
-None,
-user_id,
-)
-.await?;
+let key_entries =
+fetch_entries(pool, ref_folder, None, Some(ref_name), &[], None, user_id).await?;
if key_entries.len() > 1 {
anyhow::bail!(


@@ -1,6 +1,6 @@
[package]
name = "secrets-mcp"
-version = "0.3.6"
+version = "0.3.7"
edition.workspace = true
[[bin]]


@@ -990,11 +990,14 @@ async fn api_entry_delete(
.await
.ok_or((StatusCode::UNAUTHORIZED, Json(json!({ "error": "未登录" }))))?;
-delete_by_id(&state.pool, entry_id, user_id)
+let result = delete_by_id(&state.pool, entry_id, user_id)
.await
.map_err(map_entry_mutation_err)?;
-Ok(Json(json!({ "ok": true })))
+Ok(Json(json!({
+"ok": true,
+"migrated": result.migrated,
+})))
}
// ── OAuth / Well-known ────────────────────────────────────────────────────────


@@ -367,7 +367,7 @@
var nameEl = tr.querySelector('.cell-name');
var name = nameEl ? nameEl.textContent.trim() : '';
if (!id) return;
-if (!confirm('确定删除条目「' + name + '」?关联的密文字段将一并删除。')) return;
+if (!confirm('确定删除条目「' + name + '」?')) return;
fetch('/api/entries/' + encodeURIComponent(id), { method: 'DELETE', credentials: 'same-origin' })
.then(function (r) {
return r.json().then(function (data) {
@@ -375,7 +375,12 @@
return data;
});
})
-.then(function () { window.location.reload(); })
+.then(function (data) {
+if (data && Array.isArray(data.migrated) && data.migrated.length > 0) {
+alert('已自动迁移共享 key 引用:' + data.migrated.length + ' 个条目完成重定向。');
+}
+window.location.reload();
+})
.catch(function (e) { alert(e.message || String(e)); });
});
});