feat(secrets-mcp): 共享 key 删除时自动迁移并重定向 (v0.3.7)
All checks were successful
Secrets MCP — Build & Release / 检查 / 构建 / 发版 (push) Successful in 4m4s
Secrets MCP — Build & Release / 部署 secrets-mcp (push) Successful in 6s

删除仍被 metadata.key_ref 引用的 key 条目时,在同一事务内将密文复制到首个引用方,
其余引用方的 key_ref 重定向到新 owner;env_map 解析 key_ref 时不再限定 type=key。
Web 删除 API 返回 migrated;Dashboard 删除成功后提示迁移。

Bump secrets-mcp to 0.3.7;补充删除迁移相关单测(需 SECRETS_DATABASE_URL)。

Made-with: Cursor
This commit is contained in:
2026-04-03 09:27:20 +08:00
parent c3c536200e
commit df701f21b9
8 changed files with 487 additions and 29 deletions

View File

@@ -17,6 +17,7 @@ pub struct DeletedEntry {
/// Outcome of a delete operation, serialized back to the caller
/// (MCP tool / Web delete API).
#[derive(Debug, serde::Serialize)]
pub struct DeleteResult {
    /// Entries that were removed (or, under dry-run, would be removed).
    pub deleted: Vec<DeletedEntry>,
    /// Human-readable labels ("folder/name (type)") of entries still
    /// referencing a deleted shared key whose key material was migrated
    /// (or would be migrated, under dry-run).
    pub migrated: Vec<String>,
    /// True when nothing was written and this result is only a preview.
    pub dry_run: bool,
}
@@ -31,6 +32,174 @@ pub struct DeleteParams<'a> {
pub user_id: Option<Uuid>,
}
/// An entry whose `metadata.key_ref` points at the key entry being deleted.
/// Row shape matches the `SELECT id, folder, type, name` referrer queries.
#[derive(Debug, sqlx::FromRow)]
struct KeyReferrer {
    // Primary key of the referencing entry.
    id: Uuid,
    // Folder containing the referencing entry.
    folder: String,
    // `type` is a Rust keyword, so the column is renamed on mapping.
    #[sqlx(rename = "type")]
    entry_type: String,
    name: String,
}
/// Human-readable label for a referrer: `"folder/name (type)"`.
/// Used in `DeleteResult::migrated` and dry-run previews.
fn ref_label(r: &KeyReferrer) -> String {
    let mut label =
        String::with_capacity(r.folder.len() + r.name.len() + r.entry_type.len() + 4);
    label.push_str(&r.folder);
    label.push('/');
    label.push_str(&r.name);
    label.push_str(" (");
    label.push_str(&r.entry_type);
    label.push(')');
    label
}
/// Qualified path for a referrer: `"folder/name"`. This is the value written
/// into redirected `key_ref` fields when a shared key's owner changes.
fn ref_path(r: &KeyReferrer) -> String {
    [r.folder.as_str(), r.name.as_str()].join("/")
}
/// Pool-backed lookup of entries whose `metadata.key_ref` references the
/// given key entry, matching either the bare key name or the qualified
/// `folder/name` form. Results are ordered by (folder, type, name), which is
/// also the order used to pick the migration owner elsewhere.
///
/// `user_id = None` selects the shared (NULL-tenant) namespace; otherwise
/// only the given tenant's entries are scanned. The key entry itself is
/// excluded via `id <> $n`.
async fn fetch_key_referrers_pool(
    pool: &PgPool,
    key_entry_id: Uuid,
    key_folder: &str,
    key_name: &str,
    user_id: Option<Uuid>,
) -> Result<Vec<KeyReferrer>> {
    // Referrers may store either the short or the fully qualified form.
    let qualified = format!("{}/{}", key_folder, key_name);
    let referrers: Vec<KeyReferrer> = match user_id {
        Some(uid) => {
            sqlx::query_as(
                "SELECT id, folder, type, name FROM entries \
                 WHERE user_id = $1 AND id <> $2 \
                 AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
                 ORDER BY folder, type, name",
            )
            .bind(uid)
            .bind(key_entry_id)
            .bind(key_name)
            .bind(&qualified)
            .fetch_all(pool)
            .await?
        }
        None => {
            sqlx::query_as(
                "SELECT id, folder, type, name FROM entries \
                 WHERE user_id IS NULL AND id <> $1 \
                 AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
                 ORDER BY folder, type, name",
            )
            .bind(key_entry_id)
            .bind(key_name)
            .bind(&qualified)
            .fetch_all(pool)
            .await?
        }
    };
    Ok(referrers)
}
/// When a key entry about to be deleted is still referenced via
/// `metadata.key_ref`, migrate its ciphertext inside the caller's transaction:
/// the first referrer (ordered by folder, type, name) becomes the new owner
/// and receives copies of the key's secret rows; every other referrer's
/// `key_ref` is redirected to the new owner's `folder/name` path.
///
/// Returns the labels of all affected referrers (empty when nothing
/// references the key). Under `dry_run`, only reports — no writes happen.
/// Audit records (`key_migrate`) are written for the owner and each
/// redirected referrer.
async fn migrate_key_refs_if_needed(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    key_row: &EntryRow,
    key_name: &str,
    user_id: Option<Uuid>,
    dry_run: bool,
) -> Result<Vec<String>> {
    // Referrers may store either the bare key name or "folder/name".
    let qualified = format!("{}/{}", key_row.folder, key_name);
    // Same query as fetch_key_referrers_pool, but executed on the open
    // transaction so the read and the writes below are atomic.
    let refs: Vec<KeyReferrer> = if let Some(uid) = user_id {
        sqlx::query_as(
            "SELECT id, folder, type, name FROM entries \
             WHERE user_id = $1 AND id <> $2 \
             AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
             ORDER BY folder, type, name",
        )
        .bind(uid)
        .bind(key_row.id)
        .bind(key_name)
        .bind(&qualified)
        .fetch_all(&mut **tx)
        .await?
    } else {
        sqlx::query_as(
            "SELECT id, folder, type, name FROM entries \
             WHERE user_id IS NULL AND id <> $1 \
             AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
             ORDER BY folder, type, name",
        )
        .bind(key_row.id)
        .bind(key_name)
        .bind(&qualified)
        .fetch_all(&mut **tx)
        .await?
    };
    if refs.is_empty() {
        // Key is unreferenced: plain delete, nothing to migrate.
        return Ok(vec![]);
    }
    if dry_run {
        // Preview only: report who would be affected, write nothing.
        return Ok(refs.iter().map(ref_label).collect());
    }
    // Deterministic owner choice: first referrer in (folder, type, name) order.
    let owner = &refs[0];
    let owner_path = ref_path(owner);
    let key_fields: Vec<SecretFieldRow> =
        sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1")
            .bind(key_row.id)
            .fetch_all(&mut **tx)
            .await?;
    // Copy each encrypted field onto the new owner. DO NOTHING on conflict:
    // if the owner already has a secret with the same field_name, its
    // existing value is kept rather than overwritten.
    for f in &key_fields {
        sqlx::query(
            "INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3) \
             ON CONFLICT (entry_id, field_name) DO NOTHING",
        )
        .bind(owner.id)
        .bind(&f.field_name)
        .bind(&f.encrypted)
        .execute(&mut **tx)
        .await?;
    }
    // The owner now holds the key material itself, so its key_ref is removed
    // (jsonb `-` operator) and its version bumped.
    sqlx::query(
        "UPDATE entries SET metadata = metadata - 'key_ref', \
         version = version + 1, updated_at = NOW() WHERE id = $1",
    )
    .bind(owner.id)
    .execute(&mut **tx)
    .await?;
    // Audit the ownership transfer; log_tx's result is not propagated here —
    // presumably best-effort logging (TODO confirm log_tx's return contract).
    crate::audit::log_tx(
        tx,
        user_id,
        "key_migrate",
        &owner.folder,
        &owner.entry_type,
        &owner.name,
        json!({
            "from_key": format!("{}/{}", key_row.folder, key_name),
            "role": "new_owner",
            "redirect_target": owner_path,
        }),
    )
    .await;
    // Every remaining referrer is repointed at the new owner's path.
    for r in refs.iter().skip(1) {
        sqlx::query(
            "UPDATE entries SET metadata = jsonb_set(metadata, '{key_ref}', to_jsonb($2::text), true), \
             version = version + 1, updated_at = NOW() WHERE id = $1",
        )
        .bind(r.id)
        .bind(&owner_path)
        .execute(&mut **tx)
        .await?;
        crate::audit::log_tx(
            tx,
            user_id,
            "key_migrate",
            &r.folder,
            &r.entry_type,
            &r.name,
            json!({
                "from_key": format!("{}/{}", key_row.folder, key_name),
                "role": "redirected_ref",
                "redirect_to": owner_path,
            }),
        )
        .await;
    }
    Ok(refs.iter().map(ref_label).collect())
}
/// Delete a single entry by id (multi-tenant: `user_id` must match). Cascades `secrets` via FK.
pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> {
let mut tx = pool.begin().await?;
@@ -55,6 +224,8 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
let entry_type = row.entry_type.clone();
let name = row.name.clone();
let entry_row: EntryRow = (&row).into();
let migrated =
migrate_key_refs_if_needed(&mut tx, &entry_row, &name, Some(user_id), false).await?;
snapshot_and_delete(
&mut tx,
@@ -83,6 +254,7 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
folder,
entry_type,
}],
migrated,
dry_run: false,
})
}
@@ -122,6 +294,7 @@ async fn delete_one(
// - 2+ matches → disambiguation error (same as non-dry-run)
#[derive(sqlx::FromRow)]
struct DryRunRow {
id: Uuid,
folder: String,
#[sqlx(rename = "type")]
entry_type: String,
@@ -130,7 +303,7 @@ async fn delete_one(
let rows: Vec<DryRunRow> = if let Some(uid) = user_id {
if let Some(f) = folder {
sqlx::query_as(
"SELECT folder, type FROM entries WHERE user_id = $1 AND folder = $2 AND name = $3",
"SELECT id, folder, type FROM entries WHERE user_id = $1 AND folder = $2 AND name = $3",
)
.bind(uid)
.bind(f)
@@ -138,40 +311,48 @@ async fn delete_one(
.fetch_all(pool)
.await?
} else {
sqlx::query_as("SELECT folder, type FROM entries WHERE user_id = $1 AND name = $2")
.bind(uid)
.bind(name)
.fetch_all(pool)
.await?
sqlx::query_as(
"SELECT id, folder, type FROM entries WHERE user_id = $1 AND name = $2",
)
.bind(uid)
.bind(name)
.fetch_all(pool)
.await?
}
} else if let Some(f) = folder {
sqlx::query_as(
"SELECT folder, type FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2",
"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2",
)
.bind(f)
.bind(name)
.fetch_all(pool)
.await?
} else {
sqlx::query_as("SELECT folder, type FROM entries WHERE user_id IS NULL AND name = $1")
.bind(name)
.fetch_all(pool)
.await?
sqlx::query_as(
"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND name = $1",
)
.bind(name)
.fetch_all(pool)
.await?
};
return match rows.len() {
0 => Ok(DeleteResult {
deleted: vec![],
migrated: vec![],
dry_run: true,
}),
1 => {
let row = rows.into_iter().next().unwrap();
let refs =
fetch_key_referrers_pool(pool, row.id, &row.folder, name, user_id).await?;
Ok(DeleteResult {
deleted: vec![DeletedEntry {
name: name.to_string(),
folder: row.folder,
entry_type: row.entry_type,
}],
migrated: refs.iter().map(ref_label).collect(),
dry_run: true,
})
}
@@ -236,6 +417,7 @@ async fn delete_one(
tx.rollback().await?;
return Ok(DeleteResult {
deleted: vec![],
migrated: vec![],
dry_run: false,
});
}
@@ -255,6 +437,7 @@ async fn delete_one(
let folder = row.folder.clone();
let entry_type = row.entry_type.clone();
let migrated = migrate_key_refs_if_needed(&mut tx, &row, name, user_id, false).await?;
snapshot_and_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?;
crate::audit::log_tx(
&mut tx,
@@ -274,6 +457,7 @@ async fn delete_one(
folder,
entry_type,
}],
migrated,
dry_run: false,
})
}
@@ -334,6 +518,12 @@ async fn delete_bulk(
let rows = q.fetch_all(pool).await?;
if dry_run {
let mut migrated: Vec<String> = Vec::new();
for row in &rows {
let refs =
fetch_key_referrers_pool(pool, row.id, &row.folder, &row.name, user_id).await?;
migrated.extend(refs.iter().map(ref_label));
}
let deleted = rows
.iter()
.map(|r| DeletedEntry {
@@ -344,11 +534,13 @@ async fn delete_bulk(
.collect();
return Ok(DeleteResult {
deleted,
migrated,
dry_run: true,
});
}
let mut deleted = Vec::with_capacity(rows.len());
let mut migrated: Vec<String> = Vec::new();
for row in &rows {
let entry_row = EntryRow {
id: row.id,
@@ -360,6 +552,8 @@ async fn delete_bulk(
notes: row.notes.clone(),
};
let mut tx = pool.begin().await?;
let m = migrate_key_refs_if_needed(&mut tx, &entry_row, &row.name, user_id, false).await?;
migrated.extend(m);
snapshot_and_delete(
&mut tx,
&row.folder,
@@ -389,6 +583,7 @@ async fn delete_bulk(
Ok(DeleteResult {
deleted,
migrated,
dry_run: false,
})
}
@@ -451,3 +646,264 @@ async fn snapshot_and_delete(
Ok(())
}
#[cfg(test)]
mod tests {
    //! Integration tests for shared-key delete migration.
    //!
    //! These require a live Postgres reachable via `SECRETS_DATABASE_URL`;
    //! when the variable is unset, the connection fails, or migration fails,
    //! each test prints a skip notice and passes vacuously.
    use super::*;
    use serde_json::json;

    /// Connect to the configured test database and run migrations.
    /// Returns `None` (skip) when no usable database is available.
    async fn maybe_test_pool() -> Option<PgPool> {
        let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
            eprintln!("skip delete migration tests: SECRETS_DATABASE_URL is not set");
            return None;
        };
        let Ok(pool) = PgPool::connect(&url).await else {
            eprintln!("skip delete migration tests: cannot connect to database");
            return None;
        };
        if let Err(e) = crate::db::migrate(&pool).await {
            eprintln!("skip delete migration tests: migrate failed: {e}");
            return None;
        }
        Some(pool)
    }

    /// Insert a minimal `entries` fixture row (empty notes, empty tags,
    /// version 1) with the given metadata.
    async fn insert_entry(
        pool: &PgPool,
        id: Uuid,
        user_id: Uuid,
        folder: &str,
        entry_type: &str,
        name: &str,
        metadata: serde_json::Value,
    ) -> Result<()> {
        sqlx::query(
            "INSERT INTO entries (id, user_id, folder, type, name, notes, tags, metadata, version) \
             VALUES ($1, $2, $3, $4, $5, '', ARRAY[]::text[], $6, 1)",
        )
        .bind(id)
        .bind(user_id)
        .bind(folder)
        .bind(entry_type)
        .bind(name)
        .bind(metadata)
        .execute(pool)
        .await?;
        Ok(())
    }

    /// Dry-run deletion of a referenced key must report the pending migration
    /// (one label per referrer) while leaving the key entry and every
    /// referrer's `key_ref` untouched.
    #[tokio::test]
    async fn delete_shared_key_dry_run_reports_migration_without_writes() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        // Random ids isolate this run from concurrent/previous test data.
        let user_id = Uuid::from_u128(rand::random());
        let key_id = Uuid::from_u128(rand::random());
        let ref_a = Uuid::from_u128(rand::random());
        let ref_b = Uuid::from_u128(rand::random());
        // Fixture: one key entry with a single secret field...
        insert_entry(
            &pool,
            key_id,
            user_id,
            "kfolder",
            "key",
            "shared-key",
            json!({}),
        )
        .await?;
        sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)")
            .bind(key_id)
            .bind("pem")
            .bind(vec![1_u8, 2, 3])
            .execute(&pool)
            .await?;
        // ...and two referrers: one qualified ("kfolder/shared-key"),
        // one bare ("shared-key") — both spellings must be detected.
        insert_entry(
            &pool,
            ref_a,
            user_id,
            "afolder",
            "server",
            "srv-a",
            json!({"key_ref":"kfolder/shared-key"}),
        )
        .await?;
        insert_entry(
            &pool,
            ref_b,
            user_id,
            "bfolder",
            "server",
            "srv-b",
            json!({"key_ref":"shared-key"}),
        )
        .await?;
        let result = run(
            &pool,
            DeleteParams {
                name: Some("shared-key"),
                folder: Some("kfolder"),
                entry_type: None,
                dry_run: true,
                user_id: Some(user_id),
            },
        )
        .await?;
        // Both referrers reported as pending migration.
        assert!(result.dry_run);
        assert_eq!(result.deleted.len(), 1);
        assert_eq!(result.migrated.len(), 2);
        // Key entry must still exist — dry-run performs no writes.
        let key_exists: bool = sqlx::query_scalar(
            "SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
        )
        .bind(key_id)
        .bind(user_id)
        .fetch_one(&pool)
        .await?;
        assert!(key_exists);
        // Referrers keep their original key_ref values.
        let ref_a_key_ref: Option<String> =
            sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
                .bind(ref_a)
                .fetch_one(&pool)
                .await?;
        let ref_b_key_ref: Option<String> =
            sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
                .bind(ref_b)
                .fetch_one(&pool)
                .await?;
        assert_eq!(ref_a_key_ref.as_deref(), Some("kfolder/shared-key"));
        assert_eq!(ref_b_key_ref.as_deref(), Some("shared-key"));
        // Cleanup; secrets rows cascade via the entries FK.
        sqlx::query("DELETE FROM entries WHERE user_id = $1")
            .bind(user_id)
            .execute(&pool)
            .await?;
        Ok(())
    }

    /// Real deletion of a referenced key must copy the secret onto exactly
    /// one new owner (first referrer by folder order), clear that owner's
    /// `key_ref`, and redirect every other referrer to the owner's path.
    #[tokio::test]
    async fn delete_shared_key_auto_migrates_single_copy_and_redirects_refs() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        let user_id = Uuid::from_u128(rand::random());
        let key_id = Uuid::from_u128(rand::random());
        let ref_a = Uuid::from_u128(rand::random());
        let ref_b = Uuid::from_u128(rand::random());
        let ref_c = Uuid::from_u128(rand::random());
        insert_entry(
            &pool,
            key_id,
            user_id,
            "kfolder",
            "key",
            "shared-key",
            json!({}),
        )
        .await?;
        sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)")
            .bind(key_id)
            .bind("pem")
            .bind(vec![7_u8, 8, 9])
            .execute(&pool)
            .await?;
        // owner candidate (sorted first by folder)
        insert_entry(
            &pool,
            ref_a,
            user_id,
            "afolder",
            "server",
            "srv-a",
            json!({"key_ref":"kfolder/shared-key"}),
        )
        .await?;
        insert_entry(
            &pool,
            ref_b,
            user_id,
            "bfolder",
            "server",
            "srv-b",
            json!({"key_ref":"shared-key"}),
        )
        .await?;
        // Third referrer with a different entry type — migration is not
        // limited to a single type.
        insert_entry(
            &pool,
            ref_c,
            user_id,
            "cfolder",
            "service",
            "svc-c",
            json!({"key_ref":"kfolder/shared-key"}),
        )
        .await?;
        let result = run(
            &pool,
            DeleteParams {
                name: Some("shared-key"),
                folder: Some("kfolder"),
                entry_type: None,
                dry_run: false,
                user_id: Some(user_id),
            },
        )
        .await?;
        // All three referrers reported; key entry actually gone.
        assert!(!result.dry_run);
        assert_eq!(result.deleted.len(), 1);
        assert_eq!(result.migrated.len(), 3);
        let key_exists: bool = sqlx::query_scalar(
            "SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
        )
        .bind(key_id)
        .bind(user_id)
        .fetch_one(&pool)
        .await?;
        assert!(!key_exists);
        let owner_key_ref: Option<String> =
            sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
                .bind(ref_a)
                .fetch_one(&pool)
                .await?;
        let ref_b_key_ref: Option<String> =
            sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
                .bind(ref_b)
                .fetch_one(&pool)
                .await?;
        let ref_c_key_ref: Option<String> =
            sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
                .bind(ref_c)
                .fetch_one(&pool)
                .await?;
        // New owner (afolder sorts first) holds the key itself: no key_ref.
        assert_eq!(owner_key_ref, None);
        // Remaining referrers redirected to the owner's folder/name path.
        assert_eq!(ref_b_key_ref.as_deref(), Some("afolder/srv-a"));
        assert_eq!(ref_c_key_ref.as_deref(), Some("afolder/srv-a"));
        // The ciphertext field was copied onto the new owner.
        let owner_has_copied: bool = sqlx::query_scalar(
            "SELECT EXISTS(SELECT 1 FROM secrets WHERE entry_id = $1 AND field_name = 'pem')",
        )
        .bind(ref_a)
        .fetch_one(&pool)
        .await?;
        assert!(owner_has_copied);
        sqlx::query("DELETE FROM entries WHERE user_id = $1")
            .bind(user_id)
            .execute(&pool)
            .await?;
        Ok(())
    }
}