chore(release): secrets-mcp 0.4.0
All checks were successful
Secrets MCP — Build & Release / Check / Build / Release (push) Successful in 4m19s
Secrets MCP — Build & Release / Deploy secrets-mcp (push) Successful in 6s

Bump version for the N:N entry_secrets data model and related MCP/Web
changes. Remove superseded SQL migration artifacts; rely on auto-migrate.
Add structured errors, taxonomy normalization, and web i18n helpers.
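
For context, a minimal sketch of what the N:N model means at the data level, reusing the INSERT statements that appear in the hunks below; the pool, entry ids, and encrypted payload are assumed to exist, and column details beyond those shown in the diff are not implied:

    // One row in `secrets`, linked to any number of entries via `entry_secrets`.
    let secret_id: uuid::Uuid = sqlx::query_scalar(
        "INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
    )
    .bind(user_id)          // Option<Uuid>; NULL in single-user CLI mode
    .bind("shared_token")
    .bind("token")
    .bind(&encrypted)       // ciphertext from crypto::encrypt_json
    .fetch_one(&pool)
    .await?;

    // The N:N part: the same secret row attached to two different entries.
    for entry_id in [entry_a_id, entry_b_id] {
        sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
            .bind(entry_id)
            .bind(secret_id)
            .execute(&pool)
            .await?;
    }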

Made-with: Cursor
voson
2026-04-04 17:58:12 +08:00
parent b99d821644
commit 1518388374
29 changed files with 2285 additions and 1260 deletions

View File

@@ -7,7 +7,9 @@ use uuid::Uuid;
use crate::crypto;
use crate::db;
use crate::error::{AppError, DbErrorContext};
use crate::models::EntryRow;
use crate::taxonomy;
// ── Key/value parsing helpers ─────────────────────────────────────────────────
@@ -177,13 +179,19 @@ pub struct AddParams<'a> {
pub tags: &'a [String],
pub meta_entries: &'a [String],
pub secret_entries: &'a [String],
pub secret_types: &'a std::collections::HashMap<String, String>,
pub link_secret_names: &'a [String],
/// Optional user_id for multi-user isolation (None = single-user CLI mode)
pub user_id: Option<Uuid>,
}
pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) -> Result<AddResult> {
let metadata = build_json(params.meta_entries)?;
let Value::Object(mut metadata_map) = build_json(params.meta_entries)? else {
unreachable!("build_json always returns a JSON object");
};
let normalized_entry_type =
taxonomy::normalize_entry_type_and_metadata(params.entry_type, &mut metadata_map);
let metadata = Value::Object(metadata_map);
let secret_json = build_json(params.secret_entries)?;
let meta_keys = collect_key_paths(params.meta_entries)?;
let secret_keys = collect_key_paths(params.secret_entries)?;
@@ -224,7 +232,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
entry_id: ex.id,
user_id: params.user_id,
folder: params.folder,
entry_type: params.entry_type,
entry_type: &normalized_entry_type,
name: params.name,
version: ex.version,
action: "add",
@@ -254,7 +262,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
)
.bind(uid)
.bind(params.folder)
.bind(params.entry_type)
.bind(&normalized_entry_type)
.bind(params.name)
.bind(params.notes)
.bind(params.tags)
@@ -277,7 +285,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
RETURNING id"#,
)
.bind(params.folder)
.bind(params.entry_type)
.bind(&normalized_entry_type)
.bind(params.name)
.bind(params.notes)
.bind(params.tags)
@@ -299,7 +307,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
entry_id,
user_id: params.user_id,
folder: params.folder,
entry_type: params.entry_type,
entry_type: &normalized_entry_type,
name: params.name,
version: current_entry_version,
action: "create",
@@ -345,30 +353,42 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
}
}
let orphan_candidates: Vec<Uuid> = existing_fields.iter().map(|f| f.id).collect();
sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1")
.bind(entry_id)
.execute(&mut *tx)
.await?;
sqlx::query(
"DELETE FROM secrets s \
WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
)
.execute(&mut *tx)
.await?;
if !orphan_candidates.is_empty() {
sqlx::query(
"DELETE FROM secrets s \
WHERE s.id = ANY($1) \
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
)
.bind(&orphan_candidates)
.execute(&mut *tx)
.await?;
}
}
for (field_name, field_value) in &flat_fields {
let encrypted = crypto::encrypt_json(master_key, field_value)?;
let secret_type = params
.secret_types
.get(field_name)
.map(|s| s.as_str())
.unwrap_or("text");
let secret_id: Uuid = sqlx::query_scalar(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
)
.bind(params.user_id)
.bind(field_name)
.bind(infer_secret_type(field_name))
.bind(secret_type)
.bind(&encrypted)
.fetch_one(&mut *tx)
.await?;
.await
.map_err(|e| AppError::from_db_error(e, DbErrorContext::secret_name(field_name)))?;
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
.bind(entry_id)
.bind(secret_id)
@@ -414,7 +434,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
params.user_id,
"add",
params.folder,
params.entry_type,
&normalized_entry_type,
params.name,
serde_json::json!({
"tags": params.tags,
@@ -429,32 +449,13 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
Ok(AddResult {
name: params.name.to_string(),
folder: params.folder.to_string(),
entry_type: params.entry_type.to_string(),
entry_type: normalized_entry_type,
tags: params.tags.to_vec(),
meta_keys,
secret_keys,
})
}
pub(crate) fn infer_secret_type(name: &str) -> &'static str {
match name {
"ssh_key" => "pem",
"password" => "password",
"phone" | "phone_2" => "phone",
"webhook_url" | "address" => "url",
"access_key_id"
| "access_key_secret"
| "global_api_key"
| "api_key"
| "secret_key"
| "personal_access_token"
| "runner_token"
| "GOOGLE_CLIENT_ID"
| "GOOGLE_CLIENT_SECRET" => "token",
_ => "text",
}
}
fn validate_link_secret_names(
link_secret_names: &[String],
new_secret_names: &BTreeSet<String>,
@@ -601,6 +602,7 @@ mod tests {
tags: &[],
meta_entries: &[],
secret_entries: &[],
secret_types: &Default::default(),
link_secret_names: std::slice::from_ref(&secret_name),
user_id: None,
},
@@ -647,6 +649,7 @@ mod tests {
tags: &[],
meta_entries: &[],
secret_entries: &[],
secret_types: &Default::default(),
link_secret_names: std::slice::from_ref(&secret_name),
user_id: None,
},
@@ -697,6 +700,7 @@ mod tests {
tags: &[],
meta_entries: &[],
secret_entries: &[],
secret_types: &Default::default(),
link_secret_names: std::slice::from_ref(&secret_name),
user_id: None,
},
@@ -709,4 +713,69 @@ mod tests {
cleanup_test_rows(&pool, &marker).await?;
Ok(())
}
#[tokio::test]
async fn add_duplicate_secret_name_returns_conflict_error() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let suffix = Uuid::from_u128(rand::random()).to_string();
let marker = format!("dup_secret_{}", &suffix[..8]);
let entry_name = format!("{}_entry", marker);
let secret_name = "shared_token";
cleanup_test_rows(&pool, &marker).await?;
// First add succeeds
run(
&pool,
AddParams {
name: &entry_name,
folder: &marker,
entry_type: "service",
notes: "",
tags: &[],
meta_entries: &[],
secret_entries: &[format!("{}=value1", secret_name)],
secret_types: &Default::default(),
link_secret_names: &[],
user_id: None,
},
&[0_u8; 32],
)
.await?;
// Second add with same secret name under same user_id should fail with ConflictSecretName
let entry_name2 = format!("{}_entry2", marker);
let err = run(
&pool,
AddParams {
name: &entry_name2,
folder: &marker,
entry_type: "service",
notes: "",
tags: &[],
meta_entries: &[],
secret_entries: &[format!("{}=value2", secret_name)],
secret_types: &Default::default(),
link_secret_names: &[],
user_id: None,
},
&[0_u8; 32],
)
.await
.expect_err("must fail on duplicate secret name");
let app_err = err
.downcast_ref::<crate::error::AppError>()
.expect("error should be AppError");
assert!(
matches!(app_err, crate::error::AppError::ConflictSecretName { .. }),
"expected ConflictSecretName, got: {}",
app_err
);
cleanup_test_rows(&pool, &marker).await?;
Ok(())
}
}
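
A minimal sketch of how a caller might consume the structured error introduced above; only the downcast and the ConflictSecretName variant come from the test in this hunk, the messages and the fallback arm are illustrative assumptions:

    // `params` is an AddParams and `master_key` a &[u8; 32], as in the tests above.
    match run(&pool, params, &master_key).await {
        Ok(result) => println!("added {}/{} ({})", result.folder, result.name, result.entry_type),
        Err(err) => match err.downcast_ref::<crate::error::AppError>() {
            Some(crate::error::AppError::ConflictSecretName { .. }) => {
                // Duplicate secret name for this user: pick another name,
                // or attach the existing secret via link_secret_names.
                eprintln!("secret name already in use");
            }
            _ => eprintln!("add failed: {err}"),
        },
    }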

View File

@@ -17,7 +17,6 @@ pub struct DeletedEntry {
#[derive(Debug, serde::Serialize)]
pub struct DeleteResult {
pub deleted: Vec<DeletedEntry>,
pub migrated: Vec<String>,
pub dry_run: bool,
}
@@ -32,174 +31,6 @@ pub struct DeleteParams<'a> {
pub user_id: Option<Uuid>,
}
#[derive(Debug, sqlx::FromRow)]
struct KeyReferrer {
id: Uuid,
folder: String,
#[sqlx(rename = "type")]
entry_type: String,
name: String,
}
fn ref_label(r: &KeyReferrer) -> String {
format!("{}/{} ({})", r.folder, r.name, r.entry_type)
}
fn ref_path(r: &KeyReferrer) -> String {
format!("{}/{}", r.folder, r.name)
}
async fn fetch_key_referrers_pool(
pool: &PgPool,
key_entry_id: Uuid,
key_folder: &str,
key_name: &str,
user_id: Option<Uuid>,
) -> Result<Vec<KeyReferrer>> {
let qualified = format!("{}/{}", key_folder, key_name);
let refs: Vec<KeyReferrer> = if let Some(uid) = user_id {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id = $1 AND id <> $2 \
AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
ORDER BY folder, type, name",
)
.bind(uid)
.bind(key_entry_id)
.bind(key_name)
.bind(&qualified)
.fetch_all(pool)
.await?
} else {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id IS NULL AND id <> $1 \
AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
ORDER BY folder, type, name",
)
.bind(key_entry_id)
.bind(key_name)
.bind(&qualified)
.fetch_all(pool)
.await?
};
Ok(refs)
}
async fn migrate_key_refs_if_needed(
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
key_row: &EntryRow,
key_name: &str,
user_id: Option<Uuid>,
dry_run: bool,
) -> Result<Vec<String>> {
let qualified = format!("{}/{}", key_row.folder, key_name);
let refs: Vec<KeyReferrer> = if let Some(uid) = user_id {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id = $1 AND id <> $2 \
AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
ORDER BY folder, type, name",
)
.bind(uid)
.bind(key_row.id)
.bind(key_name)
.bind(&qualified)
.fetch_all(&mut **tx)
.await?
} else {
sqlx::query_as(
"SELECT id, folder, type, name FROM entries \
WHERE user_id IS NULL AND id <> $1 \
AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
ORDER BY folder, type, name",
)
.bind(key_row.id)
.bind(key_name)
.bind(&qualified)
.fetch_all(&mut **tx)
.await?
};
if refs.is_empty() {
return Ok(vec![]);
}
if dry_run {
return Ok(refs.iter().map(ref_label).collect());
}
let owner = &refs[0];
let owner_path = ref_path(owner);
let key_fields: Vec<SecretFieldRow> = sqlx::query_as(
"SELECT s.id, s.name, s.encrypted \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1",
)
.bind(key_row.id)
.fetch_all(&mut **tx)
.await?;
for f in &key_fields {
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING")
.bind(owner.id)
.bind(f.id)
.execute(&mut **tx)
.await?;
}
sqlx::query(
"UPDATE entries SET metadata = metadata - 'key_ref', \
version = version + 1, updated_at = NOW() WHERE id = $1",
)
.bind(owner.id)
.execute(&mut **tx)
.await?;
crate::audit::log_tx(
tx,
user_id,
"key_migrate",
&owner.folder,
&owner.entry_type,
&owner.name,
json!({
"from_key": format!("{}/{}", key_row.folder, key_name),
"role": "new_owner",
"redirect_target": owner_path,
}),
)
.await;
for r in refs.iter().skip(1) {
sqlx::query(
"UPDATE entries SET metadata = jsonb_set(metadata, '{key_ref}', to_jsonb($2::text), true), \
version = version + 1, updated_at = NOW() WHERE id = $1",
)
.bind(r.id)
.bind(&owner_path)
.execute(&mut **tx)
.await?;
crate::audit::log_tx(
tx,
user_id,
"key_migrate",
&r.folder,
&r.entry_type,
&r.name,
json!({
"from_key": format!("{}/{}", key_row.folder, key_name),
"role": "redirected_ref",
"redirect_to": owner_path,
}),
)
.await;
}
Ok(refs.iter().map(ref_label).collect())
}
/// Delete a single entry by id (multi-tenant: `user_id` must match).
pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> {
let mut tx = pool.begin().await?;
@@ -224,8 +55,6 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
let entry_type = row.entry_type.clone();
let name = row.name.clone();
let entry_row: EntryRow = (&row).into();
let migrated =
migrate_key_refs_if_needed(&mut tx, &entry_row, &name, Some(user_id), false).await?;
snapshot_and_delete(
&mut tx,
@@ -254,7 +83,6 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
folder,
entry_type,
}],
migrated,
dry_run: false,
})
}
@@ -294,6 +122,7 @@ async fn delete_one(
// - 2+ matches → disambiguation error (same as non-dry-run)
#[derive(sqlx::FromRow)]
struct DryRunRow {
#[allow(dead_code)]
id: Uuid,
folder: String,
#[sqlx(rename = "type")]
@@ -339,20 +168,16 @@ async fn delete_one(
return match rows.len() {
0 => Ok(DeleteResult {
deleted: vec![],
migrated: vec![],
dry_run: true,
}),
1 => {
let row = rows.into_iter().next().unwrap();
let refs =
fetch_key_referrers_pool(pool, row.id, &row.folder, name, user_id).await?;
Ok(DeleteResult {
deleted: vec![DeletedEntry {
name: name.to_string(),
folder: row.folder,
entry_type: row.entry_type,
}],
migrated: refs.iter().map(ref_label).collect(),
dry_run: true,
})
}
@@ -417,7 +242,6 @@ async fn delete_one(
tx.rollback().await?;
return Ok(DeleteResult {
deleted: vec![],
migrated: vec![],
dry_run: false,
});
}
@@ -437,7 +261,6 @@ async fn delete_one(
let folder = row.folder.clone();
let entry_type = row.entry_type.clone();
let migrated = migrate_key_refs_if_needed(&mut tx, &row, name, user_id, false).await?;
snapshot_and_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?;
crate::audit::log_tx(
&mut tx,
@@ -457,7 +280,6 @@ async fn delete_one(
folder,
entry_type,
}],
migrated,
dry_run: false,
})
}
@@ -497,33 +319,29 @@ async fn delete_bulk(
}
if entry_type.is_some() {
conditions.push(format!("type = ${}", idx));
idx += 1;
}
let where_clause = format!("WHERE {}", conditions.join(" AND "));
let sql = format!(
"SELECT id, version, folder, type, name, metadata, tags, notes \
FROM entries {where_clause} ORDER BY type, name"
);
let mut q = sqlx::query_as::<_, FullEntryRow>(&sql);
if let Some(uid) = user_id {
q = q.bind(uid);
}
if let Some(f) = folder {
q = q.bind(f);
}
if let Some(t) = entry_type {
q = q.bind(t);
}
let rows = q.fetch_all(pool).await?;
let _ = idx; // used only for placeholder numbering in conditions
if dry_run {
let mut migrated: Vec<String> = Vec::new();
for row in &rows {
let refs =
fetch_key_referrers_pool(pool, row.id, &row.folder, &row.name, user_id).await?;
migrated.extend(refs.iter().map(ref_label));
let sql = format!(
"SELECT id, version, folder, type, name, metadata, tags, notes \
FROM entries {where_clause} ORDER BY type, name"
);
let mut q = sqlx::query_as::<_, FullEntryRow>(&sql);
if let Some(uid) = user_id {
q = q.bind(uid);
}
if let Some(f) = folder {
q = q.bind(f);
}
if let Some(t) = entry_type {
q = q.bind(t);
}
let rows = q.fetch_all(pool).await?;
let deleted = rows
.iter()
.map(|r| DeletedEntry {
@@ -534,15 +352,31 @@ async fn delete_bulk(
.collect();
return Ok(DeleteResult {
deleted,
migrated,
dry_run: true,
});
}
let mut tx = pool.begin().await?;
let sql = format!(
"SELECT id, version, folder, type, name, metadata, tags, notes \
FROM entries {where_clause} ORDER BY type, name FOR UPDATE"
);
let mut q = sqlx::query_as::<_, FullEntryRow>(&sql);
if let Some(uid) = user_id {
q = q.bind(uid);
}
if let Some(f) = folder {
q = q.bind(f);
}
if let Some(t) = entry_type {
q = q.bind(t);
}
let rows = q.fetch_all(&mut *tx).await?;
let mut deleted = Vec::with_capacity(rows.len());
let mut migrated: Vec<String> = Vec::new();
for row in &rows {
let entry_row = EntryRow {
let entry_row: EntryRow = EntryRow {
id: row.id,
version: row.version,
folder: row.folder.clone(),
@@ -551,9 +385,6 @@ async fn delete_bulk(
metadata: row.metadata.clone(),
notes: row.notes.clone(),
};
let mut tx = pool.begin().await?;
let m = migrate_key_refs_if_needed(&mut tx, &entry_row, &row.name, user_id, false).await?;
migrated.extend(m);
snapshot_and_delete(
&mut tx,
&row.folder,
@@ -573,7 +404,6 @@ async fn delete_bulk(
json!({"bulk": true}),
)
.await;
tx.commit().await?;
deleted.push(DeletedEntry {
name: row.name.clone(),
folder: row.folder.clone(),
@@ -581,9 +411,10 @@ async fn delete_bulk(
});
}
tx.commit().await?;
Ok(DeleteResult {
deleted,
migrated,
dry_run: false,
})
}
@@ -646,12 +477,17 @@ async fn snapshot_and_delete(
.execute(&mut **tx)
.await?;
sqlx::query(
"DELETE FROM secrets s \
WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
)
.execute(&mut **tx)
.await?;
let secret_ids: Vec<Uuid> = fields.iter().map(|f| f.id).collect();
if !secret_ids.is_empty() {
sqlx::query(
"DELETE FROM secrets s \
WHERE s.id = ANY($1) \
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
)
.bind(&secret_ids)
.execute(&mut **tx)
.await?;
}
Ok(())
}
@@ -659,280 +495,153 @@ async fn snapshot_and_delete(
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
use sqlx::PgPool;
async fn maybe_test_pool() -> Option<PgPool> {
let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
eprintln!("skip delete migration tests: SECRETS_DATABASE_URL is not set");
eprintln!("skip delete tests: SECRETS_DATABASE_URL is not set");
return None;
};
let Ok(pool) = PgPool::connect(&url).await else {
eprintln!("skip delete migration tests: cannot connect to database");
eprintln!("skip delete tests: cannot connect to database");
return None;
};
if let Err(e) = crate::db::migrate(&pool).await {
eprintln!("skip delete migration tests: migrate failed: {e}");
eprintln!("skip delete tests: migrate failed: {e}");
return None;
}
Some(pool)
}
async fn insert_entry(
pool: &PgPool,
id: Uuid,
user_id: Uuid,
folder: &str,
entry_type: &str,
name: &str,
metadata: serde_json::Value,
) -> Result<()> {
async fn cleanup_single_user_rows(pool: &PgPool, marker: &str) -> Result<()> {
sqlx::query(
"INSERT INTO entries (id, user_id, folder, type, name, notes, tags, metadata, version) \
VALUES ($1, $2, $3, $4, $5, '', ARRAY[]::text[], $6, 1)",
"DELETE FROM entries WHERE user_id IS NULL AND (name LIKE $1 OR folder LIKE $1)",
)
.bind(id)
.bind(user_id)
.bind(folder)
.bind(entry_type)
.bind(name)
.bind(metadata)
.bind(format!("%{marker}%"))
.execute(pool)
.await?;
sqlx::query(
"DELETE FROM secrets WHERE user_id IS NULL AND name LIKE $1 \
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = secrets.id)",
)
.bind(format!("%{marker}%"))
.execute(pool)
.await?;
Ok(())
}
async fn insert_secret_for_entry(
pool: &PgPool,
user_id: Uuid,
entry_id: Uuid,
name: &str,
secret_type: &str,
encrypted: Vec<u8>,
) -> Result<()> {
let secret_id: Uuid = sqlx::query_scalar(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
)
.bind(user_id)
.bind(name)
.bind(secret_type)
.bind(encrypted)
.fetch_one(pool)
.await?;
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
.bind(entry_id)
.bind(secret_id)
.execute(pool)
.await?;
Ok(())
}
#[tokio::test]
async fn delete_shared_key_dry_run_reports_migration_without_writes() -> Result<()> {
async fn delete_dry_run_reports_matching_entry_without_writes() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let suffix = Uuid::from_u128(rand::random()).to_string();
let marker = format!("delete_dry_{}", &suffix[..8]);
let entry_name = format!("{}_entry", marker);
let user_id = Uuid::from_u128(rand::random());
let key_id = Uuid::from_u128(rand::random());
let ref_a = Uuid::from_u128(rand::random());
let ref_b = Uuid::from_u128(rand::random());
cleanup_single_user_rows(&pool, &marker).await?;
insert_entry(
&pool,
key_id,
user_id,
"kfolder",
"key",
"shared-key",
json!({}),
)
.await?;
insert_secret_for_entry(&pool, user_id, key_id, "pem", "pem", vec![1_u8, 2, 3]).await?;
insert_entry(
&pool,
ref_a,
user_id,
"afolder",
"server",
"srv-a",
json!({"key_ref":"kfolder/shared-key"}),
)
.await?;
insert_entry(
&pool,
ref_b,
user_id,
"bfolder",
"server",
"srv-b",
json!({"key_ref":"shared-key"}),
sqlx::query(
"INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata) \
VALUES (NULL, $1, 'service', $2, '', '{}', '{}')",
)
.bind(&marker)
.bind(&entry_name)
.execute(&pool)
.await?;
let result = run(
&pool,
DeleteParams {
name: Some("shared-key"),
folder: Some("kfolder"),
name: Some(&entry_name),
folder: Some(&marker),
entry_type: None,
dry_run: true,
user_id: Some(user_id),
user_id: None,
},
)
.await?;
assert!(result.dry_run);
assert_eq!(result.deleted.len(), 1);
assert_eq!(result.migrated.len(), 2);
assert_eq!(result.deleted[0].name, entry_name);
let key_exists: bool = sqlx::query_scalar(
"SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
let still_exists: bool = sqlx::query_scalar(
"SELECT EXISTS(SELECT 1 FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2)",
)
.bind(key_id)
.bind(user_id)
.bind(&marker)
.bind(&entry_name)
.fetch_one(&pool)
.await?;
assert!(key_exists);
assert!(still_exists);
let ref_a_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_a)
.fetch_one(&pool)
.await?;
let ref_b_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_b)
.fetch_one(&pool)
.await?;
assert_eq!(ref_a_key_ref.as_deref(), Some("kfolder/shared-key"));
assert_eq!(ref_b_key_ref.as_deref(), Some("shared-key"));
sqlx::query("DELETE FROM entries WHERE user_id = $1")
.bind(user_id)
.execute(&pool)
.await?;
cleanup_single_user_rows(&pool, &marker).await?;
Ok(())
}
#[tokio::test]
async fn delete_shared_key_auto_migrates_single_copy_and_redirects_refs() -> Result<()> {
async fn delete_by_id_removes_entry_and_orphan_secret() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let suffix = Uuid::from_u128(rand::random()).to_string();
let marker = format!("delete_id_{}", &suffix[..8]);
let user_id = Uuid::from_u128(rand::random());
let key_id = Uuid::from_u128(rand::random());
let ref_a = Uuid::from_u128(rand::random());
let ref_b = Uuid::from_u128(rand::random());
let ref_c = Uuid::from_u128(rand::random());
let entry_name = format!("{}_entry", marker);
let secret_name = format!("{}_secret", marker);
insert_entry(
&pool,
key_id,
user_id,
"kfolder",
"key",
"shared-key",
json!({}),
)
.await?;
insert_secret_for_entry(&pool, user_id, key_id, "pem", "pem", vec![7_u8, 8, 9]).await?;
// owner candidate (sorted first by folder)
insert_entry(
&pool,
ref_a,
user_id,
"afolder",
"server",
"srv-a",
json!({"key_ref":"kfolder/shared-key"}),
)
.await?;
insert_entry(
&pool,
ref_b,
user_id,
"bfolder",
"server",
"srv-b",
json!({"key_ref":"shared-key"}),
)
.await?;
insert_entry(
&pool,
ref_c,
user_id,
"cfolder",
"service",
"svc-c",
json!({"key_ref":"kfolder/shared-key"}),
)
.await?;
let result = run(
&pool,
DeleteParams {
name: Some("shared-key"),
folder: Some("kfolder"),
entry_type: None,
dry_run: false,
user_id: Some(user_id),
},
)
.await?;
assert!(!result.dry_run);
assert_eq!(result.deleted.len(), 1);
assert_eq!(result.migrated.len(), 3);
let key_exists: bool = sqlx::query_scalar(
"SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
)
.bind(key_id)
.bind(user_id)
.fetch_one(&pool)
.await?;
assert!(!key_exists);
let owner_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_a)
.fetch_one(&pool)
.await?;
let ref_b_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_b)
.fetch_one(&pool)
.await?;
let ref_c_key_ref: Option<String> =
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
.bind(ref_c)
.fetch_one(&pool)
.await?;
assert_eq!(owner_key_ref, None);
assert_eq!(ref_b_key_ref.as_deref(), Some("afolder/srv-a"));
assert_eq!(ref_c_key_ref.as_deref(), Some("afolder/srv-a"));
let owner_has_copied: bool = sqlx::query_scalar(
"SELECT EXISTS( \
SELECT 1 \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1 AND s.name = 'pem' \
)",
)
.bind(ref_a)
.fetch_one(&pool)
.await?;
assert!(owner_has_copied);
sqlx::query("DELETE FROM entries WHERE user_id = $1")
sqlx::query("DELETE FROM entries WHERE user_id = $1 AND folder = $2")
.bind(user_id)
.bind(&marker)
.execute(&pool)
.await?;
sqlx::query("DELETE FROM secrets WHERE user_id = $1 AND name = $2")
.bind(user_id)
.bind(&secret_name)
.execute(&pool)
.await?;
let entry_id: Uuid = sqlx::query_scalar(
"INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata) \
VALUES ($1, $2, 'service', $3, '', '{}', '{}') RETURNING id",
)
.bind(user_id)
.bind(&marker)
.bind(&entry_name)
.fetch_one(&pool)
.await?;
let secret_id: Uuid = sqlx::query_scalar(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, 'text', $3) RETURNING id",
)
.bind(user_id)
.bind(&secret_name)
.bind(vec![1_u8, 2, 3])
.fetch_one(&pool)
.await?;
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
.bind(entry_id)
.bind(secret_id)
.execute(&pool)
.await?;
let result = delete_by_id(&pool, entry_id, user_id).await?;
assert!(!result.dry_run);
assert_eq!(result.deleted.len(), 1);
assert_eq!(result.deleted[0].name, entry_name);
let entry_exists: bool =
sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1)")
.bind(entry_id)
.fetch_one(&pool)
.await?;
let secret_exists: bool =
sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM secrets WHERE id = $1)")
.bind(secret_id)
.fetch_one(&pool)
.await?;
assert!(!entry_exists);
assert!(!secret_exists);
Ok(())
}
}

View File

@@ -40,7 +40,7 @@ async fn build_entry_env_map(
only_fields: &[String],
prefix: &str,
master_key: &[u8; 32],
user_id: Option<Uuid>,
_user_id: Option<Uuid>,
) -> Result<HashMap<String, String>> {
let entry_ids = vec![entry.id];
let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?;
@@ -68,44 +68,6 @@ async fn build_entry_env_map(
map.insert(key, json_to_env_string(&decrypted));
}
// Resolve key_ref. Supported formats: "name" or "folder/name".
if let Some(key_ref) = entry.metadata.get("key_ref").and_then(|v| v.as_str()) {
let (ref_folder, ref_name) = if let Some((f, n)) = key_ref.split_once('/') {
(Some(f), n)
} else {
(None, key_ref)
};
let key_entries =
fetch_entries(pool, ref_folder, None, Some(ref_name), &[], None, user_id).await?;
if key_entries.len() > 1 {
anyhow::bail!(
"key_ref '{}' matched {} entries; qualify with folder/name to resolve the ambiguity",
key_ref,
key_entries.len()
);
}
if let Some(key_entry) = key_entries.first() {
let key_ids = vec![key_entry.id];
let key_fields_map = fetch_secrets_for_entries(pool, &key_ids).await?;
let empty = vec![];
let key_fields = key_fields_map.get(&key_entry.id).unwrap_or(&empty);
let key_prefix = env_prefix(key_entry, prefix);
for f in key_fields {
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
let key_var = format!(
"{}_{}",
key_prefix,
f.name.to_uppercase().replace(['-', '.'], "_")
);
map.insert(key_var, json_to_env_string(&decrypted));
}
} else {
tracing::warn!(key_ref, ?user_id, "key_ref target not found");
}
}
Ok(map)
}

View File

@@ -85,6 +85,7 @@ pub async fn run(
tags: &entry.tags,
meta_entries: &meta_entries,
secret_entries: &secret_entries,
secret_types: &Default::default(),
link_secret_names: &[],
user_id: params.user_id,
},

View File

@@ -8,10 +8,23 @@ use crate::models::{Entry, SecretField};
pub const FETCH_ALL_LIMIT: u32 = 100_000;
/// Build an ILIKE pattern for fuzzy matching, escaping `%` and `_` literals.
pub fn ilike_pattern(value: &str) -> String {
format!(
"%{}%",
value
.replace('\\', "\\\\")
.replace('%', "\\%")
.replace('_', "\\_")
)
}
pub struct SearchParams<'a> {
pub folder: Option<&'a str>,
pub entry_type: Option<&'a str>,
pub name: Option<&'a str>,
/// Fuzzy match on `entries.name` only (ILIKE with escaped `%`/`_`).
pub name_query: Option<&'a str>,
pub tags: &'a [String],
pub query: Option<&'a str>,
pub sort: &'a str,
@@ -51,11 +64,15 @@ pub async fn count_entries(pool: &PgPool, a: &SearchParams<'_>) -> Result<i64> {
if let Some(v) = a.name {
q = q.bind(v);
}
if let Some(v) = a.name_query {
let pattern = ilike_pattern(v);
q = q.bind(pattern);
}
for tag in a.tags {
q = q.bind(tag);
}
if let Some(v) = a.query {
let pattern = format!("%{}%", v.replace('%', "\\%").replace('_', "\\_"));
let pattern = ilike_pattern(v);
q = q.bind(pattern);
}
let n = q.fetch_one(pool).await?;
@@ -86,6 +103,10 @@ fn entry_where_clause_and_next_idx(a: &SearchParams<'_>) -> (String, i32) {
conditions.push(format!("name = ${}", idx));
idx += 1;
}
if a.name_query.is_some() {
conditions.push(format!("name ILIKE ${} ESCAPE '\\'", idx));
idx += 1;
}
if !a.tags.is_empty() {
let placeholders: Vec<String> = a
.tags
@@ -135,6 +156,7 @@ pub async fn run(pool: &PgPool, params: SearchParams<'_>) -> Result<SearchResult
}
/// Fetch entries matching the given filters — returns all matching entries up to FETCH_ALL_LIMIT.
#[allow(clippy::too_many_arguments)]
pub async fn fetch_entries(
pool: &PgPool,
folder: Option<&str>,
@@ -148,6 +170,7 @@ pub async fn fetch_entries(
folder,
entry_type,
name,
name_query: None,
tags,
query,
sort: "name",
@@ -189,11 +212,15 @@ async fn fetch_entries_paged(pool: &PgPool, a: &SearchParams<'_>) -> Result<Vec<
if let Some(v) = a.name {
q = q.bind(v);
}
if let Some(v) = a.name_query {
let pattern = ilike_pattern(v);
q = q.bind(pattern);
}
for tag in a.tags {
q = q.bind(tag);
}
if let Some(v) = a.query {
let pattern = format!("%{}%", v.replace('%', "\\%").replace('_', "\\_"));
let pattern = ilike_pattern(v);
q = q.bind(pattern);
}
q = q.bind(a.limit as i64).bind(a.offset as i64);
@@ -384,3 +411,13 @@ impl EntrySecretRow {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ilike_pattern_escapes_backslash_percent_and_underscore() {
assert_eq!(ilike_pattern(r"hello\_100%"), r"%hello\\\_100\%%");
}
}
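
For illustration, a worked example of the escaping above; the output shown is what ilike_pattern produces, bound against the `name ILIKE $n ESCAPE '\'` clause added in this hunk:

    // A literal '_' in the search term must not act as a single-character wildcard.
    let pattern = ilike_pattern("prod_db");
    assert_eq!(pattern, r"%prod\_db%");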

View File

@@ -5,11 +5,13 @@ use uuid::Uuid;
use crate::crypto;
use crate::db;
use crate::error::{AppError, DbErrorContext};
use crate::models::{EntryRow, EntryWriteRow};
use crate::service::add::{
collect_field_paths, collect_key_paths, flatten_json_fields, infer_secret_type, insert_path,
parse_key_path, parse_kv, remove_path,
collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path,
parse_kv, remove_path,
};
use crate::taxonomy;
#[derive(Debug, serde::Serialize)]
pub struct UpdateResult {
@@ -35,6 +37,7 @@ pub struct UpdateParams<'a> {
pub meta_entries: &'a [String],
pub remove_meta: &'a [String],
pub secret_entries: &'a [String],
pub secret_types: &'a std::collections::HashMap<String, String>,
pub remove_secrets: &'a [String],
pub user_id: Option<Uuid>,
}
@@ -90,10 +93,7 @@ pub async fn run(
let row = match rows.len() {
0 => {
tx.rollback().await?;
anyhow::bail!(
"Not found: '{}'. Use `add` to create it first.",
params.name
)
return Err(AppError::NotFoundEntry.into());
}
1 => rows.into_iter().next().unwrap(),
_ => {
@@ -167,10 +167,7 @@ pub async fn run(
if result.rows_affected() == 0 {
tx.rollback().await?;
anyhow::bail!(
"Concurrent modification detected for '{}'. Please retry.",
params.name
);
return Err(AppError::ConcurrentModification.into());
}
for entry in params.secret_entries {
@@ -224,15 +221,21 @@ pub async fn run(
.execute(&mut *tx)
.await?;
} else {
let secret_type = params
.secret_types
.get(field_name)
.map(|s| s.as_str())
.unwrap_or("text");
let secret_id: Uuid = sqlx::query_scalar(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
)
.bind(params.user_id)
.bind(field_name)
.bind(infer_secret_type(field_name))
.bind(field_name.to_string())
.bind(secret_type)
.bind(&encrypted)
.fetch_one(&mut *tx)
.await?;
.await
.map_err(|e| AppError::from_db_error(e, DbErrorContext::secret_name(field_name)))?;
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
.bind(row.id)
.bind(secret_id)
@@ -347,13 +350,13 @@ pub async fn update_fields_by_id(
user_id: Uuid,
params: UpdateEntryFieldsByIdParams<'_>,
) -> Result<()> {
if params.folder.len() > 128 {
if params.folder.chars().count() > 128 {
anyhow::bail!("folder must be at most 128 characters");
}
if params.entry_type.len() > 64 {
if params.entry_type.chars().count() > 64 {
anyhow::bail!("type must be at most 64 characters");
}
if params.name.len() > 256 {
if params.name.chars().count() > 256 {
anyhow::bail!("name must be at most 256 characters");
}
@@ -372,7 +375,7 @@ pub async fn update_fields_by_id(
Some(r) => r,
None => {
tx.rollback().await?;
anyhow::bail!("Entry not found");
return Err(AppError::NotFoundEntry.into());
}
};
@@ -395,17 +398,25 @@ pub async fn update_fields_by_id(
tracing::warn!(error = %e, "failed to snapshot entry history before web update");
}
let mut metadata_map = match params.metadata {
Value::Object(m) => m.clone(),
_ => Map::new(),
};
let normalized_type =
taxonomy::normalize_entry_type_and_metadata(params.entry_type, &mut metadata_map);
let normalized_metadata = Value::Object(metadata_map);
let res = sqlx::query(
"UPDATE entries SET folder = $1, type = $2, name = $3, notes = $4, tags = $5, metadata = $6, \
version = version + 1, updated_at = NOW() \
WHERE id = $7 AND version = $8",
)
.bind(params.folder)
.bind(params.entry_type)
.bind(&normalized_type)
.bind(params.name)
.bind(params.notes)
.bind(params.tags)
.bind(params.metadata)
.bind(&normalized_metadata)
.bind(row.id)
.bind(row.version)
.execute(&mut *tx)
@@ -414,16 +425,17 @@ pub async fn update_fields_by_id(
if let sqlx::Error::Database(ref d) = e
&& d.code().as_deref() == Some("23505")
{
return anyhow::anyhow!(
"An entry with this folder and name already exists for your account."
);
return AppError::ConflictEntryName {
folder: params.folder.to_string(),
name: params.name.to_string(),
};
}
e.into()
AppError::Internal(e.into())
})?;
if res.rows_affected() == 0 {
tx.rollback().await?;
anyhow::bail!("Concurrent modification detected. Please refresh and try again.");
return Err(AppError::ConcurrentModification.into());
}
crate::audit::log_tx(
@@ -431,7 +443,7 @@ pub async fn update_fields_by_id(
Some(user_id),
"update",
params.folder,
params.entry_type,
&normalized_type,
params.name,
serde_json::json!({
"source": "web",