From 089d0b4b58244068e3449e96d3519c32df23b181 Mon Sep 17 00:00:00 2001 From: agent Date: Thu, 9 Apr 2026 15:23:16 +0800 Subject: [PATCH] style(dashboard): move version footer out of card --- Cargo.lock | 2 +- crates/secrets-core/src/db.rs | 22 +- crates/secrets-core/src/models.rs | 4 + crates/secrets-core/src/service/add.rs | 8 +- crates/secrets-core/src/service/delete.rs | 202 +++++++- crates/secrets-core/src/service/env_map.rs | 2 +- crates/secrets-core/src/service/export.rs | 1 + crates/secrets-core/src/service/mod.rs | 1 + crates/secrets-core/src/service/relations.rs | 324 ++++++++++++ crates/secrets-core/src/service/rollback.rs | 160 ++---- crates/secrets-core/src/service/search.rs | 30 +- crates/secrets-core/src/service/update.rs | 5 +- crates/secrets-mcp/Cargo.toml | 2 +- crates/secrets-mcp/src/main.rs | 21 + crates/secrets-mcp/src/tools.rs | 130 +++-- crates/secrets-mcp/src/web/entries.rs | 264 +++++++++- crates/secrets-mcp/src/web/mod.rs | 7 + crates/secrets-mcp/templates/audit.html | 112 +++-- crates/secrets-mcp/templates/dashboard.html | 171 +++---- crates/secrets-mcp/templates/entries.html | 504 ++++++++++++------- crates/secrets-mcp/templates/i18n.js | 3 + crates/secrets-mcp/templates/trash.html | 272 ++++++++++ plans/metadata-search-and-entry-relations.md | 392 +++++++++++++++ 23 files changed, 2114 insertions(+), 525 deletions(-) create mode 100644 crates/secrets-core/src/service/relations.rs create mode 100644 crates/secrets-mcp/templates/trash.html create mode 100644 plans/metadata-search-and-entry-relations.md diff --git a/Cargo.lock b/Cargo.lock index d2a8846..5dc97a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2065,7 +2065,7 @@ dependencies = [ [[package]] name = "secrets-mcp" -version = "0.5.12" +version = "0.5.13" dependencies = [ "anyhow", "askama", diff --git a/crates/secrets-core/src/db.rs b/crates/secrets-core/src/db.rs index 4013fca..e111d02 100644 --- a/crates/secrets-core/src/db.rs +++ b/crates/secrets-core/src/db.rs @@ -80,10 +80,12 
@@ pub async fn migrate(pool: &PgPool) -> Result<()> { metadata JSONB NOT NULL DEFAULT '{}', version BIGINT NOT NULL DEFAULT 1, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ ); -- Legacy unique constraint without user_id (single-user mode) + -- NOTE: These are rebuilt below with `deleted_at IS NULL` for soft-delete support. CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_legacy ON entries(folder, name) WHERE user_id IS NULL; @@ -127,6 +129,17 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { ); CREATE INDEX IF NOT EXISTS idx_entry_secrets_secret_id ON entry_secrets(secret_id); + -- ── entry_relations: parent-child links between entries ────────────────── + CREATE TABLE IF NOT EXISTS entry_relations ( + parent_entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, + child_entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY(parent_entry_id, child_entry_id), + CHECK (parent_entry_id <> child_entry_id) + ); + CREATE INDEX IF NOT EXISTS idx_entry_relations_parent ON entry_relations(parent_entry_id); + CREATE INDEX IF NOT EXISTS idx_entry_relations_child ON entry_relations(child_entry_id); + -- ── audit_log: append-only operation log ───────────────────────────────── CREATE TABLE IF NOT EXISTS audit_log ( id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, @@ -170,6 +183,7 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { -- Backfill: add notes to entries if not present (fresh installs already have it) ALTER TABLE entries ADD COLUMN IF NOT EXISTS notes TEXT NOT NULL DEFAULT ''; + ALTER TABLE entries ADD COLUMN IF NOT EXISTS deleted_at TIMESTAMPTZ; -- ── secrets_history: field-level snapshot ──────────────────────────────── CREATE TABLE IF NOT EXISTS secrets_history ( @@ -404,11 +418,11 @@ async fn migrate_schema(pool: &PgPool) -> Result<()> { CREATE 
UNIQUE INDEX IF NOT EXISTS idx_entries_unique_legacy ON entries(folder, name) - WHERE user_id IS NULL; + WHERE user_id IS NULL AND deleted_at IS NULL; CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_user ON entries(user_id, folder, name) - WHERE user_id IS NOT NULL; + WHERE user_id IS NOT NULL AND deleted_at IS NULL; -- ── Replace old namespace/kind indexes ──────────────────────────────────── DROP INDEX IF EXISTS idx_entries_namespace; @@ -420,6 +434,8 @@ async fn migrate_schema(pool: &PgPool) -> Result<()> { ON entries(folder) WHERE folder <> ''; CREATE INDEX IF NOT EXISTS idx_entries_type ON entries(type) WHERE type <> ''; + CREATE INDEX IF NOT EXISTS idx_entries_deleted_at + ON entries(deleted_at) WHERE deleted_at IS NOT NULL; CREATE INDEX IF NOT EXISTS idx_audit_log_folder_type ON audit_log(folder, type); CREATE INDEX IF NOT EXISTS idx_entries_history_folder_type_name diff --git a/crates/secrets-core/src/models.rs b/crates/secrets-core/src/models.rs index 9313f62..8687d41 100644 --- a/crates/secrets-core/src/models.rs +++ b/crates/secrets-core/src/models.rs @@ -21,6 +21,7 @@ pub struct Entry { pub version: i64, pub created_at: DateTime, pub updated_at: DateTime, + pub deleted_at: Option>, } /// A single encrypted field belonging to an Entry. @@ -52,6 +53,7 @@ pub struct EntryRow { pub tags: Vec, pub metadata: Value, pub notes: String, + pub name: String, } /// Entry row including `name` (used for id-scoped web / service updates). 
@@ -66,6 +68,7 @@ pub struct EntryWriteRow { pub tags: Vec, pub metadata: Value, pub notes: String, + pub deleted_at: Option>, } impl From<&EntryWriteRow> for EntryRow { @@ -78,6 +81,7 @@ impl From<&EntryWriteRow> for EntryRow { tags: r.tags.clone(), metadata: r.metadata.clone(), notes: r.notes.clone(), + name: r.name.clone(), } } } diff --git a/crates/secrets-core/src/service/add.rs b/crates/secrets-core/src/service/add.rs index 0cdba6a..751c653 100644 --- a/crates/secrets-core/src/service/add.rs +++ b/crates/secrets-core/src/service/add.rs @@ -213,8 +213,8 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) -> // Fetch existing entry by (user_id, folder, name) — the natural unique key let existing: Option = if let Some(uid) = params.user_id { sqlx::query_as( - "SELECT id, version, folder, type, tags, metadata, notes FROM entries \ - WHERE user_id = $1 AND folder = $2 AND name = $3", + "SELECT id, version, folder, type, tags, metadata, notes, name FROM entries \ + WHERE user_id = $1 AND folder = $2 AND name = $3 AND deleted_at IS NULL", ) .bind(uid) .bind(params.folder) @@ -223,8 +223,8 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) -> .await? 
} else { sqlx::query_as( - "SELECT id, version, folder, type, tags, metadata, notes FROM entries \ - WHERE user_id IS NULL AND folder = $1 AND name = $2", + "SELECT id, version, folder, type, tags, metadata, notes, name FROM entries \ + WHERE user_id IS NULL AND folder = $1 AND name = $2 AND deleted_at IS NULL", ) .bind(params.folder) .bind(params.name) diff --git a/crates/secrets-core/src/service/delete.rs b/crates/secrets-core/src/service/delete.rs index b503e11..0e8cd4a 100644 --- a/crates/secrets-core/src/service/delete.rs +++ b/crates/secrets-core/src/service/delete.rs @@ -4,6 +4,7 @@ use sqlx::PgPool; use uuid::Uuid; use crate::db; +use crate::error::AppError; use crate::models::{EntryRow, EntryWriteRow, SecretFieldRow}; use crate::service::util::user_scope_condition; @@ -21,6 +22,17 @@ pub struct DeleteResult { pub dry_run: bool, } +#[derive(Debug, serde::Serialize, sqlx::FromRow)] +pub struct TrashEntry { + pub id: Uuid, + pub name: String, + pub folder: String, + #[serde(rename = "type")] + #[sqlx(rename = "type")] + pub entry_type: String, + pub deleted_at: chrono::DateTime, +} + pub struct DeleteParams<'a> { /// If set, delete a single entry by name. pub name: Option<&'a str>, @@ -36,12 +48,156 @@ pub struct DeleteParams<'a> { /// Prevents accidental mass deletion when filters are too broad. 
pub const MAX_BULK_DELETE: usize = 1000; +pub async fn list_deleted_entries( + pool: &PgPool, + user_id: Uuid, + limit: u32, + offset: u32, +) -> Result> { + sqlx::query_as( + "SELECT id, name, folder, type, deleted_at FROM entries \ + WHERE user_id = $1 AND deleted_at IS NOT NULL \ + ORDER BY deleted_at DESC, name ASC LIMIT $2 OFFSET $3", + ) + .bind(user_id) + .bind(limit as i64) + .bind(offset as i64) + .fetch_all(pool) + .await + .map_err(Into::into) +} + +pub async fn count_deleted_entries(pool: &PgPool, user_id: Uuid) -> Result { + sqlx::query_scalar::<_, i64>( + "SELECT COUNT(*)::bigint FROM entries WHERE user_id = $1 AND deleted_at IS NOT NULL", + ) + .bind(user_id) + .fetch_one(pool) + .await + .map_err(Into::into) +} + +pub async fn restore_deleted_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<()> { + let mut tx = pool.begin().await?; + let row: Option = sqlx::query_as( + "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \ + WHERE id = $1 AND user_id = $2 AND deleted_at IS NOT NULL FOR UPDATE", + ) + .bind(entry_id) + .bind(user_id) + .fetch_optional(&mut *tx) + .await?; + + let row = match row { + Some(r) => r, + None => { + tx.rollback().await?; + return Err(AppError::NotFoundEntry.into()); + } + }; + + let conflict_exists: bool = sqlx::query_scalar( + "SELECT EXISTS(SELECT 1 FROM entries \ + WHERE user_id = $1 AND folder = $2 AND name = $3 AND deleted_at IS NULL AND id <> $4)", + ) + .bind(user_id) + .bind(&row.folder) + .bind(&row.name) + .bind(row.id) + .fetch_one(&mut *tx) + .await?; + if conflict_exists { + tx.rollback().await?; + return Err(AppError::ConflictEntryName { + folder: row.folder, + name: row.name, + } + .into()); + } + + sqlx::query("UPDATE entries SET deleted_at = NULL, updated_at = NOW() WHERE id = $1") + .bind(row.id) + .execute(&mut *tx) + .await?; + + crate::audit::log_tx( + &mut tx, + Some(user_id), + "restore", + &row.folder, + &row.entry_type, + &row.name, + json!({ 
"entry_id": row.id }), + ) + .await; + tx.commit().await?; + Ok(()) +} + +pub async fn purge_deleted_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<()> { + let mut tx = pool.begin().await?; + let row: Option = sqlx::query_as( + "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \ + WHERE id = $1 AND user_id = $2 AND deleted_at IS NOT NULL FOR UPDATE", + ) + .bind(entry_id) + .bind(user_id) + .fetch_optional(&mut *tx) + .await?; + + let row = match row { + Some(r) => r, + None => { + tx.rollback().await?; + return Err(AppError::NotFoundEntry.into()); + } + }; + + purge_entry_record(&mut tx, row.id).await?; + crate::audit::log_tx( + &mut tx, + Some(user_id), + "purge", + &row.folder, + &row.entry_type, + &row.name, + json!({ "entry_id": row.id }), + ) + .await; + tx.commit().await?; + Ok(()) +} + +pub async fn purge_expired_deleted_entries(pool: &PgPool) -> Result { + #[derive(sqlx::FromRow)] + struct ExpiredRow { + id: Uuid, + } + + let mut tx = pool.begin().await?; + let rows: Vec = sqlx::query_as( + "SELECT id FROM entries \ + WHERE deleted_at IS NOT NULL \ + AND deleted_at < NOW() - INTERVAL '3 months' \ + FOR UPDATE", + ) + .fetch_all(&mut *tx) + .await?; + + for row in &rows { + purge_entry_record(&mut tx, row.id).await?; + } + + tx.commit().await?; + Ok(rows.len() as u64) +} + /// Delete a single entry by id (multi-tenant: `user_id` must match). 
pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result { let mut tx = pool.begin().await?; let row: Option = sqlx::query_as( - "SELECT id, version, folder, type, name, tags, metadata, notes FROM entries \ - WHERE id = $1 AND user_id = $2 FOR UPDATE", + "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \ + WHERE id = $1 AND user_id = $2 AND deleted_at IS NULL FOR UPDATE", ) .bind(entry_id) .bind(user_id) @@ -61,7 +217,7 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul let name = row.name.clone(); let entry_row: EntryRow = (&row).into(); - snapshot_and_delete( + snapshot_and_soft_delete( &mut tx, &folder, &entry_type, @@ -141,7 +297,7 @@ async fn delete_one( } conditions.push(format!("name = ${}", idx)); let sql = format!( - "SELECT folder, type FROM entries WHERE {}", + "SELECT folder, type FROM entries WHERE {} AND deleted_at IS NULL", conditions.join(" AND ") ); let mut q = sqlx::query_as::<_, DryRunRow>(&sql); @@ -198,7 +354,8 @@ async fn delete_one( } conditions.push(format!("name = ${}", idx)); let sql = format!( - "SELECT id, version, folder, type, tags, metadata, notes FROM entries WHERE {} FOR UPDATE", + "SELECT id, version, folder, type, tags, metadata, notes, name FROM entries \ + WHERE {} AND deleted_at IS NULL FOR UPDATE", conditions.join(" AND ") ); let mut q = sqlx::query_as::<_, EntryRow>(&sql); @@ -238,7 +395,7 @@ async fn delete_one( let folder = row.folder.clone(); let entry_type = row.entry_type.clone(); - snapshot_and_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?; + snapshot_and_soft_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?; crate::audit::log_tx( &mut tx, user_id, @@ -305,7 +462,7 @@ async fn delete_bulk( if dry_run { let sql = format!( "SELECT id, version, folder, type, name, metadata, tags, notes \ - FROM entries {where_clause} ORDER BY type, name" + FROM entries {where_clause} AND deleted_at IS NULL 
ORDER BY type, name" ); let mut q = sqlx::query_as::<_, FullEntryRow>(&sql); if let Some(uid) = user_id { @@ -337,7 +494,7 @@ async fn delete_bulk( let sql = format!( "SELECT id, version, folder, type, name, metadata, tags, notes \ - FROM entries {where_clause} ORDER BY type, name FOR UPDATE" + FROM entries {where_clause} AND deleted_at IS NULL ORDER BY type, name FOR UPDATE" ); let mut q = sqlx::query_as::<_, FullEntryRow>(&sql); if let Some(uid) = user_id { @@ -371,8 +528,9 @@ async fn delete_bulk( tags: row.tags.clone(), metadata: row.metadata.clone(), notes: row.notes.clone(), + name: row.name.clone(), }; - snapshot_and_delete( + snapshot_and_soft_delete( &mut tx, &row.folder, &row.entry_type, @@ -406,7 +564,7 @@ async fn delete_bulk( }) } -async fn snapshot_and_delete( +async fn snapshot_and_soft_delete( tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, folder: &str, entry_type: &str, @@ -468,11 +626,33 @@ async fn snapshot_and_delete( } } - sqlx::query("DELETE FROM entries WHERE id = $1") + sqlx::query("UPDATE entries SET deleted_at = NOW(), updated_at = NOW() WHERE id = $1") .bind(row.id) .execute(&mut **tx) .await?; + Ok(()) +} + +async fn purge_entry_record( + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + entry_id: Uuid, +) -> Result<()> { + let fields: Vec = sqlx::query_as( + "SELECT s.id, s.name, s.encrypted \ + FROM entry_secrets es \ + JOIN secrets s ON s.id = es.secret_id \ + WHERE es.entry_id = $1", + ) + .bind(entry_id) + .fetch_all(&mut **tx) + .await?; + + sqlx::query("DELETE FROM entries WHERE id = $1") + .bind(entry_id) + .execute(&mut **tx) + .await?; + let secret_ids: Vec = fields.iter().map(|f| f.id).collect(); if !secret_ids.is_empty() { sqlx::query( diff --git a/crates/secrets-core/src/service/env_map.rs b/crates/secrets-core/src/service/env_map.rs index 9f7faa8..04f3030 100644 --- a/crates/secrets-core/src/service/env_map.rs +++ b/crates/secrets-core/src/service/env_map.rs @@ -20,7 +20,7 @@ pub async fn build_env_map( master_key: &[u8; 
32], user_id: Option, ) -> Result> { - let entries = fetch_entries(pool, folder, entry_type, name, tags, None, user_id).await?; + let entries = fetch_entries(pool, folder, entry_type, name, tags, None, None, user_id).await?; if entries.is_empty() { return Ok(HashMap::new()); } diff --git a/crates/secrets-core/src/service/export.rs b/crates/secrets-core/src/service/export.rs index f1d4f72..ec6dd14 100644 --- a/crates/secrets-core/src/service/export.rs +++ b/crates/secrets-core/src/service/export.rs @@ -30,6 +30,7 @@ pub async fn export( params.name, params.tags, params.query, + None, params.user_id, ) .await?; diff --git a/crates/secrets-core/src/service/mod.rs b/crates/secrets-core/src/service/mod.rs index 432c6de..0d1b1b4 100644 --- a/crates/secrets-core/src/service/mod.rs +++ b/crates/secrets-core/src/service/mod.rs @@ -7,6 +7,7 @@ pub mod export; pub mod get_secret; pub mod history; pub mod import; +pub mod relations; pub mod rollback; pub mod search; pub mod update; diff --git a/crates/secrets-core/src/service/relations.rs b/crates/secrets-core/src/service/relations.rs new file mode 100644 index 0000000..9274cec --- /dev/null +++ b/crates/secrets-core/src/service/relations.rs @@ -0,0 +1,324 @@ +use std::collections::{BTreeSet, HashMap}; + +use anyhow::Result; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::error::AppError; + +#[derive(Debug, Clone, serde::Serialize, sqlx::FromRow)] +pub struct RelationEntrySummary { + pub id: Uuid, + pub folder: String, + #[serde(rename = "type")] + #[sqlx(rename = "type")] + pub entry_type: String, + pub name: String, +} + +#[derive(Debug, Clone, Default, serde::Serialize)] +pub struct EntryRelations { + pub parents: Vec, + pub children: Vec, +} + +pub async fn add_parent_relation( + pool: &PgPool, + parent_entry_id: Uuid, + child_entry_id: Uuid, + user_id: Option, +) -> Result<()> { + if parent_entry_id == child_entry_id { + return Err(AppError::Validation { + message: "entry cannot reference itself".to_string(), + } + 
.into()); + } + + let mut tx = pool.begin().await?; + validate_live_entries(&mut tx, &[parent_entry_id, child_entry_id], user_id).await?; + + let cycle_exists: bool = sqlx::query_scalar( + "WITH RECURSIVE descendants AS ( \ + SELECT child_entry_id FROM entry_relations WHERE parent_entry_id = $1 \ + UNION \ + SELECT er.child_entry_id \ + FROM entry_relations er \ + JOIN descendants d ON d.child_entry_id = er.parent_entry_id \ + ) \ + SELECT EXISTS(SELECT 1 FROM descendants WHERE child_entry_id = $2)", + ) + .bind(child_entry_id) + .bind(parent_entry_id) + .fetch_one(&mut *tx) + .await?; + if cycle_exists { + tx.rollback().await?; + return Err(AppError::Validation { + message: "adding this relation would create a cycle".to_string(), + } + .into()); + } + + sqlx::query( + "INSERT INTO entry_relations (parent_entry_id, child_entry_id) \ + VALUES ($1, $2) ON CONFLICT DO NOTHING", + ) + .bind(parent_entry_id) + .bind(child_entry_id) + .execute(&mut *tx) + .await?; + tx.commit().await?; + Ok(()) +} + +pub async fn remove_parent_relation( + pool: &PgPool, + parent_entry_id: Uuid, + child_entry_id: Uuid, + user_id: Option, +) -> Result<()> { + let mut tx = pool.begin().await?; + validate_live_entries(&mut tx, &[parent_entry_id, child_entry_id], user_id).await?; + sqlx::query("DELETE FROM entry_relations WHERE parent_entry_id = $1 AND child_entry_id = $2") + .bind(parent_entry_id) + .bind(child_entry_id) + .execute(&mut *tx) + .await?; + tx.commit().await?; + Ok(()) +} + +pub async fn set_parent_relations( + pool: &PgPool, + child_entry_id: Uuid, + parent_entry_ids: &[Uuid], + user_id: Option, +) -> Result<()> { + let deduped: Vec = parent_entry_ids + .iter() + .copied() + .collect::>() + .into_iter() + .collect(); + if deduped.contains(&child_entry_id) { + return Err(AppError::Validation { + message: "entry cannot reference itself".to_string(), + } + .into()); + } + + let mut tx = pool.begin().await?; + let mut validate_ids = Vec::with_capacity(deduped.len() + 1); + 
validate_ids.push(child_entry_id); + validate_ids.extend(deduped.iter().copied()); + validate_live_entries(&mut tx, &validate_ids, user_id).await?; + + let current_parent_ids: Vec = + sqlx::query_scalar("SELECT parent_entry_id FROM entry_relations WHERE child_entry_id = $1") + .bind(child_entry_id) + .fetch_all(&mut *tx) + .await?; + let current: BTreeSet = current_parent_ids.into_iter().collect(); + let target: BTreeSet = deduped.iter().copied().collect(); + + for parent_id in current.difference(&target) { + sqlx::query( + "DELETE FROM entry_relations WHERE parent_entry_id = $1 AND child_entry_id = $2", + ) + .bind(*parent_id) + .bind(child_entry_id) + .execute(&mut *tx) + .await?; + } + + for parent_id in target.difference(¤t) { + let cycle_exists: bool = sqlx::query_scalar( + "WITH RECURSIVE descendants AS ( \ + SELECT child_entry_id FROM entry_relations WHERE parent_entry_id = $1 \ + UNION \ + SELECT er.child_entry_id \ + FROM entry_relations er \ + JOIN descendants d ON d.child_entry_id = er.parent_entry_id \ + ) \ + SELECT EXISTS(SELECT 1 FROM descendants WHERE child_entry_id = $2)", + ) + .bind(child_entry_id) + .bind(*parent_id) + .fetch_one(&mut *tx) + .await?; + if cycle_exists { + tx.rollback().await?; + return Err(AppError::Validation { + message: "adding this relation would create a cycle".to_string(), + } + .into()); + } + + sqlx::query( + "INSERT INTO entry_relations (parent_entry_id, child_entry_id) VALUES ($1, $2) \ + ON CONFLICT DO NOTHING", + ) + .bind(*parent_id) + .bind(child_entry_id) + .execute(&mut *tx) + .await?; + } + + tx.commit().await?; + Ok(()) +} + +pub async fn get_relations_for_entries( + pool: &PgPool, + entry_ids: &[Uuid], + user_id: Option, +) -> Result> { + if entry_ids.is_empty() { + return Ok(HashMap::new()); + } + + #[derive(sqlx::FromRow)] + struct ParentRow { + owner_entry_id: Uuid, + id: Uuid, + folder: String, + #[sqlx(rename = "type")] + entry_type: String, + name: String, + } + + #[derive(sqlx::FromRow)] + struct 
ChildRow { + owner_entry_id: Uuid, + id: Uuid, + folder: String, + #[sqlx(rename = "type")] + entry_type: String, + name: String, + } + + let (parents, children): (Vec, Vec) = if let Some(uid) = user_id { + let parents = sqlx::query_as( + "SELECT er.child_entry_id AS owner_entry_id, p.id, p.folder, p.type, p.name \ + FROM entry_relations er \ + JOIN entries p ON p.id = er.parent_entry_id \ + JOIN entries c ON c.id = er.child_entry_id \ + WHERE er.child_entry_id = ANY($1) \ + AND p.user_id = $2 AND c.user_id = $2 \ + AND p.deleted_at IS NULL AND c.deleted_at IS NULL \ + ORDER BY er.child_entry_id, p.name ASC", + ) + .bind(entry_ids) + .bind(uid) + .fetch_all(pool); + let children = sqlx::query_as( + "SELECT er.parent_entry_id AS owner_entry_id, c.id, c.folder, c.type, c.name \ + FROM entry_relations er \ + JOIN entries c ON c.id = er.child_entry_id \ + JOIN entries p ON p.id = er.parent_entry_id \ + WHERE er.parent_entry_id = ANY($1) \ + AND p.user_id = $2 AND c.user_id = $2 \ + AND p.deleted_at IS NULL AND c.deleted_at IS NULL \ + ORDER BY er.parent_entry_id, c.name ASC", + ) + .bind(entry_ids) + .bind(uid) + .fetch_all(pool); + (parents.await?, children.await?) 
+ } else { + let parents = sqlx::query_as( + "SELECT er.child_entry_id AS owner_entry_id, p.id, p.folder, p.type, p.name \ + FROM entry_relations er \ + JOIN entries p ON p.id = er.parent_entry_id \ + JOIN entries c ON c.id = er.child_entry_id \ + WHERE er.child_entry_id = ANY($1) \ + AND p.user_id IS NULL AND c.user_id IS NULL \ + AND p.deleted_at IS NULL AND c.deleted_at IS NULL \ + ORDER BY er.child_entry_id, p.name ASC", + ) + .bind(entry_ids) + .fetch_all(pool); + let children = sqlx::query_as( + "SELECT er.parent_entry_id AS owner_entry_id, c.id, c.folder, c.type, c.name \ + FROM entry_relations er \ + JOIN entries c ON c.id = er.child_entry_id \ + JOIN entries p ON p.id = er.parent_entry_id \ + WHERE er.parent_entry_id = ANY($1) \ + AND p.user_id IS NULL AND c.user_id IS NULL \ + AND p.deleted_at IS NULL AND c.deleted_at IS NULL \ + ORDER BY er.parent_entry_id, c.name ASC", + ) + .bind(entry_ids) + .fetch_all(pool); + (parents.await?, children.await?) + }; + + let mut map: HashMap = entry_ids + .iter() + .copied() + .map(|id| (id, EntryRelations::default())) + .collect(); + + for row in parents { + map.entry(row.owner_entry_id) + .or_default() + .parents + .push(RelationEntrySummary { + id: row.id, + folder: row.folder, + entry_type: row.entry_type, + name: row.name, + }); + } + + for row in children { + map.entry(row.owner_entry_id) + .or_default() + .children + .push(RelationEntrySummary { + id: row.id, + folder: row.folder, + entry_type: row.entry_type, + name: row.name, + }); + } + + Ok(map) +} + +async fn validate_live_entries( + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + entry_ids: &[Uuid], + user_id: Option, +) -> Result<()> { + let unique_ids: Vec = entry_ids + .iter() + .copied() + .collect::>() + .into_iter() + .collect(); + let live_count: i64 = if let Some(uid) = user_id { + sqlx::query_scalar( + "SELECT COUNT(*)::bigint FROM entries \ + WHERE id = ANY($1) AND user_id = $2 AND deleted_at IS NULL", + ) + .bind(&unique_ids) + .bind(uid) + 
.fetch_one(&mut **tx) + .await? + } else { + sqlx::query_scalar( + "SELECT COUNT(*)::bigint FROM entries \ + WHERE id = ANY($1) AND user_id IS NULL AND deleted_at IS NULL", + ) + .bind(&unique_ids) + .fetch_one(&mut **tx) + .await? + }; + + if live_count != unique_ids.len() as i64 { + return Err(AppError::NotFoundEntry.into()); + } + Ok(()) +} diff --git a/crates/secrets-core/src/service/rollback.rs b/crates/secrets-core/src/service/rollback.rs index 453abdd..084d5ea 100644 --- a/crates/secrets-core/src/service/rollback.rs +++ b/crates/secrets-core/src/service/rollback.rs @@ -6,6 +6,8 @@ use sqlx::PgPool; use uuid::Uuid; use crate::db; +use crate::error::AppError; +use crate::models::EntryWriteRow; #[derive(Debug, serde::Serialize)] pub struct RollbackResult { @@ -17,11 +19,9 @@ pub struct RollbackResult { } /// Roll back entry `name` to `to_version` (or the most recent snapshot if None). -/// `folder` is optional; if omitted and multiple entries share the name, an error is returned. pub async fn run( pool: &PgPool, - name: &str, - folder: Option<&str>, + entry_id: Uuid, to_version: Option, user_id: Option, ) -> Result { @@ -36,88 +36,26 @@ pub async fn run( metadata: Value, } - // Disambiguate: find the unique entry_id for (name, folder). - // Query entries_history by entry_id once we know it; first resolve via name + optional folder. - let entry_id: Option = if let Some(uid) = user_id { - if let Some(f) = folder { - sqlx::query_scalar( - "SELECT DISTINCT entry_id FROM entries_history \ - WHERE name = $1 AND folder = $2 AND user_id = $3 LIMIT 1", - ) - .bind(name) - .bind(f) - .bind(uid) - .fetch_optional(pool) - .await? 
- } else { - let ids: Vec = sqlx::query_scalar( - "SELECT DISTINCT entry_id FROM entries_history \ - WHERE name = $1 AND user_id = $2", - ) - .bind(name) - .bind(uid) - .fetch_all(pool) - .await?; - match ids.len() { - 0 => None, - 1 => Some(ids[0]), - _ => { - let folders: Vec = sqlx::query_scalar( - "SELECT DISTINCT folder FROM entries_history \ - WHERE name = $1 AND user_id = $2", - ) - .bind(name) - .bind(uid) - .fetch_all(pool) - .await?; - anyhow::bail!( - "Ambiguous: entries named '{}' exist in folders: [{}]. \ - Specify 'folder' to disambiguate.", - name, - folders.join(", ") - ) - } - } - } - } else if let Some(f) = folder { - sqlx::query_scalar( - "SELECT DISTINCT entry_id FROM entries_history \ - WHERE name = $1 AND folder = $2 AND user_id IS NULL LIMIT 1", + let live_entry: Option = if let Some(uid) = user_id { + sqlx::query_as( + "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \ + WHERE id = $1 AND user_id = $2 AND deleted_at IS NULL", ) - .bind(name) - .bind(f) + .bind(entry_id) + .bind(uid) .fetch_optional(pool) .await? } else { - let ids: Vec = sqlx::query_scalar( - "SELECT DISTINCT entry_id FROM entries_history \ - WHERE name = $1 AND user_id IS NULL", + sqlx::query_as( + "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \ + WHERE id = $1 AND user_id IS NULL AND deleted_at IS NULL", ) - .bind(name) - .fetch_all(pool) - .await?; - match ids.len() { - 0 => None, - 1 => Some(ids[0]), - _ => { - let folders: Vec = sqlx::query_scalar( - "SELECT DISTINCT folder FROM entries_history \ - WHERE name = $1 AND user_id IS NULL", - ) - .bind(name) - .fetch_all(pool) - .await?; - anyhow::bail!( - "Ambiguous: entries named '{}' exist in folders: [{}]. \ - Specify 'folder' to disambiguate.", - name, - folders.join(", ") - ) - } - } + .bind(entry_id) + .fetch_optional(pool) + .await? 
}; - let entry_id = entry_id.ok_or_else(|| anyhow::anyhow!("No history found for '{}'", name))?; + let live_entry = live_entry.ok_or(AppError::NotFoundEntry)?; let snap: Option = if let Some(ver) = to_version { sqlx::query_as( @@ -142,8 +80,8 @@ pub async fn run( let snap = snap.ok_or_else(|| { anyhow::anyhow!( - "No history found for '{}'{}.", - name, + "No history found for entry '{}'{}.", + live_entry.name, to_version .map(|v| format!(" at version {}", v)) .unwrap_or_default() @@ -155,21 +93,9 @@ pub async fn run( let mut tx = pool.begin().await?; - #[derive(sqlx::FromRow)] - struct LiveEntry { - id: Uuid, - version: i64, - folder: String, - #[sqlx(rename = "type")] - entry_type: String, - tags: Vec, - metadata: Value, - } - - // Lock the live entry if it exists (matched by entry_id for precision). - let live: Option = sqlx::query_as( - "SELECT id, version, folder, type, tags, metadata FROM entries \ - WHERE id = $1 FOR UPDATE", + let live: Option = sqlx::query_as( + "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \ + WHERE id = $1 AND deleted_at IS NULL FOR UPDATE", ) .bind(entry_id) .fetch_optional(&mut *tx) @@ -192,7 +118,7 @@ pub async fn run( user_id, folder: &lr.folder, entry_type: &lr.entry_type, - name, + name: &lr.name, version: lr.version, action: "rollback", tags: &lr.tags, @@ -237,11 +163,13 @@ pub async fn run( } sqlx::query( - "UPDATE entries SET folder = $1, type = $2, tags = $3, metadata = $4, version = version + 1, \ - updated_at = NOW() WHERE id = $5", + "UPDATE entries SET folder = $1, type = $2, name = $3, notes = $4, tags = $5, metadata = $6, \ + version = version + 1, updated_at = NOW() WHERE id = $7", ) .bind(&snap.folder) .bind(&snap.entry_type) + .bind(&live_entry.name) + .bind(&live_entry.notes) .bind(&snap.tags) .bind(&snap_metadata) .bind(lr.id) @@ -250,36 +178,7 @@ pub async fn run( lr.id } else { - if let Some(uid) = user_id { - sqlx::query_scalar( - "INSERT INTO entries \ - (user_id, folder, 
type, name, notes, tags, metadata, version, updated_at) \ - VALUES ($1, $2, $3, $4, '', $5, $6, $7, NOW()) RETURNING id", - ) - .bind(uid) - .bind(&snap.folder) - .bind(&snap.entry_type) - .bind(name) - .bind(&snap.tags) - .bind(&snap_metadata) - .bind(snap.version) - .fetch_one(&mut *tx) - .await? - } else { - sqlx::query_scalar( - "INSERT INTO entries \ - (folder, type, name, notes, tags, metadata, version, updated_at) \ - VALUES ($1, $2, $3, '', $4, $5, $6, NOW()) RETURNING id", - ) - .bind(&snap.folder) - .bind(&snap.entry_type) - .bind(name) - .bind(&snap.tags) - .bind(&snap_metadata) - .bind(snap.version) - .fetch_one(&mut *tx) - .await? - } + return Err(AppError::NotFoundEntry.into()); }; if let Some(secret_snapshot) = snap_secret_snapshot { @@ -292,8 +191,9 @@ pub async fn run( "rollback", &snap.folder, &snap.entry_type, - name, + &live_entry.name, serde_json::json!({ + "entry_id": entry_id, "restored_version": snap.version, "original_action": snap.action, }), @@ -303,7 +203,7 @@ pub async fn run( tx.commit().await?; Ok(RollbackResult { - name: name.to_string(), + name: live_entry.name, folder: snap.folder, entry_type: snap.entry_type, restored_version: snap.version, diff --git a/crates/secrets-core/src/service/search.rs b/crates/secrets-core/src/service/search.rs index 6b87183..5d8368a 100644 --- a/crates/secrets-core/src/service/search.rs +++ b/crates/secrets-core/src/service/search.rs @@ -27,6 +27,7 @@ pub struct SearchParams<'a> { pub name_query: Option<&'a str>, pub tags: &'a [String], pub query: Option<&'a str>, + pub metadata_query: Option<&'a str>, pub sort: &'a str, pub limit: u32, pub offset: u32, @@ -75,6 +76,10 @@ pub async fn count_entries(pool: &PgPool, a: &SearchParams<'_>) -> Result { let pattern = ilike_pattern(v); q = q.bind(pattern); } + if let Some(v) = a.metadata_query { + let pattern = ilike_pattern(v); + q = q.bind(pattern); + } let n = q.fetch_one(pool).await?; Ok(n) } @@ -90,6 +95,7 @@ fn entry_where_clause_and_next_idx(a: 
&SearchParams<'_>) -> (String, i32) { } else { conditions.push("user_id IS NULL".to_string()); } + conditions.push("deleted_at IS NULL".to_string()); if a.folder.is_some() { conditions.push(format!("folder = ${}", idx)); @@ -132,6 +138,14 @@ fn entry_where_clause_and_next_idx(a: &SearchParams<'_>) -> (String, i32) { )); idx += 1; } + if a.metadata_query.is_some() { + conditions.push(format!( + "EXISTS (SELECT 1 FROM jsonb_path_query(metadata, 'strict $.** ? (@.type() != \"object\" && @.type() != \"array\")') AS val \ + WHERE (val #>> '{{}}') ILIKE ${} ESCAPE '\\')", + idx + )); + idx += 1; + } let where_clause = if conditions.is_empty() { String::new() @@ -164,6 +178,7 @@ pub async fn fetch_entries( name: Option<&str>, tags: &[String], query: Option<&str>, + metadata_query: Option<&str>, user_id: Option, ) -> Result> { let params = SearchParams { @@ -173,6 +188,7 @@ pub async fn fetch_entries( name_query: None, tags, query, + metadata_query, sort: "name", limit: FETCH_ALL_LIMIT, offset: 0, @@ -195,7 +211,7 @@ async fn fetch_entries_paged(pool: &PgPool, a: &SearchParams<'_>) -> Result) -> Result = if let Some(uid) = user_id { sqlx::query_as( "SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \ - created_at, updated_at FROM entries WHERE id = $1 AND user_id = $2", + created_at, updated_at, deleted_at FROM entries WHERE id = $1 AND user_id = $2 AND deleted_at IS NULL", ) .bind(id) .bind(uid) @@ -276,7 +296,7 @@ pub async fn resolve_entry_by_id( } else { sqlx::query_as( "SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \ - created_at, updated_at FROM entries WHERE id = $1 AND user_id IS NULL", + created_at, updated_at, deleted_at FROM entries WHERE id = $1 AND user_id IS NULL AND deleted_at IS NULL", ) .bind(id) .fetch_optional(pool) @@ -298,7 +318,7 @@ pub async fn resolve_entry( folder: Option<&str>, user_id: Option, ) -> Result { - let entries = fetch_entries(pool, folder, None, Some(name), &[], None, user_id).await?; + 
let entries = fetch_entries(pool, folder, None, Some(name), &[], None, None, user_id).await?; match entries.len() { 0 => { if let Some(f) = folder { @@ -339,6 +359,7 @@ struct EntryRaw { version: i64, created_at: chrono::DateTime, updated_at: chrono::DateTime, + deleted_at: Option>, } impl From for Entry { @@ -355,6 +376,7 @@ impl From for Entry { version: r.version, created_at: r.created_at, updated_at: r.updated_at, + deleted_at: r.deleted_at, } } } diff --git a/crates/secrets-core/src/service/update.rs b/crates/secrets-core/src/service/update.rs index bb4d9c3..e28ed24 100644 --- a/crates/secrets-core/src/service/update.rs +++ b/crates/secrets-core/src/service/update.rs @@ -66,7 +66,8 @@ pub async fn run( } conditions.push(format!("name = ${}", idx)); let sql = format!( - "SELECT id, version, folder, type, tags, metadata, notes FROM entries WHERE {} FOR UPDATE", + "SELECT id, version, folder, type, tags, metadata, notes, name FROM entries \ + WHERE {} AND deleted_at IS NULL FOR UPDATE", conditions.join(" AND ") ); let mut q = sqlx::query_as::<_, EntryRow>(&sql); @@ -464,7 +465,7 @@ pub async fn update_fields_by_id( let row: Option = sqlx::query_as( "SELECT id, version, folder, type, name, tags, metadata, notes FROM entries \ - WHERE id = $1 AND user_id = $2 FOR UPDATE", + WHERE id = $1 AND user_id = $2 AND deleted_at IS NULL FOR UPDATE", ) .bind(entry_id) .bind(user_id) diff --git a/crates/secrets-mcp/Cargo.toml b/crates/secrets-mcp/Cargo.toml index 806917d..937998d 100644 --- a/crates/secrets-mcp/Cargo.toml +++ b/crates/secrets-mcp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "secrets-mcp" -version = "0.5.12" +version = "0.5.13" edition.workspace = true [[bin]] diff --git a/crates/secrets-mcp/src/main.rs b/crates/secrets-mcp/src/main.rs index 5c8317b..de056d9 100644 --- a/crates/secrets-mcp/src/main.rs +++ b/crates/secrets-mcp/src/main.rs @@ -26,6 +26,7 @@ use tracing_subscriber::fmt::time::FormatTime; use secrets_core::config::resolve_db_config; use 
secrets_core::db::{create_pool, migrate}; +use secrets_core::service::delete::purge_expired_deleted_entries; use crate::oauth::OAuthConfig; use crate::tools::SecretsService; @@ -169,6 +170,7 @@ async fn main() -> Result<()> { // Rate limiting let rate_limit_state = rate_limit::RateLimitState::new(); let rate_limit_cleanup = rate_limit::spawn_cleanup_task(rate_limit_state.ip_limiter.clone()); + let recycle_bin_cleanup = tokio::spawn(start_recycle_bin_cleanup_task(pool.clone())); let router = Router::new() .merge(web::web_router()) @@ -212,9 +214,28 @@ async fn main() -> Result<()> { session_cleanup.abort(); rate_limit_cleanup.abort(); + recycle_bin_cleanup.abort(); Ok(()) } +async fn start_recycle_bin_cleanup_task(pool: PgPool) { + let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(24 * 60 * 60)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + + loop { + interval.tick().await; + match purge_expired_deleted_entries(&pool).await { + Ok(count) if count > 0 => { + tracing::info!(purged_count = count, "purged expired recycle bin entries"); + } + Ok(_) => {} + Err(error) => { + tracing::warn!(error = %error, "failed to purge expired recycle bin entries"); + } + } + } +} + async fn shutdown_signal() { let ctrl_c = tokio::signal::ctrl_c(); diff --git a/crates/secrets-mcp/src/tools.rs b/crates/secrets-mcp/src/tools.rs index 51915d0..84e7cd3 100644 --- a/crates/secrets-mcp/src/tools.rs +++ b/crates/secrets-mcp/src/tools.rs @@ -168,8 +168,9 @@ use secrets_core::service::{ export::{ExportParams, export as svc_export}, get_secret::{get_all_secrets_by_id, get_secret_field_by_id}, history::run as svc_history, + relations::{add_parent_relation, get_relations_for_entries, remove_parent_relation}, rollback::run as svc_rollback, - search::{SearchParams, resolve_entry_by_id, run as svc_search}, + search::{SearchParams, resolve_entry, resolve_entry_by_id, run as svc_search}, update::{UpdateParams, run as svc_update}, }; @@ -373,6 
+374,8 @@ struct FindInput { description = "Fuzzy search across name, folder, type, notes, tags, and metadata values" )] query: Option, + #[schemars(description = "Fuzzy search across metadata values only (keys excluded)")] + metadata_query: Option, #[schemars(description = "Exact folder filter (e.g. 'refining', 'ricnsmart')")] folder: Option, #[schemars( @@ -401,6 +404,8 @@ struct FindInput { struct SearchInput { #[schemars(description = "Fuzzy search across name, folder, type, notes, tags, metadata")] query: Option, + #[schemars(description = "Fuzzy search across metadata values only (keys excluded)")] + metadata_query: Option, #[schemars(description = "Folder filter (e.g. 'refining', 'personal', 'family')")] folder: Option, #[schemars( @@ -486,6 +491,9 @@ struct AddInput { )] #[serde(default, deserialize_with = "deser::option_vec_string_from_string")] link_secret_names: Option>, + #[schemars(description = "UUIDs of parent entries to link to this entry")] + #[serde(default, deserialize_with = "deser::option_vec_string_from_string")] + parent_ids: Option>, #[schemars(description = "Encryption key as a 64-char hex string. \ If provided, takes priority over the X-Encryption-Key HTTP header. \ Use this when the MCP client cannot reliably forward custom headers.")] @@ -551,6 +559,12 @@ struct UpdateInput { )] #[serde(default, deserialize_with = "deser::option_vec_string_from_string")] unlink_secret_names: Option>, + #[schemars(description = "UUIDs of parent entries to link")] + #[serde(default, deserialize_with = "deser::option_vec_string_from_string")] + add_parent_ids: Option>, + #[schemars(description = "UUIDs of parent entries to unlink")] + #[serde(default, deserialize_with = "deser::option_vec_string_from_string")] + remove_parent_ids: Option>, #[schemars(description = "Encryption key as a 64-char hex string. \ If provided, takes priority over the X-Encryption-Key HTTP header. 
\ Use this when the MCP client cannot reliably forward custom headers.")] @@ -596,16 +610,8 @@ struct HistoryInput { #[derive(Debug, Deserialize, JsonSchema)] struct RollbackInput { - #[schemars(description = "Name of the entry")] - name: String, - #[schemars( - description = "Folder for disambiguation when multiple entries share the same name (optional)" - )] - folder: Option, - #[schemars( - description = "Entry UUID (from secrets_find). If provided, name/folder are ignored." - )] - id: Option, + #[schemars(description = "Entry UUID (from secrets_find) for an existing, non-deleted entry")] + id: String, #[schemars(description = "Target version number. Omit to restore the most recent snapshot.")] #[serde(default, deserialize_with = "deser::option_i64_from_string")] to_version: Option, @@ -725,6 +731,10 @@ fn parse_uuid(s: &str) -> Result { .map_err(|_| rmcp::ErrorData::invalid_request(format!("Invalid UUID: '{}'", s), None)) } +fn parse_uuid_list(values: &[String]) -> Result, rmcp::ErrorData> { + values.iter().map(|value| parse_uuid(value)).collect() +} + // ── Tool implementations ────────────────────────────────────────────────────── #[tool_router] @@ -752,6 +762,7 @@ impl SecretsService { name = input.name.as_deref(), name_query = input.name_query.as_deref(), query = input.query.as_deref(), + metadata_query = input.metadata_query.as_deref(), "tool call start", ); let tags = input.tags.unwrap_or_default(); @@ -764,6 +775,7 @@ impl SecretsService { name_query: input.name_query.as_deref(), tags: &tags, query: input.query.as_deref(), + metadata_query: input.metadata_query.as_deref(), sort: "name", limit: input.limit.unwrap_or(20), offset: input.offset.unwrap_or(0), @@ -780,6 +792,7 @@ impl SecretsService { name_query: input.name_query.as_deref(), tags: &tags, query: input.query.as_deref(), + metadata_query: input.metadata_query.as_deref(), sort: "name", limit: 0, offset: 0, @@ -792,11 +805,23 @@ impl SecretsService { |e| tracing::warn!(tool = "secrets_find", error 
= %e, "count_entries failed"), ) .unwrap_or(0); + let relation_map = get_relations_for_entries( + &self.pool, + &result + .entries + .iter() + .map(|entry| entry.id) + .collect::>(), + Some(user_id), + ) + .await + .map_err(|e| mcp_err_internal_logged("secrets_find", Some(user_id), e))?; let entries: Vec = result .entries .iter() .map(|e| { + let relations = relation_map.get(&e.id).cloned().unwrap_or_default(); let schema: Vec = result .secret_schemas .get(&e.id) @@ -819,6 +844,8 @@ impl SecretsService { "type": e.entry_type, "tags": e.tags, "metadata": e.metadata, + "parents": relations.parents, + "children": relations.children, "secret_fields": schema, "updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), }) @@ -867,6 +894,7 @@ impl SecretsService { name = input.name.as_deref(), name_query = input.name_query.as_deref(), query = input.query.as_deref(), + metadata_query = input.metadata_query.as_deref(), "tool call start", ); let tags = input.tags.unwrap_or_default(); @@ -879,6 +907,7 @@ impl SecretsService { name_query: input.name_query.as_deref(), tags: &tags, query: input.query.as_deref(), + metadata_query: input.metadata_query.as_deref(), sort: input.sort.as_deref().unwrap_or("name"), limit: input.limit.unwrap_or(20), offset: input.offset.unwrap_or(0), @@ -887,12 +916,24 @@ impl SecretsService { ) .await .map_err(|e| mcp_err_internal_logged("secrets_search", Some(user_id), e))?; + let relation_map = get_relations_for_entries( + &self.pool, + &result + .entries + .iter() + .map(|entry| entry.id) + .collect::>(), + Some(user_id), + ) + .await + .map_err(|e| mcp_err_internal_logged("secrets_search", Some(user_id), e))?; let summary = input.summary.unwrap_or(false); let entries: Vec = result .entries .iter() .map(|e| { + let relations = relation_map.get(&e.id).cloned().unwrap_or_default(); if summary { serde_json::json!({ "name": e.name, @@ -900,6 +941,8 @@ impl SecretsService { "type": e.entry_type, "tags": e.tags, "notes": e.notes, + "parents": 
relations.parents, + "children": relations.children, "updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), }) } else { @@ -926,6 +969,8 @@ impl SecretsService { "notes": e.notes, "tags": e.tags, "metadata": e.metadata, + "parents": relations.parents, + "children": relations.children, "secret_fields": schema, "version": e.version, "updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), @@ -1066,6 +1111,7 @@ impl SecretsService { .filter_map(|(k, v)| v.as_str().map(|s| (k, s.to_string()))) .collect(); let link_secret_names = input.link_secret_names.unwrap_or_default(); + let parent_ids = parse_uuid_list(&input.parent_ids.unwrap_or_default())?; let folder = input.folder.as_deref().unwrap_or(""); let entry_type = input.entry_type.as_deref().unwrap_or(""); let notes = input.notes.as_deref().unwrap_or(""); @@ -1089,6 +1135,15 @@ impl SecretsService { .await .map_err(|e| mcp_err_from_anyhow("secrets_add", Some(user_id), e))?; + let created_entry = resolve_entry(&self.pool, &input.name, Some(folder), Some(user_id)) + .await + .map_err(|e| mcp_err_internal_logged("secrets_add", Some(user_id), e))?; + for parent_id in parent_ids { + add_parent_relation(&self.pool, parent_id, created_entry.id, Some(user_id)) + .await + .map_err(|e| mcp_err_from_anyhow("secrets_add", Some(user_id), e))?; + } + tracing::info!( tool = "secrets_add", ?user_id, @@ -1176,6 +1231,8 @@ impl SecretsService { let remove_secrets = input.remove_secrets.unwrap_or_default(); let link_secret_names = input.link_secret_names.unwrap_or_default(); let unlink_secret_names = input.unlink_secret_names.unwrap_or_default(); + let add_parent_ids = parse_uuid_list(&input.add_parent_ids.unwrap_or_default())?; + let remove_parent_ids = parse_uuid_list(&input.remove_parent_ids.unwrap_or_default())?; let result = svc_update( &self.pool, @@ -1199,6 +1256,30 @@ impl SecretsService { .await .map_err(|e| mcp_err_from_anyhow("secrets_update", Some(user_id), e))?; + let entry_id = if let Some(id_str) 
= input.id.as_deref() { + parse_uuid(id_str)? + } else { + resolve_entry( + &self.pool, + &resolved_name, + resolved_folder.as_deref(), + Some(user_id), + ) + .await + .map_err(|e| mcp_err_internal_logged("secrets_update", Some(user_id), e))? + .id + }; + for parent_id in add_parent_ids { + add_parent_relation(&self.pool, parent_id, entry_id, Some(user_id)) + .await + .map_err(|e| mcp_err_from_anyhow("secrets_update", Some(user_id), e))?; + } + for parent_id in remove_parent_ids { + remove_parent_relation(&self.pool, parent_id, entry_id, Some(user_id)) + .await + .map_err(|e| mcp_err_from_anyhow("secrets_update", Some(user_id), e))?; + } + tracing::info!( tool = "secrets_update", ?user_id, @@ -1354,32 +1435,15 @@ impl SecretsService { tracing::info!( tool = "secrets_rollback", ?user_id, - name = %input.name, - id = ?input.id, + id = %input.id, to_version = input.to_version, "tool call start", ); + let entry_id = parse_uuid(&input.id)?; - let (resolved_name, resolved_folder): (String, Option) = - if let Some(ref id_str) = input.id { - let eid = parse_uuid(id_str)?; - let entry = resolve_entry_by_id(&self.pool, eid, Some(user_id)) - .await - .map_err(|e| mcp_err_internal_logged("secrets_rollback", Some(user_id), e))?; - (entry.name, Some(entry.folder)) - } else { - (input.name.clone(), input.folder.clone()) - }; - - let result = svc_rollback( - &self.pool, - &resolved_name, - resolved_folder.as_deref(), - input.to_version, - Some(user_id), - ) - .await - .map_err(|e| mcp_err_internal_logged("secrets_rollback", Some(user_id), e))?; + let result = svc_rollback(&self.pool, entry_id, input.to_version, Some(user_id)) + .await + .map_err(|e| mcp_err_internal_logged("secrets_rollback", Some(user_id), e))?; tracing::info!( tool = "secrets_rollback", diff --git a/crates/secrets-mcp/src/web/entries.rs b/crates/secrets-mcp/src/web/entries.rs index 1f1bebe..652b3b0 100644 --- a/crates/secrets-mcp/src/web/entries.rs +++ b/crates/secrets-mcp/src/web/entries.rs @@ -12,8 +12,12 @@ 
use uuid::Uuid; use secrets_core::error::AppError; use secrets_core::service::{ - delete::delete_by_id, + delete::{ + count_deleted_entries, delete_by_id, list_deleted_entries, purge_deleted_by_id, + restore_deleted_by_id, + }, get_secret::get_all_secrets_by_id, + relations::{RelationEntrySummary, get_relations_for_entries, set_parent_relations}, search::{SearchParams, count_entries, fetch_secrets_for_entries, ilike_pattern, list_entries}, update::{UpdateEntryFieldsByIdParams, update_fields_by_id}, }; @@ -40,6 +44,7 @@ struct EntriesPageTemplate { secret_type_options_json: String, filter_folder: String, filter_name: String, + filter_metadata_query: String, filter_type: String, current_page: u32, total_pages: u32, @@ -47,6 +52,18 @@ struct EntriesPageTemplate { version: &'static str, } +#[derive(Template)] +#[template(path = "trash.html")] +struct TrashPageTemplate { + user_name: String, + user_email: String, + entries: Vec, + current_page: u32, + total_pages: u32, + total_count: i64, + version: &'static str, +} + /// Non-sensitive entry fields; `secrets` lists field names/types only (no ciphertext). struct EntryListItemView { id: String, @@ -61,6 +78,9 @@ struct EntryListItemView { secrets: Vec, /// JSON array of `{ id, name, secret_type }` for dialog secret chips. secrets_json: String, + parents: Vec, + children: Vec, + parents_json: String, /// RFC3339 UTC; shown in edit dialog. 
updated_at_iso: String, } @@ -72,6 +92,15 @@ struct SecretSummaryView { secret_type: String, } +#[derive(Clone, Serialize)] +struct RelationSummaryView { + id: String, + name: String, + folder: String, + entry_type: String, + href: String, +} + struct FolderTabView { name: String, count: i64, @@ -79,16 +108,32 @@ struct FolderTabView { active: bool, } +struct TrashEntryView { + id: String, + name: String, + folder: String, + entry_type: String, + deleted_at_iso: String, + deleted_at_label: String, +} + #[derive(Deserialize)] pub(super) struct EntriesQuery { folder: Option, name: Option, + metadata_query: Option, /// URL query key is `type` (maps to DB column `entries.type`). #[serde(rename = "type")] entry_type: Option, page: Option, } +#[derive(Deserialize)] +pub(super) struct EntryOptionQuery { + q: Option, + exclude_id: Option, +} + // ── Entry mutation error helpers ────────────────────────────────────────────── type EntryApiError = (StatusCode, Json); @@ -171,6 +216,23 @@ fn map_app_error(err: &AppError, lang: UiLang) -> EntryApiError { } } +fn relation_views(items: &[RelationEntrySummary]) -> Vec { + items + .iter() + .map(|item| RelationSummaryView { + id: item.id.to_string(), + name: item.name.clone(), + folder: item.folder.clone(), + entry_type: item.entry_type.clone(), + href: format!( + "/entries?folder={}&name={}", + urlencoding::encode(&item.folder), + urlencoding::encode(&item.name) + ), + }) + .collect() +} + // ── Handlers ────────────────────────────────────────────────────────────────── pub(super) async fn entries_page( @@ -202,6 +264,12 @@ pub(super) async fn entries_page( .map(|s| s.trim()) .filter(|s| !s.is_empty()) .map(|s| s.to_string()); + let metadata_query_filter = q + .metadata_query + .as_ref() + .map(|s| s.trim()) + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()); let page = q.page.unwrap_or(1).max(1); let count_params = SearchParams { folder: folder_filter.as_deref(), @@ -210,6 +278,7 @@ pub(super) async fn entries_page( 
name_query: name_filter.as_deref(), tags: &[], query: None, + metadata_query: metadata_query_filter.as_deref(), sort: "updated", limit: ENTRIES_PAGE_LIMIT, offset: 0, @@ -223,7 +292,7 @@ pub(super) async fn entries_page( count: i64, } let mut folder_sql = - "SELECT folder, COUNT(*)::bigint AS count FROM entries WHERE user_id = $1".to_string(); + "SELECT folder, COUNT(*)::bigint AS count FROM entries WHERE user_id = $1 AND deleted_at IS NULL".to_string(); let mut bind_idx = 2; if type_filter.is_some() { folder_sql.push_str(&format!(" AND type = ${bind_idx}")); @@ -233,6 +302,13 @@ pub(super) async fn entries_page( folder_sql.push_str(&format!(" AND name ILIKE ${bind_idx} ESCAPE '\\'")); bind_idx += 1; } + if metadata_query_filter.is_some() { + folder_sql.push_str(&format!( + " AND EXISTS (SELECT 1 FROM jsonb_path_query(metadata, 'strict $.** ? (@.type() != \"object\" && @.type() != \"array\")') AS val \ + WHERE (val #>> '{{}}') ILIKE ${bind_idx} ESCAPE '\\')" + )); + bind_idx += 1; + } let _ = bind_idx; folder_sql.push_str(" GROUP BY folder ORDER BY folder"); let mut folder_query = sqlx::query_as::<_, FolderCountRow>(&folder_sql).bind(user_id); @@ -242,6 +318,9 @@ pub(super) async fn entries_page( if let Some(n) = name_filter.as_deref() { folder_query = folder_query.bind(ilike_pattern(n)); } + if let Some(v) = metadata_query_filter.as_deref() { + folder_query = folder_query.bind(ilike_pattern(v)); + } #[derive(sqlx::FromRow)] struct TypeOptionRow { @@ -261,7 +340,7 @@ pub(super) async fn entries_page( }, folder_query.fetch_all(&state.pool), sqlx::query_as::<_, TypeOptionRow>( - "SELECT DISTINCT type FROM entries WHERE user_id = $1 ORDER BY type", + "SELECT DISTINCT type FROM entries WHERE user_id = $1 AND deleted_at IS NULL ORDER BY type", ) .bind(user_id) .fetch_all(&state.pool), @@ -297,6 +376,12 @@ pub(super) async fn entries_page( tracing::error!(error = %e, "failed to load secret schema list for web"); StatusCode::INTERNAL_SERVER_ERROR })?; + let relation_map = 
get_relations_for_entries(&state.pool, &entry_ids, Some(user_id)) + .await + .map_err(|e| { + tracing::error!(error = %e, "failed to load relation list for web"); + StatusCode::INTERNAL_SERVER_ERROR + })?; if let Some(current) = type_filter.as_ref() && !current.is_empty() && !type_options.iter().any(|t| t == current) @@ -309,6 +394,7 @@ pub(super) async fn entries_page( folder: Option<&str>, entry_type: Option<&str>, name: Option<&str>, + metadata_query: Option<&str>, page: Option, ) -> String { let mut pairs: Vec = Vec::new(); @@ -327,6 +413,11 @@ pub(super) async fn entries_page( { pairs.push(format!("name={}", urlencoding::encode(n))); } + if let Some(v) = metadata_query + && !v.is_empty() + { + pairs.push(format!("metadata_query={}", urlencoding::encode(v))); + } if let Some(p) = page { pairs.push(format!("page={}", p)); } @@ -346,6 +437,7 @@ pub(super) async fn entries_page( None, type_filter.as_deref(), name_filter.as_deref(), + metadata_query_filter.as_deref(), Some(1), ), active: folder_filter.is_none(), @@ -357,6 +449,7 @@ pub(super) async fn entries_page( Some(&name), type_filter.as_deref(), name_filter.as_deref(), + metadata_query_filter.as_deref(), Some(1), ), active: folder_filter.as_deref() == Some(name.as_str()), @@ -368,6 +461,7 @@ pub(super) async fn entries_page( let entries = rows .into_iter() .map(|e| { + let relations = relation_map.get(&e.id).cloned().unwrap_or_default(); let secrets: Vec = secret_schemas .get(&e.id) .map(|fields| { @@ -384,6 +478,9 @@ pub(super) async fn entries_page( let secrets_json = serde_json::to_string(&secrets).unwrap_or_else(|_| "[]".to_string()); let metadata_json = serde_json::to_string(&e.metadata).unwrap_or_else(|_| "{}".to_string()); + let parents = relation_views(&relations.parents); + let children = relation_views(&relations.children); + let parents_json = serde_json::to_string(&parents).unwrap_or_else(|_| "[]".to_string()); EntryListItemView { id: e.id.to_string(), folder: e.folder, @@ -394,6 +491,9 @@ 
pub(super) async fn entries_page( metadata_json, secrets, secrets_json, + parents, + children, + parents_json, updated_at_iso: e.updated_at.to_rfc3339_opts(SecondsFormat::Secs, true), } }) @@ -414,6 +514,7 @@ pub(super) async fn entries_page( .unwrap_or_default(), filter_folder: folder_filter.unwrap_or_default(), filter_name: name_filter.unwrap_or_default(), + filter_metadata_query: metadata_query_filter.unwrap_or_default(), filter_type: type_filter.unwrap_or_default(), current_page, total_pages, @@ -424,6 +525,56 @@ pub(super) async fn entries_page( render_template(tmpl) } +pub(super) async fn trash_page( + State(state): State, + session: Session, + Query(q): Query, +) -> Result { + let user = match require_valid_user(&state.pool, &session, "trash_page").await { + Ok(u) => u, + Err(r) => return Ok(r), + }; + + let page = q.page.unwrap_or(1).max(1); + let total_count = count_deleted_entries(&state.pool, user.id) + .await + .map_err(|e| { + tracing::error!(error = %e, user_id = %user.id, "failed to count trash entries"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + let (current_page, total_pages, offset) = paginate(page, total_count, ENTRIES_PAGE_LIMIT); + let rows = list_deleted_entries(&state.pool, user.id, ENTRIES_PAGE_LIMIT, offset) + .await + .map_err(|e| { + tracing::error!(error = %e, user_id = %user.id, "failed to load trash entries"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + + let entries = rows + .into_iter() + .map(|entry| TrashEntryView { + id: entry.id.to_string(), + name: entry.name, + folder: entry.folder, + entry_type: entry.entry_type, + deleted_at_iso: entry.deleted_at.to_rfc3339_opts(SecondsFormat::Secs, true), + deleted_at_label: entry.deleted_at.format("%Y-%m-%d %H:%M:%S UTC").to_string(), + }) + .collect(); + + let tmpl = TrashPageTemplate { + user_name: user.name.clone(), + user_email: user.email.clone().unwrap_or_default(), + entries, + current_page, + total_pages, + total_count, + version: env!("CARGO_PKG_VERSION"), + }; + + 
render_template(tmpl) +} + // ── Entry management (Web UI, non-sensitive fields only) ─────────────────────── #[derive(Deserialize)] @@ -435,6 +586,7 @@ pub(super) struct EntryPatchBody { notes: String, tags: Vec, metadata: serde_json::Value, + parent_ids: Option>, } pub(super) async fn api_entry_patch( @@ -496,9 +648,70 @@ pub(super) async fn api_entry_patch( .await .map_err(|e| map_entry_mutation_err(e, lang))?; + if let Some(parent_ids) = body.parent_ids.as_deref() { + set_parent_relations(&state.pool, entry_id, parent_ids, Some(user_id)) + .await + .map_err(|e| map_entry_mutation_err(e, lang))?; + } + Ok(Json(json!({ "ok": true }))) } +pub(super) async fn api_entry_options( + State(state): State, + session: Session, + headers: HeaderMap, + Query(q): Query, +) -> Result, EntryApiError> { + let lang = request_ui_lang(&headers); + let user_id = current_user_id(&session).await.ok_or(( + StatusCode::UNAUTHORIZED, + Json(json!({ "error": tr(lang, "未登录", "尚未登入", "Not logged in") })), + ))?; + + let query = + q.q.as_deref() + .map(str::trim) + .filter(|value| !value.is_empty()) + .unwrap_or(""); + if query.is_empty() { + return Ok(Json(json!([]))); + } + + let rows = list_entries( + &state.pool, + SearchParams { + folder: None, + entry_type: None, + name: None, + name_query: Some(query), + tags: &[], + query: None, + metadata_query: None, + sort: "name", + limit: 10, + offset: 0, + user_id: Some(user_id), + }, + ) + .await + .map_err(|e| map_entry_mutation_err(e, lang))?; + + let options: Vec<_> = rows + .into_iter() + .filter(|entry| Some(entry.id) != q.exclude_id) + .map(|entry| { + json!({ + "id": entry.id, + "name": entry.name, + "folder": entry.folder, + "type": entry.entry_type, + }) + }) + .collect(); + Ok(Json(serde_json::Value::Array(options))) +} + pub(super) async fn api_entry_delete( State(state): State, session: Session, @@ -517,6 +730,51 @@ pub(super) async fn api_entry_delete( Ok(Json(json!({ "ok": true, + "deleted": true, + }))) +} + +pub(super) async 
fn api_trash_restore( + State(state): State, + session: Session, + headers: HeaderMap, + Path(entry_id): Path, +) -> Result, EntryApiError> { + let lang = request_ui_lang(&headers); + let user_id = current_user_id(&session).await.ok_or(( + StatusCode::UNAUTHORIZED, + Json(json!({ "error": tr(lang, "未登录", "尚未登入", "Not logged in") })), + ))?; + + restore_deleted_by_id(&state.pool, entry_id, user_id) + .await + .map_err(|e| map_entry_mutation_err(e, lang))?; + + Ok(Json(json!({ + "ok": true, + "restored": true, + }))) +} + +pub(super) async fn api_trash_purge( + State(state): State, + session: Session, + headers: HeaderMap, + Path(entry_id): Path, +) -> Result, EntryApiError> { + let lang = request_ui_lang(&headers); + let user_id = current_user_id(&session).await.ok_or(( + StatusCode::UNAUTHORIZED, + Json(json!({ "error": tr(lang, "未登录", "尚未登入", "Not logged in") })), + ))?; + + purge_deleted_by_id(&state.pool, entry_id, user_id) + .await + .map_err(|e| map_entry_mutation_err(e, lang))?; + + Ok(Json(json!({ + "ok": true, + "purged": true, }))) } diff --git a/crates/secrets-mcp/src/web/mod.rs b/crates/secrets-mcp/src/web/mod.rs index 9848335..3bd6eca 100644 --- a/crates/secrets-mcp/src/web/mod.rs +++ b/crates/secrets-mcp/src/web/mod.rs @@ -193,6 +193,7 @@ pub fn web_router() -> Router { .route("/auth/logout", post(auth::auth_logout)) .route("/dashboard", get(account::dashboard)) .route("/entries", get(entries::entries_page)) + .route("/trash", get(entries::trash_page)) .route("/audit", get(audit::audit_page)) .route("/account/bind/google", get(auth::account_bind_google)) .route("/account/unbind/{provider}", post(auth::account_unbind)) @@ -200,6 +201,7 @@ pub fn web_router() -> Router { .route("/api/key-setup", post(account::api_key_setup)) .route("/api/key-change", post(account::api_key_change)) .route("/api/apikey", get(account::api_apikey_get)) + .route("/api/entries/options", get(entries::api_entry_options)) .route( "/api/apikey/regenerate", 
post(account::api_apikey_regenerate), @@ -208,6 +210,11 @@ pub fn web_router() -> Router { "/api/entries/{id}", patch(entries::api_entry_patch).delete(entries::api_entry_delete), ) + .route("/api/trash/{id}/restore", post(entries::api_trash_restore)) + .route( + "/api/trash/{id}", + axum::routing::delete(entries::api_trash_purge), + ) .route( "/api/entries/{entry_id}/secrets/{secret_id}", axum::routing::delete(entries::api_entry_secret_unlink), diff --git a/crates/secrets-mcp/templates/audit.html b/crates/secrets-mcp/templates/audit.html index f6922ad..263b1e2 100644 --- a/crates/secrets-mcp/templates/audit.html +++ b/crates/secrets-mcp/templates/audit.html @@ -13,77 +13,87 @@ --border: #30363d; --text: #e6edf3; --text-muted: #8b949e; --accent: #58a6ff; --accent-hover: #79b8ff; } - body { background: var(--bg); color: var(--text); font-family: 'Inter', sans-serif; min-height: 100vh; } + body { background: #0d1117; color: #c9d1d9; font-family: 'Inter', sans-serif; min-height: 100vh; } .layout { display: flex; min-height: 100vh; } .sidebar { - width: 220px; flex-shrink: 0; background: var(--surface); border-right: 1px solid var(--border); - padding: 24px 16px; display: flex; flex-direction: column; gap: 20px; + width: 200px; flex-shrink: 0; background: #0b1220; border-right: 1px solid rgba(240,246,252,0.08); + padding: 20px 12px; display: flex; flex-direction: column; gap: 20px; } - .sidebar-logo { font-family: 'JetBrains Mono', monospace; font-size: 16px; font-weight: 600; - color: var(--text); text-decoration: none; padding: 0 10px; } - .sidebar-logo span { color: var(--accent); } - .sidebar-menu { display: flex; flex-direction: column; gap: 6px; } + .sidebar-logo { font-family: 'Inter', sans-serif; font-size: 16px; font-weight: 700; + color: #fff; text-decoration: none; padding: 0 10px; } + .sidebar-menu { display: grid; gap: 6px; } .sidebar-link { - padding: 10px 12px; border-radius: 8px; color: var(--text-muted); text-decoration: none; - border: 1px solid 
transparent; font-size: 13px; font-weight: 500; + padding: 10px 12px; border-radius: 10px; color: #8b949e; text-decoration: none; + font-size: 13px; font-weight: 500; } - .sidebar-link:hover { background: var(--surface2); color: var(--text); } + .sidebar-link:hover { background: rgba(56,139,253,0.14); color: #fff; } .sidebar-link.active { - background: rgba(88,166,255,0.12); color: var(--text); border-color: rgba(88,166,255,0.35); + background: rgba(56,139,253,0.14); color: #fff; } .content-shell { flex: 1; min-width: 0; display: flex; flex-direction: column; } .topbar { - background: var(--surface); border-bottom: 1px solid var(--border); padding: 0 24px; - display: flex; align-items: center; gap: 12px; min-height: 52px; + background: transparent; border-bottom: none; padding: 0 24px; + display: flex; align-items: center; gap: 12px; min-height: 44px; } .topbar-spacer { flex: 1; } - .nav-user { font-size: 13px; color: var(--text-muted); } - .lang-bar { display: flex; gap: 2px; background: var(--surface2); border-radius: 6px; padding: 2px; } - .lang-btn { padding: 3px 9px; border: none; background: none; color: var(--text-muted); - font-size: 12px; cursor: pointer; border-radius: 4px; } - .lang-btn.active { background: var(--border); color: var(--text); } + .nav-user { font-size: 14px; color: #8b949e; } + .lang-bar { display: flex; gap: 2px; background: rgba(240,246,252,0.06); border-radius: 8px; padding: 2px; } + .lang-btn { padding: 4px 10px; border: none; background: none; color: #8b949e; + font-size: 12px; cursor: pointer; border-radius: 6px; } + .lang-btn.active { background: rgba(240,246,252,0.1); color: #fff; } .btn-sign-out { - padding: 5px 12px; border-radius: 6px; border: 1px solid var(--border); - background: none; color: var(--text); font-size: 12px; text-decoration: none; cursor: pointer; + padding: 6px 14px; border-radius: 10px; border: 1px solid rgba(240,246,252,0.12); + background: #161b22; color: #c9d1d9; font-size: 13px; text-decoration: none; 
cursor: pointer; } - .btn-sign-out:hover { background: var(--surface2); } - .main { padding: 32px 24px 40px; flex: 1; } - .card { background: var(--surface); border: 1px solid var(--border); border-radius: 12px; - padding: 24px; width: 100%; max-width: 1180px; margin: 0 auto; } + .btn-sign-out:hover { border-color: rgba(56,139,253,0.45); color: #fff; } + .main { padding: 16px 16px 24px; flex: 1; } + .card { background: #111827; border: 1px solid rgba(240,246,252,0.08); border-radius: 18px; + padding: 20px; width: 100%; } .card-title-row { display: flex; align-items: center; flex-wrap: wrap; gap: 8px; - margin-bottom: 20px; + margin-bottom: 18px; } - .card-title { font-size: 20px; font-weight: 600; margin: 0; } + .card-title { font-size: 22px; font-weight: 700; margin: 0; color: #fff; } .card-title-count { display: inline-flex; align-items: center; min-height: 24px; padding: 0 8px; - border: 1px solid var(--border); + border: 1px solid rgba(240,246,252,0.08); border-radius: 999px; - background: var(--bg); - color: var(--text-muted); + background: #0d1117; + color: #8b949e; font-size: 12px; font-weight: 600; line-height: 1; font-family: 'JetBrains Mono', monospace; } - .empty { color: var(--text-muted); font-size: 14px; padding: 20px 0; } + .empty { color: #8b949e; font-size: 14px; padding: 20px 0; } table { width: 100%; border-collapse: collapse; } - th, td { text-align: left; vertical-align: top; padding: 12px 10px; border-top: 1px solid var(--border); } - th { color: var(--text-muted); font-size: 12px; font-weight: 600; } - td { font-size: 13px; } + th, td { text-align: left; vertical-align: top; padding: 14px 12px; border-top: 1px solid rgba(240,246,252,0.08); } + th { color: #8b949e; font-size: 12px; font-weight: 600; } + td { font-size: 13px; color: #c9d1d9; } .mono { font-family: 'JetBrains Mono', monospace; } - .detail { - background: var(--bg); border: 1px solid var(--border); border-radius: 8px; - padding: 10px; white-space: pre-wrap; word-break: 
break-word; font-size: 12px; - max-width: 460px; + .col-detail { min-width: 260px; max-width: 460px; } + .detail-scroll { + height: calc(1.5em * 3 + 20px); + min-height: calc(1.5em * 3 + 20px); + overflow: auto; + resize: vertical; + white-space: pre-wrap; + word-break: break-word; + padding: 10px; + background: #0d1117; + border: 1px solid rgba(240,246,252,0.08); + border-radius: 10px; + font-size: 12px; + font-family: 'JetBrains Mono', monospace; + margin: 0; } @media (max-width: 900px) { .layout { flex-direction: column; } .sidebar { - width: 100%; border-right: none; border-bottom: 1px solid var(--border); + width: 100%; border-right: none; border-bottom: 1px solid rgba(240,246,252,0.08); padding: 16px; gap: 14px; } .sidebar-menu { flex-direction: row; } @@ -93,42 +103,43 @@ .topbar { padding: 12px 16px; flex-wrap: wrap; } table, thead, tbody, th, td, tr { display: block; } thead { display: none; } - tr { border-top: 1px solid var(--border); padding: 12px 0; } + tr { border-top: 1px solid rgba(240,246,252,0.08); padding: 12px 0; } td { border-top: none; padding: 6px 0; } td::before { - display: block; color: var(--text-muted); font-size: 11px; + display: block; color: #8b949e; font-size: 11px; margin-bottom: 4px; text-transform: uppercase; content: attr(data-label); } .detail { max-width: none; } } .pagination { - display: flex; align-items: center; gap: 8px; margin-top: 20px; + display: flex; align-items: center; gap: 12px; margin-top: 18px; justify-content: center; padding: 12px 0; } .page-btn { - padding: 6px 14px; border-radius: 6px; border: 1px solid var(--border); - background: var(--surface); color: var(--text); text-decoration: none; + padding: 8px 12px; border-radius: 10px; border: 1px solid rgba(240,246,252,0.12); + background: #161b22; color: #c9d1d9; text-decoration: none; font-size: 13px; cursor: pointer; } - .page-btn:hover { background: var(--surface2); } + .page-btn:hover { border-color: rgba(56,139,253,0.45); color: #fff; } .page-btn-disabled { 
- padding: 6px 14px; border-radius: 6px; border: 1px solid var(--border); - background: var(--surface); color: var(--text-muted); font-size: 13px; + padding: 8px 12px; border-radius: 10px; border: 1px solid rgba(240,246,252,0.12); + background: #161b22; color: #6e7681; font-size: 13px; opacity: 0.5; cursor: not-allowed; } .page-info { - color: var(--text-muted); font-size: 13px; font-family: 'JetBrains Mono', monospace; + color: #8b949e; font-size: 13px; font-family: 'JetBrains Mono', monospace; }
@@ -172,7 +183,7 @@ {{ entry.action }} {{ entry.target }} -
{{ entry.detail }}
+ {% if !entry.detail.is_empty() %}
{{ entry.detail }}
{% endif %} {% endfor %} @@ -198,7 +209,7 @@
- + diff --git a/crates/secrets-mcp/templates/dashboard.html b/crates/secrets-mcp/templates/dashboard.html index 1ec63bb..3c7fa0a 100644 --- a/crates/secrets-mcp/templates/dashboard.html +++ b/crates/secrets-mcp/templates/dashboard.html @@ -18,110 +18,108 @@ .layout { display: flex; min-height: 100vh; } .sidebar { - width: 220px; flex-shrink: 0; background: var(--surface); border-right: 1px solid var(--border); - padding: 24px 16px; display: flex; flex-direction: column; gap: 20px; + width: 200px; flex-shrink: 0; background: #0b1220; border-right: 1px solid rgba(240,246,252,0.08); + padding: 20px 12px; display: flex; flex-direction: column; gap: 20px; } - .sidebar-logo { font-family: 'JetBrains Mono', monospace; font-size: 16px; font-weight: 600; - color: var(--text); text-decoration: none; padding: 0 10px; } - .sidebar-logo span { color: var(--accent); } - .sidebar-menu { display: flex; flex-direction: column; gap: 6px; } + .sidebar-logo { font-family: 'Inter', sans-serif; font-size: 16px; font-weight: 700; + color: #fff; text-decoration: none; padding: 0 10px; } + .sidebar-menu { display: grid; gap: 6px; } .sidebar-link { - padding: 10px 12px; border-radius: 8px; color: var(--text-muted); text-decoration: none; - border: 1px solid transparent; font-size: 13px; font-weight: 500; - } - .sidebar-link:hover { background: var(--surface2); color: var(--text); } - .sidebar-link.active { - background: rgba(88,166,255,0.12); color: var(--text); border-color: rgba(88,166,255,0.35); + padding: 10px 12px; border-radius: 10px; color: #8b949e; text-decoration: none; + font-size: 13px; font-weight: 500; } + .sidebar-link:hover { background: rgba(56,139,253,0.14); color: #fff; } + .sidebar-link.active { background: rgba(56,139,253,0.14); color: #fff; } .content-shell { flex: 1; min-width: 0; display: flex; flex-direction: column; } .topbar { - background: var(--surface); border-bottom: 1px solid var(--border); padding: 0 24px; - display: flex; align-items: center; gap: 12px; 
min-height: 52px; + background: transparent; border-bottom: none; padding: 0 24px; + display: flex; align-items: center; gap: 12px; min-height: 44px; } .topbar-spacer { flex: 1; } - .nav-user { font-size: 13px; color: var(--text-muted); } - .lang-bar { display: flex; gap: 2px; background: var(--surface2); border-radius: 6px; padding: 2px; } - .lang-btn { padding: 3px 9px; border: none; background: none; color: var(--text-muted); - font-size: 12px; cursor: pointer; border-radius: 4px; } - .lang-btn.active { background: var(--border); color: var(--text); } - .btn-sign-out { padding: 5px 12px; border-radius: 6px; border: 1px solid var(--border); - background: none; color: var(--text); font-size: 12px; cursor: pointer; } - .btn-sign-out:hover { background: var(--surface2); } + .nav-user { font-size: 14px; color: #8b949e; } + .lang-bar { display: flex; gap: 2px; background: rgba(240,246,252,0.06); border-radius: 8px; padding: 2px; } + .lang-btn { padding: 4px 10px; border: none; background: none; color: #8b949e; + font-size: 12px; cursor: pointer; border-radius: 6px; } + .lang-btn.active { background: rgba(240,246,252,0.1); color: #fff; } + .btn-sign-out { + padding: 6px 14px; border-radius: 10px; border: 1px solid rgba(240,246,252,0.12); + background: #161b22; color: #c9d1d9; font-size: 13px; text-decoration: none; cursor: pointer; + } + .btn-sign-out:hover { border-color: rgba(56,139,253,0.45); color: #fff; } /* Main content column */ - .main { display: flex; flex-direction: column; align-items: center; - padding: 24px 20px 8px; min-height: 0; } + .main { padding: 16px 16px 0; flex: 1; min-height: 0; display: flex; flex-direction: column; } .app-footer { - margin-top: auto; text-align: center; - padding: 4px 20px 12px; - font-size: 12px; - color: #9da7b3; + padding: 12px 0; + font-size: 11px; + color: var(--text-muted); font-family: 'JetBrains Mono', monospace; + margin-top: auto; } - .card { background: var(--surface); border: 1px solid var(--border); border-radius: 
12px; - padding: 24px; width: 100%; max-width: 980px; } - .card-title { font-size: 18px; font-weight: 600; margin-bottom: 24px; } + .card { background: #111827; border: 1px solid rgba(240,246,252,0.08); border-radius: 18px; + padding: 20px; width: 100%; } + .card-title { font-size: 22px; font-weight: 700; margin-bottom: 24px; color: #fff; } /* Form */ .field { margin-bottom: 12px; } - .field label { display: block; font-size: 12px; color: var(--text-muted); margin-bottom: 5px; } - .field input { width: 100%; background: var(--bg); border: 1px solid var(--border); - color: var(--text); padding: 9px 12px; border-radius: 6px; + .field label { display: block; font-size: 12px; color: #8b949e; margin-bottom: 5px; } + .field input { width: 100%; background: #0d1117; border: 1px solid rgba(240,246,252,0.08); + color: #c9d1d9; padding: 9px 12px; border-radius: 10px; font-size: 13px; outline: none; } - .field input:focus { border-color: var(--accent); } + .field input:focus { border-color: rgba(56,139,253,0.5); } .pw-field { position: relative; } .pw-field > input { padding-right: 42px; } .pw-toggle { position: absolute; right: 6px; top: 50%; transform: translateY(-50%); display: flex; align-items: center; justify-content: center; - width: 32px; height: 32px; border: none; border-radius: 6px; - background: transparent; color: var(--text-muted); cursor: pointer; + width: 32px; height: 32px; border: none; border-radius: 8px; + background: transparent; color: #8b949e; cursor: pointer; } - .pw-toggle:hover { color: var(--text); background: var(--surface2); } - .pw-toggle:focus-visible { outline: 2px solid var(--accent); outline-offset: 2px; } + .pw-toggle:hover { color: #c9d1d9; background: rgba(240,246,252,0.06); } + .pw-toggle:focus-visible { outline: 2px solid rgba(56,139,253,0.5); outline-offset: 2px; } .pw-icon svg { display: block; } .pw-icon.hidden { display: none; } - .error-msg { color: var(--danger); font-size: 12px; margin-top: 6px; display: none; } + .error-msg { 
color: #f85149; font-size: 12px; margin-top: 6px; display: none; } /* Buttons */ .btn-primary { display: inline-flex; align-items: center; gap: 6px; width: 100%; - justify-content: center; padding: 10px 20px; border-radius: 7px; - border: none; background: var(--accent); color: #0d1117; + justify-content: center; padding: 10px 20px; border-radius: 10px; + border: none; background: #388bfd; color: #fff; font-size: 14px; font-weight: 600; cursor: pointer; transition: background 0.15s; } - .btn-primary:hover { background: var(--accent-hover); } + .btn-primary:hover { background: #58a6ff; } .btn-primary:disabled { opacity: 0.5; cursor: not-allowed; } - .btn-sm { display: inline-flex; align-items: center; gap: 4px; padding: 5px 12px; - border-radius: 5px; border: 1px solid var(--border); background: none; - color: var(--text-muted); font-size: 12px; cursor: pointer; } - .btn-sm:hover { color: var(--text); border-color: var(--text-muted); } + .btn-sm { display: inline-flex; align-items: center; gap: 4px; padding: 8px 12px; + border-radius: 10px; border: 1px solid rgba(240,246,252,0.12); background: #161b22; + color: #8b949e; font-size: 13px; cursor: pointer; font-family: inherit; } + .btn-sm:hover { border-color: rgba(56,139,253,0.45); color: #fff; } .btn-copy { display: flex; align-items: center; gap: 8px; width: 100%; justify-content: center; - padding: 11px 20px; border-radius: 7px; border: 1px solid var(--success); - background: rgba(63,185,80,0.1); color: var(--success); - font-size: 14px; font-weight: 600; cursor: pointer; transition: all 0.15s; } + padding: 11px 20px; border-radius: 10px; border: 1px solid #3fb950; + background: rgba(63,185,80,0.1); color: #3fb950; + font-size: 14px; font-weight: 600; cursor: pointer; transition: all 0.15s; font-family: inherit; } .btn-copy:hover { background: rgba(63,185,80,0.2); } - .btn-copy.copied { background: var(--success); color: #0d1117; border-color: var(--success); } + .btn-copy.copied { background: #3fb950; color: 
#0d1117; border-color: #3fb950; } /* Config format switcher */ .config-tabs { display: grid; grid-template-columns: repeat(2, minmax(0, 1fr)); gap: 10px; margin-bottom: 12px; } - .config-tab { padding: 12px 14px; border-radius: 10px; border: 1px solid var(--border); - background: var(--surface2); color: var(--text-muted); cursor: pointer; + .config-tab { padding: 12px 14px; border-radius: 10px; border: 1px solid rgba(240,246,252,0.08); + background: #161b22; color: #8b949e; cursor: pointer; font-family: inherit; text-align: left; transition: border-color 0.15s, background 0.15s, transform 0.15s; } - .config-tab:hover { color: var(--text); border-color: var(--accent); transform: translateY(-1px); } - .config-tab.active { background: rgba(88,166,255,0.1); color: var(--text); border-color: var(--accent); } + .config-tab:hover { color: #c9d1d9; border-color: rgba(56,139,253,0.45); transform: translateY(-1px); } + .config-tab.active { background: rgba(56,139,253,0.14); color: #fff; border-color: rgba(56,139,253,0.3); } .config-tab-title { display: block; font-size: 13px; font-weight: 600; color: inherit; } /* Config box */ .config-wrap { position: relative; margin-bottom: 14px; } - .config-box { background: var(--bg); border: 1px solid var(--border); border-radius: 8px; + .config-box { background: #0d1117; border: 1px solid rgba(240,246,252,0.08); border-radius: 10px; padding: 16px; font-family: 'JetBrains Mono', monospace; font-size: 11px; - line-height: 1.7; color: var(--text); overflow-x: auto; white-space: pre; } - .config-box.locked { color: var(--text-muted); filter: blur(3px); user-select: none; + line-height: 1.7; color: #c9d1d9; overflow-x: auto; white-space: pre; } + .config-box.locked { color: #8b949e; filter: blur(3px); user-select: none; pointer-events: none; } .config-key { color: #79c0ff; } .config-str { color: #a5d6ff; } - .config-val { color: var(--accent); } + .config-val { color: #58a6ff; } /* Divider */ - .divider { border: none; border-top: 1px 
solid var(--border); margin: 20px 0; } + .divider { border: none; border-top: 1px solid rgba(240,246,252,0.08); margin: 20px 0; } /* Actions row */ .actions-row { display: flex; gap: 8px; flex-wrap: wrap; justify-content: center; } @@ -135,34 +133,29 @@ .modal-bd { display: none; position: fixed; inset: 0; background: rgba(0,0,0,0.75); z-index: 100; align-items: center; justify-content: center; } .modal-bd.open { display: flex; } - .modal { background: var(--surface); border: 1px solid var(--border); border-radius: 12px; + .modal { background: #111827; border: 1px solid rgba(240,246,252,0.08); border-radius: 18px; padding: 28px; width: 100%; max-width: 420px; } - .modal h3 { font-size: 16px; font-weight: 600; margin-bottom: 16px; } + .modal h3 { font-size: 18px; font-weight: 700; margin-bottom: 16px; color: #fff; } .modal-actions { display: flex; gap: 8px; margin-top: 16px; } - .btn-modal-ok { flex: 1; padding: 8px; border-radius: 6px; border: none; - background: var(--accent); color: #0d1117; font-size: 13px; - font-weight: 600; cursor: pointer; } - .btn-modal-ok:hover { background: var(--accent-hover); } - .btn-modal-cancel { padding: 8px 16px; border-radius: 6px; border: 1px solid var(--border); - background: none; color: var(--text); font-size: 13px; cursor: pointer; } - .btn-modal-cancel:hover { background: var(--surface2); } + .btn-modal-ok { flex: 1; padding: 8px; border-radius: 10px; border: none; + background: #388bfd; color: #fff; font-size: 13px; + font-weight: 600; cursor: pointer; font-family: inherit; } + .btn-modal-ok:hover { background: #58a6ff; } + .btn-modal-cancel { padding: 8px 16px; border-radius: 10px; border: 1px solid rgba(240,246,252,0.12); + background: #161b22; color: #c9d1d9; font-size: 13px; cursor: pointer; font-family: inherit; } + .btn-modal-cancel:hover { border-color: rgba(56,139,253,0.45); color: #fff; } @media (max-width: 900px) { .layout { flex-direction: column; } .sidebar { - width: 100%; border-right: none; border-bottom: 1px 
solid var(--border); + width: 100%; border-right: none; border-bottom: 1px solid rgba(240,246,252,0.08); padding: 16px; gap: 14px; } - .sidebar-menu { flex-direction: row; } - .sidebar-link { flex: 1; text-align: center; } - } - - @media (max-width: 720px) { - .config-tabs { grid-template-columns: 1fr; } + .sidebar-menu { flex-direction: row; flex-wrap: wrap; } + .sidebar-link { flex: 1; text-align: center; min-width: 72px; } + .main { padding: 20px 12px 28px; } + .card { padding: 16px; } .topbar { padding: 12px 16px; flex-wrap: wrap; } - .main { padding: 16px 12px 6px; } - .app-footer { padding: 4px 12px 10px; } - .card { padding: 18px; } } @@ -171,11 +164,12 @@
@@ -293,13 +287,11 @@
- -
{{ version }}
- - - + + + - + - + + + + diff --git a/plans/metadata-search-and-entry-relations.md b/plans/metadata-search-and-entry-relations.md new file mode 100644 index 0000000..551b0d4 --- /dev/null +++ b/plans/metadata-search-and-entry-relations.md @@ -0,0 +1,392 @@ +# Metadata Value Search & Entry Relations (DAG) + +## Overview + +Two new features for secrets-mcp: + +1. **Metadata Value Search** — fuzzy search across all JSON scalar values in `metadata`, excluding keys +2. **Entry Relations** — directional parent-child associations between entries (DAG, multiple parents allowed, cycle detection) + +--- + +## Feature 1: Metadata Value Search + +### Problem + +The existing `query` parameter in `secrets_find`/`secrets_search` searches `metadata::text ILIKE`, which matches keys, JSON punctuation, and structural characters. Users want to search **only metadata values** (e.g. find entries where any metadata value contains "1.2.3.4", regardless of key name). + +### Solution + +Add a new `metadata_query` filter to `SearchParams` that uses PostgreSQL `jsonb_path_query` to iterate over only scalar values (strings, numbers, booleans), then applies ILIKE matching. + +### Changes + +#### secrets-core + +**`crates/secrets-core/src/service/search.rs`** + +- Add `metadata_query: Option<&'a str>` field to `SearchParams` +- In `entry_where_clause_and_next_idx`, when `metadata_query` is set, add: + +```sql +EXISTS ( + SELECT 1 FROM jsonb_path_query( + entries.metadata, + 'strict $.** ? 
(@.type() != "object" && @.type() != "array")'
+  ) AS val
+  WHERE (val#>>'{}') ILIKE $N ESCAPE '\'
+)
+```
+
+- Bind `ilike_pattern(metadata_query)` at the correct `$N` position in both `fetch_entries_paged` and `count_entries`
+
+#### secrets-mcp (MCP tools)
+
+**`crates/secrets-mcp/src/tools.rs`**
+
+- Add `metadata_query` field to `FindInput`:
+
+```rust
+#[schemars(description = "Fuzzy search across metadata values only (keys excluded)")]
+metadata_query: Option<String>,
+```
+
+- Add same field to `SearchInput`
+- Pass `metadata_query` through to `SearchParams` in both `secrets_find` and `secrets_search` handlers
+
+#### secrets-mcp (Web)
+
+**`crates/secrets-mcp/src/web/entries.rs`**
+
+- Add `metadata_query: Option<String>` to `EntriesQuery`
+- Thread it into all `SearchParams` usages (count, list, folder counts)
+- Pass it into template context
+- Add `metadata_query` to `EntriesPageTemplate` and filter form hidden fields
+- Include `metadata_query` in pagination `href` links
+
+**`crates/secrets-mcp/templates/entries.html`**
+
+- Add a "metadata 值" text input to the filter bar (after name, before type)
+- Preserve value in the input on re-render
+
+### i18n Keys
+
+| Key | zh | zh-Hant | en |
+|-----|-----|---------|-----|
+| `filterMetaLabel` | 元数据值 | 元資料值 | Metadata value |
+| `filterMetaPlaceholder` | 搜索元数据值 | 搜尋元資料值 | Search metadata values |
+
+### Performance Notes
+
+- The `jsonb_path_query` with `$.**` scans all nested values recursively; this is a sequential scan on the metadata column per row
+- The existing GIN index on `metadata jsonb_path_ops` supports `@>` containment queries but NOT this pattern
+- For production datasets > 10k entries, consider a generated column or materialized search column in a future iteration
+- First version prioritizes semantic correctness over index optimization
+
+---
+
+## Feature 2: Entry Relations (DAG)
+
+### Data Model
+
+New table `entry_relations`:
+
+```sql
+CREATE TABLE IF NOT EXISTS entry_relations (
+    parent_entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
+    child_entry_id  UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
+    created_at      TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    PRIMARY KEY (parent_entry_id, child_entry_id),
+    CHECK (parent_entry_id <> child_entry_id)
+);
+
+CREATE INDEX IF NOT EXISTS idx_entry_relations_parent ON entry_relations(parent_entry_id);
+CREATE INDEX IF NOT EXISTS idx_entry_relations_child ON entry_relations(child_entry_id);
+
+-- Multi-tenant isolation (parent and child must belong to the same user) cannot be
+-- expressed with a plain FOREIGN KEY on this table; it is enforced in application
+-- code by `relations::add_relation` (see Security Considerations below).
+```
+
+Shared secrets already use `entry_secrets` as an N:N relation, so this is consistent with the existing pattern.
+
+### Cycle Detection
+
+On every `INSERT INTO entry_relations(parent, child)`, check that no path exists from `child` back to `parent`. Adding the edge `parent → child` closes a cycle if and only if the proposed child is already an **ancestor** of the proposed parent. PostgreSQL does not support Oracle-style `START WITH ... CONNECT BY`; walk the ancestor chain of the proposed parent with a recursive CTE:
+
+```sql
+-- Returns true if adding (parent, child) would create a cycle
+WITH RECURSIVE chain AS (
+    SELECT parent_entry_id AS ancestor
+    FROM entry_relations
+    WHERE child_entry_id = $1          -- $1 = proposed parent
+    UNION ALL
+    SELECT er.parent_entry_id
+    FROM entry_relations er
+    JOIN chain c ON c.ancestor = er.child_entry_id
+)
+SELECT EXISTS(SELECT 1 FROM chain WHERE ancestor = $2);
+-- $1 = proposed parent, $2 = proposed child:
+-- if the child is already an ancestor of the parent, the new edge closes a cycle
+```
+
+If `EXISTS` returns true, reject with `AppError::Validation { message: "cycle detected" }`.
+
+### secrets-core Changes
+
+**New file: `crates/secrets-core/src/service/relations.rs`**
+
+```rust
+pub struct RelationSummary {
+    pub parent_id: Uuid,
+    pub parent_name: String,
+    pub parent_folder: String,
+    pub parent_type: String,
+}
+
+pub struct AddRelationParams {
+    pub parent_entry_id: Uuid,
+    pub child_entry_id: Uuid,
+    pub user_id: Option<Uuid>,
+}
+
+pub struct RemoveRelationParams {
+    pub parent_entry_id: Uuid,
+    pub child_entry_id: Uuid,
+    pub user_id: Option<Uuid>,
+}
+
+/// Add a parent→child relation. Validates:
+/// - Both entries exist and belong to the same user
+/// - No self-reference (enforced by CHECK constraint)
+/// - No cycle (recursive CTE check)
+pub async fn add_relation(pool: &PgPool, params: AddRelationParams) -> Result<()>
+
+/// Remove a parent→child relation.
+pub async fn remove_relation(pool: &PgPool, params: RemoveRelationParams) -> Result<()>
+
+/// Get all parents of an entry (with summary info).
+pub async fn get_parents(pool: &PgPool, entry_id: Uuid, user_id: Option<Uuid>) -> Result<Vec<RelationSummary>>
+
+/// Get all children of an entry (with summary info).
+pub async fn get_children(pool: &PgPool, entry_id: Uuid, user_id: Option<Uuid>) -> Result<Vec<RelationSummary>>
+
+/// Get parents + children for a batch of entry IDs (for list pages).
+/// Maps entry_id → (parents, children).
+pub async fn get_relations_for_entries(
+    pool: &PgPool,
+    entry_ids: &[Uuid],
+    user_id: Option<Uuid>,
+) -> Result<HashMap<Uuid, (Vec<RelationSummary>, Vec<RelationSummary>)>>
+```
+
+**`crates/secrets-core/src/service/mod.rs`** — add `pub mod relations;`
+
+**`crates/secrets-core/src/db.rs`** — add entry_relations table creation in `migrate()`
+
+**`crates/secrets-core/src/error.rs`** — no new error variant needed; use `AppError::Validation { message }` for cycle detection and permission errors
+
+### MCP Tool Changes
+
+**`crates/secrets-mcp/src/tools.rs`**
+
+1. **`secrets_add`** (`AddInput`): add optional `parent_ids: Option<Vec<Uuid>>` field
+   - Description: "UUIDs of parent entries to link. Creates parent→child relations."
+   - After creating the entry, call `relations::add_relation` for each parent
+
+2. **`secrets_update`** (`UpdateInput`): add two fields:
+   - `add_parent_ids: Option<Vec<Uuid>>` — "UUIDs of parent entries to link"
+   - `remove_parent_ids: Option<Vec<Uuid>>` — "UUIDs of parent entries to unlink"
+
+3. **`secrets_find`** and `secrets_search` output: add `parents` and `children` arrays to each entry result:
+   ```json
+   {
+     "id": "...",
+     "name": "...",
+     "parents": [{"id": "...", "name": "...", "folder": "...", "type": "..."}],
+     "children": [{"id": "...", "name": "...", "folder": "...", "type": "..."}]
+   }
+   ```
+   - Fetch relations for all returned entry IDs in a single batch query
+
+### Web Changes
+
+**`crates/secrets-mcp/src/web/entries.rs`**
+
+1. **New API endpoints:**
+
+   - `POST /api/entries/{id}/relations` — add parent relation
+     - Body: `{ "parent_id": "uuid" }`
+     - Validates same-user ownership and cycle detection
+
+   - `DELETE /api/entries/{id}/relations/{parent_id}` — remove parent relation
+
+   - `GET /api/entries/options?q=xxx` — lightweight search for parent selection modal
+     - Returns `[{ "id": "...", "name": "...", "folder": "...", "type": "..." }]`
+     - Used by the edit dialog's parent selection autocomplete
+
+2. **Entry list template data** — include parent/child counts per entry row
+
+3. **`api_entry_patch`** — extend `EntryPatchBody` with optional `parent_ids: Option<Vec<Uuid>>`
+   - When present, replace all parent relations for this entry with the given list
+   - This is simpler than incremental add/remove in the Web UI context
+
+**`crates/secrets-mcp/templates/entries.html`**
+
+1. **List table**: add a "关联" (relations) column showing parent/child counts as clickable chips
+2. **Edit dialog**: add "上级条目" (parent entries) section
+   - Show current parents as removable chips
+   - Add a search-as-you-type input that queries `/api/entries/options`
+   - Click a search result to add it as parent
+   - On save, send `parent_ids` in the PATCH body
+3. **View dialog / detail**: show "下级条目" (children) list with clickable links that navigate to the child entry
+4. **i18n**: add keys for all new UI elements
+
+### i18n Keys (Entry Relations)
+
+| Key | zh | zh-Hant | en |
+|-----|-----|---------|-----|
+| `colRelations` | 关联 | 關聯 | Relations |
+| `parentEntriesLabel` | 上级条目 | 上級條目 | Parent entries |
+| `childrenEntriesLabel` | 下级条目 | 下級條目 | Child entries |
+| `addParentLabel` | 添加上级 | 新增上級 | Add parent |
+| `removeParentLabel` | 移除上级 | 移除上級 | Remove parent |
+| `searchEntriesPlaceholder` | 搜索条目… | 搜尋條目… | Search entries… |
+| `noParents` | 无上级 | 無上級 | No parents |
+| `noChildren` | 无下级 | 無下級 | No children |
+| `relationCycleError` | 无法添加:会形成循环引用 | 無法新增:會形成循環引用 | Cannot add: would create a cycle |
+
+### Audit Logging
+
+Log relation changes in the existing `audit::log_tx` system:
+
+- Action: `"add_relation"` / `"remove_relation"`
+- Detail JSON: `{ "parent_id": "...", "parent_name": "...", "child_id": "...", "child_name": "..." }`
+
+### Export / Import
+
+**`ExportEntry`** — add optional `parents: Vec<ParentRef>` where:
+
+```rust
+pub struct ParentRef {
+    pub folder: String,
+    pub name: String,
+}
+```
+
+- On export, resolve each entry's parent IDs to `(folder, name)` pairs
+- On import, two-phase:
+  1. Create all entries (skip parents)
+  2. For each entry with `parents`, resolve `(folder, name)` → `entry_id` and call `add_relation`
+  3. If a parent reference cannot be resolved, log a warning and skip it (don't fail the entire import)
+
+### History / Rollback
+
+- Relation changes are **not** versioned in `entries_history`. They are tracked only via `audit_log`.
+- Rationale: relations are a cross-entry concern; rolling them back alongside entry fields would require complex multi-entry coordination. The audit log provides sufficient traceability.
+- If the user explicitly requests rollback of relations in the future, it can be implemented as a separate feature.
+ +--- + +## Implementation Order + +### Phase 1: Metadata Value Search + +1. `secrets-core/src/service/search.rs` — add `metadata_query` to `SearchParams`, implement SQL condition +2. `secrets-mcp/src/tools.rs` — add `metadata_query` to `FindInput` and `SearchInput`, wire through +3. `secrets-mcp/src/web/entries.rs` — add `metadata_query` to `EntriesQuery`, `SearchParams`, pagination, folder counts +4. `secrets-mcp/templates/entries.html` — add input field, i18n +5. Test: existing `query` still works; `metadata_query` only matches values + +### Phase 2: Entry Relations (Core) + +1. `secrets-core/src/db.rs` — add `entry_relations` table to `migrate()` +2. `secrets-core/src/service/relations.rs` — implement `add_relation`, `remove_relation`, `get_parents`, `get_children`, `get_relations_for_entries`, cycle detection +3. `secrets-core/src/service/mod.rs` — add `pub mod relations` +4. Test: add/remove/query relations, cycle detection, same-user validation + +### Phase 3: Entry Relations (MCP) + +1. `secrets-mcp/src/tools.rs` — extend `AddInput`, `UpdateInput` with parent IDs +2. `secrets-mcp/src/tools.rs` — extend `secrets_find`/`secrets_search` output with `parents`/`children` +3. Test: MCP tools work end-to-end + +### Phase 4: Entry Relations (Web) + +1. `secrets-mcp/src/web/entries.rs` — add API endpoints, extend `EntryPatchBody`, extend template data +2. `secrets-mcp/templates/entries.html` — add relations column, edit dialog parent selector, view dialog children list +3. Test: Web UI works end-to-end + +### Phase 5: Export / Import (Optional) + +1. `secrets-core/src/models.rs` — add `parents` to `ExportEntry` +2. `secrets-core/src/service/export.rs` — populate parents +3. 
`secrets-core/src/service/import.rs` — two-phase import with relation resolution + +--- + +## Database Migration + +Add to `secrets-core/src/db.rs` `migrate()`: + +```sql +CREATE TABLE IF NOT EXISTS entry_relations ( + parent_entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, + child_entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (parent_entry_id, child_entry_id), + CHECK (parent_entry_id <> child_entry_id) +); + +CREATE INDEX IF NOT EXISTS idx_entry_relations_parent ON entry_relations(parent_entry_id); +CREATE INDEX IF NOT EXISTS idx_entry_relations_child ON entry_relations(child_entry_id); +``` + +This is idempotent (uses `IF NOT EXISTS`) and will run automatically on next startup. + +--- + +## Security Considerations + +- **Same-user isolation**: `add_relation` must verify both `parent_entry_id` and `child_entry_id` belong to the same `user_id` (or both are `NULL` for legacy single-user mode) +- **Cycle detection**: Recursive CTE query prevents any directed cycle, regardless of depth +- **CASCADE delete**: When an entry is deleted, all its relation edges are automatically removed via the `ON DELETE CASCADE` foreign key. This is the same pattern used by `entry_secrets`. + +--- + +## Testing Checklist + +### Metadata Search +- [ ] `metadata_query=1.2.3.4` matches entries where any metadata value contains "1.2.3.4" +- [ ] `metadata_query=1.2.3.4` does NOT match entries where only the key contains "1.2.3.4" +- [ ] `metadata_query` works with nested metadata (e.g. 
`{"server": {"ip": "1.2.3.4"}}`) +- [ ] `metadata_query` combined with `folder`/`type`/`tags` filters works correctly +- [ ] `metadata_query` with special characters (`%`, `_`) is properly escaped +- [ ] Existing `query` parameter behavior is unchanged +- [ ] Web filter bar preserves `metadata_query` across pagination and folder tab clicks + +### Entry Relations +- [ ] Can add a parent→child relation between two entries +- [ ] Can add multiple parents to a single entry +- [ ] Cannot add self-referencing relation (CHECK constraint) +- [ ] Cannot create a direct cycle (A→B→A) +- [ ] Cannot create an indirect cycle (A→B→C→A) +- [ ] Cannot link entries from different users +- [ ] Deleting an entry removes all its relation edges but leaves related entries intact +- [ ] MCP `secrets_add` with `parent_ids` creates relations +- [ ] MCP `secrets_update` with `add_parent_ids`/`remove_parent_ids` modifies relations +- [ ] MCP `secrets_find`/`secrets_search` output includes `parents` and `children` +- [ ] Web entry list shows relation counts +- [ ] Web edit dialog allows adding/removing parents +- [ ] Web entry view shows children with navigation links \ No newline at end of file