Merge pull request 'refactor/entry-secret-nn' (#1) from refactor/entry-secret-nn into main
Some checks failed
Secrets MCP — Build & Release / 检查 / 构建 / 发版 (push) Successful in 2m42s
Secrets MCP — Build & Release / 部署 secrets-mcp (push) Failing after 6s

Reviewed-on: #1
This commit was merged in pull request #1.
This commit is contained in:
2026-04-03 19:44:47 +08:00
21 changed files with 1115 additions and 199 deletions

1
.gitignore vendored
View File

@@ -5,3 +5,4 @@
# Google OAuth 下载的 JSON 凭据文件 # Google OAuth 下载的 JSON 凭据文件
client_secret_*.apps.googleusercontent.com.json client_secret_*.apps.googleusercontent.com.json
*.pem *.pem
tmp/

2
Cargo.lock generated
View File

@@ -1968,7 +1968,7 @@ dependencies = [
[[package]] [[package]]
name = "secrets-mcp" name = "secrets-mcp"
version = "0.3.7" version = "0.3.9"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"askama", "askama",

View File

@@ -83,16 +83,30 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
-- ── secrets: one row per encrypted field ───────────────────────────────── -- ── secrets: one row per encrypted field ─────────────────────────────────
CREATE TABLE IF NOT EXISTS secrets ( CREATE TABLE IF NOT EXISTS secrets (
id UUID PRIMARY KEY DEFAULT uuidv7(), id UUID PRIMARY KEY DEFAULT uuidv7(),
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, user_id UUID,
field_name VARCHAR(256) NOT NULL, name VARCHAR(256) NOT NULL,
type VARCHAR(64) NOT NULL DEFAULT 'text',
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
version BIGINT NOT NULL DEFAULT 1, version BIGINT NOT NULL DEFAULT 1,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
UNIQUE(entry_id, field_name)
); );
CREATE INDEX IF NOT EXISTS idx_secrets_entry_id ON secrets(entry_id); CREATE INDEX IF NOT EXISTS idx_secrets_user_id ON secrets(user_id) WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_secrets_unique_user_name
ON secrets(user_id, name) WHERE user_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_secrets_name ON secrets(name);
CREATE INDEX IF NOT EXISTS idx_secrets_type ON secrets(type);
-- ── entry_secrets: N:N relation ────────────────────────────────────────────
CREATE TABLE IF NOT EXISTS entry_secrets (
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
secret_id UUID NOT NULL REFERENCES secrets(id) ON DELETE CASCADE,
sort_order INT NOT NULL DEFAULT 0,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY(entry_id, secret_id)
);
CREATE INDEX IF NOT EXISTS idx_entry_secrets_secret_id ON entry_secrets(secret_id);
-- ── audit_log: append-only operation log ───────────────────────────────── -- ── audit_log: append-only operation log ─────────────────────────────────
CREATE TABLE IF NOT EXISTS audit_log ( CREATE TABLE IF NOT EXISTS audit_log (
@@ -141,17 +155,13 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
-- ── secrets_history: field-level snapshot ──────────────────────────────── -- ── secrets_history: field-level snapshot ────────────────────────────────
CREATE TABLE IF NOT EXISTS secrets_history ( CREATE TABLE IF NOT EXISTS secrets_history (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
entry_id UUID NOT NULL,
secret_id UUID NOT NULL, secret_id UUID NOT NULL,
entry_version BIGINT NOT NULL, name VARCHAR(256) NOT NULL,
field_name VARCHAR(256) NOT NULL,
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
action VARCHAR(16) NOT NULL, action VARCHAR(16) NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
); );
CREATE INDEX IF NOT EXISTS idx_secrets_history_entry_id
ON secrets_history(entry_id, entry_version DESC);
CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id
ON secrets_history(secret_id); ON secrets_history(secret_id);
@@ -210,6 +220,16 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
END IF; END IF;
END $$; END $$;
DO $$ BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_constraint WHERE conname = 'fk_secrets_user_id'
) THEN
ALTER TABLE secrets
ADD CONSTRAINT fk_secrets_user_id
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;
END IF;
END $$;
DO $$ BEGIN DO $$ BEGIN
IF NOT EXISTS ( IF NOT EXISTS (
SELECT 1 FROM pg_constraint WHERE conname = 'fk_audit_log_user_id' SELECT 1 FROM pg_constraint WHERE conname = 'fk_audit_log_user_id'
@@ -499,10 +519,8 @@ pub async fn snapshot_entry_history(
// ── Secret field-level history snapshot ────────────────────────────────────── // ── Secret field-level history snapshot ──────────────────────────────────────
pub struct SecretSnapshotParams<'a> { pub struct SecretSnapshotParams<'a> {
pub entry_id: uuid::Uuid,
pub secret_id: uuid::Uuid, pub secret_id: uuid::Uuid,
pub entry_version: i64, pub name: &'a str,
pub field_name: &'a str,
pub encrypted: &'a [u8], pub encrypted: &'a [u8],
pub action: &'a str, pub action: &'a str,
} }
@@ -513,13 +531,11 @@ pub async fn snapshot_secret_history(
) -> Result<()> { ) -> Result<()> {
sqlx::query( sqlx::query(
"INSERT INTO secrets_history \ "INSERT INTO secrets_history \
(entry_id, secret_id, entry_version, field_name, encrypted, action) \ (secret_id, name, encrypted, action) \
VALUES ($1, $2, $3, $4, $5, $6)", VALUES ($1, $2, $3, $4)",
) )
.bind(p.entry_id)
.bind(p.secret_id) .bind(p.secret_id)
.bind(p.entry_version) .bind(p.name)
.bind(p.field_name)
.bind(p.encrypted) .bind(p.encrypted)
.bind(p.action) .bind(p.action)
.execute(&mut **tx) .execute(&mut **tx)

View File

@@ -27,8 +27,11 @@ pub struct Entry {
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
pub struct SecretField { pub struct SecretField {
pub id: Uuid, pub id: Uuid,
pub entry_id: Uuid, pub user_id: Option<Uuid>,
pub field_name: String, pub name: String,
#[serde(rename = "type")]
#[sqlx(rename = "type")]
pub secret_type: String,
/// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag /// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag
pub encrypted: Vec<u8>, pub encrypted: Vec<u8>,
pub version: i64, pub version: i64,
@@ -83,7 +86,7 @@ impl From<&EntryWriteRow> for EntryRow {
#[derive(Debug, sqlx::FromRow)] #[derive(Debug, sqlx::FromRow)]
pub struct SecretFieldRow { pub struct SecretFieldRow {
pub id: Uuid, pub id: Uuid,
pub field_name: String, pub name: String,
pub encrypted: Vec<u8>, pub encrypted: Vec<u8>,
} }

View File

@@ -1,6 +1,7 @@
use anyhow::Result; use anyhow::Result;
use serde_json::{Map, Value}; use serde_json::{Map, Value};
use sqlx::PgPool; use sqlx::PgPool;
use std::collections::{BTreeSet, HashSet};
use std::fs; use std::fs;
use uuid::Uuid; use uuid::Uuid;
@@ -176,6 +177,7 @@ pub struct AddParams<'a> {
pub tags: &'a [String], pub tags: &'a [String],
pub meta_entries: &'a [String], pub meta_entries: &'a [String],
pub secret_entries: &'a [String], pub secret_entries: &'a [String],
pub link_secret_names: &'a [String],
/// Optional user_id for multi-user isolation (None = single-user CLI mode) /// Optional user_id for multi-user isolation (None = single-user CLI mode)
pub user_id: Option<Uuid>, pub user_id: Option<Uuid>,
} }
@@ -185,6 +187,11 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
let secret_json = build_json(params.secret_entries)?; let secret_json = build_json(params.secret_entries)?;
let meta_keys = collect_key_paths(params.meta_entries)?; let meta_keys = collect_key_paths(params.meta_entries)?;
let secret_keys = collect_key_paths(params.secret_entries)?; let secret_keys = collect_key_paths(params.secret_entries)?;
let flat_fields = flatten_json_fields("", &secret_json);
let new_secret_names: BTreeSet<String> =
flat_fields.iter().map(|(name, _)| name.clone()).collect();
let link_secret_names =
validate_link_secret_names(params.link_secret_names, &new_secret_names)?;
let mut tx = pool.begin().await?; let mut tx = pool.begin().await?;
@@ -279,7 +286,8 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
.await? .await?
}; };
let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1") let current_entry_version: i64 =
sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
.bind(entry_id) .bind(entry_id)
.fetch_one(&mut *tx) .fetch_one(&mut *tx)
.await?; .await?;
@@ -293,7 +301,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
folder: params.folder, folder: params.folder,
entry_type: params.entry_type, entry_type: params.entry_type,
name: params.name, name: params.name,
version: new_entry_version, version: current_entry_version,
action: "create", action: "create",
tags: params.tags, tags: params.tags,
metadata: &metadata, metadata: &metadata,
@@ -308,11 +316,15 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
#[derive(sqlx::FromRow)] #[derive(sqlx::FromRow)]
struct ExistingField { struct ExistingField {
id: Uuid, id: Uuid,
field_name: String, name: String,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let existing_fields: Vec<ExistingField> = let existing_fields: Vec<ExistingField> = sqlx::query_as(
sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1") "SELECT s.id, s.name, s.encrypted \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1",
)
.bind(entry_id) .bind(entry_id)
.fetch_all(&mut *tx) .fetch_all(&mut *tx)
.await?; .await?;
@@ -321,10 +333,8 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
if let Err(e) = db::snapshot_secret_history( if let Err(e) = db::snapshot_secret_history(
&mut tx, &mut tx,
db::SecretSnapshotParams { db::SecretSnapshotParams {
entry_id,
secret_id: f.id, secret_id: f.id,
entry_version: new_entry_version - 1, name: &f.name,
field_name: &f.field_name,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "add", action: "add",
}, },
@@ -335,23 +345,70 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
} }
} }
sqlx::query("DELETE FROM secrets WHERE entry_id = $1") sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1")
.bind(entry_id) .bind(entry_id)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
sqlx::query(
"DELETE FROM secrets s \
WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
)
.execute(&mut *tx)
.await?;
} }
let flat_fields = flatten_json_fields("", &secret_json);
for (field_name, field_value) in &flat_fields { for (field_name, field_value) in &flat_fields {
let encrypted = crypto::encrypt_json(master_key, field_value)?; let encrypted = crypto::encrypt_json(master_key, field_value)?;
sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)") let secret_id: Uuid = sqlx::query_scalar(
.bind(entry_id) "INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
)
.bind(params.user_id)
.bind(field_name) .bind(field_name)
.bind(infer_secret_type(field_name))
.bind(&encrypted) .bind(&encrypted)
.fetch_one(&mut *tx)
.await?;
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
.bind(entry_id)
.bind(secret_id)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
} }
for link_name in &link_secret_names {
let secret_ids: Vec<Uuid> = if let Some(uid) = params.user_id {
sqlx::query_scalar("SELECT id FROM secrets WHERE user_id = $1 AND name = $2")
.bind(uid)
.bind(link_name)
.fetch_all(&mut *tx)
.await?
} else {
sqlx::query_scalar("SELECT id FROM secrets WHERE user_id IS NULL AND name = $1")
.bind(link_name)
.fetch_all(&mut *tx)
.await?
};
match secret_ids.len() {
0 => anyhow::bail!("Not found: secret named '{}'", link_name),
1 => {
sqlx::query(
"INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
)
.bind(entry_id)
.bind(secret_ids[0])
.execute(&mut *tx)
.await?;
}
n => anyhow::bail!(
"Ambiguous: {} secrets named '{}' found. Please deduplicate names first.",
n,
link_name
),
}
}
crate::audit::log_tx( crate::audit::log_tx(
&mut tx, &mut tx,
params.user_id, params.user_id,
@@ -379,9 +436,56 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
}) })
} }
pub(crate) fn infer_secret_type(name: &str) -> &'static str {
match name {
"ssh_key" => "pem",
"password" => "password",
"phone" | "phone_2" => "phone",
"webhook_url" | "address" => "url",
"access_key_id"
| "access_key_secret"
| "global_api_key"
| "api_key"
| "secret_key"
| "personal_access_token"
| "runner_token"
| "GOOGLE_CLIENT_ID"
| "GOOGLE_CLIENT_SECRET" => "token",
_ => "text",
}
}
fn validate_link_secret_names(
link_secret_names: &[String],
new_secret_names: &BTreeSet<String>,
) -> Result<Vec<String>> {
let mut deduped = Vec::new();
let mut seen = HashSet::new();
for raw in link_secret_names {
let trimmed = raw.trim();
if trimmed.is_empty() {
anyhow::bail!("link_secret_names contains an empty name");
}
if new_secret_names.contains(trimmed) {
anyhow::bail!(
"Conflict: secret '{}' is provided both in secrets/secrets_obj and link_secret_names",
trimmed
);
}
if seen.insert(trimmed.to_string()) {
deduped.push(trimmed.to_string());
}
}
Ok(deduped)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use sqlx::PgPool;
use std::collections::BTreeSet;
#[test] #[test]
fn parse_nested_file_shorthand() { fn parse_nested_file_shorthand() {
@@ -410,4 +514,199 @@ mod tests {
assert_eq!(fields[1].0, "credentials.type"); assert_eq!(fields[1].0, "credentials.type");
assert_eq!(fields[2].0, "username"); assert_eq!(fields[2].0, "username");
} }
#[test]
fn validate_link_secret_names_conflict_with_new_secret() {
let mut new_names = BTreeSet::new();
new_names.insert("password".to_string());
let err = validate_link_secret_names(&[String::from("password")], &new_names)
.expect_err("must fail on overlap");
assert!(
err.to_string()
.contains("provided both in secrets/secrets_obj and link_secret_names")
);
}
#[test]
fn validate_link_secret_names_dedup_and_trim() {
let names = vec![
" shared_key ".to_string(),
"shared_key".to_string(),
"runner_token".to_string(),
];
let deduped = validate_link_secret_names(&names, &BTreeSet::new()).unwrap();
assert_eq!(deduped, vec!["shared_key", "runner_token"]);
}
async fn maybe_test_pool() -> Option<PgPool> {
let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
eprintln!("skip add linkage tests: SECRETS_DATABASE_URL is not set");
return None;
};
let Ok(pool) = PgPool::connect(&url).await else {
eprintln!("skip add linkage tests: cannot connect to database");
return None;
};
if let Err(e) = crate::db::migrate(&pool).await {
eprintln!("skip add linkage tests: migrate failed: {e}");
return None;
}
Some(pool)
}
async fn cleanup_test_rows(pool: &PgPool, marker: &str) -> Result<()> {
sqlx::query(
"DELETE FROM entries WHERE user_id IS NULL AND (name LIKE $1 OR folder LIKE $1)",
)
.bind(format!("%{marker}%"))
.execute(pool)
.await?;
sqlx::query(
"DELETE FROM secrets WHERE user_id IS NULL AND name LIKE $1 \
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = secrets.id)",
)
.bind(format!("%{marker}%"))
.execute(pool)
.await?;
Ok(())
}
#[tokio::test]
async fn add_links_existing_secret_by_unique_name() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let suffix = Uuid::from_u128(rand::random()).to_string();
let marker = format!("link_unique_{}", &suffix[..8]);
let secret_name = format!("{}_secret", marker);
let entry_name = format!("{}_entry", marker);
cleanup_test_rows(&pool, &marker).await?;
let secret_id: Uuid = sqlx::query_scalar(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2) RETURNING id",
)
.bind(&secret_name)
.bind(vec![1_u8, 2, 3])
.fetch_one(&pool)
.await?;
run(
&pool,
AddParams {
name: &entry_name,
folder: &marker,
entry_type: "service",
notes: "",
tags: &[],
meta_entries: &[],
secret_entries: &[],
link_secret_names: std::slice::from_ref(&secret_name),
user_id: None,
},
&[0_u8; 32],
)
.await?;
let linked: bool = sqlx::query_scalar(
"SELECT EXISTS( \
SELECT 1 FROM entry_secrets es \
JOIN entries e ON e.id = es.entry_id \
WHERE e.user_id IS NULL AND e.name = $1 AND es.secret_id = $2 \
)",
)
.bind(&entry_name)
.bind(secret_id)
.fetch_one(&pool)
.await?;
assert!(linked);
cleanup_test_rows(&pool, &marker).await?;
Ok(())
}
#[tokio::test]
async fn add_link_secret_name_not_found_fails() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let suffix = Uuid::from_u128(rand::random()).to_string();
let marker = format!("link_missing_{}", &suffix[..8]);
let secret_name = format!("{}_secret", marker);
let entry_name = format!("{}_entry", marker);
cleanup_test_rows(&pool, &marker).await?;
let err = run(
&pool,
AddParams {
name: &entry_name,
folder: &marker,
entry_type: "service",
notes: "",
tags: &[],
meta_entries: &[],
secret_entries: &[],
link_secret_names: std::slice::from_ref(&secret_name),
user_id: None,
},
&[0_u8; 32],
)
.await
.expect_err("must fail when linked secret is not found");
assert!(err.to_string().contains("Not found: secret named"));
cleanup_test_rows(&pool, &marker).await?;
Ok(())
}
#[tokio::test]
async fn add_link_secret_name_ambiguous_fails() -> Result<()> {
let Some(pool) = maybe_test_pool().await else {
return Ok(());
};
let suffix = Uuid::from_u128(rand::random()).to_string();
let marker = format!("link_amb_{}", &suffix[..8]);
let secret_name = format!("{}_dup_secret", marker);
let entry_name = format!("{}_entry", marker);
cleanup_test_rows(&pool, &marker).await?;
sqlx::query(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
)
.bind(&secret_name)
.bind(vec![1_u8])
.execute(&pool)
.await?;
sqlx::query(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
)
.bind(&secret_name)
.bind(vec![2_u8])
.execute(&pool)
.await?;
let err = run(
&pool,
AddParams {
name: &entry_name,
folder: &marker,
entry_type: "service",
notes: "",
tags: &[],
meta_entries: &[],
secret_entries: &[],
link_secret_names: std::slice::from_ref(&secret_name),
user_id: None,
},
&[0_u8; 32],
)
.await
.expect_err("must fail on ambiguous linked secret name");
assert!(err.to_string().contains("Ambiguous:"));
cleanup_test_rows(&pool, &marker).await?;
Ok(())
}
} }

View File

@@ -130,20 +130,20 @@ async fn migrate_key_refs_if_needed(
let owner = &refs[0]; let owner = &refs[0];
let owner_path = ref_path(owner); let owner_path = ref_path(owner);
let key_fields: Vec<SecretFieldRow> = let key_fields: Vec<SecretFieldRow> = sqlx::query_as(
sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1") "SELECT s.id, s.name, s.encrypted \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1",
)
.bind(key_row.id) .bind(key_row.id)
.fetch_all(&mut **tx) .fetch_all(&mut **tx)
.await?; .await?;
for f in &key_fields { for f in &key_fields {
sqlx::query( sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING")
"INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3) \
ON CONFLICT (entry_id, field_name) DO NOTHING",
)
.bind(owner.id) .bind(owner.id)
.bind(&f.field_name) .bind(f.id)
.bind(&f.encrypted)
.execute(&mut **tx) .execute(&mut **tx)
.await?; .await?;
} }
@@ -200,7 +200,7 @@ async fn migrate_key_refs_if_needed(
Ok(refs.iter().map(ref_label).collect()) Ok(refs.iter().map(ref_label).collect())
} }
/// Delete a single entry by id (multi-tenant: `user_id` must match). Cascades `secrets` via FK. /// Delete a single entry by id (multi-tenant: `user_id` must match).
pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> { pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> {
let mut tx = pool.begin().await?; let mut tx = pool.begin().await?;
let row: Option<EntryWriteRow> = sqlx::query_as( let row: Option<EntryWriteRow> = sqlx::query_as(
@@ -615,8 +615,12 @@ async fn snapshot_and_delete(
tracing::warn!(error = %e, "failed to snapshot entry history before delete"); tracing::warn!(error = %e, "failed to snapshot entry history before delete");
} }
let fields: Vec<SecretFieldRow> = let fields: Vec<SecretFieldRow> = sqlx::query_as(
sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1") "SELECT s.id, s.name, s.encrypted \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1",
)
.bind(row.id) .bind(row.id)
.fetch_all(&mut **tx) .fetch_all(&mut **tx)
.await?; .await?;
@@ -625,10 +629,8 @@ async fn snapshot_and_delete(
if let Err(e) = db::snapshot_secret_history( if let Err(e) = db::snapshot_secret_history(
tx, tx,
db::SecretSnapshotParams { db::SecretSnapshotParams {
entry_id: row.id,
secret_id: f.id, secret_id: f.id,
entry_version: row.version, name: &f.name,
field_name: &f.field_name,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "delete", action: "delete",
}, },
@@ -644,6 +646,13 @@ async fn snapshot_and_delete(
.execute(&mut **tx) .execute(&mut **tx)
.await?; .await?;
sqlx::query(
"DELETE FROM secrets s \
WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
)
.execute(&mut **tx)
.await?;
Ok(()) Ok(())
} }
@@ -692,6 +701,31 @@ mod tests {
Ok(()) Ok(())
} }
async fn insert_secret_for_entry(
pool: &PgPool,
user_id: Uuid,
entry_id: Uuid,
name: &str,
secret_type: &str,
encrypted: Vec<u8>,
) -> Result<()> {
let secret_id: Uuid = sqlx::query_scalar(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
)
.bind(user_id)
.bind(name)
.bind(secret_type)
.bind(encrypted)
.fetch_one(pool)
.await?;
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
.bind(entry_id)
.bind(secret_id)
.execute(pool)
.await?;
Ok(())
}
#[tokio::test] #[tokio::test]
async fn delete_shared_key_dry_run_reports_migration_without_writes() -> Result<()> { async fn delete_shared_key_dry_run_reports_migration_without_writes() -> Result<()> {
let Some(pool) = maybe_test_pool().await else { let Some(pool) = maybe_test_pool().await else {
@@ -713,12 +747,7 @@ mod tests {
json!({}), json!({}),
) )
.await?; .await?;
sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)") insert_secret_for_entry(&pool, user_id, key_id, "pem", "pem", vec![1_u8, 2, 3]).await?;
.bind(key_id)
.bind("pem")
.bind(vec![1_u8, 2, 3])
.execute(&pool)
.await?;
insert_entry( insert_entry(
&pool, &pool,
@@ -808,12 +837,7 @@ mod tests {
json!({}), json!({}),
) )
.await?; .await?;
sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)") insert_secret_for_entry(&pool, user_id, key_id, "pem", "pem", vec![7_u8, 8, 9]).await?;
.bind(key_id)
.bind("pem")
.bind(vec![7_u8, 8, 9])
.execute(&pool)
.await?;
// owner candidate (sorted first by folder) // owner candidate (sorted first by folder)
insert_entry( insert_entry(
@@ -893,7 +917,12 @@ mod tests {
assert_eq!(ref_c_key_ref.as_deref(), Some("afolder/srv-a")); assert_eq!(ref_c_key_ref.as_deref(), Some("afolder/srv-a"));
let owner_has_copied: bool = sqlx::query_scalar( let owner_has_copied: bool = sqlx::query_scalar(
"SELECT EXISTS(SELECT 1 FROM secrets WHERE entry_id = $1 AND field_name = 'pem')", "SELECT EXISTS( \
SELECT 1 \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1 AND s.name = 'pem' \
)",
) )
.bind(ref_a) .bind(ref_a)
.fetch_one(&pool) .fetch_one(&pool)

View File

@@ -51,7 +51,7 @@ async fn build_entry_env_map(
} else { } else {
all_fields all_fields
.iter() .iter()
.filter(|f| only_fields.contains(&f.field_name)) .filter(|f| only_fields.contains(&f.name))
.collect() .collect()
}; };
@@ -63,7 +63,7 @@ async fn build_entry_env_map(
let key = format!( let key = format!(
"{}_{}", "{}_{}",
effective_prefix, effective_prefix,
f.field_name.to_uppercase().replace(['-', '.'], "_") f.name.to_uppercase().replace(['-', '.'], "_")
); );
map.insert(key, json_to_env_string(&decrypted)); map.insert(key, json_to_env_string(&decrypted));
} }
@@ -97,7 +97,7 @@ async fn build_entry_env_map(
let key_var = format!( let key_var = format!(
"{}_{}", "{}_{}",
key_prefix, key_prefix,
f.field_name.to_uppercase().replace(['-', '.'], "_") f.name.to_uppercase().replace(['-', '.'], "_")
); );
map.insert(key_var, json_to_env_string(&decrypted)); map.insert(key_var, json_to_env_string(&decrypted));
} }

View File

@@ -55,7 +55,7 @@ pub async fn export(
let mut map = BTreeMap::new(); let mut map = BTreeMap::new();
for f in fields { for f in fields {
let decrypted = crypto::decrypt_json(mk, &f.encrypted)?; let decrypted = crypto::decrypt_json(mk, &f.encrypted)?;
map.insert(f.field_name.clone(), decrypted); map.insert(f.name.clone(), decrypted);
} }
Some(map) Some(map)
} }

View File

@@ -25,7 +25,7 @@ pub async fn get_secret_field(
let field = fields let field = fields
.iter() .iter()
.find(|f| f.field_name == field_name) .find(|f| f.name == field_name)
.ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?; .ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?;
crypto::decrypt_json(master_key, &field.encrypted) crypto::decrypt_json(master_key, &field.encrypted)
@@ -49,7 +49,7 @@ pub async fn get_all_secrets(
let mut map = HashMap::new(); let mut map = HashMap::new();
for f in fields { for f in fields {
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
map.insert(f.field_name.clone(), decrypted); map.insert(f.name.clone(), decrypted);
} }
Ok(map) Ok(map)
} }
@@ -72,7 +72,7 @@ pub async fn get_secret_field_by_id(
let field = fields let field = fields
.iter() .iter()
.find(|f| f.field_name == field_name) .find(|f| f.name == field_name)
.ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?; .ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?;
crypto::decrypt_json(master_key, &field.encrypted) crypto::decrypt_json(master_key, &field.encrypted)
@@ -98,7 +98,7 @@ pub async fn get_all_secrets_by_id(
let mut map = HashMap::new(); let mut map = HashMap::new();
for f in fields { for f in fields {
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
map.insert(f.field_name.clone(), decrypted); map.insert(f.name.clone(), decrypted);
} }
Ok(map) Ok(map)
} }

View File

@@ -85,6 +85,7 @@ pub async fn run(
tags: &entry.tags, tags: &entry.tags,
meta_entries: &meta_entries, meta_entries: &meta_entries,
secret_entries: &secret_entries, secret_entries: &secret_entries,
link_secret_names: &[],
user_id: params.user_id, user_id: params.user_id,
}, },
master_key, master_key,

View File

@@ -3,7 +3,6 @@ use serde_json::Value;
use sqlx::PgPool; use sqlx::PgPool;
use uuid::Uuid; use uuid::Uuid;
use crate::crypto;
use crate::db; use crate::db;
#[derive(Debug, serde::Serialize)] #[derive(Debug, serde::Serialize)]
@@ -27,7 +26,6 @@ pub async fn run(
) -> Result<RollbackResult> { ) -> Result<RollbackResult> {
#[derive(sqlx::FromRow)] #[derive(sqlx::FromRow)]
struct EntryHistoryRow { struct EntryHistoryRow {
entry_id: Uuid,
folder: String, folder: String,
#[sqlx(rename = "type")] #[sqlx(rename = "type")]
entry_type: String, entry_type: String,
@@ -122,7 +120,7 @@ pub async fn run(
let snap: Option<EntryHistoryRow> = if let Some(ver) = to_version { let snap: Option<EntryHistoryRow> = if let Some(ver) = to_version {
sqlx::query_as( sqlx::query_as(
"SELECT entry_id, folder, type, version, action, tags, metadata \ "SELECT folder, type, version, action, tags, metadata \
FROM entries_history \ FROM entries_history \
WHERE entry_id = $1 AND version = $2 ORDER BY id DESC LIMIT 1", WHERE entry_id = $1 AND version = $2 ORDER BY id DESC LIMIT 1",
) )
@@ -132,7 +130,7 @@ pub async fn run(
.await? .await?
} else { } else {
sqlx::query_as( sqlx::query_as(
"SELECT entry_id, folder, type, version, action, tags, metadata \ "SELECT folder, type, version, action, tags, metadata \
FROM entries_history \ FROM entries_history \
WHERE entry_id = $1 ORDER BY id DESC LIMIT 1", WHERE entry_id = $1 ORDER BY id DESC LIMIT 1",
) )
@@ -151,33 +149,7 @@ pub async fn run(
) )
})?; })?;
#[derive(sqlx::FromRow)] let _ = master_key;
struct SecretHistoryRow {
field_name: String,
encrypted: Vec<u8>,
action: String,
}
let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as(
"SELECT field_name, encrypted, action FROM secrets_history \
WHERE entry_id = $1 AND entry_version = $2 ORDER BY field_name",
)
.bind(snap.entry_id)
.bind(snap.version)
.fetch_all(pool)
.await?;
for f in &field_snaps {
if f.action != "delete" && !f.encrypted.is_empty() {
crypto::decrypt_json(master_key, &f.encrypted).map_err(|e| {
anyhow::anyhow!(
"Cannot decrypt snapshot for field '{}': {}",
f.field_name,
e
)
})?;
}
}
let mut tx = pool.begin().await?; let mut tx = pool.begin().await?;
@@ -226,11 +198,15 @@ pub async fn run(
#[derive(sqlx::FromRow)] #[derive(sqlx::FromRow)]
struct LiveField { struct LiveField {
id: Uuid, id: Uuid,
field_name: String, name: String,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let live_fields: Vec<LiveField> = let live_fields: Vec<LiveField> = sqlx::query_as(
sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1") "SELECT s.id, s.name, s.encrypted \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1",
)
.bind(lr.id) .bind(lr.id)
.fetch_all(&mut *tx) .fetch_all(&mut *tx)
.await?; .await?;
@@ -239,10 +215,8 @@ pub async fn run(
if let Err(e) = db::snapshot_secret_history( if let Err(e) = db::snapshot_secret_history(
&mut tx, &mut tx,
db::SecretSnapshotParams { db::SecretSnapshotParams {
entry_id: lr.id,
secret_id: f.id, secret_id: f.id,
entry_version: lr.version, name: &f.name,
field_name: &f.field_name,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "rollback", action: "rollback",
}, },
@@ -297,22 +271,9 @@ pub async fn run(
} }
}; };
sqlx::query("DELETE FROM secrets WHERE entry_id = $1") // In N:N mode, rollback restores entry metadata/tags only.
.bind(live_entry_id) // Secret snapshots are kept for audit but secret linkage/content is not rewritten here.
.execute(&mut *tx) let _ = live_entry_id;
.await?;
for f in &field_snaps {
if f.action == "delete" {
continue;
}
sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)")
.bind(live_entry_id)
.bind(&f.field_name)
.bind(&f.encrypted)
.execute(&mut *tx)
.await?;
}
crate::audit::log_tx( crate::audit::log_tx(
&mut tx, &mut tx,

View File

@@ -210,8 +210,12 @@ pub async fn fetch_secret_schemas(
if entry_ids.is_empty() { if entry_ids.is_empty() {
return Ok(HashMap::new()); return Ok(HashMap::new());
} }
let fields: Vec<SecretField> = sqlx::query_as( let fields: Vec<EntrySecretRow> = sqlx::query_as(
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name", "SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = ANY($1) \
ORDER BY es.entry_id, es.sort_order, s.name",
) )
.bind(entry_ids) .bind(entry_ids)
.fetch_all(pool) .fetch_all(pool)
@@ -219,7 +223,8 @@ pub async fn fetch_secret_schemas(
let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new(); let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
for f in fields { for f in fields {
map.entry(f.entry_id).or_default().push(f); let entry_id = f.entry_id;
map.entry(entry_id).or_default().push(f.secret());
} }
Ok(map) Ok(map)
} }
@@ -232,8 +237,12 @@ pub async fn fetch_secrets_for_entries(
if entry_ids.is_empty() { if entry_ids.is_empty() {
return Ok(HashMap::new()); return Ok(HashMap::new());
} }
let fields: Vec<SecretField> = sqlx::query_as( let fields: Vec<EntrySecretRow> = sqlx::query_as(
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name", "SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = ANY($1) \
ORDER BY es.entry_id, es.sort_order, s.name",
) )
.bind(entry_ids) .bind(entry_ids)
.fetch_all(pool) .fetch_all(pool)
@@ -241,7 +250,8 @@ pub async fn fetch_secrets_for_entries(
let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new(); let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
for f in fields { for f in fields {
map.entry(f.entry_id).or_default().push(f); let entry_id = f.entry_id;
map.entry(entry_id).or_default().push(f.secret());
} }
Ok(map) Ok(map)
} }
@@ -345,3 +355,32 @@ impl From<EntryRaw> for Entry {
} }
} }
} }
/// Flat row produced by the `entry_secrets` JOIN `secrets` queries above:
/// one secret row from `secrets` plus the `entry_id` it is linked to via
/// `entry_secrets`, so callers can group the results per entry.
#[derive(sqlx::FromRow)]
struct EntrySecretRow {
    // Linking entry (entry_secrets.entry_id); used only as the grouping key.
    entry_id: Uuid,
    // The fields below mirror a row of the `secrets` table.
    id: Uuid,
    // Owner of the secret; nullable in the schema (ON DELETE SET NULL on users).
    user_id: Option<Uuid>,
    name: String,
    // SQL column is literally named `type`; renamed for the Rust field.
    #[sqlx(rename = "type")]
    secret_type: String,
    // Ciphertext blob as stored; decryption happens elsewhere.
    encrypted: Vec<u8>,
    version: i64,
    created_at: chrono::DateTime<chrono::Utc>,
    updated_at: chrono::DateTime<chrono::Utc>,
}
impl EntrySecretRow {
    /// Consume the joined row, discarding the `entry_id` grouping key and
    /// keeping only the secret payload as a [`SecretField`].
    fn secret(self) -> SecretField {
        // Destructure once so any future field added to the row is a compile
        // error here rather than a silently dropped value.
        let Self {
            entry_id: _,
            id,
            user_id,
            name,
            secret_type,
            encrypted,
            version,
            created_at,
            updated_at,
        } = self;
        SecretField {
            id,
            user_id,
            name,
            secret_type,
            encrypted,
            version,
            created_at,
            updated_at,
        }
    }
}

View File

@@ -7,8 +7,8 @@ use crate::crypto;
use crate::db; use crate::db;
use crate::models::{EntryRow, EntryWriteRow}; use crate::models::{EntryRow, EntryWriteRow};
use crate::service::add::{ use crate::service::add::{
collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path, collect_field_paths, collect_key_paths, flatten_json_fields, infer_secret_type, insert_path,
parse_kv, remove_path, parse_key_path, parse_kv, remove_path,
}; };
#[derive(Debug, serde::Serialize)] #[derive(Debug, serde::Serialize)]
@@ -173,8 +173,6 @@ pub async fn run(
); );
} }
let new_version = row.version + 1;
for entry in params.secret_entries { for entry in params.secret_entries {
let (path, field_value) = parse_kv(entry)?; let (path, field_value) = parse_kv(entry)?;
let flat = flatten_json_fields("", &{ let flat = flatten_json_fields("", &{
@@ -192,7 +190,10 @@ pub async fn run(
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let ef: Option<ExistingField> = sqlx::query_as( let ef: Option<ExistingField> = sqlx::query_as(
"SELECT id, encrypted FROM secrets WHERE entry_id = $1 AND field_name = $2", "SELECT s.id, s.encrypted \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1 AND s.name = $2",
) )
.bind(row.id) .bind(row.id)
.bind(field_name) .bind(field_name)
@@ -203,10 +204,8 @@ pub async fn run(
&& let Err(e) = db::snapshot_secret_history( && let Err(e) = db::snapshot_secret_history(
&mut tx, &mut tx,
db::SecretSnapshotParams { db::SecretSnapshotParams {
entry_id: row.id,
secret_id: ef.id, secret_id: ef.id,
entry_version: row.version, name: field_name,
field_name,
encrypted: &ef.encrypted, encrypted: &ef.encrypted,
action: "update", action: "update",
}, },
@@ -216,16 +215,30 @@ pub async fn run(
tracing::warn!(error = %e, "failed to snapshot secret field history"); tracing::warn!(error = %e, "failed to snapshot secret field history");
} }
if let Some(ef) = ef {
sqlx::query( sqlx::query(
"INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3) \ "UPDATE secrets SET encrypted = $1, version = version + 1, updated_at = NOW() WHERE id = $2",
ON CONFLICT (entry_id, field_name) DO UPDATE SET \
encrypted = EXCLUDED.encrypted, version = secrets.version + 1, updated_at = NOW()",
) )
.bind(row.id)
.bind(field_name)
.bind(&encrypted) .bind(&encrypted)
.bind(ef.id)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
} else {
let secret_id: Uuid = sqlx::query_scalar(
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
)
.bind(params.user_id)
.bind(field_name)
.bind(infer_secret_type(field_name))
.bind(&encrypted)
.fetch_one(&mut *tx)
.await?;
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
.bind(row.id)
.bind(secret_id)
.execute(&mut *tx)
.await?;
}
} }
} }
@@ -239,7 +252,10 @@ pub async fn run(
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let field: Option<FieldToDelete> = sqlx::query_as( let field: Option<FieldToDelete> = sqlx::query_as(
"SELECT id, encrypted FROM secrets WHERE entry_id = $1 AND field_name = $2", "SELECT s.id, s.encrypted \
FROM entry_secrets es \
JOIN secrets s ON s.id = es.secret_id \
WHERE es.entry_id = $1 AND s.name = $2",
) )
.bind(row.id) .bind(row.id)
.bind(&field_name) .bind(&field_name)
@@ -250,10 +266,8 @@ pub async fn run(
if let Err(e) = db::snapshot_secret_history( if let Err(e) = db::snapshot_secret_history(
&mut tx, &mut tx,
db::SecretSnapshotParams { db::SecretSnapshotParams {
entry_id: row.id,
secret_id: f.id, secret_id: f.id,
entry_version: new_version, name: &field_name,
field_name: &field_name,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "delete", action: "delete",
}, },
@@ -262,7 +276,16 @@ pub async fn run(
{ {
tracing::warn!(error = %e, "failed to snapshot secret field history before delete"); tracing::warn!(error = %e, "failed to snapshot secret field history before delete");
} }
sqlx::query("DELETE FROM secrets WHERE id = $1") sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
.bind(row.id)
.bind(f.id)
.execute(&mut *tx)
.await?;
sqlx::query(
"DELETE FROM secrets s \
WHERE s.id = $1 \
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
)
.bind(f.id) .bind(f.id)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "secrets-mcp" name = "secrets-mcp"
version = "0.3.7" version = "0.3.9"
edition.workspace = true edition.workspace = true
[[bin]] [[bin]]

View File

@@ -40,6 +40,14 @@ fn load_env_var(name: &str) -> Option<String> {
std::env::var(name).ok().filter(|s| !s.is_empty()) std::env::var(name).ok().filter(|s| !s.is_empty())
} }
/// Pretty-print bind address in logs (`127.0.0.1` → `localhost`); actual socket bind unchanged.
fn listen_addr_log_display(bind_addr: &str) -> String {
    // Only loopback-prefixed addresses are rewritten; anything else is echoed verbatim.
    match bind_addr.strip_prefix("127.0.0.1:") {
        Some(port) => format!("localhost:{port}"),
        None => bind_addr.to_string(),
    }
}
fn load_oauth_config(prefix: &str, base_url: &str, path: &str) -> Option<OAuthConfig> { fn load_oauth_config(prefix: &str, base_url: &str, path: &str) -> Option<OAuthConfig> {
let client_id = load_env_var(&format!("{}_CLIENT_ID", prefix))?; let client_id = load_env_var(&format!("{}_CLIENT_ID", prefix))?;
let client_secret = load_env_var(&format!("{}_CLIENT_SECRET", prefix))?; let client_secret = load_env_var(&format!("{}_CLIENT_SECRET", prefix))?;
@@ -168,7 +176,10 @@ async fn main() -> Result<()> {
.await .await
.with_context(|| format!("failed to bind to {}", bind_addr))?; .with_context(|| format!("failed to bind to {}", bind_addr))?;
tracing::info!("Secrets MCP Server listening on http://{}", bind_addr); tracing::info!(
"Secrets MCP Server listening on http://{}",
listen_addr_log_display(&bind_addr)
);
tracing::info!("MCP endpoint: {}/mcp", base_url); tracing::info!("MCP endpoint: {}/mcp", base_url);
axum::serve( axum::serve(

View File

@@ -225,12 +225,18 @@ struct AddInput {
description = "Metadata fields as a JSON object {\"key\": value}. Merged with 'meta' if both provided." description = "Metadata fields as a JSON object {\"key\": value}. Merged with 'meta' if both provided."
)] )]
meta_obj: Option<Map<String, Value>>, meta_obj: Option<Map<String, Value>>,
#[schemars(description = "Secret fields as 'key=value' strings")] #[schemars(
description = "Secret fields as 'key=value' strings. Reminder: non-sensitive endpoint/address fields should go to metadata.address instead of secrets."
)]
secrets: Option<Vec<String>>, secrets: Option<Vec<String>>,
#[schemars( #[schemars(
description = "Secret fields as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided." description = "Secret fields as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided. Reminder: non-sensitive endpoint/address fields should go to metadata.address."
)] )]
secrets_obj: Option<Map<String, Value>>, secrets_obj: Option<Map<String, Value>>,
#[schemars(
description = "Link existing secrets by secret name. Names must resolve uniquely under current user."
)]
link_secret_names: Option<Vec<String>>,
} }
#[derive(Debug, Deserialize, JsonSchema)] #[derive(Debug, Deserialize, JsonSchema)]
@@ -259,10 +265,12 @@ struct UpdateInput {
meta_obj: Option<Map<String, Value>>, meta_obj: Option<Map<String, Value>>,
#[schemars(description = "Metadata field keys to remove")] #[schemars(description = "Metadata field keys to remove")]
remove_meta: Option<Vec<String>>, remove_meta: Option<Vec<String>>,
#[schemars(description = "Secret fields to update/add as 'key=value' strings")] #[schemars(
description = "Secret fields to update/add as 'key=value' strings. Reminder: non-sensitive endpoint/address fields should go to metadata.address instead of secrets."
)]
secrets: Option<Vec<String>>, secrets: Option<Vec<String>>,
#[schemars( #[schemars(
description = "Secret fields to update/add as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided." description = "Secret fields to update/add as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided. Reminder: non-sensitive endpoint/address fields should go to metadata.address."
)] )]
secrets_obj: Option<Map<String, Value>>, secrets_obj: Option<Map<String, Value>>,
#[schemars(description = "Secret field keys to remove")] #[schemars(description = "Secret field keys to remove")]
@@ -429,10 +437,20 @@ impl SecretsService {
.entries .entries
.iter() .iter()
.map(|e| { .map(|e| {
let schema: Vec<&str> = result let schema: Vec<serde_json::Value> = result
.secret_schemas .secret_schemas
.get(&e.id) .get(&e.id)
.map(|f| f.iter().map(|s| s.field_name.as_str()).collect()) .map(|f| {
f.iter()
.map(|s| {
serde_json::json!({
"id": s.id,
"name": s.name,
"type": s.secret_type,
})
})
.collect()
})
.unwrap_or_default(); .unwrap_or_default();
serde_json::json!({ serde_json::json!({
"id": e.id, "id": e.id,
@@ -517,10 +535,20 @@ impl SecretsService {
"updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), "updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
}) })
} else { } else {
let schema: Vec<&str> = result let schema: Vec<serde_json::Value> = result
.secret_schemas .secret_schemas
.get(&e.id) .get(&e.id)
.map(|f| f.iter().map(|s| s.field_name.as_str()).collect()) .map(|f| {
f.iter()
.map(|s| {
serde_json::json!({
"id": s.id,
"name": s.name,
"type": s.secret_type,
})
})
.collect()
})
.unwrap_or_default(); .unwrap_or_default();
serde_json::json!({ serde_json::json!({
"id": e.id, "id": e.id,
@@ -639,6 +667,7 @@ impl SecretsService {
if let Some(obj) = input.secrets_obj { if let Some(obj) = input.secrets_obj {
secrets.extend(map_to_kv_strings(obj)); secrets.extend(map_to_kv_strings(obj));
} }
let link_secret_names = input.link_secret_names.unwrap_or_default();
let folder = input.folder.as_deref().unwrap_or(""); let folder = input.folder.as_deref().unwrap_or("");
let entry_type = input.entry_type.as_deref().unwrap_or(""); let entry_type = input.entry_type.as_deref().unwrap_or("");
let notes = input.notes.as_deref().unwrap_or(""); let notes = input.notes.as_deref().unwrap_or("");
@@ -653,6 +682,7 @@ impl SecretsService {
tags: &tags, tags: &tags,
meta_entries: &meta, meta_entries: &meta,
secret_entries: &secrets, secret_entries: &secrets,
link_secret_names: &link_secret_names,
user_id: Some(user_id), user_id: Some(user_id),
}, },
&user_key, &user_key,

View File

@@ -21,7 +21,7 @@ use secrets_core::service::{
api_key::{ensure_api_key, regenerate_api_key}, api_key::{ensure_api_key, regenerate_api_key},
audit_log::list_for_user, audit_log::list_for_user,
delete::delete_by_id, delete::delete_by_id,
search::{SearchParams, count_entries, list_entries}, search::{SearchParams, count_entries, fetch_secret_schemas, list_entries},
update::{UpdateEntryFieldsByIdParams, update_fields_by_id}, update::{UpdateEntryFieldsByIdParams, update_fields_by_id},
user::{ user::{
OAuthProfile, bind_oauth_account, find_or_create_user, get_user_by_id, OAuthProfile, bind_oauth_account, find_or_create_user, get_user_by_id,
@@ -105,10 +105,17 @@ struct EntryListItemView {
notes: String, notes: String,
tags: String, tags: String,
metadata: String, metadata: String,
secrets: Vec<SecretSummaryView>,
/// RFC3339 UTC for `<time datetime>`; localized in entries.html. /// RFC3339 UTC for `<time datetime>`; localized in entries.html.
updated_at_iso: String, updated_at_iso: String,
} }
struct SecretSummaryView {
id: String,
name: String,
secret_type: String,
}
/// Cap for HTML list (avoids loading unbounded rows into memory). /// Cap for HTML list (avoids loading unbounded rows into memory).
const ENTRIES_PAGE_LIMIT: u32 = 5_000; const ENTRIES_PAGE_LIMIT: u32 = 5_000;
@@ -207,6 +214,10 @@ pub fn web_router() -> Router<AppState> {
"/api/entries/{id}", "/api/entries/{id}",
patch(api_entry_patch).delete(api_entry_delete), patch(api_entry_patch).delete(api_entry_delete),
) )
.route(
"/api/entries/{entry_id}/secrets/{secret_id}",
axum::routing::delete(api_entry_secret_unlink),
)
} }
fn text_asset_response(content: &'static str, content_type: &'static str) -> Response { fn text_asset_response(content: &'static str, content_type: &'static str) -> Response {
@@ -577,6 +588,13 @@ async fn entries_page(
StatusCode::INTERNAL_SERVER_ERROR StatusCode::INTERNAL_SERVER_ERROR
})?; })?;
let shown_count = rows.len(); let shown_count = rows.len();
let entry_ids: Vec<Uuid> = rows.iter().map(|e| e.id).collect();
let secret_schemas = fetch_secret_schemas(&state.pool, &entry_ids)
.await
.map_err(|e| {
tracing::error!(error = %e, "failed to load secret schema list for web");
StatusCode::INTERNAL_SERVER_ERROR
})?;
let entries = rows let entries = rows
.into_iter() .into_iter()
@@ -589,6 +607,19 @@ async fn entries_page(
tags: e.tags.join(", "), tags: e.tags.join(", "),
metadata: serde_json::to_string_pretty(&e.metadata) metadata: serde_json::to_string_pretty(&e.metadata)
.unwrap_or_else(|_| "{}".to_string()), .unwrap_or_else(|_| "{}".to_string()),
secrets: secret_schemas
.get(&e.id)
.map(|fields| {
fields
.iter()
.map(|f| SecretSummaryView {
id: f.id.to_string(),
name: f.name.clone(),
secret_type: f.secret_type.clone(),
})
.collect()
})
.unwrap_or_default(),
updated_at_iso: e.updated_at.to_rfc3339_opts(SecondsFormat::Secs, true), updated_at_iso: e.updated_at.to_rfc3339_opts(SecondsFormat::Secs, true),
}) })
.collect(); .collect();
@@ -1000,6 +1031,104 @@ async fn api_entry_delete(
}))) })))
} }
/// `DELETE /api/entries/{entry_id}/secrets/{secret_id}` — unlink a secret from an entry.
///
/// Removes the `entry_secrets` relation row; if the secret is no longer
/// referenced by any entry afterwards, the `secrets` row itself is deleted as
/// well (orphan garbage collection). The relation delete, secret delete, and
/// audit-log write all run inside a single transaction.
///
/// Responses: 401 when not signed in; 404 when the entry is not owned by the
/// session user or the relation does not exist; otherwise a JSON body
/// `{ ok, deleted_relation, deleted_secret }`.
async fn api_entry_secret_unlink(
    State(state): State<AppState>,
    session: Session,
    Path((entry_id, secret_id)): Path<(Uuid, Uuid)>,
) -> Result<Json<serde_json::Value>, EntryApiError> {
    // Minimal entry projection — only the fields needed for the audit record.
    #[derive(sqlx::FromRow)]
    struct EntryAuditRow {
        folder: String,
        #[sqlx(rename = "type")]
        entry_type: String,
        name: String,
    }

    let user_id = current_user_id(&session)
        .await
        .ok_or((StatusCode::UNAUTHORIZED, Json(json!({ "error": "未登录" }))))?;

    let mut tx = state
        .pool
        .begin()
        .await
        .map_err(|e| map_entry_mutation_err(e.into()))?;

    // Ownership check: the entry must exist and belong to the session user.
    let entry_row: Option<EntryAuditRow> =
        sqlx::query_as("SELECT folder, type, name FROM entries WHERE id = $1 AND user_id = $2")
            .bind(entry_id)
            .bind(user_id)
            .fetch_optional(&mut *tx)
            .await
            .map_err(|e| map_entry_mutation_err(e.into()))?;
    let Some(entry_row) = entry_row else {
        tx.rollback()
            .await
            .map_err(|e| map_entry_mutation_err(e.into()))?;
        return Err((
            StatusCode::NOT_FOUND,
            Json(json!({ "error": "条目不存在或无权访问" })),
        ));
    };

    // Drop the N:N link row; rows_affected == 0 means the link never existed.
    let deleted = sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
        .bind(entry_id)
        .bind(secret_id)
        .execute(&mut *tx)
        .await
        .map_err(|e| map_entry_mutation_err(e.into()))?
        .rows_affected();
    if deleted == 0 {
        tx.rollback()
            .await
            .map_err(|e| map_entry_mutation_err(e.into()))?;
        return Err((
            StatusCode::NOT_FOUND,
            Json(json!({ "error": "关联不存在" })),
        ));
    }

    // Garbage-collect the secret itself when no other entry still links to it.
    let secret_deleted = sqlx::query(
        "DELETE FROM secrets s \
         WHERE s.id = $1 \
         AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
    )
    .bind(secret_id)
    .execute(&mut *tx)
    .await
    .map_err(|e| map_entry_mutation_err(e.into()))?
    .rows_affected()
        > 0;

    // Audit within the same transaction so the log commits atomically with the change.
    secrets_core::audit::log_tx(
        &mut tx,
        Some(user_id),
        "unlink_secret",
        &entry_row.folder,
        &entry_row.entry_type,
        &entry_row.name,
        json!({
            "source": "web",
            "entry_id": entry_id,
            "secret_id": secret_id,
            "deleted_secret": secret_deleted,
        }),
    )
    .await;

    tx.commit()
        .await
        .map_err(|e| map_entry_mutation_err(e.into()))?;
    Ok(Json(json!({
        "ok": true,
        "deleted_relation": true,
        "deleted_secret": secret_deleted,
    })))
}
// ── OAuth / Well-known ──────────────────────────────────────────────────────── // ── OAuth / Well-known ────────────────────────────────────────────────────────
/// RFC 9728 — OAuth 2.0 Protected Resource Metadata. /// RFC 9728 — OAuth 2.0 Protected Resource Metadata.

View File

@@ -45,7 +45,7 @@
.btn-sign-out:hover { background: var(--surface2); } .btn-sign-out:hover { background: var(--surface2); }
.main { padding: 32px 24px 40px; flex: 1; } .main { padding: 32px 24px 40px; flex: 1; }
.card { background: var(--surface); border: 1px solid var(--border); border-radius: 12px; .card { background: var(--surface); border: 1px solid var(--border); border-radius: 12px;
padding: 24px; width: 100%; max-width: 1280px; margin: 0 auto; } padding: 24px; width: 100%; max-width: 1480px; margin: 0 auto; }
.card-title { font-size: 20px; font-weight: 600; margin-bottom: 8px; } .card-title { font-size: 20px; font-weight: 600; margin-bottom: 8px; }
.card-subtitle { color: var(--text-muted); font-size: 13px; margin-bottom: 20px; } .card-subtitle { color: var(--text-muted); font-size: 13px; margin-bottom: 20px; }
.filter-bar { .filter-bar {
@@ -73,17 +73,46 @@
} }
.btn-clear:hover { background: var(--surface2); color: var(--text); } .btn-clear:hover { background: var(--surface2); color: var(--text); }
.empty { color: var(--text-muted); font-size: 14px; padding: 20px 0; } .empty { color: var(--text-muted); font-size: 14px; padding: 20px 0; }
.table-wrap { overflow-x: auto; } .table-wrap {
table { width: 100%; border-collapse: collapse; min-width: 720px; } overflow: auto;
th, td { text-align: left; vertical-align: top; padding: 12px 10px; border-top: 1px solid var(--border); } border: 1px solid var(--border);
th { color: var(--text-muted); font-size: 12px; font-weight: 600; white-space: nowrap; } border-radius: 10px;
td { font-size: 13px; } background: var(--bg);
.mono { font-family: 'JetBrains Mono', monospace; } max-height: 72vh;
.cell-notes, .cell-meta {
max-width: 280px; word-break: break-word;
} }
table {
width: max-content;
min-width: 1240px;
border-collapse: separate;
border-spacing: 0;
}
th, td { text-align: left; vertical-align: top; padding: 12px 10px; border-top: 1px solid var(--border); }
th {
color: var(--text-muted);
font-size: 12px;
font-weight: 600;
white-space: nowrap;
position: sticky;
top: 0;
z-index: 2;
background: var(--surface);
}
td { font-size: 13px; line-height: 1.45; }
tbody tr:nth-child(2n) td { background: rgba(255, 255, 255, 0.01); }
.mono { font-family: 'JetBrains Mono', monospace; }
.col-updated { min-width: 168px; }
.col-folder { min-width: 128px; }
.col-type { min-width: 108px; }
.col-name { min-width: 180px; max-width: 260px; }
.col-tags { min-width: 160px; max-width: 220px; }
.col-actions { min-width: 132px; }
.cell-name, .cell-tags-val {
overflow-wrap: anywhere;
word-break: break-word;
}
.cell-notes, .cell-meta { min-width: 260px; max-width: 360px; }
.notes-scroll { .notes-scroll {
max-height: 160px; max-height: 120px;
overflow: auto; overflow: auto;
white-space: pre-wrap; white-space: pre-wrap;
word-break: break-word; word-break: break-word;
@@ -96,10 +125,45 @@
.detail { .detail {
background: var(--bg); border: 1px solid var(--border); border-radius: 8px; background: var(--bg); border: 1px solid var(--border); border-radius: 8px;
padding: 10px; white-space: pre-wrap; word-break: break-word; font-size: 12px; padding: 10px; white-space: pre-wrap; word-break: break-word; font-size: 12px;
max-width: 320px; max-height: 160px; overflow: auto; max-width: 360px; max-height: 120px; overflow: auto;
} }
.col-actions { white-space: nowrap; } .col-actions { white-space: nowrap; }
.row-actions { display: flex; flex-wrap: wrap; gap: 6px; } .row-actions { display: flex; flex-wrap: wrap; gap: 6px; }
.col-secrets { min-width: 300px; max-width: 420px; }
.secret-list { display: flex; flex-wrap: wrap; gap: 6px; max-width: 400px; }
.secret-chip {
display: inline-flex;
align-items: center;
gap: 6px;
border: 1px solid var(--border);
border-radius: 999px;
padding: 3px 8px;
font-size: 11px;
background: var(--surface2);
font-family: 'JetBrains Mono', monospace;
max-width: 100%;
min-width: 0;
}
.secret-name {
min-width: 0;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.secret-type {
color: var(--text-muted);
border-left: 1px solid var(--border);
padding-left: 6px;
}
.btn-unlink-secret {
border: none;
background: transparent;
color: #f85149;
cursor: pointer;
font-size: 12px;
line-height: 1;
padding: 0;
}
.btn-row { .btn-row {
padding: 4px 10px; border-radius: 6px; font-size: 12px; cursor: pointer; padding: 4px 10px; border-radius: 6px; font-size: 12px; cursor: pointer;
border: 1px solid var(--border); background: var(--surface2); color: var(--text-muted); border: 1px solid var(--border); background: var(--surface2); color: var(--text-muted);
@@ -145,7 +209,8 @@
.main { padding: 20px 12px 28px; } .main { padding: 20px 12px 28px; }
.card { padding: 16px; } .card { padding: 16px; }
.topbar { padding: 12px 16px; flex-wrap: wrap; } .topbar { padding: 12px 16px; flex-wrap: wrap; }
table, thead, tbody, th, td, tr { display: block; } .table-wrap { max-height: none; border: none; background: transparent; }
table, thead, tbody, th, td, tr { display: block; min-width: 0; width: 100%; }
thead { display: none; } thead { display: none; }
tr { border-top: 1px solid var(--border); padding: 12px 0; } tr { border-top: 1px solid var(--border); padding: 12px 0; }
td { border-top: none; padding: 6px 0; max-width: none; } td { border-top: none; padding: 6px 0; max-width: none; }
@@ -160,9 +225,9 @@
td.col-notes::before { content: "Notes"; } td.col-notes::before { content: "Notes"; }
td.col-tags::before { content: "Tags"; } td.col-tags::before { content: "Tags"; }
td.col-meta::before { content: "Metadata"; } td.col-meta::before { content: "Metadata"; }
td.col-secrets::before { content: "Secrets"; }
td.col-actions::before { content: "操作"; } td.col-actions::before { content: "操作"; }
.detail { max-width: none; } .detail, .notes-scroll, .secret-list { max-width: none; }
.notes-scroll { max-width: none; }
} }
</style> </style>
</head> </head>
@@ -189,7 +254,7 @@
<main class="main"> <main class="main">
<section class="card"> <section class="card">
<div class="card-title">我的条目</div> <div class="card-title">我的条目</div>
<div class="card-subtitle">在当前筛选条件下,共 <strong>{{ total_count }}</strong> 条记录;本页显示 <strong>{{ shown_count }}</strong> 条(按更新时间降序,单页最多 {{ limit }} 条)。不含密文字段。时间为浏览器本地时区。</div> <div class="card-subtitle">在当前筛选条件下,共 <strong>{{ total_count }}</strong> 条记录;本页显示 <strong>{{ shown_count }}</strong> 条(按更新时间降序,单页最多 {{ limit }} 条)。不含密文字段。时间为浏览器本地时区。提示:非敏感地址类字段(如 address / endpoint / url建议放在 Metadata例如 <code>metadata.address</code>),仅密码/令牌等放 Secrets。</div>
<form class="filter-bar" method="get" action="/entries"> <form class="filter-bar" method="get" action="/entries">
<div class="filter-field"> <div class="filter-field">
@@ -220,6 +285,7 @@
<th>Notes</th> <th>Notes</th>
<th>Tags</th> <th>Tags</th>
<th>Metadata</th> <th>Metadata</th>
<th>Secrets</th>
<th>操作</th> <th>操作</th>
</tr> </tr>
</thead> </thead>
@@ -233,6 +299,17 @@
<td class="col-notes cell-notes">{% if !entry.notes.is_empty() %}<div class="notes-scroll cell-notes-val">{{ entry.notes }}</div>{% endif %}</td> <td class="col-notes cell-notes">{% if !entry.notes.is_empty() %}<div class="notes-scroll cell-notes-val">{{ entry.notes }}</div>{% endif %}</td>
<td class="col-tags mono cell-tags-val">{{ entry.tags }}</td> <td class="col-tags mono cell-tags-val">{{ entry.tags }}</td>
<td class="col-meta cell-meta"><pre class="detail cell-meta-val">{{ entry.metadata }}</pre></td> <td class="col-meta cell-meta"><pre class="detail cell-meta-val">{{ entry.metadata }}</pre></td>
<td class="col-secrets">
<div class="secret-list">
{% for s in entry.secrets %}
<span class="secret-chip">
<span class="secret-name" title="{{ s.name }}">{{ s.name }}</span>
<span class="secret-type">{{ s.secret_type }}</span>
<button type="button" class="btn-unlink-secret" data-secret-id="{{ s.id }}" data-secret-name="{{ s.name }}" title="解除关联">x</button>
</span>
{% endfor %}
</div>
</td>
<td class="col-actions"> <td class="col-actions">
<div class="row-actions"> <div class="row-actions">
<button type="button" class="btn-row btn-edit">编辑</button> <button type="button" class="btn-row btn-edit">编辑</button>
@@ -383,6 +460,29 @@
}) })
.catch(function (e) { alert(e.message || String(e)); }); .catch(function (e) { alert(e.message || String(e)); });
}); });
tr.querySelectorAll('.btn-unlink-secret').forEach(function (btn) {
btn.addEventListener('click', function () {
var entryId = tr.getAttribute('data-entry-id');
var secretId = btn.getAttribute('data-secret-id');
var secretName = btn.getAttribute('data-secret-name') || '';
if (!entryId || !secretId) return;
if (!confirm('确定解除 secret 关联「' + secretName + '」?')) return;
fetch('/api/entries/' + encodeURIComponent(entryId) + '/secrets/' + encodeURIComponent(secretId), {
method: 'DELETE',
credentials: 'same-origin'
}).then(function (r) {
return r.json().then(function (data) {
if (!r.ok) throw new Error(data.error || ('HTTP ' + r.status));
return data;
});
}).then(function () {
window.location.reload();
}).catch(function (e) {
alert(e.message || String(e));
});
});
});
}); });
})(); })();
</script> </script>

View File

@@ -0,0 +1,126 @@
-- Entry-Secret N:N migration (manual SQL)
-- Safe to re-run: uses IF EXISTS/IF NOT EXISTS guards.
BEGIN;
-- 1) secrets: add new columns
ALTER TABLE secrets
ADD COLUMN IF NOT EXISTS user_id UUID REFERENCES users(id) ON DELETE SET NULL;
ALTER TABLE secrets
ADD COLUMN IF NOT EXISTS type VARCHAR(64) NOT NULL DEFAULT 'text';
-- 2) rename field_name -> name (idempotent)
DO $$ BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_name = 'secrets' AND column_name = 'field_name'
) THEN
ALTER TABLE secrets RENAME COLUMN field_name TO name;
END IF;
END $$;
-- 3) create join table
CREATE TABLE IF NOT EXISTS entry_secrets (
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
secret_id UUID NOT NULL REFERENCES secrets(id) ON DELETE CASCADE,
sort_order INT NOT NULL DEFAULT 0,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY (entry_id, secret_id)
);
CREATE INDEX IF NOT EXISTS idx_entry_secrets_secret_id ON entry_secrets(secret_id);
-- 4) backfill user_id and relationship from old secrets.entry_id
DO $$ BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_name = 'secrets' AND column_name = 'entry_id'
) THEN
UPDATE secrets s
SET user_id = e.user_id
FROM entries e
WHERE s.entry_id = e.id AND s.user_id IS NULL;
INSERT INTO entry_secrets(entry_id, secret_id, sort_order)
SELECT entry_id, id, 0
FROM secrets
WHERE entry_id IS NOT NULL
ON CONFLICT DO NOTHING;
END IF;
END $$;
-- 5) backfill secret types
UPDATE secrets SET type = 'pem' WHERE name IN ('ssh_key');
UPDATE secrets SET type = 'password' WHERE name IN ('password');
UPDATE secrets SET type = 'phone' WHERE name LIKE 'phone%';
UPDATE secrets SET type = 'url' WHERE name IN ('webhook_url', 'address');
UPDATE secrets
SET type = 'token'
WHERE name IN (
'access_key_id',
'access_key_secret',
'global_api_key',
'api_key',
'secret_key',
'personal_access_token',
'runner_token',
'GOOGLE_CLIENT_ID',
'GOOGLE_CLIENT_SECRET'
);
-- 6) drop old entry_id path
ALTER TABLE secrets DROP CONSTRAINT IF EXISTS secrets_entry_id_fkey;
DROP INDEX IF EXISTS idx_secrets_entry_id;
ALTER TABLE secrets DROP CONSTRAINT IF EXISTS secrets_entry_id_field_name_key;
ALTER TABLE secrets DROP CONSTRAINT IF EXISTS secrets_entry_id_name_key;
ALTER TABLE secrets DROP COLUMN IF EXISTS entry_id;
-- 7) add indexes for new access paths
CREATE INDEX IF NOT EXISTS idx_secrets_user_id
ON secrets(user_id) WHERE user_id IS NOT NULL;
DO $$
DECLARE
duplicate_samples TEXT;
BEGIN
SELECT string_agg(
format('user_id=%s, name=%s, count=%s', t.user_id, t.name, t.cnt),
E'\n'
)
INTO duplicate_samples
FROM (
SELECT user_id::TEXT AS user_id, name, COUNT(*) AS cnt
FROM secrets
WHERE user_id IS NOT NULL
GROUP BY user_id, name
HAVING COUNT(*) > 1
ORDER BY cnt DESC, user_id, name
LIMIT 20
) t;
IF duplicate_samples IS NOT NULL THEN
RAISE EXCEPTION
'Cannot enforce unique constraint on secrets(user_id, name). Duplicates found:%',
E'\n' || duplicate_samples
USING HINT = 'Please deduplicate conflicting rows, then rerun migration.';
END IF;
END $$;
CREATE UNIQUE INDEX IF NOT EXISTS idx_secrets_unique_user_name
ON secrets(user_id, name) WHERE user_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_secrets_name ON secrets(name);
CREATE INDEX IF NOT EXISTS idx_secrets_type ON secrets(type);
-- 8) secrets_history: rename and remove entry-scoped columns
DO $$ BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_name = 'secrets_history' AND column_name = 'field_name'
) THEN
ALTER TABLE secrets_history RENAME COLUMN field_name TO name;
END IF;
END $$;
ALTER TABLE secrets_history DROP COLUMN IF EXISTS entry_id;
ALTER TABLE secrets_history DROP COLUMN IF EXISTS entry_version;
COMMIT;

View File

@@ -0,0 +1,67 @@
-- Metadata cleanup migration (manual SQL)
-- Keep tags/type as dedicated columns; remove duplicated metadata keys.
BEGIN;
-- 1) Promote metadata.type -> entries.type when present.
UPDATE entries
SET type = metadata->>'type'
WHERE metadata->>'type' IS NOT NULL
AND metadata->>'type' <> '';
-- 2) Remove metadata.type.
UPDATE entries
SET metadata = metadata - 'type'
WHERE metadata ? 'type';
-- 3) Remove metadata.environment (duplicated by tags prod/dev).
UPDATE entries
SET metadata = metadata - 'environment'
WHERE metadata ? 'environment';
-- 4) Remove metadata.account when equal to folder.
UPDATE entries
SET metadata = metadata - 'account'
WHERE metadata->>'account' = folder;
-- 5) Normalize manufacturer -> provider.
UPDATE entries
SET metadata = (metadata - 'manufacturer')
|| jsonb_build_object('provider', metadata->>'manufacturer')
WHERE metadata ? 'manufacturer'
AND NOT metadata ? 'provider';
UPDATE entries
SET metadata = metadata - 'manufacturer'
WHERE metadata ? 'manufacturer'
AND metadata ? 'provider';
-- 6) Drop ssh_key_format (moved to secrets.type).
UPDATE entries
SET metadata = metadata - 'ssh_key_format'
WHERE metadata ? 'ssh_key_format';
-- 7) Remove display_name when duplicated by name.
UPDATE entries
SET metadata = metadata - 'display_name'
WHERE metadata->>'display_name' = name;
-- 8) Condense server_* metadata into server_ref.
UPDATE entries
SET metadata = metadata
- 'server_account'
- 'server_hostname'
- 'server_location'
- 'server_public_ip'
|| CASE
WHEN metadata ? 'server_entry_name'
THEN jsonb_build_object('server_ref', metadata->>'server_entry_name')
ELSE '{}'::jsonb
END
WHERE metadata ? 'server_entry_name' OR metadata ? 'server_account';
UPDATE entries
SET metadata = metadata - 'server_entry_name'
WHERE metadata ? 'server_entry_name';
COMMIT;

View File

@@ -0,0 +1,81 @@
#!/usr/bin/env bash
# Migrate PostgreSQL data from secrets-mcp-prod to secrets-nn-test.
#
# Prereqs: pg_dump and pg_restore (PostgreSQL client tools) on PATH; psql is
# also required unless RUN_NN_SQL=0.
# TLS: Use the same connection parameters as your MCP / app (e.g. sslmode=verify-full
# and PGSSLROOTCERT if needed). If local psql fails with "certificate verify failed",
# run this script from a host that trusts the server CA, or set PGSSLROOTCERT.
#
# Usage:
#   export SOURCE_DATABASE_URL='postgres://USER:PASS@host:5432/secrets-mcp-prod?sslmode=verify-full'
#   export TARGET_DATABASE_URL='postgres://USER:PASS@host:5432/secrets-nn-test?sslmode=verify-full'
#   ./scripts/migrate-db-prod-to-nn-test.sh
#
# Options (env):
#   BACKUP_TARGET_FIRST=1     # default: dump target to <repo>/tmp/secrets-nn-test-before-<timestamp>.dump before restore
#   RUN_NN_SQL=1              # default: run migrations/001_nn_schema.sql then 002_data_cleanup.sql on target after restore
#   SKIP_TARGET_BACKUP=1      # skip target backup
#   EXCLUDE_TOWER_SESSIONS=1  # exclude the tower_sessions schema from the source dump
#   DUMP_FILE=/path/to.dump   # override source dump path (default: <repo>/tmp/secrets-mcp-prod-<timestamp>.dump)
#
# WARNINGS:
# - pg_restore with --clean --if-exists drops objects that exist in the dump; target DB is replaced
#   to match the logical content of the source dump (same as typical full restore).
# - Optionally keep a manual dump of the target before proceeding.
# - 001_nn_schema.sql will fail if secrets has duplicate (user_id, name) after backfill; fix data first.

set -euo pipefail

ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT"

SOURCE_URL="${SOURCE_DATABASE_URL:-}"
TARGET_URL="${TARGET_DATABASE_URL:-}"
if [[ -z "$SOURCE_URL" || -z "$TARGET_URL" ]]; then
  echo "Set SOURCE_DATABASE_URL and TARGET_DATABASE_URL (postgres URLs)." >&2
  exit 1
fi

# Fail fast on missing client tools BEFORE any destructive work. psql is only
# needed for the post-restore migrations, so it is checked conditionally.
if ! command -v pg_dump >/dev/null || ! command -v pg_restore >/dev/null; then
  echo "pg_dump and pg_restore are required." >&2
  exit 1
fi
if [[ "${RUN_NN_SQL:-1}" == "1" ]] && ! command -v psql >/dev/null; then
  echo "psql is required when RUN_NN_SQL=1 (set RUN_NN_SQL=0 to skip NN SQL)." >&2
  exit 1
fi

TS="$(date +%Y%m%d%H%M%S)"
DUMP_FILE="${DUMP_FILE:-$ROOT/tmp/secrets-mcp-prod-${TS}.dump}"
mkdir -p "$(dirname "$DUMP_FILE")"

if [[ "${EXCLUDE_TOWER_SESSIONS:-}" == "1" ]]; then
  echo "==> Dumping source (custom format, excluding schema tower_sessions) -> $DUMP_FILE"
  pg_dump "$SOURCE_URL" -Fc --no-owner --no-acl --exclude-schema=tower_sessions -f "$DUMP_FILE"
else
  echo "==> Dumping source (custom format) -> $DUMP_FILE"
  pg_dump "$SOURCE_URL" -Fc --no-owner --no-acl -f "$DUMP_FILE"
fi

if [[ "${SKIP_TARGET_BACKUP:-}" != "1" && "${BACKUP_TARGET_FIRST:-1}" == "1" ]]; then
  BACKUP_FILE="$ROOT/tmp/secrets-nn-test-before-${TS}.dump"
  # DUMP_FILE may have been redirected elsewhere, so ensure tmp/ exists here too.
  mkdir -p "$(dirname "$BACKUP_FILE")"
  echo "==> Backing up target -> $BACKUP_FILE"
  # Best-effort: an empty or not-yet-created target must not abort the migration.
  pg_dump "$TARGET_URL" -Fc --no-owner --no-acl -f "$BACKUP_FILE" || {
    echo "Target backup failed (empty DB is OK). Continuing." >&2
  }
fi

echo "==> Restoring into target (--clean --if-exists)"
pg_restore -d "$TARGET_URL" --no-owner --no-acl --clean --if-exists --verbose "$DUMP_FILE"

if [[ "${RUN_NN_SQL:-1}" == "1" ]]; then
  if [[ ! -f "$ROOT/migrations/001_nn_schema.sql" ]]; then
    echo "migrations/001_nn_schema.sql not found; skip NN SQL." >&2
  else
    echo "==> Applying migrations/001_nn_schema.sql on target"
    psql "$TARGET_URL" -v ON_ERROR_STOP=1 -f "$ROOT/migrations/001_nn_schema.sql"
  fi
  if [[ -f "$ROOT/migrations/002_data_cleanup.sql" ]]; then
    echo "==> Applying migrations/002_data_cleanup.sql on target"
    psql "$TARGET_URL" -v ON_ERROR_STOP=1 -f "$ROOT/migrations/002_data_cleanup.sql"
  fi
fi

echo "==> Done. Suggested verification:"
echo "  psql \"\$TARGET_DATABASE_URL\" -c \"SELECT COUNT(*) FROM entries; SELECT COUNT(*) FROM secrets; SELECT COUNT(*) FROM entry_secrets;\""
echo "  ./scripts/release-check.sh  # optional app-side sanity"