Compare commits
secrets-mc ... b99d821644

4 Commits

| Author | SHA1 | Date |
|---|---|---|
| | b99d821644 | |
| | 32f275f88a | |
| | c6fb457734 | |
| | df701f21b9 | |
.gitignore · vendored · 3

@@ -4,4 +4,5 @@
 .cursor/
 # Google OAuth 下载的 JSON 凭据文件
 client_secret_*.apps.googleusercontent.com.json
 *.pem
+tmp/
@@ -118,7 +118,7 @@ oauth_accounts (
|
|||||||
|
|
||||||
### PEM 共享(`key_ref`)
|
### PEM 共享(`key_ref`)
|
||||||
|
|
||||||
将共享 PEM 存为 **`type=key`** 的 entry;其它记录在 `metadata.key_ref` 指向该 key 的 `name`(支持 `folder/name` 格式消歧)。更新 key 记录后,引用方通过服务层解析合并逻辑即可使用新密钥(实现见 `secrets_core::service::env_map`)。
|
建议将共享 PEM 存为 **`type=key`** 的 entry;其它记录在 `metadata.key_ref` 指向目标 entry 的 `name`(支持 `folder/name` 格式消歧)。删除被引用 key 时,服务会自动迁移为单副本 + 重定向(复制到首个引用方,其余引用方改指向新 owner);解析逻辑见 `secrets_core::service::env_map`。
|
||||||
|
|
||||||
## 代码规范
|
## 代码规范
|
||||||
|
|
||||||
|
|||||||
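As a concrete illustration of the `key_ref` convention described above, here is a hedged sketch (the folder and entry names are invented for this example) of the metadata a `server` entry would carry to point at a shared `type=key` entry:

```rust
use serde_json::json;

fn main() {
    // A shared PEM is stored once as a `type=key` entry, e.g. "kfolder/shared-key".
    // Other entries reference it through metadata.key_ref; the qualified
    // `folder/name` form disambiguates when several keys share a name.
    let server_metadata = json!({
        "key_ref": "kfolder/shared-key"
    });
    println!("{server_metadata}");
}
```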
Cargo.lock · generated · 2

@@ -1968,7 +1968,7 @@ dependencies = [
 
 [[package]]
 name = "secrets-mcp"
-version = "0.3.6"
+version = "0.3.9"
 dependencies = [
  "anyhow",
  "askama",
@@ -57,6 +57,7 @@ SECRETS_ENV=production
 - **`secrets_search`**: discover entries (filterable by query / folder / type / name); does not require the encryption header.
 - **`secrets_get` / `secrets_update` / `secrets_delete` (by name) / `secrets_history` / `secrets_rollback`**: if `name` alone is globally unique it is a direct hit; if multiple entries share the name, a disambiguation error is returned and **`folder`** must be added to the parameters.
 - **`secrets_delete`**: with `dry_run=true` the disambiguation rules are the same as for a real delete: a unique match previews that one entry, multiple matches return an error and require `folder`.
+- **Shared-key auto-migrating delete**: when deleting a key entry that is still referenced via `metadata.key_ref`, the system migrates automatically: the ciphertext is copied to the first referrer, the remaining referrers' `key_ref` values are redirected to the new owner, and the delete then proceeds.
 
 ## Encryption architecture (hybrid E2EE)
 
@@ -167,7 +168,8 @@ flowchart LR
 
 ### PEM sharing (`key_ref`)
 
-The same PEM can be referenced by multiple `server` (and similar) records: store the PEM as a **`type=key`** entry and write that key entry's `name` into the other entries' `metadata.key_ref` (the `folder/name` form is supported for disambiguation); on rotation, only the key record needs updating.
+The same PEM can be referenced by multiple `server` (and similar) records: it is recommended to store the PEM as a **`type=key`** entry and write the target entry's `name` into the other entries' `metadata.key_ref` (the `folder/name` form is supported for disambiguation); on rotation, only that target record needs updating.
+When a shared key is deleted, the system automatically migrates the references: the ciphertext is copied to the first referrer (a single copy is kept), the other referrers' `key_ref` values are redirected to that new owner, and the original key record is then deleted.
 
 ## Audit log
 
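A minimal sketch of the disambiguation flow described in the bullets above, using `serde_json::json!` to stand in for the tool arguments; the exact argument schema of the MCP tools is assumed here, not taken from the source:

```rust
use serde_json::json;

fn main() {
    // First attempt: `name` alone. If several entries share this name, the
    // server answers with a disambiguation error instead of a result.
    let by_name_only = json!({ "name": "shared-key" });

    // Retry with `folder` added, as the docs above require for ambiguous names.
    let disambiguated = json!({ "name": "shared-key", "folder": "kfolder" });

    println!("{by_name_only}\n{disambiguated}");
}
```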
@@ -83,16 +83,30 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
         -- ── secrets: one row per encrypted field ─────────────────────────────────
         CREATE TABLE IF NOT EXISTS secrets (
             id UUID PRIMARY KEY DEFAULT uuidv7(),
-            entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
-            field_name VARCHAR(256) NOT NULL,
+            user_id UUID,
+            name VARCHAR(256) NOT NULL,
+            type VARCHAR(64) NOT NULL DEFAULT 'text',
             encrypted BYTEA NOT NULL DEFAULT '\x',
             version BIGINT NOT NULL DEFAULT 1,
             created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-            UNIQUE(entry_id, field_name)
+            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
         );
 
-        CREATE INDEX IF NOT EXISTS idx_secrets_entry_id ON secrets(entry_id);
+        CREATE INDEX IF NOT EXISTS idx_secrets_user_id ON secrets(user_id) WHERE user_id IS NOT NULL;
+        CREATE UNIQUE INDEX IF NOT EXISTS idx_secrets_unique_user_name
+            ON secrets(user_id, name) WHERE user_id IS NOT NULL;
+        CREATE INDEX IF NOT EXISTS idx_secrets_name ON secrets(name);
+        CREATE INDEX IF NOT EXISTS idx_secrets_type ON secrets(type);
+
+        -- ── entry_secrets: N:N relation ────────────────────────────────────────────
+        CREATE TABLE IF NOT EXISTS entry_secrets (
+            entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
+            secret_id UUID NOT NULL REFERENCES secrets(id) ON DELETE CASCADE,
+            sort_order INT NOT NULL DEFAULT 0,
+            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+            PRIMARY KEY(entry_id, secret_id)
+        );
+        CREATE INDEX IF NOT EXISTS idx_entry_secrets_secret_id ON entry_secrets(secret_id);
 
         -- ── audit_log: append-only operation log ─────────────────────────────────
         CREATE TABLE IF NOT EXISTS audit_log (
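With the N:N link above, an entry's fields are reached through `entry_secrets` rather than an `entry_id` column on `secrets`. The join used repeatedly later in this diff is shown here as a standalone sketch; the function name and return shape are illustrative only:

```rust
use sqlx::PgPool;
use uuid::Uuid;

// Fetch the secret fields linked to one entry via the new entry_secrets table.
async fn fields_for_entry(pool: &PgPool, entry_id: Uuid) -> anyhow::Result<Vec<(Uuid, String)>> {
    let rows: Vec<(Uuid, String)> = sqlx::query_as(
        "SELECT s.id, s.name \
         FROM entry_secrets es \
         JOIN secrets s ON s.id = es.secret_id \
         WHERE es.entry_id = $1 \
         ORDER BY es.sort_order",
    )
    .bind(entry_id)
    .fetch_all(pool)
    .await?;
    Ok(rows)
}
```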
@@ -141,17 +155,13 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
         -- ── secrets_history: field-level snapshot ────────────────────────────────
         CREATE TABLE IF NOT EXISTS secrets_history (
             id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
-            entry_id UUID NOT NULL,
             secret_id UUID NOT NULL,
-            entry_version BIGINT NOT NULL,
-            field_name VARCHAR(256) NOT NULL,
+            name VARCHAR(256) NOT NULL,
             encrypted BYTEA NOT NULL DEFAULT '\x',
             action VARCHAR(16) NOT NULL,
             created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
         );
 
-        CREATE INDEX IF NOT EXISTS idx_secrets_history_entry_id
-            ON secrets_history(entry_id, entry_version DESC);
         CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id
             ON secrets_history(secret_id);
 
@@ -210,6 +220,16 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
             END IF;
         END $$;
 
+        DO $$ BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_constraint WHERE conname = 'fk_secrets_user_id'
+            ) THEN
+                ALTER TABLE secrets
+                    ADD CONSTRAINT fk_secrets_user_id
+                    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;
+            END IF;
+        END $$;
+
         DO $$ BEGIN
             IF NOT EXISTS (
                 SELECT 1 FROM pg_constraint WHERE conname = 'fk_audit_log_user_id'
@@ -499,10 +519,8 @@ pub async fn snapshot_entry_history(
 // ── Secret field-level history snapshot ──────────────────────────────────────
 
 pub struct SecretSnapshotParams<'a> {
-    pub entry_id: uuid::Uuid,
     pub secret_id: uuid::Uuid,
-    pub entry_version: i64,
-    pub field_name: &'a str,
+    pub name: &'a str,
     pub encrypted: &'a [u8],
     pub action: &'a str,
 }
@@ -513,13 +531,11 @@ pub async fn snapshot_secret_history(
 ) -> Result<()> {
     sqlx::query(
         "INSERT INTO secrets_history \
-         (entry_id, secret_id, entry_version, field_name, encrypted, action) \
-         VALUES ($1, $2, $3, $4, $5, $6)",
+         (secret_id, name, encrypted, action) \
+         VALUES ($1, $2, $3, $4)",
     )
-    .bind(p.entry_id)
     .bind(p.secret_id)
-    .bind(p.entry_version)
-    .bind(p.field_name)
+    .bind(p.name)
     .bind(p.encrypted)
     .bind(p.action)
     .execute(&mut **tx)
@@ -27,8 +27,11 @@ pub struct Entry {
 #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
 pub struct SecretField {
     pub id: Uuid,
-    pub entry_id: Uuid,
-    pub field_name: String,
+    pub user_id: Option<Uuid>,
+    pub name: String,
+    #[serde(rename = "type")]
+    #[sqlx(rename = "type")]
+    pub secret_type: String,
     /// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag
     pub encrypted: Vec<u8>,
     pub version: i64,
@@ -83,7 +86,7 @@ impl From<&EntryWriteRow> for EntryRow {
 #[derive(Debug, sqlx::FromRow)]
 pub struct SecretFieldRow {
     pub id: Uuid,
-    pub field_name: String,
+    pub name: String,
     pub encrypted: Vec<u8>,
 }
 
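The `SecretField` change above maps the SQL column `type` onto a Rust field `secret_type`, because `type` is a Rust keyword; serde keeps the JSON key as "type" and sqlx keeps the column name. A small self-contained sketch of the same rename pattern (sqlx omitted to keep it dependency-free):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct FieldMeta {
    name: String,
    // Rust field name differs from the wire key "type".
    #[serde(rename = "type")]
    secret_type: String,
}

fn main() -> Result<(), serde_json::Error> {
    let m: FieldMeta = serde_json::from_str(r#"{"name":"pem","type":"pem"}"#)?;
    assert_eq!(m.secret_type, "pem");
    println!("{}", serde_json::to_string(&m)?);
    Ok(())
}
```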
@@ -1,6 +1,7 @@
 use anyhow::Result;
 use serde_json::{Map, Value};
 use sqlx::PgPool;
+use std::collections::{BTreeSet, HashSet};
 use std::fs;
 use uuid::Uuid;
 
@@ -176,6 +177,7 @@ pub struct AddParams<'a> {
     pub tags: &'a [String],
     pub meta_entries: &'a [String],
     pub secret_entries: &'a [String],
+    pub link_secret_names: &'a [String],
     /// Optional user_id for multi-user isolation (None = single-user CLI mode)
     pub user_id: Option<Uuid>,
 }
@@ -185,6 +187,11 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
     let secret_json = build_json(params.secret_entries)?;
     let meta_keys = collect_key_paths(params.meta_entries)?;
     let secret_keys = collect_key_paths(params.secret_entries)?;
+    let flat_fields = flatten_json_fields("", &secret_json);
+    let new_secret_names: BTreeSet<String> =
+        flat_fields.iter().map(|(name, _)| name.clone()).collect();
+    let link_secret_names =
+        validate_link_secret_names(params.link_secret_names, &new_secret_names)?;
+
     let mut tx = pool.begin().await?;
 
@@ -279,10 +286,11 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
         .await?
     };
 
-    let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
-        .bind(entry_id)
-        .fetch_one(&mut *tx)
-        .await?;
+    let current_entry_version: i64 =
+        sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
+            .bind(entry_id)
+            .fetch_one(&mut *tx)
+            .await?;
 
     if existing.is_none()
         && let Err(e) = db::snapshot_entry_history(
@@ -293,7 +301,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
                 folder: params.folder,
                 entry_type: params.entry_type,
                 name: params.name,
-                version: new_entry_version,
+                version: current_entry_version,
                 action: "create",
                 tags: params.tags,
                 metadata: &metadata,
@@ -308,23 +316,25 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
         #[derive(sqlx::FromRow)]
         struct ExistingField {
             id: Uuid,
-            field_name: String,
+            name: String,
             encrypted: Vec<u8>,
         }
-        let existing_fields: Vec<ExistingField> =
-            sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1")
-                .bind(entry_id)
-                .fetch_all(&mut *tx)
-                .await?;
+        let existing_fields: Vec<ExistingField> = sqlx::query_as(
+            "SELECT s.id, s.name, s.encrypted \
+             FROM entry_secrets es \
+             JOIN secrets s ON s.id = es.secret_id \
+             WHERE es.entry_id = $1",
+        )
+        .bind(entry_id)
+        .fetch_all(&mut *tx)
+        .await?;
 
         for f in &existing_fields {
             if let Err(e) = db::snapshot_secret_history(
                 &mut tx,
                 db::SecretSnapshotParams {
-                    entry_id,
                     secret_id: f.id,
-                    entry_version: new_entry_version - 1,
-                    field_name: &f.field_name,
+                    name: &f.name,
                     encrypted: &f.encrypted,
                     action: "add",
                 },
@@ -335,21 +345,68 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
             }
         }
 
-    sqlx::query("DELETE FROM secrets WHERE entry_id = $1")
-        .bind(entry_id)
-        .execute(&mut *tx)
-        .await?;
+        sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1")
+            .bind(entry_id)
+            .execute(&mut *tx)
+            .await?;
 
+        sqlx::query(
+            "DELETE FROM secrets s \
+             WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
+        )
+        .execute(&mut *tx)
+        .await?;
+    }
+
+    for (field_name, field_value) in &flat_fields {
+        let encrypted = crypto::encrypt_json(master_key, field_value)?;
+        let secret_id: Uuid = sqlx::query_scalar(
+            "INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
+        )
+        .bind(params.user_id)
+        .bind(field_name)
+        .bind(infer_secret_type(field_name))
+        .bind(&encrypted)
+        .fetch_one(&mut *tx)
+        .await?;
+        sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
+            .bind(entry_id)
+            .bind(secret_id)
+            .execute(&mut *tx)
+            .await?;
     }
 
-    let flat_fields = flatten_json_fields("", &secret_json);
-    for (field_name, field_value) in &flat_fields {
-        let encrypted = crypto::encrypt_json(master_key, field_value)?;
-        sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)")
-            .bind(entry_id)
-            .bind(field_name)
-            .bind(&encrypted)
-            .execute(&mut *tx)
-            .await?;
+    for link_name in &link_secret_names {
+        let secret_ids: Vec<Uuid> = if let Some(uid) = params.user_id {
+            sqlx::query_scalar("SELECT id FROM secrets WHERE user_id = $1 AND name = $2")
+                .bind(uid)
+                .bind(link_name)
+                .fetch_all(&mut *tx)
+                .await?
+        } else {
+            sqlx::query_scalar("SELECT id FROM secrets WHERE user_id IS NULL AND name = $1")
+                .bind(link_name)
+                .fetch_all(&mut *tx)
+                .await?
+        };
+
+        match secret_ids.len() {
+            0 => anyhow::bail!("Not found: secret named '{}'", link_name),
+            1 => {
+                sqlx::query(
+                    "INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
+                )
+                .bind(entry_id)
+                .bind(secret_ids[0])
+                .execute(&mut *tx)
+                .await?;
+            }
+            n => anyhow::bail!(
+                "Ambiguous: {} secrets named '{}' found. Please deduplicate names first.",
+                n,
+                link_name
+            ),
+        }
     }
 
     crate::audit::log_tx(
@@ -379,9 +436,56 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
     })
 }
 
+pub(crate) fn infer_secret_type(name: &str) -> &'static str {
+    match name {
+        "ssh_key" => "pem",
+        "password" => "password",
+        "phone" | "phone_2" => "phone",
+        "webhook_url" | "address" => "url",
+        "access_key_id"
+        | "access_key_secret"
+        | "global_api_key"
+        | "api_key"
+        | "secret_key"
+        | "personal_access_token"
+        | "runner_token"
+        | "GOOGLE_CLIENT_ID"
+        | "GOOGLE_CLIENT_SECRET" => "token",
+        _ => "text",
+    }
+}
+
+fn validate_link_secret_names(
+    link_secret_names: &[String],
+    new_secret_names: &BTreeSet<String>,
+) -> Result<Vec<String>> {
+    let mut deduped = Vec::new();
+    let mut seen = HashSet::new();
+
+    for raw in link_secret_names {
+        let trimmed = raw.trim();
+        if trimmed.is_empty() {
+            anyhow::bail!("link_secret_names contains an empty name");
+        }
+        if new_secret_names.contains(trimmed) {
+            anyhow::bail!(
+                "Conflict: secret '{}' is provided both in secrets/secrets_obj and link_secret_names",
+                trimmed
+            );
+        }
+        if seen.insert(trimmed.to_string()) {
+            deduped.push(trimmed.to_string());
+        }
+    }
+
+    Ok(deduped)
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
+    use sqlx::PgPool;
+    use std::collections::BTreeSet;
+
     #[test]
     fn parse_nested_file_shorthand() {
@@ -410,4 +514,199 @@ mod tests {
|
|||||||
assert_eq!(fields[1].0, "credentials.type");
|
assert_eq!(fields[1].0, "credentials.type");
|
||||||
assert_eq!(fields[2].0, "username");
|
assert_eq!(fields[2].0, "username");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn validate_link_secret_names_conflict_with_new_secret() {
|
||||||
|
let mut new_names = BTreeSet::new();
|
||||||
|
new_names.insert("password".to_string());
|
||||||
|
let err = validate_link_secret_names(&[String::from("password")], &new_names)
|
||||||
|
.expect_err("must fail on overlap");
|
||||||
|
assert!(
|
||||||
|
err.to_string()
|
||||||
|
.contains("provided both in secrets/secrets_obj and link_secret_names")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn validate_link_secret_names_dedup_and_trim() {
|
||||||
|
let names = vec![
|
||||||
|
" shared_key ".to_string(),
|
||||||
|
"shared_key".to_string(),
|
||||||
|
"runner_token".to_string(),
|
||||||
|
];
|
||||||
|
let deduped = validate_link_secret_names(&names, &BTreeSet::new()).unwrap();
|
||||||
|
assert_eq!(deduped, vec!["shared_key", "runner_token"]);
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn maybe_test_pool() -> Option<PgPool> {
|
||||||
|
let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
|
||||||
|
eprintln!("skip add linkage tests: SECRETS_DATABASE_URL is not set");
|
||||||
|
return None;
|
||||||
|
};
|
||||||
|
let Ok(pool) = PgPool::connect(&url).await else {
|
||||||
|
eprintln!("skip add linkage tests: cannot connect to database");
|
||||||
|
return None;
|
||||||
|
};
|
||||||
|
if let Err(e) = crate::db::migrate(&pool).await {
|
||||||
|
eprintln!("skip add linkage tests: migrate failed: {e}");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some(pool)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn cleanup_test_rows(pool: &PgPool, marker: &str) -> Result<()> {
|
||||||
|
sqlx::query(
|
||||||
|
"DELETE FROM entries WHERE user_id IS NULL AND (name LIKE $1 OR folder LIKE $1)",
|
||||||
|
)
|
||||||
|
.bind(format!("%{marker}%"))
|
||||||
|
.execute(pool)
|
||||||
|
.await?;
|
||||||
|
sqlx::query(
|
||||||
|
"DELETE FROM secrets WHERE user_id IS NULL AND name LIKE $1 \
|
||||||
|
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = secrets.id)",
|
||||||
|
)
|
||||||
|
.bind(format!("%{marker}%"))
|
||||||
|
.execute(pool)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn add_links_existing_secret_by_unique_name() -> Result<()> {
|
||||||
|
let Some(pool) = maybe_test_pool().await else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||||
|
let marker = format!("link_unique_{}", &suffix[..8]);
|
||||||
|
let secret_name = format!("{}_secret", marker);
|
||||||
|
let entry_name = format!("{}_entry", marker);
|
||||||
|
|
||||||
|
cleanup_test_rows(&pool, &marker).await?;
|
||||||
|
|
||||||
|
let secret_id: Uuid = sqlx::query_scalar(
|
||||||
|
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2) RETURNING id",
|
||||||
|
)
|
||||||
|
.bind(&secret_name)
|
||||||
|
.bind(vec![1_u8, 2, 3])
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
run(
|
||||||
|
&pool,
|
||||||
|
AddParams {
|
||||||
|
name: &entry_name,
|
||||||
|
folder: &marker,
|
||||||
|
entry_type: "service",
|
||||||
|
notes: "",
|
||||||
|
tags: &[],
|
||||||
|
meta_entries: &[],
|
||||||
|
secret_entries: &[],
|
||||||
|
link_secret_names: std::slice::from_ref(&secret_name),
|
||||||
|
user_id: None,
|
||||||
|
},
|
||||||
|
&[0_u8; 32],
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let linked: bool = sqlx::query_scalar(
|
||||||
|
"SELECT EXISTS( \
|
||||||
|
SELECT 1 FROM entry_secrets es \
|
||||||
|
JOIN entries e ON e.id = es.entry_id \
|
||||||
|
WHERE e.user_id IS NULL AND e.name = $1 AND es.secret_id = $2 \
|
||||||
|
)",
|
||||||
|
)
|
||||||
|
.bind(&entry_name)
|
||||||
|
.bind(secret_id)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
assert!(linked);
|
||||||
|
|
||||||
|
cleanup_test_rows(&pool, &marker).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn add_link_secret_name_not_found_fails() -> Result<()> {
|
||||||
|
let Some(pool) = maybe_test_pool().await else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||||
|
let marker = format!("link_missing_{}", &suffix[..8]);
|
||||||
|
let secret_name = format!("{}_secret", marker);
|
||||||
|
let entry_name = format!("{}_entry", marker);
|
||||||
|
|
||||||
|
cleanup_test_rows(&pool, &marker).await?;
|
||||||
|
|
||||||
|
let err = run(
|
||||||
|
&pool,
|
||||||
|
AddParams {
|
||||||
|
name: &entry_name,
|
||||||
|
folder: &marker,
|
||||||
|
entry_type: "service",
|
||||||
|
notes: "",
|
||||||
|
tags: &[],
|
||||||
|
meta_entries: &[],
|
||||||
|
secret_entries: &[],
|
||||||
|
link_secret_names: std::slice::from_ref(&secret_name),
|
||||||
|
user_id: None,
|
||||||
|
},
|
||||||
|
&[0_u8; 32],
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect_err("must fail when linked secret is not found");
|
||||||
|
assert!(err.to_string().contains("Not found: secret named"));
|
||||||
|
|
||||||
|
cleanup_test_rows(&pool, &marker).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn add_link_secret_name_ambiguous_fails() -> Result<()> {
|
||||||
|
let Some(pool) = maybe_test_pool().await else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||||
|
let marker = format!("link_amb_{}", &suffix[..8]);
|
||||||
|
let secret_name = format!("{}_dup_secret", marker);
|
||||||
|
let entry_name = format!("{}_entry", marker);
|
||||||
|
|
||||||
|
cleanup_test_rows(&pool, &marker).await?;
|
||||||
|
|
||||||
|
sqlx::query(
|
||||||
|
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
|
||||||
|
)
|
||||||
|
.bind(&secret_name)
|
||||||
|
.bind(vec![1_u8])
|
||||||
|
.execute(&pool)
|
||||||
|
.await?;
|
||||||
|
sqlx::query(
|
||||||
|
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
|
||||||
|
)
|
||||||
|
.bind(&secret_name)
|
||||||
|
.bind(vec![2_u8])
|
||||||
|
.execute(&pool)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let err = run(
|
||||||
|
&pool,
|
||||||
|
AddParams {
|
||||||
|
name: &entry_name,
|
||||||
|
folder: &marker,
|
||||||
|
entry_type: "service",
|
||||||
|
notes: "",
|
||||||
|
tags: &[],
|
||||||
|
meta_entries: &[],
|
||||||
|
secret_entries: &[],
|
||||||
|
link_secret_names: std::slice::from_ref(&secret_name),
|
||||||
|
user_id: None,
|
||||||
|
},
|
||||||
|
&[0_u8; 32],
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.expect_err("must fail on ambiguous linked secret name");
|
||||||
|
assert!(err.to_string().contains("Ambiguous:"));
|
||||||
|
|
||||||
|
cleanup_test_rows(&pool, &marker).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ pub struct DeletedEntry {
 #[derive(Debug, serde::Serialize)]
 pub struct DeleteResult {
     pub deleted: Vec<DeletedEntry>,
+    pub migrated: Vec<String>,
     pub dry_run: bool,
 }
 
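Given the struct above, a hedged sketch of what a serialized `DeleteResult` could look like for a dry-run delete of a shared key; the entry and referrer names are invented, and the exact field names in the serialized form are assumed from the struct definitions in this diff (the `migrated` strings follow the `folder/name (type)` label format used by `ref_label`):

```rust
use serde_json::json;

fn main() {
    // Hypothetical dry-run delete result for a key still referenced by two entries.
    let result = json!({
        "deleted": [{ "name": "shared-key", "folder": "kfolder", "entry_type": "key" }],
        "migrated": ["afolder/srv-a (server)", "bfolder/srv-b (server)"],
        "dry_run": true
    });
    println!("{result:#}");
}
```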
@@ -31,7 +32,175 @@ pub struct DeleteParams<'a> {
     pub user_id: Option<Uuid>,
 }
 
-/// Delete a single entry by id (multi-tenant: `user_id` must match). Cascades `secrets` via FK.
+#[derive(Debug, sqlx::FromRow)]
+struct KeyReferrer {
+    id: Uuid,
+    folder: String,
+    #[sqlx(rename = "type")]
+    entry_type: String,
+    name: String,
+}
+
+fn ref_label(r: &KeyReferrer) -> String {
+    format!("{}/{} ({})", r.folder, r.name, r.entry_type)
+}
+
+fn ref_path(r: &KeyReferrer) -> String {
+    format!("{}/{}", r.folder, r.name)
+}
+
+async fn fetch_key_referrers_pool(
+    pool: &PgPool,
+    key_entry_id: Uuid,
+    key_folder: &str,
+    key_name: &str,
+    user_id: Option<Uuid>,
+) -> Result<Vec<KeyReferrer>> {
+    let qualified = format!("{}/{}", key_folder, key_name);
+    let refs: Vec<KeyReferrer> = if let Some(uid) = user_id {
+        sqlx::query_as(
+            "SELECT id, folder, type, name FROM entries \
+             WHERE user_id = $1 AND id <> $2 \
+             AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
+             ORDER BY folder, type, name",
+        )
+        .bind(uid)
+        .bind(key_entry_id)
+        .bind(key_name)
+        .bind(&qualified)
+        .fetch_all(pool)
+        .await?
+    } else {
+        sqlx::query_as(
+            "SELECT id, folder, type, name FROM entries \
+             WHERE user_id IS NULL AND id <> $1 \
+             AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
+             ORDER BY folder, type, name",
+        )
+        .bind(key_entry_id)
+        .bind(key_name)
+        .bind(&qualified)
+        .fetch_all(pool)
+        .await?
+    };
+    Ok(refs)
+}
+
+async fn migrate_key_refs_if_needed(
+    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
+    key_row: &EntryRow,
+    key_name: &str,
+    user_id: Option<Uuid>,
+    dry_run: bool,
+) -> Result<Vec<String>> {
+    let qualified = format!("{}/{}", key_row.folder, key_name);
+    let refs: Vec<KeyReferrer> = if let Some(uid) = user_id {
+        sqlx::query_as(
+            "SELECT id, folder, type, name FROM entries \
+             WHERE user_id = $1 AND id <> $2 \
+             AND (metadata->>'key_ref' = $3 OR metadata->>'key_ref' = $4) \
+             ORDER BY folder, type, name",
+        )
+        .bind(uid)
+        .bind(key_row.id)
+        .bind(key_name)
+        .bind(&qualified)
+        .fetch_all(&mut **tx)
+        .await?
+    } else {
+        sqlx::query_as(
+            "SELECT id, folder, type, name FROM entries \
+             WHERE user_id IS NULL AND id <> $1 \
+             AND (metadata->>'key_ref' = $2 OR metadata->>'key_ref' = $3) \
+             ORDER BY folder, type, name",
+        )
+        .bind(key_row.id)
+        .bind(key_name)
+        .bind(&qualified)
+        .fetch_all(&mut **tx)
+        .await?
+    };
+
+    if refs.is_empty() {
+        return Ok(vec![]);
+    }
+    if dry_run {
+        return Ok(refs.iter().map(ref_label).collect());
+    }
+
+    let owner = &refs[0];
+    let owner_path = ref_path(owner);
+    let key_fields: Vec<SecretFieldRow> = sqlx::query_as(
+        "SELECT s.id, s.name, s.encrypted \
+         FROM entry_secrets es \
+         JOIN secrets s ON s.id = es.secret_id \
+         WHERE es.entry_id = $1",
+    )
+    .bind(key_row.id)
+    .fetch_all(&mut **tx)
+    .await?;
+
+    for f in &key_fields {
+        sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING")
+            .bind(owner.id)
+            .bind(f.id)
+            .execute(&mut **tx)
+            .await?;
+    }
+
+    sqlx::query(
+        "UPDATE entries SET metadata = metadata - 'key_ref', \
+         version = version + 1, updated_at = NOW() WHERE id = $1",
+    )
+    .bind(owner.id)
+    .execute(&mut **tx)
+    .await?;
+
+    crate::audit::log_tx(
+        tx,
+        user_id,
+        "key_migrate",
+        &owner.folder,
+        &owner.entry_type,
+        &owner.name,
+        json!({
+            "from_key": format!("{}/{}", key_row.folder, key_name),
+            "role": "new_owner",
+            "redirect_target": owner_path,
+        }),
+    )
+    .await;
+
+    for r in refs.iter().skip(1) {
+        sqlx::query(
+            "UPDATE entries SET metadata = jsonb_set(metadata, '{key_ref}', to_jsonb($2::text), true), \
+             version = version + 1, updated_at = NOW() WHERE id = $1",
+        )
+        .bind(r.id)
+        .bind(&owner_path)
+        .execute(&mut **tx)
+        .await?;
+
+        crate::audit::log_tx(
+            tx,
+            user_id,
+            "key_migrate",
+            &r.folder,
+            &r.entry_type,
+            &r.name,
+            json!({
+                "from_key": format!("{}/{}", key_row.folder, key_name),
+                "role": "redirected_ref",
+                "redirect_to": owner_path,
+            }),
+        )
+        .await;
+    }
+
+    Ok(refs.iter().map(ref_label).collect())
+}
+
+/// Delete a single entry by id (multi-tenant: `user_id` must match).
 pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> {
     let mut tx = pool.begin().await?;
     let row: Option<EntryWriteRow> = sqlx::query_as(
@@ -55,6 +224,8 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
     let entry_type = row.entry_type.clone();
     let name = row.name.clone();
     let entry_row: EntryRow = (&row).into();
+    let migrated =
+        migrate_key_refs_if_needed(&mut tx, &entry_row, &name, Some(user_id), false).await?;
+
     snapshot_and_delete(
         &mut tx,
@@ -83,6 +254,7 @@ pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Resul
             folder,
             entry_type,
         }],
+        migrated,
         dry_run: false,
     })
 }
@@ -122,6 +294,7 @@ async fn delete_one(
|
|||||||
// - 2+ matches → disambiguation error (same as non-dry-run)
|
// - 2+ matches → disambiguation error (same as non-dry-run)
|
||||||
#[derive(sqlx::FromRow)]
|
#[derive(sqlx::FromRow)]
|
||||||
struct DryRunRow {
|
struct DryRunRow {
|
||||||
|
id: Uuid,
|
||||||
folder: String,
|
folder: String,
|
||||||
#[sqlx(rename = "type")]
|
#[sqlx(rename = "type")]
|
||||||
entry_type: String,
|
entry_type: String,
|
||||||
@@ -130,7 +303,7 @@ async fn delete_one(
|
|||||||
let rows: Vec<DryRunRow> = if let Some(uid) = user_id {
|
let rows: Vec<DryRunRow> = if let Some(uid) = user_id {
|
||||||
if let Some(f) = folder {
|
if let Some(f) = folder {
|
||||||
sqlx::query_as(
|
sqlx::query_as(
|
||||||
"SELECT folder, type FROM entries WHERE user_id = $1 AND folder = $2 AND name = $3",
|
"SELECT id, folder, type FROM entries WHERE user_id = $1 AND folder = $2 AND name = $3",
|
||||||
)
|
)
|
||||||
.bind(uid)
|
.bind(uid)
|
||||||
.bind(f)
|
.bind(f)
|
||||||
@@ -138,40 +311,48 @@ async fn delete_one(
|
|||||||
.fetch_all(pool)
|
.fetch_all(pool)
|
||||||
.await?
|
.await?
|
||||||
} else {
|
} else {
|
||||||
sqlx::query_as("SELECT folder, type FROM entries WHERE user_id = $1 AND name = $2")
|
sqlx::query_as(
|
||||||
.bind(uid)
|
"SELECT id, folder, type FROM entries WHERE user_id = $1 AND name = $2",
|
||||||
.bind(name)
|
)
|
||||||
.fetch_all(pool)
|
.bind(uid)
|
||||||
.await?
|
.bind(name)
|
||||||
|
.fetch_all(pool)
|
||||||
|
.await?
|
||||||
}
|
}
|
||||||
} else if let Some(f) = folder {
|
} else if let Some(f) = folder {
|
||||||
sqlx::query_as(
|
sqlx::query_as(
|
||||||
"SELECT folder, type FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2",
|
"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2",
|
||||||
)
|
)
|
||||||
.bind(f)
|
.bind(f)
|
||||||
.bind(name)
|
.bind(name)
|
||||||
.fetch_all(pool)
|
.fetch_all(pool)
|
||||||
.await?
|
.await?
|
||||||
} else {
|
} else {
|
||||||
sqlx::query_as("SELECT folder, type FROM entries WHERE user_id IS NULL AND name = $1")
|
sqlx::query_as(
|
||||||
.bind(name)
|
"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND name = $1",
|
||||||
.fetch_all(pool)
|
)
|
||||||
.await?
|
.bind(name)
|
||||||
|
.fetch_all(pool)
|
||||||
|
.await?
|
||||||
};
|
};
|
||||||
|
|
||||||
return match rows.len() {
|
return match rows.len() {
|
||||||
0 => Ok(DeleteResult {
|
0 => Ok(DeleteResult {
|
||||||
deleted: vec![],
|
deleted: vec![],
|
||||||
|
migrated: vec![],
|
||||||
dry_run: true,
|
dry_run: true,
|
||||||
}),
|
}),
|
||||||
1 => {
|
1 => {
|
||||||
let row = rows.into_iter().next().unwrap();
|
let row = rows.into_iter().next().unwrap();
|
||||||
|
let refs =
|
||||||
|
fetch_key_referrers_pool(pool, row.id, &row.folder, name, user_id).await?;
|
||||||
Ok(DeleteResult {
|
Ok(DeleteResult {
|
||||||
deleted: vec![DeletedEntry {
|
deleted: vec![DeletedEntry {
|
||||||
name: name.to_string(),
|
name: name.to_string(),
|
||||||
folder: row.folder,
|
folder: row.folder,
|
||||||
entry_type: row.entry_type,
|
entry_type: row.entry_type,
|
||||||
}],
|
}],
|
||||||
|
migrated: refs.iter().map(ref_label).collect(),
|
||||||
dry_run: true,
|
dry_run: true,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -236,6 +417,7 @@ async fn delete_one(
|
|||||||
tx.rollback().await?;
|
tx.rollback().await?;
|
||||||
return Ok(DeleteResult {
|
return Ok(DeleteResult {
|
||||||
deleted: vec![],
|
deleted: vec![],
|
||||||
|
migrated: vec![],
|
||||||
dry_run: false,
|
dry_run: false,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -255,6 +437,7 @@ async fn delete_one(
|
|||||||
|
|
||||||
let folder = row.folder.clone();
|
let folder = row.folder.clone();
|
||||||
let entry_type = row.entry_type.clone();
|
let entry_type = row.entry_type.clone();
|
||||||
|
let migrated = migrate_key_refs_if_needed(&mut tx, &row, name, user_id, false).await?;
|
||||||
snapshot_and_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?;
|
snapshot_and_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?;
|
||||||
crate::audit::log_tx(
|
crate::audit::log_tx(
|
||||||
&mut tx,
|
&mut tx,
|
||||||
@@ -274,6 +457,7 @@ async fn delete_one(
|
|||||||
folder,
|
folder,
|
||||||
entry_type,
|
entry_type,
|
||||||
}],
|
}],
|
||||||
|
migrated,
|
||||||
dry_run: false,
|
dry_run: false,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -334,6 +518,12 @@ async fn delete_bulk(
|
|||||||
let rows = q.fetch_all(pool).await?;
|
let rows = q.fetch_all(pool).await?;
|
||||||
|
|
||||||
if dry_run {
|
if dry_run {
|
||||||
|
let mut migrated: Vec<String> = Vec::new();
|
||||||
|
for row in &rows {
|
||||||
|
let refs =
|
||||||
|
fetch_key_referrers_pool(pool, row.id, &row.folder, &row.name, user_id).await?;
|
||||||
|
migrated.extend(refs.iter().map(ref_label));
|
||||||
|
}
|
||||||
let deleted = rows
|
let deleted = rows
|
||||||
.iter()
|
.iter()
|
||||||
.map(|r| DeletedEntry {
|
.map(|r| DeletedEntry {
|
||||||
@@ -344,11 +534,13 @@ async fn delete_bulk(
|
|||||||
.collect();
|
.collect();
|
||||||
return Ok(DeleteResult {
|
return Ok(DeleteResult {
|
||||||
deleted,
|
deleted,
|
||||||
|
migrated,
|
||||||
dry_run: true,
|
dry_run: true,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut deleted = Vec::with_capacity(rows.len());
|
let mut deleted = Vec::with_capacity(rows.len());
|
||||||
|
let mut migrated: Vec<String> = Vec::new();
|
||||||
for row in &rows {
|
for row in &rows {
|
||||||
let entry_row = EntryRow {
|
let entry_row = EntryRow {
|
||||||
id: row.id,
|
id: row.id,
|
||||||
@@ -360,6 +552,8 @@ async fn delete_bulk(
|
|||||||
notes: row.notes.clone(),
|
notes: row.notes.clone(),
|
||||||
};
|
};
|
||||||
let mut tx = pool.begin().await?;
|
let mut tx = pool.begin().await?;
|
||||||
|
let m = migrate_key_refs_if_needed(&mut tx, &entry_row, &row.name, user_id, false).await?;
|
||||||
|
migrated.extend(m);
|
||||||
snapshot_and_delete(
|
snapshot_and_delete(
|
||||||
&mut tx,
|
&mut tx,
|
||||||
&row.folder,
|
&row.folder,
|
||||||
@@ -389,6 +583,7 @@ async fn delete_bulk(
|
|||||||
|
|
||||||
Ok(DeleteResult {
|
Ok(DeleteResult {
|
||||||
deleted,
|
deleted,
|
||||||
|
migrated,
|
||||||
dry_run: false,
|
dry_run: false,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -420,20 +615,22 @@ async fn snapshot_and_delete(
         tracing::warn!(error = %e, "failed to snapshot entry history before delete");
     }
 
-    let fields: Vec<SecretFieldRow> =
-        sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1")
-            .bind(row.id)
-            .fetch_all(&mut **tx)
-            .await?;
+    let fields: Vec<SecretFieldRow> = sqlx::query_as(
+        "SELECT s.id, s.name, s.encrypted \
+         FROM entry_secrets es \
+         JOIN secrets s ON s.id = es.secret_id \
+         WHERE es.entry_id = $1",
+    )
+    .bind(row.id)
+    .fetch_all(&mut **tx)
+    .await?;
 
     for f in &fields {
         if let Err(e) = db::snapshot_secret_history(
             tx,
             db::SecretSnapshotParams {
-                entry_id: row.id,
                 secret_id: f.id,
-                entry_version: row.version,
-                field_name: &f.field_name,
+                name: &f.name,
                 encrypted: &f.encrypted,
                 action: "delete",
             },
@@ -449,5 +646,293 @@ async fn snapshot_and_delete(
     .execute(&mut **tx)
     .await?;
 
+    sqlx::query(
+        "DELETE FROM secrets s \
+         WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
+    )
+    .execute(&mut **tx)
+    .await?;
+
     Ok(())
 }
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use serde_json::json;
|
||||||
|
|
||||||
|
async fn maybe_test_pool() -> Option<PgPool> {
|
||||||
|
let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
|
||||||
|
eprintln!("skip delete migration tests: SECRETS_DATABASE_URL is not set");
|
||||||
|
return None;
|
||||||
|
};
|
||||||
|
let Ok(pool) = PgPool::connect(&url).await else {
|
||||||
|
eprintln!("skip delete migration tests: cannot connect to database");
|
||||||
|
return None;
|
||||||
|
};
|
||||||
|
if let Err(e) = crate::db::migrate(&pool).await {
|
||||||
|
eprintln!("skip delete migration tests: migrate failed: {e}");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some(pool)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn insert_entry(
|
||||||
|
pool: &PgPool,
|
||||||
|
id: Uuid,
|
||||||
|
user_id: Uuid,
|
||||||
|
folder: &str,
|
||||||
|
entry_type: &str,
|
||||||
|
name: &str,
|
||||||
|
metadata: serde_json::Value,
|
||||||
|
) -> Result<()> {
|
||||||
|
sqlx::query(
|
||||||
|
"INSERT INTO entries (id, user_id, folder, type, name, notes, tags, metadata, version) \
|
||||||
|
VALUES ($1, $2, $3, $4, $5, '', ARRAY[]::text[], $6, 1)",
|
||||||
|
)
|
||||||
|
.bind(id)
|
||||||
|
.bind(user_id)
|
||||||
|
.bind(folder)
|
||||||
|
.bind(entry_type)
|
||||||
|
.bind(name)
|
||||||
|
.bind(metadata)
|
||||||
|
.execute(pool)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn insert_secret_for_entry(
|
||||||
|
pool: &PgPool,
|
||||||
|
user_id: Uuid,
|
||||||
|
entry_id: Uuid,
|
||||||
|
name: &str,
|
||||||
|
secret_type: &str,
|
||||||
|
encrypted: Vec<u8>,
|
||||||
|
) -> Result<()> {
|
||||||
|
let secret_id: Uuid = sqlx::query_scalar(
|
||||||
|
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
|
||||||
|
)
|
||||||
|
.bind(user_id)
|
||||||
|
.bind(name)
|
||||||
|
.bind(secret_type)
|
||||||
|
.bind(encrypted)
|
||||||
|
.fetch_one(pool)
|
||||||
|
.await?;
|
||||||
|
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
|
||||||
|
.bind(entry_id)
|
||||||
|
.bind(secret_id)
|
||||||
|
.execute(pool)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn delete_shared_key_dry_run_reports_migration_without_writes() -> Result<()> {
|
||||||
|
let Some(pool) = maybe_test_pool().await else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let user_id = Uuid::from_u128(rand::random());
|
||||||
|
let key_id = Uuid::from_u128(rand::random());
|
||||||
|
let ref_a = Uuid::from_u128(rand::random());
|
||||||
|
let ref_b = Uuid::from_u128(rand::random());
|
||||||
|
|
||||||
|
insert_entry(
|
||||||
|
&pool,
|
||||||
|
key_id,
|
||||||
|
user_id,
|
||||||
|
"kfolder",
|
||||||
|
"key",
|
||||||
|
"shared-key",
|
||||||
|
json!({}),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
insert_secret_for_entry(&pool, user_id, key_id, "pem", "pem", vec![1_u8, 2, 3]).await?;
|
||||||
|
|
||||||
|
insert_entry(
|
||||||
|
&pool,
|
||||||
|
ref_a,
|
||||||
|
user_id,
|
||||||
|
"afolder",
|
||||||
|
"server",
|
||||||
|
"srv-a",
|
||||||
|
json!({"key_ref":"kfolder/shared-key"}),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
insert_entry(
|
||||||
|
&pool,
|
||||||
|
ref_b,
|
||||||
|
user_id,
|
||||||
|
"bfolder",
|
||||||
|
"server",
|
||||||
|
"srv-b",
|
||||||
|
json!({"key_ref":"shared-key"}),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let result = run(
|
||||||
|
&pool,
|
||||||
|
DeleteParams {
|
||||||
|
name: Some("shared-key"),
|
||||||
|
folder: Some("kfolder"),
|
||||||
|
entry_type: None,
|
||||||
|
dry_run: true,
|
||||||
|
user_id: Some(user_id),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
assert!(result.dry_run);
|
||||||
|
assert_eq!(result.deleted.len(), 1);
|
||||||
|
assert_eq!(result.migrated.len(), 2);
|
||||||
|
|
||||||
|
let key_exists: bool = sqlx::query_scalar(
|
||||||
|
"SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
|
||||||
|
)
|
||||||
|
.bind(key_id)
|
||||||
|
.bind(user_id)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
assert!(key_exists);
|
||||||
|
|
||||||
|
let ref_a_key_ref: Option<String> =
|
||||||
|
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
|
||||||
|
.bind(ref_a)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
let ref_b_key_ref: Option<String> =
|
||||||
|
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
|
||||||
|
.bind(ref_b)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
assert_eq!(ref_a_key_ref.as_deref(), Some("kfolder/shared-key"));
|
||||||
|
assert_eq!(ref_b_key_ref.as_deref(), Some("shared-key"));
|
||||||
|
|
||||||
|
sqlx::query("DELETE FROM entries WHERE user_id = $1")
|
||||||
|
.bind(user_id)
|
||||||
|
.execute(&pool)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn delete_shared_key_auto_migrates_single_copy_and_redirects_refs() -> Result<()> {
|
||||||
|
let Some(pool) = maybe_test_pool().await else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let user_id = Uuid::from_u128(rand::random());
|
||||||
|
let key_id = Uuid::from_u128(rand::random());
|
||||||
|
let ref_a = Uuid::from_u128(rand::random());
|
||||||
|
let ref_b = Uuid::from_u128(rand::random());
|
||||||
|
let ref_c = Uuid::from_u128(rand::random());
|
||||||
|
|
||||||
|
insert_entry(
|
||||||
|
&pool,
|
||||||
|
key_id,
|
||||||
|
user_id,
|
||||||
|
"kfolder",
|
||||||
|
"key",
|
||||||
|
"shared-key",
|
||||||
|
json!({}),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
insert_secret_for_entry(&pool, user_id, key_id, "pem", "pem", vec![7_u8, 8, 9]).await?;
|
||||||
|
|
||||||
|
// owner candidate (sorted first by folder)
|
||||||
|
insert_entry(
|
||||||
|
&pool,
|
||||||
|
ref_a,
|
||||||
|
user_id,
|
||||||
|
"afolder",
|
||||||
|
"server",
|
||||||
|
"srv-a",
|
||||||
|
json!({"key_ref":"kfolder/shared-key"}),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
insert_entry(
|
||||||
|
&pool,
|
||||||
|
ref_b,
|
||||||
|
user_id,
|
||||||
|
"bfolder",
|
||||||
|
"server",
|
||||||
|
"srv-b",
|
||||||
|
json!({"key_ref":"shared-key"}),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
insert_entry(
|
||||||
|
&pool,
|
||||||
|
ref_c,
|
||||||
|
user_id,
|
||||||
|
"cfolder",
|
||||||
|
"service",
|
||||||
|
"svc-c",
|
||||||
|
json!({"key_ref":"kfolder/shared-key"}),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let result = run(
|
||||||
|
&pool,
|
||||||
|
DeleteParams {
|
||||||
|
name: Some("shared-key"),
|
||||||
|
folder: Some("kfolder"),
|
||||||
|
entry_type: None,
|
||||||
|
dry_run: false,
|
||||||
|
user_id: Some(user_id),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
assert!(!result.dry_run);
|
||||||
|
assert_eq!(result.deleted.len(), 1);
|
||||||
|
assert_eq!(result.migrated.len(), 3);
|
||||||
|
|
||||||
|
let key_exists: bool = sqlx::query_scalar(
|
||||||
|
"SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1 AND user_id = $2)",
|
||||||
|
)
|
||||||
|
.bind(key_id)
|
||||||
|
.bind(user_id)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
assert!(!key_exists);
|
||||||
|
|
||||||
|
let owner_key_ref: Option<String> =
|
||||||
|
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
|
||||||
|
.bind(ref_a)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
let ref_b_key_ref: Option<String> =
|
||||||
|
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
|
||||||
|
.bind(ref_b)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
let ref_c_key_ref: Option<String> =
|
||||||
|
sqlx::query_scalar("SELECT metadata->>'key_ref' FROM entries WHERE id = $1")
|
||||||
|
.bind(ref_c)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
assert_eq!(owner_key_ref, None);
|
||||||
|
assert_eq!(ref_b_key_ref.as_deref(), Some("afolder/srv-a"));
|
||||||
|
assert_eq!(ref_c_key_ref.as_deref(), Some("afolder/srv-a"));
|
||||||
|
|
||||||
|
let owner_has_copied: bool = sqlx::query_scalar(
|
||||||
|
"SELECT EXISTS( \
|
||||||
|
SELECT 1 \
|
||||||
|
FROM entry_secrets es \
|
||||||
|
JOIN secrets s ON s.id = es.secret_id \
|
||||||
|
WHERE es.entry_id = $1 AND s.name = 'pem' \
|
||||||
|
)",
|
||||||
|
)
|
||||||
|
.bind(ref_a)
|
||||||
|
.fetch_one(&pool)
|
||||||
|
.await?;
|
||||||
|
assert!(owner_has_copied);
|
||||||
|
|
||||||
|
sqlx::query("DELETE FROM entries WHERE user_id = $1")
|
||||||
|
.bind(user_id)
|
||||||
|
.execute(&pool)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ async fn build_entry_env_map(
     } else {
         all_fields
             .iter()
-            .filter(|f| only_fields.contains(&f.field_name))
+            .filter(|f| only_fields.contains(&f.name))
             .collect()
     };
 
@@ -63,7 +63,7 @@ async fn build_entry_env_map(
         let key = format!(
             "{}_{}",
             effective_prefix,
-            f.field_name.to_uppercase().replace(['-', '.'], "_")
+            f.name.to_uppercase().replace(['-', '.'], "_")
         );
         map.insert(key, json_to_env_string(&decrypted));
     }
@@ -75,16 +75,8 @@ async fn build_entry_env_map(
         } else {
             (None, key_ref)
         };
-        let key_entries = fetch_entries(
-            pool,
-            ref_folder,
-            Some("key"),
-            Some(ref_name),
-            &[],
-            None,
-            user_id,
-        )
-        .await?;
+        let key_entries =
+            fetch_entries(pool, ref_folder, None, Some(ref_name), &[], None, user_id).await?;
 
         if key_entries.len() > 1 {
             anyhow::bail!(
@@ -105,7 +97,7 @@ async fn build_entry_env_map(
         let key_var = format!(
             "{}_{}",
             key_prefix,
-            f.field_name.to_uppercase().replace(['-', '.'], "_")
+            f.name.to_uppercase().replace(['-', '.'], "_")
         );
         map.insert(key_var, json_to_env_string(&decrypted));
     }
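The environment-variable keys produced above follow a fixed `{prefix}_{NAME}` shape; a standalone sketch of that transformation (the prefix value is illustrative, in the real code it comes from the entry's effective prefix):

```rust
// Field names are upper-cased and '-' / '.' are replaced with '_' before the prefix is applied,
// mirroring the format! call in build_entry_env_map.
fn env_key(prefix: &str, field_name: &str) -> String {
    format!("{}_{}", prefix, field_name.to_uppercase().replace(['-', '.'], "_"))
}

fn main() {
    assert_eq!(env_key("MYAPP", "api-key"), "MYAPP_API_KEY");
    assert_eq!(env_key("MYAPP", "credentials.type"), "MYAPP_CREDENTIALS_TYPE");
    println!("ok");
}
```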
@@ -55,7 +55,7 @@ pub async fn export(
     let mut map = BTreeMap::new();
     for f in fields {
         let decrypted = crypto::decrypt_json(mk, &f.encrypted)?;
-        map.insert(f.field_name.clone(), decrypted);
+        map.insert(f.name.clone(), decrypted);
     }
     Some(map)
 }
|
|||||||
@@ -25,7 +25,7 @@ pub async fn get_secret_field(
 
     let field = fields
         .iter()
-        .find(|f| f.field_name == field_name)
+        .find(|f| f.name == field_name)
         .ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?;
 
     crypto::decrypt_json(master_key, &field.encrypted)
@@ -49,7 +49,7 @@ pub async fn get_all_secrets(
     let mut map = HashMap::new();
     for f in fields {
         let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
-        map.insert(f.field_name.clone(), decrypted);
+        map.insert(f.name.clone(), decrypted);
     }
     Ok(map)
 }
@@ -72,7 +72,7 @@ pub async fn get_secret_field_by_id(
 
     let field = fields
         .iter()
-        .find(|f| f.field_name == field_name)
+        .find(|f| f.name == field_name)
         .ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?;
 
     crypto::decrypt_json(master_key, &field.encrypted)
@@ -98,7 +98,7 @@ pub async fn get_all_secrets_by_id(
     let mut map = HashMap::new();
     for f in fields {
         let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
-        map.insert(f.field_name.clone(), decrypted);
+        map.insert(f.name.clone(), decrypted);
     }
     Ok(map)
 }
|
|||||||
@@ -85,6 +85,7 @@ pub async fn run(
             tags: &entry.tags,
             meta_entries: &meta_entries,
             secret_entries: &secret_entries,
+            link_secret_names: &[],
             user_id: params.user_id,
         },
         master_key,
|
|||||||
@@ -3,7 +3,6 @@ use serde_json::Value;
 use sqlx::PgPool;
 use uuid::Uuid;

-use crate::crypto;
 use crate::db;

 #[derive(Debug, serde::Serialize)]
@@ -27,7 +26,6 @@ pub async fn run(
 ) -> Result<RollbackResult> {
 #[derive(sqlx::FromRow)]
 struct EntryHistoryRow {
-entry_id: Uuid,
 folder: String,
 #[sqlx(rename = "type")]
 entry_type: String,
@@ -122,7 +120,7 @@ pub async fn run(

 let snap: Option<EntryHistoryRow> = if let Some(ver) = to_version {
 sqlx::query_as(
-"SELECT entry_id, folder, type, version, action, tags, metadata \
+"SELECT folder, type, version, action, tags, metadata \
 FROM entries_history \
 WHERE entry_id = $1 AND version = $2 ORDER BY id DESC LIMIT 1",
 )
@@ -132,7 +130,7 @@ pub async fn run(
 .await?
 } else {
 sqlx::query_as(
-"SELECT entry_id, folder, type, version, action, tags, metadata \
+"SELECT folder, type, version, action, tags, metadata \
 FROM entries_history \
 WHERE entry_id = $1 ORDER BY id DESC LIMIT 1",
 )
@@ -151,33 +149,7 @@ pub async fn run(
 )
 })?;

-#[derive(sqlx::FromRow)]
-struct SecretHistoryRow {
-field_name: String,
-encrypted: Vec<u8>,
-action: String,
-}
-
-let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as(
-"SELECT field_name, encrypted, action FROM secrets_history \
-WHERE entry_id = $1 AND entry_version = $2 ORDER BY field_name",
-)
-.bind(snap.entry_id)
-.bind(snap.version)
-.fetch_all(pool)
-.await?;
-
-for f in &field_snaps {
-if f.action != "delete" && !f.encrypted.is_empty() {
-crypto::decrypt_json(master_key, &f.encrypted).map_err(|e| {
-anyhow::anyhow!(
-"Cannot decrypt snapshot for field '{}': {}",
-f.field_name,
-e
-)
-})?;
-}
-}
+let _ = master_key;

 let mut tx = pool.begin().await?;

@@ -226,23 +198,25 @@ pub async fn run(
 #[derive(sqlx::FromRow)]
 struct LiveField {
 id: Uuid,
-field_name: String,
+name: String,
 encrypted: Vec<u8>,
 }
-let live_fields: Vec<LiveField> =
-sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1")
-.bind(lr.id)
-.fetch_all(&mut *tx)
-.await?;
+let live_fields: Vec<LiveField> = sqlx::query_as(
+"SELECT s.id, s.name, s.encrypted \
+FROM entry_secrets es \
+JOIN secrets s ON s.id = es.secret_id \
+WHERE es.entry_id = $1",
+)
+.bind(lr.id)
+.fetch_all(&mut *tx)
+.await?;

 for f in &live_fields {
 if let Err(e) = db::snapshot_secret_history(
 &mut tx,
 db::SecretSnapshotParams {
-entry_id: lr.id,
 secret_id: f.id,
-entry_version: lr.version,
-field_name: &f.field_name,
+name: &f.name,
 encrypted: &f.encrypted,
 action: "rollback",
 },
@@ -297,22 +271,9 @@ pub async fn run(
 }
 };

-sqlx::query("DELETE FROM secrets WHERE entry_id = $1")
-.bind(live_entry_id)
-.execute(&mut *tx)
-.await?;
-
-for f in &field_snaps {
-if f.action == "delete" {
-continue;
-}
-sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)")
-.bind(live_entry_id)
-.bind(&f.field_name)
-.bind(&f.encrypted)
-.execute(&mut *tx)
-.await?;
-}
+// In N:N mode, rollback restores entry metadata/tags only.
+// Secret snapshots are kept for audit but secret linkage/content is not rewritten here.
+let _ = live_entry_id;

 crate::audit::log_tx(
 &mut tx,

@@ -210,8 +210,12 @@ pub async fn fetch_secret_schemas(
 if entry_ids.is_empty() {
 return Ok(HashMap::new());
 }
-let fields: Vec<SecretField> = sqlx::query_as(
-"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
+let fields: Vec<EntrySecretRow> = sqlx::query_as(
+"SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \
+FROM entry_secrets es \
+JOIN secrets s ON s.id = es.secret_id \
+WHERE es.entry_id = ANY($1) \
+ORDER BY es.entry_id, es.sort_order, s.name",
 )
 .bind(entry_ids)
 .fetch_all(pool)
@@ -219,7 +223,8 @@ pub async fn fetch_secret_schemas(

 let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
 for f in fields {
-map.entry(f.entry_id).or_default().push(f);
+let entry_id = f.entry_id;
+map.entry(entry_id).or_default().push(f.secret());
 }
 Ok(map)
 }
@@ -232,8 +237,12 @@ pub async fn fetch_secrets_for_entries(
 if entry_ids.is_empty() {
 return Ok(HashMap::new());
 }
-let fields: Vec<SecretField> = sqlx::query_as(
-"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
+let fields: Vec<EntrySecretRow> = sqlx::query_as(
+"SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \
+FROM entry_secrets es \
+JOIN secrets s ON s.id = es.secret_id \
+WHERE es.entry_id = ANY($1) \
+ORDER BY es.entry_id, es.sort_order, s.name",
 )
 .bind(entry_ids)
 .fetch_all(pool)
@@ -241,7 +250,8 @@ pub async fn fetch_secrets_for_entries(

 let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
 for f in fields {
-map.entry(f.entry_id).or_default().push(f);
+let entry_id = f.entry_id;
+map.entry(entry_id).or_default().push(f.secret());
 }
 Ok(map)
 }
@@ -345,3 +355,32 @@ impl From<EntryRaw> for Entry {
 }
 }
 }
+
+#[derive(sqlx::FromRow)]
+struct EntrySecretRow {
+entry_id: Uuid,
+id: Uuid,
+user_id: Option<Uuid>,
+name: String,
+#[sqlx(rename = "type")]
+secret_type: String,
+encrypted: Vec<u8>,
+version: i64,
+created_at: chrono::DateTime<chrono::Utc>,
+updated_at: chrono::DateTime<chrono::Utc>,
+}
+
+impl EntrySecretRow {
+fn secret(self) -> SecretField {
+SecretField {
+id: self.id,
+user_id: self.user_id,
+name: self.name,
+secret_type: self.secret_type,
+encrypted: self.encrypted,
+version: self.version,
+created_at: self.created_at,
+updated_at: self.updated_at,
+}
+}
+}

@@ -7,8 +7,8 @@ use crate::crypto;
 use crate::db;
 use crate::models::{EntryRow, EntryWriteRow};
 use crate::service::add::{
-collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path,
-parse_kv, remove_path,
+collect_field_paths, collect_key_paths, flatten_json_fields, infer_secret_type, insert_path,
+parse_key_path, parse_kv, remove_path,
 };

 #[derive(Debug, serde::Serialize)]
@@ -173,8 +173,6 @@ pub async fn run(
 );
 }

-let new_version = row.version + 1;
-
 for entry in params.secret_entries {
 let (path, field_value) = parse_kv(entry)?;
 let flat = flatten_json_fields("", &{
@@ -192,7 +190,10 @@ pub async fn run(
 encrypted: Vec<u8>,
 }
 let ef: Option<ExistingField> = sqlx::query_as(
-"SELECT id, encrypted FROM secrets WHERE entry_id = $1 AND field_name = $2",
+"SELECT s.id, s.encrypted \
+FROM entry_secrets es \
+JOIN secrets s ON s.id = es.secret_id \
+WHERE es.entry_id = $1 AND s.name = $2",
 )
 .bind(row.id)
 .bind(field_name)
@@ -203,10 +204,8 @@ pub async fn run(
 && let Err(e) = db::snapshot_secret_history(
 &mut tx,
 db::SecretSnapshotParams {
-entry_id: row.id,
 secret_id: ef.id,
-entry_version: row.version,
-field_name,
+name: field_name,
 encrypted: &ef.encrypted,
 action: "update",
 },
@@ -216,16 +215,30 @@ pub async fn run(
 tracing::warn!(error = %e, "failed to snapshot secret field history");
 }

-sqlx::query(
-"INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3) \
-ON CONFLICT (entry_id, field_name) DO UPDATE SET \
-encrypted = EXCLUDED.encrypted, version = secrets.version + 1, updated_at = NOW()",
-)
-.bind(row.id)
-.bind(field_name)
-.bind(&encrypted)
-.execute(&mut *tx)
-.await?;
+if let Some(ef) = ef {
+sqlx::query(
+"UPDATE secrets SET encrypted = $1, version = version + 1, updated_at = NOW() WHERE id = $2",
+)
+.bind(&encrypted)
+.bind(ef.id)
+.execute(&mut *tx)
+.await?;
+} else {
+let secret_id: Uuid = sqlx::query_scalar(
+"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
+)
+.bind(params.user_id)
+.bind(field_name)
+.bind(infer_secret_type(field_name))
+.bind(&encrypted)
+.fetch_one(&mut *tx)
+.await?;
+sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
+.bind(row.id)
+.bind(secret_id)
+.execute(&mut *tx)
+.await?;
+}
 }
 }

@@ -239,7 +252,10 @@ pub async fn run(
 encrypted: Vec<u8>,
 }
 let field: Option<FieldToDelete> = sqlx::query_as(
-"SELECT id, encrypted FROM secrets WHERE entry_id = $1 AND field_name = $2",
+"SELECT s.id, s.encrypted \
+FROM entry_secrets es \
+JOIN secrets s ON s.id = es.secret_id \
+WHERE es.entry_id = $1 AND s.name = $2",
 )
 .bind(row.id)
 .bind(&field_name)
@@ -250,10 +266,8 @@ pub async fn run(
 if let Err(e) = db::snapshot_secret_history(
 &mut tx,
 db::SecretSnapshotParams {
-entry_id: row.id,
 secret_id: f.id,
-entry_version: new_version,
-field_name: &field_name,
+name: &field_name,
 encrypted: &f.encrypted,
 action: "delete",
 },
@@ -262,10 +276,19 @@ pub async fn run(
 {
 tracing::warn!(error = %e, "failed to snapshot secret field history before delete");
 }
-sqlx::query("DELETE FROM secrets WHERE id = $1")
+sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
+.bind(row.id)
 .bind(f.id)
 .execute(&mut *tx)
 .await?;
+sqlx::query(
+"DELETE FROM secrets s \
+WHERE s.id = $1 \
+AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
+)
+.bind(f.id)
+.execute(&mut *tx)
+.await?;
 }
 }

@@ -1,6 +1,6 @@
 [package]
 name = "secrets-mcp"
-version = "0.3.6"
+version = "0.3.9"
 edition.workspace = true

 [[bin]]

@@ -40,6 +40,14 @@ fn load_env_var(name: &str) -> Option<String> {
 std::env::var(name).ok().filter(|s| !s.is_empty())
 }

+/// Pretty-print bind address in logs (`127.0.0.1` → `localhost`); actual socket bind unchanged.
+fn listen_addr_log_display(bind_addr: &str) -> String {
+bind_addr
+.strip_prefix("127.0.0.1:")
+.map(|port| format!("localhost:{port}"))
+.unwrap_or_else(|| bind_addr.to_string())
+}
+
 fn load_oauth_config(prefix: &str, base_url: &str, path: &str) -> Option<OAuthConfig> {
 let client_id = load_env_var(&format!("{}_CLIENT_ID", prefix))?;
 let client_secret = load_env_var(&format!("{}_CLIENT_SECRET", prefix))?;
@@ -168,7 +176,10 @@ async fn main() -> Result<()> {
 .await
 .with_context(|| format!("failed to bind to {}", bind_addr))?;

-tracing::info!("Secrets MCP Server listening on http://{}", bind_addr);
+tracing::info!(
+"Secrets MCP Server listening on http://{}",
+listen_addr_log_display(&bind_addr)
+);
 tracing::info!("MCP endpoint: {}/mcp", base_url);

 axum::serve(

@@ -225,12 +225,18 @@ struct AddInput {
 description = "Metadata fields as a JSON object {\"key\": value}. Merged with 'meta' if both provided."
 )]
 meta_obj: Option<Map<String, Value>>,
-#[schemars(description = "Secret fields as 'key=value' strings")]
+#[schemars(
+description = "Secret fields as 'key=value' strings. Reminder: non-sensitive endpoint/address fields should go to metadata.address instead of secrets."
+)]
 secrets: Option<Vec<String>>,
 #[schemars(
-description = "Secret fields as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided."
+description = "Secret fields as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided. Reminder: non-sensitive endpoint/address fields should go to metadata.address."
 )]
 secrets_obj: Option<Map<String, Value>>,
+#[schemars(
+description = "Link existing secrets by secret name. Names must resolve uniquely under current user."
+)]
+link_secret_names: Option<Vec<String>>,
 }

 #[derive(Debug, Deserialize, JsonSchema)]
@@ -259,10 +265,12 @@ struct UpdateInput {
 meta_obj: Option<Map<String, Value>>,
 #[schemars(description = "Metadata field keys to remove")]
 remove_meta: Option<Vec<String>>,
-#[schemars(description = "Secret fields to update/add as 'key=value' strings")]
+#[schemars(
+description = "Secret fields to update/add as 'key=value' strings. Reminder: non-sensitive endpoint/address fields should go to metadata.address instead of secrets."
+)]
 secrets: Option<Vec<String>>,
 #[schemars(
-description = "Secret fields to update/add as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided."
+description = "Secret fields to update/add as a JSON object {\"key\": \"value\"}. Merged with 'secrets' if both provided. Reminder: non-sensitive endpoint/address fields should go to metadata.address."
 )]
 secrets_obj: Option<Map<String, Value>>,
 #[schemars(description = "Secret field keys to remove")]
@@ -429,10 +437,20 @@ impl SecretsService {
 .entries
 .iter()
 .map(|e| {
-let schema: Vec<&str> = result
+let schema: Vec<serde_json::Value> = result
 .secret_schemas
 .get(&e.id)
-.map(|f| f.iter().map(|s| s.field_name.as_str()).collect())
+.map(|f| {
+f.iter()
+.map(|s| {
+serde_json::json!({
+"id": s.id,
+"name": s.name,
+"type": s.secret_type,
+})
+})
+.collect()
+})
 .unwrap_or_default();
 serde_json::json!({
 "id": e.id,
@@ -517,10 +535,20 @@ impl SecretsService {
 "updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
 })
 } else {
-let schema: Vec<&str> = result
+let schema: Vec<serde_json::Value> = result
 .secret_schemas
 .get(&e.id)
-.map(|f| f.iter().map(|s| s.field_name.as_str()).collect())
+.map(|f| {
+f.iter()
+.map(|s| {
+serde_json::json!({
+"id": s.id,
+"name": s.name,
+"type": s.secret_type,
+})
+})
+.collect()
+})
 .unwrap_or_default();
 serde_json::json!({
 "id": e.id,
@@ -639,6 +667,7 @@ impl SecretsService {
 if let Some(obj) = input.secrets_obj {
 secrets.extend(map_to_kv_strings(obj));
 }
+let link_secret_names = input.link_secret_names.unwrap_or_default();
 let folder = input.folder.as_deref().unwrap_or("");
 let entry_type = input.entry_type.as_deref().unwrap_or("");
 let notes = input.notes.as_deref().unwrap_or("");
@@ -653,6 +682,7 @@ impl SecretsService {
 tags: &tags,
 meta_entries: &meta,
 secret_entries: &secrets,
+link_secret_names: &link_secret_names,
 user_id: Some(user_id),
 },
 &user_key,

@@ -21,7 +21,7 @@ use secrets_core::service::{
 api_key::{ensure_api_key, regenerate_api_key},
 audit_log::list_for_user,
 delete::delete_by_id,
-search::{SearchParams, count_entries, list_entries},
+search::{SearchParams, count_entries, fetch_secret_schemas, list_entries},
 update::{UpdateEntryFieldsByIdParams, update_fields_by_id},
 user::{
 OAuthProfile, bind_oauth_account, find_or_create_user, get_user_by_id,
@@ -105,10 +105,17 @@ struct EntryListItemView {
 notes: String,
 tags: String,
 metadata: String,
+secrets: Vec<SecretSummaryView>,
 /// RFC3339 UTC for `<time datetime>`; localized in entries.html.
 updated_at_iso: String,
 }

+struct SecretSummaryView {
+id: String,
+name: String,
+secret_type: String,
+}
+
 /// Cap for HTML list (avoids loading unbounded rows into memory).
 const ENTRIES_PAGE_LIMIT: u32 = 5_000;

@@ -207,6 +214,10 @@ pub fn web_router() -> Router<AppState> {
 "/api/entries/{id}",
 patch(api_entry_patch).delete(api_entry_delete),
 )
+.route(
+"/api/entries/{entry_id}/secrets/{secret_id}",
+axum::routing::delete(api_entry_secret_unlink),
+)
 }

 fn text_asset_response(content: &'static str, content_type: &'static str) -> Response {
@@ -577,6 +588,13 @@ async fn entries_page(
 StatusCode::INTERNAL_SERVER_ERROR
 })?;
 let shown_count = rows.len();
+let entry_ids: Vec<Uuid> = rows.iter().map(|e| e.id).collect();
+let secret_schemas = fetch_secret_schemas(&state.pool, &entry_ids)
+.await
+.map_err(|e| {
+tracing::error!(error = %e, "failed to load secret schema list for web");
+StatusCode::INTERNAL_SERVER_ERROR
+})?;

 let entries = rows
 .into_iter()
@@ -589,6 +607,19 @@ async fn entries_page(
 tags: e.tags.join(", "),
 metadata: serde_json::to_string_pretty(&e.metadata)
 .unwrap_or_else(|_| "{}".to_string()),
+secrets: secret_schemas
+.get(&e.id)
+.map(|fields| {
+fields
+.iter()
+.map(|f| SecretSummaryView {
+id: f.id.to_string(),
+name: f.name.clone(),
+secret_type: f.secret_type.clone(),
+})
+.collect()
+})
+.unwrap_or_default(),
 updated_at_iso: e.updated_at.to_rfc3339_opts(SecondsFormat::Secs, true),
 })
 .collect();
@@ -990,11 +1021,112 @@ async fn api_entry_delete(
 .await
 .ok_or((StatusCode::UNAUTHORIZED, Json(json!({ "error": "未登录" }))))?;

-delete_by_id(&state.pool, entry_id, user_id)
+let result = delete_by_id(&state.pool, entry_id, user_id)
 .await
 .map_err(map_entry_mutation_err)?;

-Ok(Json(json!({ "ok": true })))
+Ok(Json(json!({
+"ok": true,
+"migrated": result.migrated,
+})))
+}
+
+async fn api_entry_secret_unlink(
+State(state): State<AppState>,
+session: Session,
+Path((entry_id, secret_id)): Path<(Uuid, Uuid)>,
+) -> Result<Json<serde_json::Value>, EntryApiError> {
+#[derive(sqlx::FromRow)]
+struct EntryAuditRow {
+folder: String,
+#[sqlx(rename = "type")]
+entry_type: String,
+name: String,
+}
+
+let user_id = current_user_id(&session)
+.await
+.ok_or((StatusCode::UNAUTHORIZED, Json(json!({ "error": "未登录" }))))?;
+
+let mut tx = state
+.pool
+.begin()
+.await
+.map_err(|e| map_entry_mutation_err(e.into()))?;
+
+let entry_row: Option<EntryAuditRow> =
+sqlx::query_as("SELECT folder, type, name FROM entries WHERE id = $1 AND user_id = $2")
+.bind(entry_id)
+.bind(user_id)
+.fetch_optional(&mut *tx)
+.await
+.map_err(|e| map_entry_mutation_err(e.into()))?;
+
+let Some(entry_row) = entry_row else {
+tx.rollback()
+.await
+.map_err(|e| map_entry_mutation_err(e.into()))?;
+return Err((
+StatusCode::NOT_FOUND,
+Json(json!({ "error": "条目不存在或无权访问" })),
+));
+};
+
+let deleted = sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
+.bind(entry_id)
+.bind(secret_id)
+.execute(&mut *tx)
+.await
+.map_err(|e| map_entry_mutation_err(e.into()))?
+.rows_affected();
+
+if deleted == 0 {
+tx.rollback()
+.await
+.map_err(|e| map_entry_mutation_err(e.into()))?;
+return Err((
+StatusCode::NOT_FOUND,
+Json(json!({ "error": "关联不存在" })),
+));
+}
+
+let secret_deleted = sqlx::query(
+"DELETE FROM secrets s \
+WHERE s.id = $1 \
+AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
+)
+.bind(secret_id)
+.execute(&mut *tx)
+.await
+.map_err(|e| map_entry_mutation_err(e.into()))?
+.rows_affected()
+> 0;
+
+secrets_core::audit::log_tx(
+&mut tx,
+Some(user_id),
+"unlink_secret",
+&entry_row.folder,
+&entry_row.entry_type,
+&entry_row.name,
+json!({
+"source": "web",
+"entry_id": entry_id,
+"secret_id": secret_id,
+"deleted_secret": secret_deleted,
+}),
+)
+.await;
+
+tx.commit()
+.await
+.map_err(|e| map_entry_mutation_err(e.into()))?;
+
+Ok(Json(json!({
+"ok": true,
+"deleted_relation": true,
+"deleted_secret": secret_deleted,
+})))
 }

 // ── OAuth / Well-known ────────────────────────────────────────────────────────
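
Review note: to exercise the new unlink route by hand, a minimal sketch is shown below. The base URL, the two UUIDs, and the session cookie are placeholders (not part of this diff), and a logged-in web session is required.

```bash
# Hypothetical smoke test for DELETE /api/entries/{entry_id}/secrets/{secret_id}.
# BASE_URL, ENTRY_ID, SECRET_ID and SESSION_COOKIE are placeholders you must supply.
BASE_URL="http://localhost:8080"
ENTRY_ID="00000000-0000-0000-0000-000000000001"
SECRET_ID="00000000-0000-0000-0000-000000000002"

curl -sS -X DELETE \
  -H "Cookie: $SESSION_COOKIE" \
  "$BASE_URL/api/entries/$ENTRY_ID/secrets/$SECRET_ID"
# On success the handler above responds with:
# {"ok":true,"deleted_relation":true,"deleted_secret":<true|false>}
```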

@@ -45,7 +45,7 @@
 .btn-sign-out:hover { background: var(--surface2); }
 .main { padding: 32px 24px 40px; flex: 1; }
 .card { background: var(--surface); border: 1px solid var(--border); border-radius: 12px;
-padding: 24px; width: 100%; max-width: 1280px; margin: 0 auto; }
+padding: 24px; width: 100%; max-width: 1480px; margin: 0 auto; }
 .card-title { font-size: 20px; font-weight: 600; margin-bottom: 8px; }
 .card-subtitle { color: var(--text-muted); font-size: 13px; margin-bottom: 20px; }
 .filter-bar {
@@ -73,17 +73,46 @@
 }
 .btn-clear:hover { background: var(--surface2); color: var(--text); }
 .empty { color: var(--text-muted); font-size: 14px; padding: 20px 0; }
-.table-wrap { overflow-x: auto; }
-table { width: 100%; border-collapse: collapse; min-width: 720px; }
-th, td { text-align: left; vertical-align: top; padding: 12px 10px; border-top: 1px solid var(--border); }
-th { color: var(--text-muted); font-size: 12px; font-weight: 600; white-space: nowrap; }
-td { font-size: 13px; }
-.mono { font-family: 'JetBrains Mono', monospace; }
-.cell-notes, .cell-meta {
-max-width: 280px; word-break: break-word;
+.table-wrap {
+overflow: auto;
+border: 1px solid var(--border);
+border-radius: 10px;
+background: var(--bg);
+max-height: 72vh;
 }
+table {
+width: max-content;
+min-width: 1240px;
+border-collapse: separate;
+border-spacing: 0;
+}
+th, td { text-align: left; vertical-align: top; padding: 12px 10px; border-top: 1px solid var(--border); }
+th {
+color: var(--text-muted);
+font-size: 12px;
+font-weight: 600;
+white-space: nowrap;
+position: sticky;
+top: 0;
+z-index: 2;
+background: var(--surface);
+}
+td { font-size: 13px; line-height: 1.45; }
+tbody tr:nth-child(2n) td { background: rgba(255, 255, 255, 0.01); }
+.mono { font-family: 'JetBrains Mono', monospace; }
+.col-updated { min-width: 168px; }
+.col-folder { min-width: 128px; }
+.col-type { min-width: 108px; }
+.col-name { min-width: 180px; max-width: 260px; }
+.col-tags { min-width: 160px; max-width: 220px; }
+.col-actions { min-width: 132px; }
+.cell-name, .cell-tags-val {
+overflow-wrap: anywhere;
+word-break: break-word;
+}
+.cell-notes, .cell-meta { min-width: 260px; max-width: 360px; }
 .notes-scroll {
-max-height: 160px;
+max-height: 120px;
 overflow: auto;
 white-space: pre-wrap;
 word-break: break-word;
@@ -96,10 +125,45 @@
 .detail {
 background: var(--bg); border: 1px solid var(--border); border-radius: 8px;
 padding: 10px; white-space: pre-wrap; word-break: break-word; font-size: 12px;
-max-width: 320px; max-height: 160px; overflow: auto;
+max-width: 360px; max-height: 120px; overflow: auto;
 }
 .col-actions { white-space: nowrap; }
 .row-actions { display: flex; flex-wrap: wrap; gap: 6px; }
+.col-secrets { min-width: 300px; max-width: 420px; }
+.secret-list { display: flex; flex-wrap: wrap; gap: 6px; max-width: 400px; }
+.secret-chip {
+display: inline-flex;
+align-items: center;
+gap: 6px;
+border: 1px solid var(--border);
+border-radius: 999px;
+padding: 3px 8px;
+font-size: 11px;
+background: var(--surface2);
+font-family: 'JetBrains Mono', monospace;
+max-width: 100%;
+min-width: 0;
+}
+.secret-name {
+min-width: 0;
+overflow: hidden;
+text-overflow: ellipsis;
+white-space: nowrap;
+}
+.secret-type {
+color: var(--text-muted);
+border-left: 1px solid var(--border);
+padding-left: 6px;
+}
+.btn-unlink-secret {
+border: none;
+background: transparent;
+color: #f85149;
+cursor: pointer;
+font-size: 12px;
+line-height: 1;
+padding: 0;
+}
 .btn-row {
 padding: 4px 10px; border-radius: 6px; font-size: 12px; cursor: pointer;
 border: 1px solid var(--border); background: var(--surface2); color: var(--text-muted);
@@ -145,7 +209,8 @@
 .main { padding: 20px 12px 28px; }
 .card { padding: 16px; }
 .topbar { padding: 12px 16px; flex-wrap: wrap; }
-table, thead, tbody, th, td, tr { display: block; }
+.table-wrap { max-height: none; border: none; background: transparent; }
+table, thead, tbody, th, td, tr { display: block; min-width: 0; width: 100%; }
 thead { display: none; }
 tr { border-top: 1px solid var(--border); padding: 12px 0; }
 td { border-top: none; padding: 6px 0; max-width: none; }
@@ -160,9 +225,9 @@
 td.col-notes::before { content: "Notes"; }
 td.col-tags::before { content: "Tags"; }
 td.col-meta::before { content: "Metadata"; }
+td.col-secrets::before { content: "Secrets"; }
 td.col-actions::before { content: "操作"; }
-.detail { max-width: none; }
-.notes-scroll { max-width: none; }
+.detail, .notes-scroll, .secret-list { max-width: none; }
 }
 </style>
 </head>
@@ -189,7 +254,7 @@
 <main class="main">
 <section class="card">
 <div class="card-title">我的条目</div>
-<div class="card-subtitle">在当前筛选条件下,共 <strong>{{ total_count }}</strong> 条记录;本页显示 <strong>{{ shown_count }}</strong> 条(按更新时间降序,单页最多 {{ limit }} 条)。不含密文字段。时间为浏览器本地时区。</div>
+<div class="card-subtitle">在当前筛选条件下,共 <strong>{{ total_count }}</strong> 条记录;本页显示 <strong>{{ shown_count }}</strong> 条(按更新时间降序,单页最多 {{ limit }} 条)。不含密文字段。时间为浏览器本地时区。提示:非敏感地址类字段(如 address / endpoint / url)建议放在 Metadata(例如 <code>metadata.address</code>),仅密码/令牌等放 Secrets。</div>

 <form class="filter-bar" method="get" action="/entries">
 <div class="filter-field">
@@ -220,6 +285,7 @@
 <th>Notes</th>
 <th>Tags</th>
 <th>Metadata</th>
+<th>Secrets</th>
 <th>操作</th>
 </tr>
 </thead>
@@ -233,6 +299,17 @@
 <td class="col-notes cell-notes">{% if !entry.notes.is_empty() %}<div class="notes-scroll cell-notes-val">{{ entry.notes }}</div>{% endif %}</td>
 <td class="col-tags mono cell-tags-val">{{ entry.tags }}</td>
 <td class="col-meta cell-meta"><pre class="detail cell-meta-val">{{ entry.metadata }}</pre></td>
+<td class="col-secrets">
+<div class="secret-list">
+{% for s in entry.secrets %}
+<span class="secret-chip">
+<span class="secret-name" title="{{ s.name }}">{{ s.name }}</span>
+<span class="secret-type">{{ s.secret_type }}</span>
+<button type="button" class="btn-unlink-secret" data-secret-id="{{ s.id }}" data-secret-name="{{ s.name }}" title="解除关联">x</button>
+</span>
+{% endfor %}
+</div>
+</td>
 <td class="col-actions">
 <div class="row-actions">
 <button type="button" class="btn-row btn-edit">编辑</button>
@@ -367,7 +444,7 @@
 var nameEl = tr.querySelector('.cell-name');
 var name = nameEl ? nameEl.textContent.trim() : '';
 if (!id) return;
-if (!confirm('确定删除条目「' + name + '」?关联的密文字段将一并删除。')) return;
+if (!confirm('确定删除条目「' + name + '」?')) return;
 fetch('/api/entries/' + encodeURIComponent(id), { method: 'DELETE', credentials: 'same-origin' })
 .then(function (r) {
 return r.json().then(function (data) {
@@ -375,9 +452,37 @@
 return data;
 });
 })
-.then(function () { window.location.reload(); })
+.then(function (data) {
+if (data && Array.isArray(data.migrated) && data.migrated.length > 0) {
+alert('已自动迁移共享 key 引用:' + data.migrated.length + ' 个条目完成重定向。');
+}
+window.location.reload();
+})
 .catch(function (e) { alert(e.message || String(e)); });
 });

+tr.querySelectorAll('.btn-unlink-secret').forEach(function (btn) {
+btn.addEventListener('click', function () {
+var entryId = tr.getAttribute('data-entry-id');
+var secretId = btn.getAttribute('data-secret-id');
+var secretName = btn.getAttribute('data-secret-name') || '';
+if (!entryId || !secretId) return;
+if (!confirm('确定解除 secret 关联「' + secretName + '」?')) return;
+fetch('/api/entries/' + encodeURIComponent(entryId) + '/secrets/' + encodeURIComponent(secretId), {
+method: 'DELETE',
+credentials: 'same-origin'
+}).then(function (r) {
+return r.json().then(function (data) {
+if (!r.ok) throw new Error(data.error || ('HTTP ' + r.status));
+return data;
+});
+}).then(function () {
+window.location.reload();
+}).catch(function (e) {
+alert(e.message || String(e));
+});
+});
+});
 });
 })();
 </script>

126  migrations/001_nn_schema.sql  Normal file
@@ -0,0 +1,126 @@
-- Entry-Secret N:N migration (manual SQL)
-- Safe to re-run: uses IF EXISTS/IF NOT EXISTS guards.

BEGIN;

-- 1) secrets: add new columns
ALTER TABLE secrets
ADD COLUMN IF NOT EXISTS user_id UUID REFERENCES users(id) ON DELETE SET NULL;
ALTER TABLE secrets
ADD COLUMN IF NOT EXISTS type VARCHAR(64) NOT NULL DEFAULT 'text';

-- 2) rename field_name -> name (idempotent)
DO $$ BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_name = 'secrets' AND column_name = 'field_name'
) THEN
ALTER TABLE secrets RENAME COLUMN field_name TO name;
END IF;
END $$;

-- 3) create join table
CREATE TABLE IF NOT EXISTS entry_secrets (
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
secret_id UUID NOT NULL REFERENCES secrets(id) ON DELETE CASCADE,
sort_order INT NOT NULL DEFAULT 0,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
PRIMARY KEY (entry_id, secret_id)
);
CREATE INDEX IF NOT EXISTS idx_entry_secrets_secret_id ON entry_secrets(secret_id);

-- 4) backfill user_id and relationship from old secrets.entry_id
DO $$ BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_name = 'secrets' AND column_name = 'entry_id'
) THEN
UPDATE secrets s
SET user_id = e.user_id
FROM entries e
WHERE s.entry_id = e.id AND s.user_id IS NULL;

INSERT INTO entry_secrets(entry_id, secret_id, sort_order)
SELECT entry_id, id, 0
FROM secrets
WHERE entry_id IS NOT NULL
ON CONFLICT DO NOTHING;
END IF;
END $$;

-- 5) backfill secret types
UPDATE secrets SET type = 'pem' WHERE name IN ('ssh_key');
UPDATE secrets SET type = 'password' WHERE name IN ('password');
UPDATE secrets SET type = 'phone' WHERE name LIKE 'phone%';
UPDATE secrets SET type = 'url' WHERE name IN ('webhook_url', 'address');
UPDATE secrets
SET type = 'token'
WHERE name IN (
'access_key_id',
'access_key_secret',
'global_api_key',
'api_key',
'secret_key',
'personal_access_token',
'runner_token',
'GOOGLE_CLIENT_ID',
'GOOGLE_CLIENT_SECRET'
);

-- 6) drop old entry_id path
ALTER TABLE secrets DROP CONSTRAINT IF EXISTS secrets_entry_id_fkey;
DROP INDEX IF EXISTS idx_secrets_entry_id;
ALTER TABLE secrets DROP CONSTRAINT IF EXISTS secrets_entry_id_field_name_key;
ALTER TABLE secrets DROP CONSTRAINT IF EXISTS secrets_entry_id_name_key;
ALTER TABLE secrets DROP COLUMN IF EXISTS entry_id;

-- 7) add indexes for new access paths
CREATE INDEX IF NOT EXISTS idx_secrets_user_id
ON secrets(user_id) WHERE user_id IS NOT NULL;
DO $$
DECLARE
duplicate_samples TEXT;
BEGIN
SELECT string_agg(
format('user_id=%s, name=%s, count=%s', t.user_id, t.name, t.cnt),
E'\n'
)
INTO duplicate_samples
FROM (
SELECT user_id::TEXT AS user_id, name, COUNT(*) AS cnt
FROM secrets
WHERE user_id IS NOT NULL
GROUP BY user_id, name
HAVING COUNT(*) > 1
ORDER BY cnt DESC, user_id, name
LIMIT 20
) t;

IF duplicate_samples IS NOT NULL THEN
RAISE EXCEPTION
'Cannot enforce unique constraint on secrets(user_id, name). Duplicates found:%',
E'\n' || duplicate_samples
USING HINT = 'Please deduplicate conflicting rows, then rerun migration.';
END IF;
END $$;
CREATE UNIQUE INDEX IF NOT EXISTS idx_secrets_unique_user_name
ON secrets(user_id, name) WHERE user_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_secrets_name ON secrets(name);
CREATE INDEX IF NOT EXISTS idx_secrets_type ON secrets(type);

-- 8) secrets_history: rename and remove entry-scoped columns
DO $$ BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_name = 'secrets_history' AND column_name = 'field_name'
) THEN
ALTER TABLE secrets_history RENAME COLUMN field_name TO name;
END IF;
END $$;
ALTER TABLE secrets_history DROP COLUMN IF EXISTS entry_id;
ALTER TABLE secrets_history DROP COLUMN IF EXISTS entry_version;

COMMIT;
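
A minimal post-migration sanity check, as a sketch (assuming TARGET_DATABASE_URL points at the migrated database; exact counts depend on your data, and the orphan count should normally be 0 right after the backfill):

```bash
# Sketch: verify the entry_secrets backfill after 001_nn_schema.sql.
psql "$TARGET_DATABASE_URL" -c "SELECT COUNT(*) FROM secrets;"
psql "$TARGET_DATABASE_URL" -c "SELECT COUNT(*) FROM entry_secrets;"
# Secrets no longer linked to any entry (expected 0 immediately after the backfill):
psql "$TARGET_DATABASE_URL" -c \
  "SELECT COUNT(*) FROM secrets s WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id);"
```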
67  migrations/002_data_cleanup.sql  Normal file
@@ -0,0 +1,67 @@
-- Metadata cleanup migration (manual SQL)
-- Keep tags/type as dedicated columns; remove duplicated metadata keys.

BEGIN;

-- 1) Promote metadata.type -> entries.type when present.
UPDATE entries
SET type = metadata->>'type'
WHERE metadata->>'type' IS NOT NULL
AND metadata->>'type' <> '';

-- 2) Remove metadata.type.
UPDATE entries
SET metadata = metadata - 'type'
WHERE metadata ? 'type';

-- 3) Remove metadata.environment (duplicated by tags prod/dev).
UPDATE entries
SET metadata = metadata - 'environment'
WHERE metadata ? 'environment';

-- 4) Remove metadata.account when equal to folder.
UPDATE entries
SET metadata = metadata - 'account'
WHERE metadata->>'account' = folder;

-- 5) Normalize manufacturer -> provider.
UPDATE entries
SET metadata = (metadata - 'manufacturer')
|| jsonb_build_object('provider', metadata->>'manufacturer')
WHERE metadata ? 'manufacturer'
AND NOT metadata ? 'provider';

UPDATE entries
SET metadata = metadata - 'manufacturer'
WHERE metadata ? 'manufacturer'
AND metadata ? 'provider';

-- 6) Drop ssh_key_format (moved to secrets.type).
UPDATE entries
SET metadata = metadata - 'ssh_key_format'
WHERE metadata ? 'ssh_key_format';

-- 7) Remove display_name when duplicated by name.
UPDATE entries
SET metadata = metadata - 'display_name'
WHERE metadata->>'display_name' = name;

-- 8) Condense server_* metadata into server_ref.
UPDATE entries
SET metadata = metadata
- 'server_account'
- 'server_hostname'
- 'server_location'
- 'server_public_ip'
|| CASE
WHEN metadata ? 'server_entry_name'
THEN jsonb_build_object('server_ref', metadata->>'server_entry_name')
ELSE '{}'::jsonb
END
WHERE metadata ? 'server_entry_name' OR metadata ? 'server_account';

UPDATE entries
SET metadata = metadata - 'server_entry_name'
WHERE metadata ? 'server_entry_name';

COMMIT;
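
A quick spot check for this cleanup, as a sketch (same TARGET_DATABASE_URL assumption; each count should be 0 for the keys removed above):

```bash
# Sketch: confirm the duplicated metadata keys are gone after 002_data_cleanup.sql.
for key in type environment ssh_key_format server_entry_name; do
  psql "$TARGET_DATABASE_URL" -c "SELECT '$key' AS removed_key, COUNT(*) FROM entries WHERE metadata ? '$key';"
done
```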
81  scripts/migrate-db-prod-to-nn-test.sh  Executable file
@@ -0,0 +1,81 @@
#!/usr/bin/env bash
# Migrate PostgreSQL data from secrets-mcp-prod to secrets-nn-test.
#
# Prereqs: pg_dump and pg_restore (PostgreSQL client tools) on PATH.
# TLS: Use the same connection parameters as your MCP / app (e.g. sslmode=verify-full
# and PGSSLROOTCERT if needed). If local psql fails with "certificate verify failed",
# run this script from a host that trusts the server CA, or set PGSSLROOTCERT.
#
# Usage:
# export SOURCE_DATABASE_URL='postgres://USER:PASS@host:5432/secrets-mcp-prod?sslmode=verify-full'
# export TARGET_DATABASE_URL='postgres://USER:PASS@host:5432/secrets-nn-test?sslmode=verify-full'
# ./scripts/migrate-db-prod-to-nn-test.sh
#
# Options (env):
# BACKUP_TARGET_FIRST=1 # default: dump target to ./backup-secrets-nn-test-<timestamp>.dump before restore
# RUN_NN_SQL=1 # default: run migrations/001_nn_schema.sql then 002_data_cleanup.sql on target after restore
# SKIP_TARGET_BACKUP=1 # skip target backup
#
# WARNINGS:
# - pg_restore with --clean --if-exists drops objects that exist in the dump; target DB is replaced
# to match the logical content of the source dump (same as typical full restore).
# - Optionally keep a manual dump of the target before proceeding.
# - 001_nn_schema.sql will fail if secrets has duplicate (user_id, name) after backfill; fix data first.

set -euo pipefail

ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT"

SOURCE_URL="${SOURCE_DATABASE_URL:-}"
TARGET_URL="${TARGET_DATABASE_URL:-}"

if [[ -z "$SOURCE_URL" || -z "$TARGET_URL" ]]; then
echo "Set SOURCE_DATABASE_URL and TARGET_DATABASE_URL (postgres URLs)." >&2
exit 1
fi

if ! command -v pg_dump >/dev/null || ! command -v pg_restore >/dev/null; then
echo "pg_dump and pg_restore are required." >&2
exit 1
fi

TS="$(date +%Y%m%d%H%M%S)"
DUMP_FILE="${DUMP_FILE:-$ROOT/tmp/secrets-mcp-prod-${TS}.dump}"
mkdir -p "$(dirname "$DUMP_FILE")"

if [[ "${EXCLUDE_TOWER_SESSIONS:-}" == "1" ]]; then
echo "==> Excluding schema tower_sessions from dump"
pg_dump "$SOURCE_URL" -Fc --no-owner --no-acl --exclude-schema=tower_sessions -f "$DUMP_FILE"
else
echo "==> Dumping source (custom format) -> $DUMP_FILE"
pg_dump "$SOURCE_URL" -Fc --no-owner --no-acl -f "$DUMP_FILE"
fi

if [[ "${SKIP_TARGET_BACKUP:-}" != "1" && "${BACKUP_TARGET_FIRST:-1}" == "1" ]]; then
BACKUP_FILE="$ROOT/tmp/secrets-nn-test-before-${TS}.dump"
echo "==> Backing up target -> $BACKUP_FILE"
pg_dump "$TARGET_URL" -Fc --no-owner --no-acl -f "$BACKUP_FILE" || {
echo "Target backup failed (empty DB is OK). Continuing." >&2
}
fi

echo "==> Restoring into target (--clean --if-exists)"
pg_restore -d "$TARGET_URL" --no-owner --no-acl --clean --if-exists --verbose "$DUMP_FILE"

if [[ "${RUN_NN_SQL:-1}" == "1" ]]; then
if [[ ! -f "$ROOT/migrations/001_nn_schema.sql" ]]; then
echo "migrations/001_nn_schema.sql not found; skip NN SQL." >&2
else
echo "==> Applying migrations/001_nn_schema.sql on target"
psql "$TARGET_URL" -v ON_ERROR_STOP=1 -f "$ROOT/migrations/001_nn_schema.sql"
fi
if [[ -f "$ROOT/migrations/002_data_cleanup.sql" ]]; then
echo "==> Applying migrations/002_data_cleanup.sql on target"
psql "$TARGET_URL" -v ON_ERROR_STOP=1 -f "$ROOT/migrations/002_data_cleanup.sql"
fi
fi

echo "==> Done. Suggested verification:"
echo " psql \"\$TARGET_DATABASE_URL\" -c \"SELECT COUNT(*) FROM entries; SELECT COUNT(*) FROM secrets; SELECT COUNT(*) FROM entry_secrets;\""
echo " ./scripts/release-check.sh # optional app-side sanity"
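
For reference, one way to invoke the script with the env switches documented in its header (connection URLs are placeholders):

```bash
# Example invocation; credentials and hosts are placeholders.
export SOURCE_DATABASE_URL='postgres://USER:PASS@host:5432/secrets-mcp-prod?sslmode=verify-full'
export TARGET_DATABASE_URL='postgres://USER:PASS@host:5432/secrets-nn-test?sslmode=verify-full'
EXCLUDE_TOWER_SESSIONS=1 RUN_NN_SQL=1 ./scripts/migrate-db-prod-to-nn-test.sh
```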