fix(secrets-mcp 0.5.20): code review plan — export secret types, env map, rollback, API key, MCP tools, web session & validation

- Export/import: optional secret_types map; AddResult includes entry_id
- env_map: dot→__ segment encoding; collision errors
- rollback: FOR UPDATE + txn-consistent snapshot; restore name from history
- regenerate_api_key: rows_affected guard
- MCP: find count propagates errors; add uses entry_id for relations; rollback no longer requires an encryption key
- Web: load_session_user_strict + JSON handlers key_version; PATCH length limits
- Tests: ExportEntry serde, env segment
This commit is contained in:
voson
2026-04-11 17:10:16 +08:00
parent 2c7dbf890b
commit d772066210
13 changed files with 266 additions and 141 deletions

View File

@@ -184,6 +184,9 @@ pub struct ExportEntry {
/// Decrypted secret fields. None means no secrets in this export (--no-secrets).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub secrets: Option<BTreeMap<String, Value>>,
/// Per-secret types (`text`, `password`, `key`, …). Omitted in legacy exports; importers default to `"text"`.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub secret_types: Option<BTreeMap<String, String>>,
}
// ── Multi-user models ──────────────────────────────────────────────────────────
@@ -311,3 +314,44 @@ pub fn toml_to_json_value(v: &toml::Value) -> Value {
}
}
}
#[cfg(test)]
mod export_entry_tests {
    use super::*;
    use std::collections::BTreeMap;

    /// Serializing an entry carrying `secret_types` and reading it back
    /// must preserve the per-secret type map.
    #[test]
    fn export_entry_roundtrip_includes_secret_types() {
        let secrets = BTreeMap::from([("k".to_string(), serde_json::json!("v"))]);
        let types = BTreeMap::from([("k".to_string(), "password".to_string())]);
        let entry = ExportEntry {
            name: "n".to_string(),
            folder: "f".to_string(),
            entry_type: "t".to_string(),
            notes: "".to_string(),
            tags: vec![],
            metadata: serde_json::json!({}),
            secrets: Some(secrets),
            secret_types: Some(types),
        };

        let encoded = serde_json::to_string(&entry).unwrap();
        let decoded: ExportEntry = serde_json::from_str(&encoded).unwrap();

        let restored = decoded.secret_types.as_ref().unwrap();
        assert_eq!(restored.get("k").map(String::as_str), Some("password"));
    }

    /// Legacy export JSON has no `secret_types` key; it must still
    /// deserialize, leaving the field as `None` (serde `default`).
    #[test]
    fn export_entry_legacy_json_without_secret_types_deserializes() {
        let json = r#"{"name":"a","folder":"","type":"","notes":"","tags":[],"metadata":{},"secrets":{"x":"y"}}"#;
        let parsed: ExportEntry = serde_json::from_str(json).unwrap();
        assert!(parsed.secret_types.is_none());
    }
}

View File

@@ -161,6 +161,7 @@ pub fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)>
#[derive(Debug, serde::Serialize)]
pub struct AddResult {
pub entry_id: Uuid,
pub name: String,
pub folder: String,
#[serde(rename = "type")]
@@ -477,6 +478,7 @@ pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) ->
tx.commit().await?;
Ok(AddResult {
entry_id,
name: params.name.to_string(),
folder: params.folder.to_string(),
entry_type: entry_type.to_string(),

View File

@@ -47,11 +47,14 @@ pub async fn ensure_api_key(pool: &PgPool, user_id: Uuid) -> Result<String> {
/// Generate a fresh API key for the user, replacing the old one.
///
/// # Errors
/// Returns `AppError::NotFoundUser` when no row matches `user_id`
/// (the UPDATE affected zero rows), and propagates database errors.
pub async fn regenerate_api_key(pool: &PgPool, user_id: Uuid) -> Result<String> {
    let new_key = generate_api_key();
    // NOTE: the diff render had left the pre-change `sqlx::query(...)` line
    // duplicated above this statement; only the bound, executed query belongs here.
    let res = sqlx::query("UPDATE users SET api_key = $1 WHERE id = $2")
        .bind(&new_key)
        .bind(user_id)
        .execute(pool)
        .await?;
    // Guard against silently "regenerating" a key for a nonexistent user:
    // an UPDATE that matched no rows must surface as a not-found error.
    if res.rows_affected() == 0 {
        return Err(AppError::NotFoundUser.into());
    }
    Ok(new_key)
}

View File

@@ -45,18 +45,27 @@ pub async fn build_env_map(
for f in fields {
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
let key = format!(
"{}_{}",
effective_prefix,
f.name.to_uppercase().replace(['-', '.'], "_")
);
combined.insert(key, json_to_env_string(&decrypted));
let seg = secret_name_to_env_segment(&f.name);
let key = format!("{}_{}", effective_prefix, seg);
if let Some(_old) = combined.insert(key.clone(), json_to_env_string(&decrypted)) {
anyhow::bail!(
"environment variable name collision after normalization: '{}' (secret '{}')",
key,
f.name
);
}
}
}
Ok(combined)
}
/// Map a secret field name to an env key segment: `.` → `__`, `-` → `_`, then uppercase.
/// Avoids collisions between e.g. `db.password` and `db_password`.
fn secret_name_to_env_segment(name: &str) -> String {
    // Build the segment character by character instead of chained `replace`
    // calls; dots widen to two underscores, so reserve a little headroom.
    let mut seg = String::with_capacity(name.len() + 4);
    for ch in name.chars() {
        match ch {
            '.' => seg.push_str("__"),
            '-' => seg.push('_'),
            other => seg.extend(other.to_uppercase()),
        }
    }
    seg
}
fn env_prefix(entry: &crate::models::Entry, prefix: &str) -> String {
let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_");
if prefix.is_empty() {
@@ -75,3 +84,14 @@ fn json_to_env_string(v: &Value) -> String {
other => other.to_string(),
}
}
#[cfg(test)]
mod tests {
    use super::secret_name_to_env_segment;

    /// `db.password` and `db_password` must normalize to distinct env
    /// segments (dot widens to a double underscore).
    #[test]
    fn secret_name_env_segment_disambiguates_dot_from_underscore() {
        let dotted = secret_name_to_env_segment("db.password");
        let underscored = secret_name_to_env_segment("db_password");
        assert_eq!(dotted, "DB__PASSWORD");
        assert_eq!(underscored, "DB_PASSWORD");
    }
}

View File

@@ -44,21 +44,23 @@ pub async fn export(
let mut export_entries: Vec<ExportEntry> = Vec::with_capacity(entries.len());
for entry in &entries {
let secrets = if params.no_secrets {
None
let (secrets, secret_types) = if params.no_secrets {
(None, None)
} else {
let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]);
if fields.is_empty() {
Some(BTreeMap::new())
(Some(BTreeMap::new()), Some(BTreeMap::new()))
} else {
let mk = master_key
.ok_or_else(|| anyhow::anyhow!("master key required to decrypt secrets"))?;
let mut map = BTreeMap::new();
let mut type_map = BTreeMap::new();
for f in fields {
let decrypted = crypto::decrypt_json(mk, &f.encrypted)?;
map.insert(f.name.clone(), decrypted);
type_map.insert(f.name.clone(), f.secret_type.clone());
}
Some(map)
(Some(map), Some(type_map))
}
};
@@ -70,6 +72,7 @@ pub async fn export(
tags: entry.tags.clone(),
metadata: entry.metadata.clone(),
secrets,
secret_types,
});
}

View File

@@ -1,5 +1,6 @@
use anyhow::Result;
use sqlx::PgPool;
use std::collections::HashMap;
use uuid::Uuid;
use crate::models::ExportFormat;
@@ -80,6 +81,11 @@ pub async fn run(
let secret_entries = build_secret_entries(entry.secrets.as_ref());
let meta_entries = build_meta_entries(&entry.metadata);
let secret_types_map: HashMap<String, String> = entry
.secret_types
.as_ref()
.map(|m| m.iter().map(|(k, v)| (k.clone(), v.clone())).collect())
.unwrap_or_default();
match add_run(
pool,
@@ -91,7 +97,7 @@ pub async fn run(
tags: &entry.tags,
meta_entries: &meta_entries,
secret_entries: &secret_entries,
secret_types: &Default::default(),
secret_types: &secret_types_map,
link_secret_names: &[],
user_id: params.user_id,
},

View File

@@ -30,58 +30,61 @@ pub async fn run(
folder: String,
#[sqlx(rename = "type")]
entry_type: String,
name: String,
version: i64,
action: String,
tags: Vec<String>,
metadata: Value,
}
let live_entry: Option<EntryWriteRow> = if let Some(uid) = user_id {
let mut tx = pool.begin().await?;
let live: Option<EntryWriteRow> = if let Some(uid) = user_id {
sqlx::query_as(
"SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \
WHERE id = $1 AND user_id = $2 AND deleted_at IS NULL",
WHERE id = $1 AND user_id = $2 AND deleted_at IS NULL FOR UPDATE",
)
.bind(entry_id)
.bind(uid)
.fetch_optional(pool)
.fetch_optional(&mut *tx)
.await?
} else {
sqlx::query_as(
"SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \
WHERE id = $1 AND user_id IS NULL AND deleted_at IS NULL",
WHERE id = $1 AND user_id IS NULL AND deleted_at IS NULL FOR UPDATE",
)
.bind(entry_id)
.fetch_optional(pool)
.fetch_optional(&mut *tx)
.await?
};
let live_entry = live_entry.ok_or(AppError::NotFoundEntry)?;
let lr = live.ok_or(AppError::NotFoundEntry)?;
let snap: Option<EntryHistoryRow> = if let Some(ver) = to_version {
sqlx::query_as(
"SELECT folder, type, version, action, tags, metadata \
"SELECT folder, type, name, version, action, tags, metadata \
FROM entries_history \
WHERE entry_id = $1 AND version = $2 ORDER BY id ASC LIMIT 1",
)
.bind(entry_id)
.bind(ver)
.fetch_optional(pool)
.fetch_optional(&mut *tx)
.await?
} else {
sqlx::query_as(
"SELECT folder, type, version, action, tags, metadata \
"SELECT folder, type, name, version, action, tags, metadata \
FROM entries_history \
WHERE entry_id = $1 ORDER BY id DESC LIMIT 1",
)
.bind(entry_id)
.fetch_optional(pool)
.fetch_optional(&mut *tx)
.await?
};
let snap = snap.ok_or_else(|| {
anyhow::anyhow!(
"No history found for entry '{}'{}.",
live_entry.name,
lr.name,
to_version
.map(|v| format!(" at version {}", v))
.unwrap_or_default()
@@ -91,17 +94,7 @@ pub async fn run(
let snap_secret_snapshot = db::entry_secret_snapshot_from_metadata(&snap.metadata);
let snap_metadata = db::strip_secret_snapshot_from_metadata(&snap.metadata);
let mut tx = pool.begin().await?;
let live: Option<EntryWriteRow> = sqlx::query_as(
"SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \
WHERE id = $1 AND deleted_at IS NULL FOR UPDATE",
)
.bind(entry_id)
.fetch_optional(&mut *tx)
.await?;
let live_entry_id = if let Some(ref lr) = live {
let live_entry_id = {
let history_metadata =
match db::metadata_with_secret_snapshot(&mut tx, lr.id, &lr.metadata).await {
Ok(v) => v,
@@ -168,8 +161,8 @@ pub async fn run(
)
.bind(&snap.folder)
.bind(&snap.entry_type)
.bind(&live_entry.name)
.bind(&live_entry.notes)
.bind(&snap.name)
.bind(&lr.notes)
.bind(&snap.tags)
.bind(&snap_metadata)
.bind(lr.id)
@@ -177,8 +170,6 @@ pub async fn run(
.await?;
lr.id
} else {
return Err(AppError::NotFoundEntry.into());
};
if let Some(secret_snapshot) = snap_secret_snapshot {
@@ -191,7 +182,7 @@ pub async fn run(
"rollback",
&snap.folder,
&snap.entry_type,
&live_entry.name,
&snap.name,
serde_json::json!({
"entry_id": entry_id,
"restored_version": snap.version,
@@ -203,7 +194,7 @@ pub async fn run(
tx.commit().await?;
Ok(RollbackResult {
name: live_entry.name,
name: snap.name,
folder: snap.folder,
entry_type: snap.entry_type,
restored_version: snap.version,