Bump version for the N:N entry_secrets data model and related MCP/Web changes. Remove superseded SQL migration artifacts and rely on auto-migration instead. Add structured errors, taxonomy normalization, and web i18n helpers. Made-with: Cursor
782 lines
25 KiB
Rust
782 lines
25 KiB
Rust
use anyhow::Result;
|
|
use serde_json::{Map, Value};
|
|
use sqlx::PgPool;
|
|
use std::collections::{BTreeSet, HashSet};
|
|
use std::fs;
|
|
use uuid::Uuid;
|
|
|
|
use crate::crypto;
|
|
use crate::db;
|
|
use crate::error::{AppError, DbErrorContext};
|
|
use crate::models::EntryRow;
|
|
use crate::taxonomy;
|
|
|
|
// ── Key/value parsing helpers ─────────────────────────────────────────────────
|
|
|
|
pub fn parse_kv(entry: &str) -> Result<(Vec<String>, Value)> {
|
|
if let Some((key, json_str)) = entry.split_once(":=") {
|
|
let val: Value = serde_json::from_str(json_str).map_err(|e| {
|
|
anyhow::anyhow!(
|
|
"Invalid JSON value for key '{}': {} (use key=value for plain strings)",
|
|
key,
|
|
e
|
|
)
|
|
})?;
|
|
return Ok((parse_key_path(key)?, val));
|
|
}
|
|
|
|
if let Some((key, raw_val)) = entry.split_once('=') {
|
|
let value = if let Some(path) = raw_val.strip_prefix('@') {
|
|
fs::read_to_string(path)
|
|
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?
|
|
} else {
|
|
raw_val.to_string()
|
|
};
|
|
return Ok((parse_key_path(key)?, Value::String(value)));
|
|
}
|
|
|
|
if let Some((key, path)) = entry.split_once('@') {
|
|
let value = fs::read_to_string(path)
|
|
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?;
|
|
return Ok((parse_key_path(key)?, Value::String(value)));
|
|
}
|
|
|
|
anyhow::bail!(
|
|
"Invalid format '{}'. Expected: key=value, key=@file, nested:key@file, or key:=<json>",
|
|
entry
|
|
)
|
|
}
|
|
|
|
pub fn build_json(entries: &[String]) -> Result<Value> {
|
|
let mut map = Map::new();
|
|
for entry in entries {
|
|
let (path, value) = parse_kv(entry)?;
|
|
insert_path(&mut map, &path, value)?;
|
|
}
|
|
Ok(Value::Object(map))
|
|
}
|
|
|
|
/// Render a key path as its canonical colon-separated form, e.g.
/// `["credentials", "content"]` -> `"credentials:content"`.
pub fn key_path_to_string(path: &[String]) -> String {
    let mut rendered = String::new();
    for (i, segment) in path.iter().enumerate() {
        if i > 0 {
            rendered.push(':');
        }
        rendered.push_str(segment);
    }
    rendered
}
|
|
|
|
pub fn collect_key_paths(entries: &[String]) -> Result<Vec<String>> {
|
|
entries
|
|
.iter()
|
|
.map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path)))
|
|
.collect()
|
|
}
|
|
|
|
pub fn collect_field_paths(entries: &[String]) -> Result<Vec<String>> {
|
|
entries
|
|
.iter()
|
|
.map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path)))
|
|
.collect()
|
|
}
|
|
|
|
pub fn parse_key_path(key: &str) -> Result<Vec<String>> {
|
|
let path: Vec<String> = key
|
|
.split(':')
|
|
.map(str::trim)
|
|
.map(ToOwned::to_owned)
|
|
.collect();
|
|
|
|
if path.is_empty() || path.iter().any(|part| part.is_empty()) {
|
|
anyhow::bail!(
|
|
"Invalid key path '{}'. Use non-empty segments like 'credentials:content'.",
|
|
key
|
|
);
|
|
}
|
|
Ok(path)
|
|
}
|
|
|
|
pub fn insert_path(map: &mut Map<String, Value>, path: &[String], value: Value) -> Result<()> {
|
|
if path.is_empty() {
|
|
anyhow::bail!("Key path cannot be empty");
|
|
}
|
|
if path.len() == 1 {
|
|
map.insert(path[0].clone(), value);
|
|
return Ok(());
|
|
}
|
|
let head = path[0].clone();
|
|
let tail = &path[1..];
|
|
match map.entry(head.clone()) {
|
|
serde_json::map::Entry::Vacant(entry) => {
|
|
let mut child = Map::new();
|
|
insert_path(&mut child, tail, value)?;
|
|
entry.insert(Value::Object(child));
|
|
}
|
|
serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() {
|
|
Value::Object(child) => insert_path(child, tail, value)?,
|
|
_ => {
|
|
anyhow::bail!(
|
|
"Cannot set nested key '{}' because '{}' is already a non-object value",
|
|
key_path_to_string(path),
|
|
head
|
|
);
|
|
}
|
|
},
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
pub fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Result<bool> {
|
|
if path.is_empty() {
|
|
anyhow::bail!("Key path cannot be empty");
|
|
}
|
|
if path.len() == 1 {
|
|
return Ok(map.remove(&path[0]).is_some());
|
|
}
|
|
let Some(value) = map.get_mut(&path[0]) else {
|
|
return Ok(false);
|
|
};
|
|
let Value::Object(child) = value else {
|
|
return Ok(false);
|
|
};
|
|
let removed = remove_path(child, &path[1..])?;
|
|
if child.is_empty() {
|
|
map.remove(&path[0]);
|
|
}
|
|
Ok(removed)
|
|
}
|
|
|
|
pub fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> {
|
|
match value {
|
|
Value::Object(map) => {
|
|
let mut out = Vec::new();
|
|
for (k, v) in map {
|
|
let full_key = if prefix.is_empty() {
|
|
k.clone()
|
|
} else {
|
|
format!("{}.{}", prefix, k)
|
|
};
|
|
out.extend(flatten_json_fields(&full_key, v));
|
|
}
|
|
out
|
|
}
|
|
other => vec![(prefix.to_string(), other.clone())],
|
|
}
|
|
}
|
|
|
|
// ── AddResult ─────────────────────────────────────────────────────────────────
|
|
|
|
/// Serializable summary of a completed add/upsert, returned to CLI, MCP,
/// and web callers.
#[derive(Debug, serde::Serialize)]
pub struct AddResult {
    /// Entry name (unique together with `folder` and the user).
    pub name: String,
    /// Folder the entry lives in.
    pub folder: String,
    /// Normalized entry type; serialized under the JSON key `"type"`.
    #[serde(rename = "type")]
    pub entry_type: String,
    /// Tags applied to the entry.
    pub tags: Vec<String>,
    /// Canonical `a:b` key paths of the metadata entries that were written.
    pub meta_keys: Vec<String>,
    /// Canonical `a:b` key paths of the secret entries that were written.
    pub secret_keys: Vec<String>,
}
|
|
|
|
/// Borrowed inputs for [`run`]; everything references the caller's data.
pub struct AddParams<'a> {
    /// Entry name.
    pub name: &'a str,
    /// Folder the entry belongs to.
    pub folder: &'a str,
    /// Requested entry type; normalized by the taxonomy module before storage.
    pub entry_type: &'a str,
    /// Free-form notes stored on the entry row.
    pub notes: &'a str,
    /// Tags to apply to the entry.
    pub tags: &'a [String],
    /// Metadata entries in `key=value` / `key:=json` / `key@file` form.
    pub meta_entries: &'a [String],
    /// Secret entries in the same forms; values are encrypted before storage.
    pub secret_entries: &'a [String],
    /// Optional per-secret type override keyed by flattened (dotted) secret
    /// name; secrets without an override default to type "text".
    pub secret_types: &'a std::collections::HashMap<String, String>,
    /// Names of pre-existing secrets to link to this entry by unique name.
    pub link_secret_names: &'a [String],
    /// Optional user_id for multi-user isolation (None = single-user CLI mode)
    pub user_id: Option<Uuid>,
}
|
|
|
|
/// Create or update ("upsert") an entry together with its metadata and
/// secret fields, all inside a single database transaction.
///
/// High-level flow:
/// 1. Build metadata/secret JSON from the CLI-style entries and normalize the
///    entry type (normalization may also rewrite the metadata map).
/// 2. Validate that `link_secret_names` do not collide with secrets created
///    in this same call.
/// 3. Upsert the entry row keyed on `(user_id, folder, name)`, bumping
///    `version` on conflict.
/// 4. Take best-effort history snapshots (failures are logged, never fatal).
/// 5. For a pre-existing entry, unlink all of its secret rows and
///    garbage-collect secrets no longer referenced by any entry.
/// 6. Encrypt and insert each new secret field, then link pre-existing
///    secrets by (unique) name.
/// 7. Write an audit record and commit.
///
/// # Errors
/// Invalid entry syntax, a missing or ambiguous linked secret name, or a
/// database failure — including `AppError::ConflictSecretName` when a secret
/// name already exists for this user (see the insert loop below).
pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) -> Result<AddResult> {
    // build_json always produces Value::Object, so this destructure cannot fail.
    let Value::Object(mut metadata_map) = build_json(params.meta_entries)? else {
        unreachable!("build_json always returns a JSON object");
    };
    let normalized_entry_type =
        taxonomy::normalize_entry_type_and_metadata(params.entry_type, &mut metadata_map);
    let metadata = Value::Object(metadata_map);
    let secret_json = build_json(params.secret_entries)?;
    let meta_keys = collect_key_paths(params.meta_entries)?;
    let secret_keys = collect_key_paths(params.secret_entries)?;
    // Secrets are stored flat: nested JSON becomes dotted field names.
    let flat_fields = flatten_json_fields("", &secret_json);
    let new_secret_names: BTreeSet<String> =
        flat_fields.iter().map(|(name, _)| name.clone()).collect();
    // Reject link names that duplicate a secret being created right now.
    let link_secret_names =
        validate_link_secret_names(params.link_secret_names, &new_secret_names)?;

    let mut tx = pool.begin().await?;

    // Fetch existing entry by (user_id, folder, name) — the natural unique key
    let existing: Option<EntryRow> = if let Some(uid) = params.user_id {
        sqlx::query_as(
            "SELECT id, version, folder, type, tags, metadata, notes FROM entries \
             WHERE user_id = $1 AND folder = $2 AND name = $3",
        )
        .bind(uid)
        .bind(params.folder)
        .bind(params.name)
        .fetch_optional(&mut *tx)
        .await?
    } else {
        // Single-user CLI mode: rows are stored with a NULL user_id.
        sqlx::query_as(
            "SELECT id, version, folder, type, tags, metadata, notes FROM entries \
             WHERE user_id IS NULL AND folder = $1 AND name = $2",
        )
        .bind(params.folder)
        .bind(params.name)
        .fetch_optional(&mut *tx)
        .await?
    };

    // Snapshot the pre-change state of an existing entry. Best effort only:
    // a snapshot failure is logged and does not abort the upsert.
    // NOTE(review): this snapshot records the *new* normalized entry type
    // alongside the *old* version/tags/metadata — confirm that mix is
    // intended rather than using the existing row's stored type.
    if let Some(ref ex) = existing
        && let Err(e) = db::snapshot_entry_history(
            &mut tx,
            db::EntrySnapshotParams {
                entry_id: ex.id,
                user_id: params.user_id,
                folder: params.folder,
                entry_type: &normalized_entry_type,
                name: params.name,
                version: ex.version,
                action: "add",
                tags: &ex.tags,
                metadata: &ex.metadata,
            },
        )
        .await
    {
        tracing::warn!(error = %e, "failed to snapshot entry history before upsert");
    }

    // Upsert the entry row. The ON CONFLICT targets match the two partial
    // unique indexes (user-scoped vs NULL-user), and `version` increments on
    // every conflicting update.
    let entry_id: Uuid = if let Some(uid) = params.user_id {
        sqlx::query_scalar(
            r#"INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata, version, updated_at)
               VALUES ($1, $2, $3, $4, $5, $6, $7, 1, NOW())
               ON CONFLICT (user_id, folder, name) WHERE user_id IS NOT NULL
               DO UPDATE SET
                   folder = EXCLUDED.folder,
                   type = EXCLUDED.type,
                   notes = EXCLUDED.notes,
                   tags = EXCLUDED.tags,
                   metadata = EXCLUDED.metadata,
                   version = entries.version + 1,
                   updated_at = NOW()
               RETURNING id"#,
        )
        .bind(uid)
        .bind(params.folder)
        .bind(&normalized_entry_type)
        .bind(params.name)
        .bind(params.notes)
        .bind(params.tags)
        .bind(&metadata)
        .fetch_one(&mut *tx)
        .await?
    } else {
        sqlx::query_scalar(
            r#"INSERT INTO entries (folder, type, name, notes, tags, metadata, version, updated_at)
               VALUES ($1, $2, $3, $4, $5, $6, 1, NOW())
               ON CONFLICT (folder, name) WHERE user_id IS NULL
               DO UPDATE SET
                   folder = EXCLUDED.folder,
                   type = EXCLUDED.type,
                   notes = EXCLUDED.notes,
                   tags = EXCLUDED.tags,
                   metadata = EXCLUDED.metadata,
                   version = entries.version + 1,
                   updated_at = NOW()
               RETURNING id"#,
        )
        .bind(params.folder)
        .bind(&normalized_entry_type)
        .bind(params.name)
        .bind(params.notes)
        .bind(params.tags)
        .bind(&metadata)
        .fetch_one(&mut *tx)
        .await?
    };

    // Re-read the post-upsert version (1 for a fresh row, incremented on update).
    let current_entry_version: i64 =
        sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
            .bind(entry_id)
            .fetch_one(&mut *tx)
            .await?;

    // For a brand-new entry, snapshot its initial state (best effort).
    if existing.is_none()
        && let Err(e) = db::snapshot_entry_history(
            &mut tx,
            db::EntrySnapshotParams {
                entry_id,
                user_id: params.user_id,
                folder: params.folder,
                entry_type: &normalized_entry_type,
                name: params.name,
                version: current_entry_version,
                action: "create",
                tags: params.tags,
                metadata: &metadata,
            },
        )
        .await
    {
        tracing::warn!(error = %e, "failed to snapshot entry history on create");
    }

    // Existing entry: replace its secret links wholesale. Snapshot each old
    // secret's ciphertext first, then unlink, then garbage-collect secrets
    // that are no longer referenced by any entry.
    if existing.is_some() {
        #[derive(sqlx::FromRow)]
        struct ExistingField {
            id: Uuid,
            name: String,
            encrypted: Vec<u8>,
        }
        let existing_fields: Vec<ExistingField> = sqlx::query_as(
            "SELECT s.id, s.name, s.encrypted \
             FROM entry_secrets es \
             JOIN secrets s ON s.id = es.secret_id \
             WHERE es.entry_id = $1",
        )
        .bind(entry_id)
        .fetch_all(&mut *tx)
        .await?;

        for f in &existing_fields {
            // Best effort, same as the entry snapshots above.
            if let Err(e) = db::snapshot_secret_history(
                &mut tx,
                db::SecretSnapshotParams {
                    secret_id: f.id,
                    name: &f.name,
                    encrypted: &f.encrypted,
                    action: "add",
                },
            )
            .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field history");
            }
        }

        let orphan_candidates: Vec<Uuid> = existing_fields.iter().map(|f| f.id).collect();

        // Drop all links for this entry; re-created below from the new input.
        sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1")
            .bind(entry_id)
            .execute(&mut *tx)
            .await?;

        // Delete only those previously-linked secrets that no other entry
        // still references (N:N model: secrets may be shared).
        if !orphan_candidates.is_empty() {
            sqlx::query(
                "DELETE FROM secrets s \
                 WHERE s.id = ANY($1) \
                 AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
            )
            .bind(&orphan_candidates)
            .execute(&mut *tx)
            .await?;
        }
    }

    // Insert each new secret field (encrypted client-side with the master key)
    // and link it to the entry. A unique-name violation is surfaced as a
    // structured AppError (ConflictSecretName) rather than a raw DB error.
    for (field_name, field_value) in &flat_fields {
        let encrypted = crypto::encrypt_json(master_key, field_value)?;
        let secret_type = params
            .secret_types
            .get(field_name)
            .map(|s| s.as_str())
            .unwrap_or("text");
        let secret_id: Uuid = sqlx::query_scalar(
            "INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
        )
        .bind(params.user_id)
        .bind(field_name)
        .bind(secret_type)
        .bind(&encrypted)
        .fetch_one(&mut *tx)
        .await
        .map_err(|e| AppError::from_db_error(e, DbErrorContext::secret_name(field_name)))?;
        sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
            .bind(entry_id)
            .bind(secret_id)
            .execute(&mut *tx)
            .await?;
    }

    // Link pre-existing secrets by name. The name must resolve to exactly one
    // secret within the caller's user scope; 0 or >1 matches abort the whole
    // transaction.
    for link_name in &link_secret_names {
        let secret_ids: Vec<Uuid> = if let Some(uid) = params.user_id {
            sqlx::query_scalar("SELECT id FROM secrets WHERE user_id = $1 AND name = $2")
                .bind(uid)
                .bind(link_name)
                .fetch_all(&mut *tx)
                .await?
        } else {
            sqlx::query_scalar("SELECT id FROM secrets WHERE user_id IS NULL AND name = $1")
                .bind(link_name)
                .fetch_all(&mut *tx)
                .await?
        };

        match secret_ids.len() {
            0 => anyhow::bail!("Not found: secret named '{}'", link_name),
            1 => {
                // ON CONFLICT DO NOTHING: linking an already-linked secret is a no-op.
                sqlx::query(
                    "INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
                )
                .bind(entry_id)
                .bind(secret_ids[0])
                .execute(&mut *tx)
                .await?;
            }
            n => anyhow::bail!(
                "Ambiguous: {} secrets named '{}' found. Please deduplicate names first.",
                n,
                link_name
            ),
        }
    }

    // Audit record participates in the same transaction; only key *names*
    // are logged, never secret values.
    crate::audit::log_tx(
        &mut tx,
        params.user_id,
        "add",
        params.folder,
        &normalized_entry_type,
        params.name,
        serde_json::json!({
            "tags": params.tags,
            "meta_keys": meta_keys,
            "secret_keys": secret_keys,
        }),
    )
    .await;

    tx.commit().await?;

    Ok(AddResult {
        name: params.name.to_string(),
        folder: params.folder.to_string(),
        entry_type: normalized_entry_type,
        tags: params.tags.to_vec(),
        meta_keys,
        secret_keys,
    })
}
|
|
|
|
fn validate_link_secret_names(
|
|
link_secret_names: &[String],
|
|
new_secret_names: &BTreeSet<String>,
|
|
) -> Result<Vec<String>> {
|
|
let mut deduped = Vec::new();
|
|
let mut seen = HashSet::new();
|
|
|
|
for raw in link_secret_names {
|
|
let trimmed = raw.trim();
|
|
if trimmed.is_empty() {
|
|
anyhow::bail!("link_secret_names contains an empty name");
|
|
}
|
|
if new_secret_names.contains(trimmed) {
|
|
anyhow::bail!(
|
|
"Conflict: secret '{}' is provided both in secrets/secrets_obj and link_secret_names",
|
|
trimmed
|
|
);
|
|
}
|
|
if seen.insert(trimmed.to_string()) {
|
|
deduped.push(trimmed.to_string());
|
|
}
|
|
}
|
|
|
|
Ok(deduped)
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    //! Unit tests for the kv-parsing helpers, plus opt-in Postgres
    //! integration tests for the linking behavior of [`run`]. The DB tests
    //! self-skip unless SECRETS_DATABASE_URL points at a reachable database.
    use super::*;
    use sqlx::PgPool;
    use std::collections::BTreeSet;

    // `nested:key@file` shorthand: reads the file into a string value under
    // the nested key path.
    #[test]
    fn parse_nested_file_shorthand() {
        use std::io::Write;
        let mut f = tempfile::NamedTempFile::new().unwrap();
        writeln!(f, "line1\nline2").unwrap();
        let path = f.path().to_str().unwrap().to_string();
        let entry = format!("credentials:content@{}", path);
        let (path_parts, value) = parse_kv(&entry).unwrap();
        assert_eq!(key_path_to_string(&path_parts), "credentials:content");
        assert!(matches!(value, Value::String(_)));
    }

    // Nested objects flatten to dotted keys; leaves keep their values.
    #[test]
    fn flatten_json_fields_nested() {
        let v = serde_json::json!({
            "username": "root",
            "credentials": {
                "type": "ssh",
                "content": "pem"
            }
        });
        let mut fields = flatten_json_fields("", &v);
        // Sort for a deterministic assertion order.
        fields.sort_by(|a, b| a.0.cmp(&b.0));
        assert_eq!(fields[0].0, "credentials.content");
        assert_eq!(fields[1].0, "credentials.type");
        assert_eq!(fields[2].0, "username");
    }

    // A link name that matches a newly created secret must be rejected.
    #[test]
    fn validate_link_secret_names_conflict_with_new_secret() {
        let mut new_names = BTreeSet::new();
        new_names.insert("password".to_string());
        let err = validate_link_secret_names(&[String::from("password")], &new_names)
            .expect_err("must fail on overlap");
        assert!(
            err.to_string()
                .contains("provided both in secrets/secrets_obj and link_secret_names")
        );
    }

    // Names are trimmed before de-duplication; first occurrence order is kept.
    #[test]
    fn validate_link_secret_names_dedup_and_trim() {
        let names = vec![
            " shared_key ".to_string(),
            "shared_key".to_string(),
            "runner_token".to_string(),
        ];
        let deduped = validate_link_secret_names(&names, &BTreeSet::new()).unwrap();
        assert_eq!(deduped, vec!["shared_key", "runner_token"]);
    }

    // Returns a migrated pool, or None (with a skip notice) when the
    // integration-test database is unavailable.
    async fn maybe_test_pool() -> Option<PgPool> {
        let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
            eprintln!("skip add linkage tests: SECRETS_DATABASE_URL is not set");
            return None;
        };
        let Ok(pool) = PgPool::connect(&url).await else {
            eprintln!("skip add linkage tests: cannot connect to database");
            return None;
        };
        if let Err(e) = crate::db::migrate(&pool).await {
            eprintln!("skip add linkage tests: migrate failed: {e}");
            return None;
        }
        Some(pool)
    }

    // Deletes single-user (NULL user_id) rows created by a test, matched by
    // the test's unique marker; only unreferenced secrets are removed.
    async fn cleanup_test_rows(pool: &PgPool, marker: &str) -> Result<()> {
        sqlx::query(
            "DELETE FROM entries WHERE user_id IS NULL AND (name LIKE $1 OR folder LIKE $1)",
        )
        .bind(format!("%{marker}%"))
        .execute(pool)
        .await?;
        sqlx::query(
            "DELETE FROM secrets WHERE user_id IS NULL AND name LIKE $1 \
             AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = secrets.id)",
        )
        .bind(format!("%{marker}%"))
        .execute(pool)
        .await?;
        Ok(())
    }

    // Happy path: a single secret with a unique name gets linked to the entry.
    #[tokio::test]
    async fn add_links_existing_secret_by_unique_name() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        // Random marker keeps concurrent test runs from colliding.
        let suffix = Uuid::from_u128(rand::random()).to_string();
        let marker = format!("link_unique_{}", &suffix[..8]);
        let secret_name = format!("{}_secret", marker);
        let entry_name = format!("{}_entry", marker);

        cleanup_test_rows(&pool, &marker).await?;

        let secret_id: Uuid = sqlx::query_scalar(
            "INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2) RETURNING id",
        )
        .bind(&secret_name)
        .bind(vec![1_u8, 2, 3])
        .fetch_one(&pool)
        .await?;

        run(
            &pool,
            AddParams {
                name: &entry_name,
                folder: &marker,
                entry_type: "service",
                notes: "",
                tags: &[],
                meta_entries: &[],
                secret_entries: &[],
                secret_types: &Default::default(),
                link_secret_names: std::slice::from_ref(&secret_name),
                user_id: None,
            },
            &[0_u8; 32],
        )
        .await?;

        // Verify the entry_secrets link row exists for the new entry.
        let linked: bool = sqlx::query_scalar(
            "SELECT EXISTS( \
                 SELECT 1 FROM entry_secrets es \
                 JOIN entries e ON e.id = es.entry_id \
                 WHERE e.user_id IS NULL AND e.name = $1 AND es.secret_id = $2 \
             )",
        )
        .bind(&entry_name)
        .bind(secret_id)
        .fetch_one(&pool)
        .await?;
        assert!(linked);

        cleanup_test_rows(&pool, &marker).await?;
        Ok(())
    }

    // Linking a name with no matching secret must abort the whole add.
    #[tokio::test]
    async fn add_link_secret_name_not_found_fails() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        let suffix = Uuid::from_u128(rand::random()).to_string();
        let marker = format!("link_missing_{}", &suffix[..8]);
        let secret_name = format!("{}_secret", marker);
        let entry_name = format!("{}_entry", marker);

        cleanup_test_rows(&pool, &marker).await?;

        // Note: the secret is intentionally never inserted.
        let err = run(
            &pool,
            AddParams {
                name: &entry_name,
                folder: &marker,
                entry_type: "service",
                notes: "",
                tags: &[],
                meta_entries: &[],
                secret_entries: &[],
                secret_types: &Default::default(),
                link_secret_names: std::slice::from_ref(&secret_name),
                user_id: None,
            },
            &[0_u8; 32],
        )
        .await
        .expect_err("must fail when linked secret is not found");
        assert!(err.to_string().contains("Not found: secret named"));

        cleanup_test_rows(&pool, &marker).await?;
        Ok(())
    }

    // Two secrets sharing a name make the link ambiguous and must fail.
    #[tokio::test]
    async fn add_link_secret_name_ambiguous_fails() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        let suffix = Uuid::from_u128(rand::random()).to_string();
        let marker = format!("link_amb_{}", &suffix[..8]);
        let secret_name = format!("{}_dup_secret", marker);
        let entry_name = format!("{}_entry", marker);

        cleanup_test_rows(&pool, &marker).await?;

        // Insert two secrets with the same name (NULL user scope).
        sqlx::query(
            "INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
        )
        .bind(&secret_name)
        .bind(vec![1_u8])
        .execute(&pool)
        .await?;
        sqlx::query(
            "INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
        )
        .bind(&secret_name)
        .bind(vec![2_u8])
        .execute(&pool)
        .await?;

        let err = run(
            &pool,
            AddParams {
                name: &entry_name,
                folder: &marker,
                entry_type: "service",
                notes: "",
                tags: &[],
                meta_entries: &[],
                secret_entries: &[],
                secret_types: &Default::default(),
                link_secret_names: std::slice::from_ref(&secret_name),
                user_id: None,
            },
            &[0_u8; 32],
        )
        .await
        .expect_err("must fail on ambiguous linked secret name");
        assert!(err.to_string().contains("Ambiguous:"));

        cleanup_test_rows(&pool, &marker).await?;
        Ok(())
    }

    // Creating two secrets with the same name (same user scope) must surface
    // the structured ConflictSecretName error, not a raw DB error.
    #[tokio::test]
    async fn add_duplicate_secret_name_returns_conflict_error() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        let suffix = Uuid::from_u128(rand::random()).to_string();
        let marker = format!("dup_secret_{}", &suffix[..8]);
        let entry_name = format!("{}_entry", marker);
        let secret_name = "shared_token";

        cleanup_test_rows(&pool, &marker).await?;

        // First add succeeds
        run(
            &pool,
            AddParams {
                name: &entry_name,
                folder: &marker,
                entry_type: "service",
                notes: "",
                tags: &[],
                meta_entries: &[],
                secret_entries: &[format!("{}=value1", secret_name)],
                secret_types: &Default::default(),
                link_secret_names: &[],
                user_id: None,
            },
            &[0_u8; 32],
        )
        .await?;

        // Second add with same secret name under same user_id should fail with ConflictSecretName
        let entry_name2 = format!("{}_entry2", marker);
        let err = run(
            &pool,
            AddParams {
                name: &entry_name2,
                folder: &marker,
                entry_type: "service",
                notes: "",
                tags: &[],
                meta_entries: &[],
                secret_entries: &[format!("{}=value2", secret_name)],
                secret_types: &Default::default(),
                link_secret_names: &[],
                user_id: None,
            },
            &[0_u8; 32],
        )
        .await
        .expect_err("must fail on duplicate secret name");

        // The anyhow error chain should wrap the structured AppError variant.
        let app_err = err
            .downcast_ref::<crate::error::AppError>()
            .expect("error should be AppError");
        assert!(
            matches!(app_err, crate::error::AppError::ConflictSecretName { .. }),
            "expected ConflictSecretName, got: {}",
            app_err
        );

        cleanup_test_rows(&pool, &marker).await?;
        Ok(())
    }
}
|