Bump version for the N:N entry_secrets data model and related MCP/Web changes. Remove superseded SQL migration artifacts; rely on auto-migrate. Add structured errors, taxonomy normalization, and web i18n helpers. Made-with: Cursor
459 lines · 14 KiB · Rust
use anyhow::Result;
|
|
use serde_json::{Map, Value};
|
|
use sqlx::PgPool;
|
|
use uuid::Uuid;
|
|
|
|
use crate::crypto;
|
|
use crate::db;
|
|
use crate::error::{AppError, DbErrorContext};
|
|
use crate::models::{EntryRow, EntryWriteRow};
|
|
use crate::service::add::{
|
|
collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path,
|
|
parse_kv, remove_path,
|
|
};
|
|
use crate::taxonomy;
|
|
|
|
/// Serializable summary of a completed `update` operation, echoed back to the caller.
#[derive(Debug, serde::Serialize)]
pub struct UpdateResult {
    // Name of the entry that was updated.
    pub name: String,
    // Folder the matched entry lives in.
    pub folder: String,
    // Serialized as "type" ("type" is a reserved word in Rust).
    #[serde(rename = "type")]
    pub entry_type: String,
    // Tags requested for addition (echoed from the request).
    pub add_tags: Vec<String>,
    // Tags requested for removal (echoed from the request).
    pub remove_tags: Vec<String>,
    // Metadata key paths that were set.
    pub meta_keys: Vec<String>,
    // Metadata key paths that were removed.
    pub remove_meta: Vec<String>,
    // Secret field key paths that were set.
    pub secret_keys: Vec<String>,
    // Secret field key paths that were removed.
    pub remove_secrets: Vec<String>,
}
|
|
|
|
/// Inputs for [`run`]: identifies the target entry and carries the deltas to apply.
pub struct UpdateParams<'a> {
    /// Name of the entry to update.
    pub name: &'a str,
    /// Optional folder for disambiguation when multiple entries share the same name.
    pub folder: Option<&'a str>,
    /// Replacement notes; `None` keeps the stored notes.
    pub notes: Option<&'a str>,
    /// Tags to add (already-present tags are skipped).
    pub add_tags: &'a [String],
    /// Tags to remove.
    pub remove_tags: &'a [String],
    /// Metadata entries, each parsed by `parse_kv` into a (key path, value) pair.
    pub meta_entries: &'a [String],
    /// Metadata key paths to remove (parsed by `parse_key_path`).
    pub remove_meta: &'a [String],
    /// Secret entries, each parsed by `parse_kv`, then encrypted before storage.
    pub secret_entries: &'a [String],
    /// Per-field secret types; fields without an entry default to "text".
    pub secret_types: &'a std::collections::HashMap<String, String>,
    /// Secret field key paths to remove.
    pub remove_secrets: &'a [String],
    /// Owning user; `None` targets rows with `user_id IS NULL`.
    pub user_id: Option<Uuid>,
}
|
|
|
|
/// Update an existing entry (and its linked secret fields) by name, inside a
/// single transaction.
///
/// Flow: lock the matching entry row with `FOR UPDATE`, snapshot its history
/// (best effort), apply tag/metadata/notes changes with an optimistic version
/// check, upsert/remove the encrypted secret fields, write an audit record,
/// then commit.
///
/// # Errors
/// - [`AppError::NotFoundEntry`] when no entry matches the name (and optional folder).
/// - An ad-hoc "Ambiguous" error when several entries share the name and no
///   folder was given to disambiguate.
/// - [`AppError::ConcurrentModification`] when the optimistic version check on
///   the UPDATE matches zero rows.
pub async fn run(
    pool: &PgPool,
    params: UpdateParams<'_>,
    master_key: &[u8; 32],
) -> Result<UpdateResult> {
    let mut tx = pool.begin().await?;

    // Fetch matching rows with FOR UPDATE; use folder when provided to resolve ambiguity.
    // Four explicit queries because the bound parameters differ per
    // (user scope, folder-provided) combination.
    let rows: Vec<EntryRow> = if let Some(uid) = params.user_id {
        if let Some(folder) = params.folder {
            sqlx::query_as(
                "SELECT id, version, folder, type, tags, metadata, notes FROM entries \
                 WHERE user_id = $1 AND folder = $2 AND name = $3 FOR UPDATE",
            )
            .bind(uid)
            .bind(folder)
            .bind(params.name)
            .fetch_all(&mut *tx)
            .await?
        } else {
            sqlx::query_as(
                "SELECT id, version, folder, type, tags, metadata, notes FROM entries \
                 WHERE user_id = $1 AND name = $2 FOR UPDATE",
            )
            .bind(uid)
            .bind(params.name)
            .fetch_all(&mut *tx)
            .await?
        }
    } else if let Some(folder) = params.folder {
        sqlx::query_as(
            "SELECT id, version, folder, type, tags, metadata, notes FROM entries \
             WHERE user_id IS NULL AND folder = $1 AND name = $2 FOR UPDATE",
        )
        .bind(folder)
        .bind(params.name)
        .fetch_all(&mut *tx)
        .await?
    } else {
        sqlx::query_as(
            "SELECT id, version, folder, type, tags, metadata, notes FROM entries \
             WHERE user_id IS NULL AND name = $1 FOR UPDATE",
        )
        .bind(params.name)
        .fetch_all(&mut *tx)
        .await?
    };

    // Exactly one match is required; zero or many are hard errors (after rollback).
    let row = match rows.len() {
        0 => {
            tx.rollback().await?;
            return Err(AppError::NotFoundEntry.into());
        }
        1 => rows.into_iter().next().unwrap(),
        _ => {
            tx.rollback().await?;
            let folders: Vec<&str> = rows.iter().map(|r| r.folder.as_str()).collect();
            anyhow::bail!(
                "Ambiguous: {} entries named '{}' found in folders: [{}]. \
                 Specify 'folder' to disambiguate.",
                rows.len(),
                params.name,
                folders.join(", ")
            )
        }
    };

    // Best-effort snapshot of the pre-update entry state; failure is logged but
    // does not abort the update.
    if let Err(e) = db::snapshot_entry_history(
        &mut tx,
        db::EntrySnapshotParams {
            entry_id: row.id,
            user_id: params.user_id,
            folder: &row.folder,
            entry_type: &row.entry_type,
            name: params.name,
            version: row.version,
            action: "update",
            tags: &row.tags,
            metadata: &row.metadata,
        },
    )
    .await
    {
        tracing::warn!(error = %e, "failed to snapshot entry history before update");
    }

    // Tags: append new ones (skipping those already present), then drop removals.
    let mut tags: Vec<String> = row.tags.clone();
    for t in params.add_tags {
        if !tags.contains(t) {
            tags.push(t.clone());
        }
    }
    tags.retain(|t| !params.remove_tags.contains(t));

    // Metadata: start from the stored JSON object (non-objects are treated as
    // empty), apply the key/value inserts, then remove the requested key paths.
    let mut meta_map: Map<String, Value> = match row.metadata.clone() {
        Value::Object(m) => m,
        _ => Map::new(),
    };
    for entry in params.meta_entries {
        let (path, value) = parse_kv(entry)?;
        insert_path(&mut meta_map, &path, value)?;
    }
    for key in params.remove_meta {
        let path = parse_key_path(key)?;
        remove_path(&mut meta_map, &path)?;
    }
    let metadata = Value::Object(meta_map);

    // Notes are replaced only when provided; otherwise the stored value is kept.
    let new_notes = params.notes.unwrap_or(&row.notes);

    // Optimistic concurrency: the WHERE clause pins the version we read above.
    let result = sqlx::query(
        "UPDATE entries SET tags = $1, metadata = $2, notes = $3, \
         version = version + 1, updated_at = NOW() \
         WHERE id = $4 AND version = $5",
    )
    .bind(&tags)
    .bind(&metadata)
    .bind(new_notes)
    .bind(row.id)
    .bind(row.version)
    .execute(&mut *tx)
    .await?;

    if result.rows_affected() == 0 {
        tx.rollback().await?;
        return Err(AppError::ConcurrentModification.into());
    }

    // Secret fields: each entry is parsed, flattened into per-field values,
    // encrypted, and then either updates the already-linked secret of that name
    // or inserts a new secret plus its entry_secrets link.
    for entry in params.secret_entries {
        let (path, field_value) = parse_kv(entry)?;
        let flat = flatten_json_fields("", &{
            let mut m = Map::new();
            insert_path(&mut m, &path, field_value)?;
            Value::Object(m)
        });

        for (field_name, fv) in &flat {
            let encrypted = crypto::encrypt_json(master_key, fv)?;

            #[derive(sqlx::FromRow)]
            struct ExistingField {
                id: Uuid,
                encrypted: Vec<u8>,
            }
            // Look up a secret of this name already linked to the entry.
            let ef: Option<ExistingField> = sqlx::query_as(
                "SELECT s.id, s.encrypted \
                 FROM entry_secrets es \
                 JOIN secrets s ON s.id = es.secret_id \
                 WHERE es.entry_id = $1 AND s.name = $2",
            )
            .bind(row.id)
            .bind(field_name)
            .fetch_optional(&mut *tx)
            .await?;

            // Best-effort snapshot of the old ciphertext before overwriting it.
            if let Some(ef) = &ef
                && let Err(e) = db::snapshot_secret_history(
                    &mut tx,
                    db::SecretSnapshotParams {
                        secret_id: ef.id,
                        name: field_name,
                        encrypted: &ef.encrypted,
                        action: "update",
                    },
                )
                .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field history");
            }

            if let Some(ef) = ef {
                // Existing field: replace ciphertext and bump the secret's version.
                sqlx::query(
                    "UPDATE secrets SET encrypted = $1, version = version + 1, updated_at = NOW() WHERE id = $2",
                )
                .bind(&encrypted)
                .bind(ef.id)
                .execute(&mut *tx)
                .await?;
            } else {
                // New field: type comes from the caller-supplied map, defaulting to "text".
                let secret_type = params
                    .secret_types
                    .get(field_name)
                    .map(|s| s.as_str())
                    .unwrap_or("text");
                let secret_id: Uuid = sqlx::query_scalar(
                    "INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
                )
                .bind(params.user_id)
                .bind(field_name.to_string())
                .bind(secret_type)
                .bind(&encrypted)
                .fetch_one(&mut *tx)
                .await
                .map_err(|e| AppError::from_db_error(e, DbErrorContext::secret_name(field_name)))?;
                sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
                    .bind(row.id)
                    .bind(secret_id)
                    .execute(&mut *tx)
                    .await?;
            }
        }
    }

    // Secret removals: unlink from this entry, then delete the secret row itself
    // only when no other entry still references it (N:N garbage collection).
    for key in params.remove_secrets {
        let path = parse_key_path(key)?;
        let field_name = path.join(".");

        #[derive(sqlx::FromRow)]
        struct FieldToDelete {
            id: Uuid,
            encrypted: Vec<u8>,
        }
        let field: Option<FieldToDelete> = sqlx::query_as(
            "SELECT s.id, s.encrypted \
             FROM entry_secrets es \
             JOIN secrets s ON s.id = es.secret_id \
             WHERE es.entry_id = $1 AND s.name = $2",
        )
        .bind(row.id)
        .bind(&field_name)
        .fetch_optional(&mut *tx)
        .await?;

        if let Some(f) = field {
            // Best-effort snapshot before the delete.
            if let Err(e) = db::snapshot_secret_history(
                &mut tx,
                db::SecretSnapshotParams {
                    secret_id: f.id,
                    name: &field_name,
                    encrypted: &f.encrypted,
                    action: "delete",
                },
            )
            .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field history before delete");
            }
            sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
                .bind(row.id)
                .bind(f.id)
                .execute(&mut *tx)
                .await?;
            sqlx::query(
                "DELETE FROM secrets s \
                 WHERE s.id = $1 \
                 AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
            )
            .bind(f.id)
            .execute(&mut *tx)
            .await?;
        }
    }

    // Key-path summaries, reused for both the audit payload and the returned result.
    let meta_keys = collect_key_paths(params.meta_entries)?;
    let remove_meta_keys = collect_field_paths(params.remove_meta)?;
    let secret_keys = collect_key_paths(params.secret_entries)?;
    let remove_secret_keys = collect_field_paths(params.remove_secrets)?;

    // Audit record; note the folder/type positions are passed as empty strings
    // here (unlike `update_fields_by_id`, which fills them in).
    crate::audit::log_tx(
        &mut tx,
        params.user_id,
        "update",
        "",
        "",
        params.name,
        serde_json::json!({
            "add_tags": params.add_tags,
            "remove_tags": params.remove_tags,
            "meta_keys": meta_keys,
            "remove_meta": remove_meta_keys,
            "secret_keys": secret_keys,
            "remove_secrets": remove_secret_keys,
        }),
    )
    .await;

    tx.commit().await?;

    Ok(UpdateResult {
        name: params.name.to_string(),
        folder: row.folder.clone(),
        entry_type: row.entry_type.clone(),
        add_tags: params.add_tags.to_vec(),
        remove_tags: params.remove_tags.to_vec(),
        meta_keys,
        remove_meta: remove_meta_keys,
        secret_keys,
        remove_secrets: remove_secret_keys,
    })
}
|
|
|
|
/// Update non-sensitive entry columns by primary key (multi-tenant: `user_id` must match).
/// Does not read or modify `secrets` rows.
pub struct UpdateEntryFieldsByIdParams<'a> {
    // Target folder; `update_fields_by_id` rejects values over 128 characters.
    pub folder: &'a str,
    // Entry type; max 64 characters, normalized via taxonomy before the write.
    pub entry_type: &'a str,
    // Entry name; max 256 characters.
    pub name: &'a str,
    // Replacement notes text.
    pub notes: &'a str,
    // Replacement tag list.
    pub tags: &'a [String],
    // Replacement metadata; non-object values are treated as an empty object.
    pub metadata: &'a serde_json::Value,
}
|
|
|
|
/// Update an entry's non-sensitive columns by id, scoped to `user_id`.
///
/// Flow: validate field lengths, lock the row with `FOR UPDATE`, snapshot its
/// history (best effort), normalize the type/metadata via taxonomy rules, then
/// write all columns with an optimistic version check; the change is audited
/// and committed in one transaction.
///
/// # Errors
/// - Ad-hoc length-validation errors for `folder` / `type` / `name`.
/// - [`AppError::NotFoundEntry`] when no row matches `(entry_id, user_id)`.
/// - [`AppError::ConflictEntryName`] on a unique-constraint violation (SQLSTATE 23505).
/// - [`AppError::ConcurrentModification`] when the version check matches no row.
pub async fn update_fields_by_id(
    pool: &PgPool,
    entry_id: Uuid,
    user_id: Uuid,
    params: UpdateEntryFieldsByIdParams<'_>,
) -> Result<()> {
    // Cheap input validation before opening a transaction.
    // chars().count() limits by Unicode scalar values, not bytes.
    if params.folder.chars().count() > 128 {
        anyhow::bail!("folder must be at most 128 characters");
    }
    if params.entry_type.chars().count() > 64 {
        anyhow::bail!("type must be at most 64 characters");
    }
    if params.name.chars().count() > 256 {
        anyhow::bail!("name must be at most 256 characters");
    }

    let mut tx = pool.begin().await?;

    // Lock the target row; the user_id predicate enforces tenant isolation.
    let row: Option<EntryWriteRow> = sqlx::query_as(
        "SELECT id, version, folder, type, name, tags, metadata, notes FROM entries \
         WHERE id = $1 AND user_id = $2 FOR UPDATE",
    )
    .bind(entry_id)
    .bind(user_id)
    .fetch_optional(&mut *tx)
    .await?;

    let row = match row {
        Some(r) => r,
        None => {
            tx.rollback().await?;
            return Err(AppError::NotFoundEntry.into());
        }
    };

    // Best-effort snapshot of the pre-update state; failure is logged, not fatal.
    if let Err(e) = db::snapshot_entry_history(
        &mut tx,
        db::EntrySnapshotParams {
            entry_id: row.id,
            user_id: Some(user_id),
            folder: &row.folder,
            entry_type: &row.entry_type,
            name: &row.name,
            version: row.version,
            action: "update",
            tags: &row.tags,
            metadata: &row.metadata,
        },
    )
    .await
    {
        tracing::warn!(error = %e, "failed to snapshot entry history before web update");
    }

    // Non-object metadata is replaced with an empty object before normalization.
    let mut metadata_map = match params.metadata {
        Value::Object(m) => m.clone(),
        _ => Map::new(),
    };
    // Taxonomy normalization may rewrite both the type string and the metadata map.
    let normalized_type =
        taxonomy::normalize_entry_type_and_metadata(params.entry_type, &mut metadata_map);
    let normalized_metadata = Value::Object(metadata_map);

    // Optimistic concurrency via the version predicate. A unique violation
    // (23505) is surfaced as a name conflict; any other DB error becomes Internal.
    let res = sqlx::query(
        "UPDATE entries SET folder = $1, type = $2, name = $3, notes = $4, tags = $5, metadata = $6, \
         version = version + 1, updated_at = NOW() \
         WHERE id = $7 AND version = $8",
    )
    .bind(params.folder)
    .bind(&normalized_type)
    .bind(params.name)
    .bind(params.notes)
    .bind(params.tags)
    .bind(&normalized_metadata)
    .bind(row.id)
    .bind(row.version)
    .execute(&mut *tx)
    .await
    .map_err(|e| {
        if let sqlx::Error::Database(ref d) = e
            && d.code().as_deref() == Some("23505")
        {
            return AppError::ConflictEntryName {
                folder: params.folder.to_string(),
                name: params.name.to_string(),
            };
        }
        AppError::Internal(e.into())
    })?;

    if res.rows_affected() == 0 {
        tx.rollback().await?;
        return Err(AppError::ConcurrentModification.into());
    }

    // Audit record with the normalized type and the list of touched columns.
    crate::audit::log_tx(
        &mut tx,
        Some(user_id),
        "update",
        params.folder,
        &normalized_type,
        params.name,
        serde_json::json!({
            "source": "web",
            "entry_id": entry_id,
            "fields": ["folder", "type", "name", "notes", "tags", "metadata"],
        }),
    )
    .await;

    tx.commit().await?;
    Ok(())
}
|