//! Entry deletion service: soft-delete (trash), restore, and permanent purge.
use anyhow::Result;
|
|
use serde_json::json;
|
|
use sqlx::PgPool;
|
|
use uuid::Uuid;
|
|
|
|
use crate::db;
|
|
use crate::error::AppError;
|
|
use crate::models::{EntryRow, EntryWriteRow, SecretFieldRow};
|
|
use crate::service::util::user_scope_condition;
|
|
|
|
/// A single entry reported as deleted (or as would-be-deleted in a dry run).
#[derive(Debug, serde::Serialize)]
pub struct DeletedEntry {
    pub name: String,
    pub folder: String,
    // Serialized as "type"; `type` is a reserved word in Rust.
    #[serde(rename = "type")]
    pub entry_type: String,
}
|
|
|
|
/// Outcome of a delete operation: which entries were (or would be) deleted.
#[derive(Debug, serde::Serialize)]
pub struct DeleteResult {
    /// Entries affected by the operation, in the order they were processed.
    pub deleted: Vec<DeletedEntry>,
    /// True when no writes were performed (preview mode).
    pub dry_run: bool,
}
|
|
|
|
/// A soft-deleted entry as listed in the trash view.
#[derive(Debug, serde::Serialize, sqlx::FromRow)]
pub struct TrashEntry {
    pub id: Uuid,
    pub name: String,
    pub folder: String,
    // Mapped from the SQL column "type" and serialized back out as "type".
    #[serde(rename = "type")]
    #[sqlx(rename = "type")]
    pub entry_type: String,
    /// When the entry was moved to the trash (drives retention-based purge).
    pub deleted_at: chrono::DateTime<chrono::Utc>,
}
|
|
|
|
/// Parameters accepted by [`run`]; exactly one of the two modes applies:
/// single-entry delete (when `name` is set) or filtered bulk delete.
pub struct DeleteParams<'a> {
    /// If set, delete a single entry by name.
    pub name: Option<&'a str>,
    /// Folder filter for bulk delete (also disambiguates single-name delete).
    pub folder: Option<&'a str>,
    /// Type filter for bulk delete.
    pub entry_type: Option<&'a str>,
    /// When true, report what would be deleted without writing anything.
    pub dry_run: bool,
    /// Tenant scope; `None` targets single-user (user_id IS NULL) rows.
    pub user_id: Option<Uuid>,
}

/// Maximum number of entries that can be deleted in a single bulk operation.
/// Prevents accidental mass deletion when filters are too broad.
pub const MAX_BULK_DELETE: usize = 1000;
|
|
|
|
pub async fn list_deleted_entries(
|
|
pool: &PgPool,
|
|
user_id: Uuid,
|
|
limit: u32,
|
|
offset: u32,
|
|
) -> Result<Vec<TrashEntry>> {
|
|
sqlx::query_as(
|
|
"SELECT id, name, folder, type, deleted_at FROM entries \
|
|
WHERE user_id = $1 AND deleted_at IS NOT NULL \
|
|
ORDER BY deleted_at DESC, name ASC LIMIT $2 OFFSET $3",
|
|
)
|
|
.bind(user_id)
|
|
.bind(limit as i64)
|
|
.bind(offset as i64)
|
|
.fetch_all(pool)
|
|
.await
|
|
.map_err(Into::into)
|
|
}
|
|
|
|
pub async fn count_deleted_entries(pool: &PgPool, user_id: Uuid) -> Result<i64> {
|
|
sqlx::query_scalar::<_, i64>(
|
|
"SELECT COUNT(*)::bigint FROM entries WHERE user_id = $1 AND deleted_at IS NOT NULL",
|
|
)
|
|
.bind(user_id)
|
|
.fetch_one(pool)
|
|
.await
|
|
.map_err(Into::into)
|
|
}
|
|
|
|
/// Restore a soft-deleted entry back into the live set.
///
/// # Errors
///
/// - [`AppError::NotFoundEntry`] when `entry_id` is not in this user's trash.
/// - [`AppError::ConflictEntryName`] when a live entry with the same
///   (folder, name) already exists, which restoring would collide with.
pub async fn restore_deleted_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<()> {
    let mut tx = pool.begin().await?;
    // Lock the trashed row so concurrent restore/purge calls serialize on it.
    let row: Option<EntryWriteRow> = sqlx::query_as(
        "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \
         WHERE id = $1 AND user_id = $2 AND deleted_at IS NOT NULL FOR UPDATE",
    )
    .bind(entry_id)
    .bind(user_id)
    .fetch_optional(&mut *tx)
    .await?;

    let row = match row {
        Some(r) => r,
        None => {
            tx.rollback().await?;
            return Err(AppError::NotFoundEntry.into());
        }
    };

    // Refuse to restore on top of a live entry with the same (folder, name);
    // the `id <> $4` guard excludes the row being restored itself.
    let conflict_exists: bool = sqlx::query_scalar(
        "SELECT EXISTS(SELECT 1 FROM entries \
         WHERE user_id = $1 AND folder = $2 AND name = $3 AND deleted_at IS NULL AND id <> $4)",
    )
    .bind(user_id)
    .bind(&row.folder)
    .bind(&row.name)
    .bind(row.id)
    .fetch_one(&mut *tx)
    .await?;
    if conflict_exists {
        tx.rollback().await?;
        return Err(AppError::ConflictEntryName {
            folder: row.folder,
            name: row.name,
        }
        .into());
    }

    // Clearing deleted_at is what brings the entry back to life.
    sqlx::query("UPDATE entries SET deleted_at = NULL, updated_at = NOW() WHERE id = $1")
        .bind(row.id)
        .execute(&mut *tx)
        .await?;

    // Audit record is written inside the same transaction as the restore.
    crate::audit::log_tx(
        &mut tx,
        Some(user_id),
        "restore",
        &row.folder,
        &row.entry_type,
        &row.name,
        json!({ "entry_id": row.id }),
    )
    .await;
    tx.commit().await?;
    Ok(())
}
|
|
|
|
/// Permanently delete (purge) a single trashed entry, ahead of the automatic
/// retention-based purge.
///
/// Only entries already in the trash (`deleted_at IS NOT NULL`) can be purged;
/// otherwise returns [`AppError::NotFoundEntry`]. Secrets that become orphaned
/// are removed by `purge_entry_record`.
pub async fn purge_deleted_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<()> {
    let mut tx = pool.begin().await?;
    // Lock the trashed row so a concurrent restore cannot race the purge.
    let row: Option<EntryWriteRow> = sqlx::query_as(
        "SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \
         WHERE id = $1 AND user_id = $2 AND deleted_at IS NOT NULL FOR UPDATE",
    )
    .bind(entry_id)
    .bind(user_id)
    .fetch_optional(&mut *tx)
    .await?;

    let row = match row {
        Some(r) => r,
        None => {
            tx.rollback().await?;
            return Err(AppError::NotFoundEntry.into());
        }
    };

    purge_entry_record(&mut tx, row.id).await?;
    // Audit record is written inside the same transaction as the purge.
    crate::audit::log_tx(
        &mut tx,
        Some(user_id),
        "purge",
        &row.folder,
        &row.entry_type,
        &row.name,
        json!({ "entry_id": row.id }),
    )
    .await;
    tx.commit().await?;
    Ok(())
}
|
|
|
|
pub async fn purge_expired_deleted_entries(pool: &PgPool) -> Result<u64> {
|
|
#[derive(sqlx::FromRow)]
|
|
struct ExpiredRow {
|
|
id: Uuid,
|
|
}
|
|
|
|
let mut tx = pool.begin().await?;
|
|
let rows: Vec<ExpiredRow> = sqlx::query_as(
|
|
"SELECT id FROM entries \
|
|
WHERE deleted_at IS NOT NULL \
|
|
AND deleted_at < NOW() - INTERVAL '3 months' \
|
|
FOR UPDATE",
|
|
)
|
|
.fetch_all(&mut *tx)
|
|
.await?;
|
|
|
|
for row in &rows {
|
|
purge_entry_record(&mut tx, row.id).await?;
|
|
}
|
|
|
|
tx.commit().await?;
|
|
Ok(rows.len() as u64)
|
|
}
|
|
|
|
/// Delete a single entry by id (multi-tenant: `user_id` must match).
|
|
pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> {
|
|
let mut tx = pool.begin().await?;
|
|
let row: Option<EntryWriteRow> = sqlx::query_as(
|
|
"SELECT id, version, folder, type, name, tags, metadata, notes, deleted_at FROM entries \
|
|
WHERE id = $1 AND user_id = $2 AND deleted_at IS NULL FOR UPDATE",
|
|
)
|
|
.bind(entry_id)
|
|
.bind(user_id)
|
|
.fetch_optional(&mut *tx)
|
|
.await?;
|
|
|
|
let row = match row {
|
|
Some(r) => r,
|
|
None => {
|
|
tx.rollback().await?;
|
|
anyhow::bail!("Entry not found");
|
|
}
|
|
};
|
|
|
|
let folder = row.folder.clone();
|
|
let entry_type = row.entry_type.clone();
|
|
let name = row.name.clone();
|
|
let entry_row: EntryRow = (&row).into();
|
|
|
|
snapshot_and_soft_delete(
|
|
&mut tx,
|
|
&folder,
|
|
&entry_type,
|
|
&name,
|
|
&entry_row,
|
|
Some(user_id),
|
|
)
|
|
.await?;
|
|
crate::audit::log_tx(
|
|
&mut tx,
|
|
Some(user_id),
|
|
"delete",
|
|
&folder,
|
|
&entry_type,
|
|
&name,
|
|
json!({ "source": "web", "entry_id": entry_id }),
|
|
)
|
|
.await;
|
|
tx.commit().await?;
|
|
|
|
Ok(DeleteResult {
|
|
deleted: vec![DeletedEntry {
|
|
name,
|
|
folder,
|
|
entry_type,
|
|
}],
|
|
dry_run: false,
|
|
})
|
|
}
|
|
|
|
pub async fn run(pool: &PgPool, params: DeleteParams<'_>) -> Result<DeleteResult> {
|
|
match params.name {
|
|
Some(name) => delete_one(pool, name, params.folder, params.dry_run, params.user_id).await,
|
|
None => {
|
|
if params.folder.is_none() && params.entry_type.is_none() {
|
|
anyhow::bail!(
|
|
"Bulk delete requires at least one of: name, folder, or type filter."
|
|
);
|
|
}
|
|
delete_bulk(
|
|
pool,
|
|
params.folder,
|
|
params.entry_type,
|
|
params.dry_run,
|
|
params.user_id,
|
|
)
|
|
.await
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Delete (or dry-run) a single entry identified by `name`, optionally scoped
/// to `folder` and to `user_id` (None targets single-user rows).
///
/// Matching semantics, shared by dry-run and real delete:
/// - 0 matches  → empty `DeleteResult` (not an error)
/// - 1 match    → that entry is (or would be) soft-deleted
/// - 2+ matches → ambiguity error listing the candidate folders
async fn delete_one(
    pool: &PgPool,
    name: &str,
    folder: Option<&str>,
    dry_run: bool,
    user_id: Option<Uuid>,
) -> Result<DeleteResult> {
    if dry_run {
        // Dry-run uses the same disambiguation logic as actual delete:
        // - 0 matches → nothing to delete
        // - 1 match → show what would be deleted (with correct folder/type)
        // - 2+ matches → disambiguation error (same as non-dry-run)
        #[derive(sqlx::FromRow)]
        struct DryRunRow {
            folder: String,
            #[sqlx(rename = "type")]
            entry_type: String,
        }

        // Build the WHERE clause dynamically; `idx` tracks the next $n
        // placeholder number and must stay in sync with the bind order below.
        let mut idx = 1i32;
        let user_cond = user_scope_condition(user_id, &mut idx);
        let mut conditions = vec![user_cond];
        if folder.is_some() {
            conditions.push(format!("folder = ${}", idx));
            idx += 1;
        }
        conditions.push(format!("name = ${}", idx));
        let sql = format!(
            "SELECT folder, type FROM entries WHERE {} AND deleted_at IS NULL",
            conditions.join(" AND ")
        );
        // Bind order mirrors condition order: user_id, then folder, then name.
        let mut q = sqlx::query_as::<_, DryRunRow>(&sql);
        if let Some(uid) = user_id {
            q = q.bind(uid);
        }
        if let Some(f) = folder {
            q = q.bind(f);
        }
        q = q.bind(name);
        let rows = q.fetch_all(pool).await?;

        return match rows.len() {
            0 => Ok(DeleteResult {
                deleted: vec![],
                dry_run: true,
            }),
            1 => {
                let row = rows
                    .into_iter()
                    .next()
                    .ok_or_else(|| anyhow::anyhow!("internal: matched row vanished"))?;
                Ok(DeleteResult {
                    deleted: vec![DeletedEntry {
                        name: name.to_string(),
                        folder: row.folder,
                        entry_type: row.entry_type,
                    }],
                    dry_run: true,
                })
            }
            _ => {
                let folders: Vec<&str> = rows.iter().map(|r| r.folder.as_str()).collect();
                anyhow::bail!(
                    "Ambiguous: {} entries named '{}' found in folders: [{}]. \
                     Specify 'folder' to disambiguate.",
                    rows.len(),
                    name,
                    folders.join(", ")
                )
            }
        };
    }

    let mut tx = pool.begin().await?;

    // Fetch matching rows with FOR UPDATE; use folder when provided to resolve ambiguity.
    // Same dynamic WHERE/bind pattern as the dry-run branch above.
    let mut idx = 1i32;
    let user_cond = user_scope_condition(user_id, &mut idx);
    let mut conditions = vec![user_cond];
    if folder.is_some() {
        conditions.push(format!("folder = ${}", idx));
        idx += 1;
    }
    conditions.push(format!("name = ${}", idx));
    let sql = format!(
        "SELECT id, version, folder, type, tags, metadata, notes, name FROM entries \
         WHERE {} AND deleted_at IS NULL FOR UPDATE",
        conditions.join(" AND ")
    );
    let mut q = sqlx::query_as::<_, EntryRow>(&sql);
    if let Some(uid) = user_id {
        q = q.bind(uid);
    }
    if let Some(f) = folder {
        q = q.bind(f);
    }
    q = q.bind(name);
    let rows = q.fetch_all(&mut *tx).await?;

    let row = match rows.len() {
        0 => {
            tx.rollback().await?;
            return Ok(DeleteResult {
                deleted: vec![],
                dry_run: false,
            });
        }
        1 => rows
            .into_iter()
            .next()
            .ok_or_else(|| anyhow::anyhow!("internal: matched row vanished"))?,
        _ => {
            tx.rollback().await?;
            let folders: Vec<&str> = rows.iter().map(|r| r.folder.as_str()).collect();
            anyhow::bail!(
                "Ambiguous: {} entries named '{}' found in folders: [{}]. \
                 Specify 'folder' to disambiguate.",
                rows.len(),
                name,
                folders.join(", ")
            )
        }
    };

    let folder = row.folder.clone();
    let entry_type = row.entry_type.clone();
    snapshot_and_soft_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?;
    // Audit record is written inside the same transaction as the delete.
    crate::audit::log_tx(
        &mut tx,
        user_id,
        "delete",
        &folder,
        &entry_type,
        name,
        json!({}),
    )
    .await;
    tx.commit().await?;

    Ok(DeleteResult {
        deleted: vec![DeletedEntry {
            name: name.to_string(),
            folder,
            entry_type,
        }],
        dry_run: false,
    })
}
|
|
|
|
/// Bulk delete (or dry-run) every live entry matching the folder/type filters
/// within the given user scope.
///
/// Refuses to proceed when more than [`MAX_BULK_DELETE`] entries match.
/// Each deleted entry is snapshotted and audited individually, all within a
/// single transaction.
async fn delete_bulk(
    pool: &PgPool,
    folder: Option<&str>,
    entry_type: Option<&str>,
    dry_run: bool,
    user_id: Option<Uuid>,
) -> Result<DeleteResult> {
    // Local row type carrying everything needed to build an EntryRow snapshot.
    #[derive(Debug, sqlx::FromRow)]
    struct FullEntryRow {
        id: Uuid,
        version: i64,
        folder: String,
        #[sqlx(rename = "type")]
        entry_type: String,
        name: String,
        metadata: serde_json::Value,
        tags: Vec<String>,
        notes: String,
    }

    // Build the WHERE clause dynamically; `idx` numbers the $n placeholders
    // and must stay in sync with the bind order used for both queries below.
    let mut conditions: Vec<String> = Vec::new();
    let mut idx: i32 = 1;

    if user_id.is_some() {
        conditions.push(format!("user_id = ${}", idx));
        idx += 1;
    } else {
        // Single-user mode: rows are stored with a NULL user_id.
        conditions.push("user_id IS NULL".to_string());
    }
    if folder.is_some() {
        conditions.push(format!("folder = ${}", idx));
        idx += 1;
    }
    if entry_type.is_some() {
        conditions.push(format!("type = ${}", idx));
        idx += 1;
    }

    let where_clause = format!("WHERE {}", conditions.join(" AND "));
    let _ = idx; // used only for placeholder numbering in conditions

    if dry_run {
        // Preview: same selection as the real delete, but read-only (no
        // transaction, no FOR UPDATE) and no limit check.
        let sql = format!(
            "SELECT id, version, folder, type, name, metadata, tags, notes \
             FROM entries {where_clause} AND deleted_at IS NULL ORDER BY type, name"
        );
        let mut q = sqlx::query_as::<_, FullEntryRow>(&sql);
        if let Some(uid) = user_id {
            q = q.bind(uid);
        }
        if let Some(f) = folder {
            q = q.bind(f);
        }
        if let Some(t) = entry_type {
            q = q.bind(t);
        }
        let rows = q.fetch_all(pool).await?;

        let deleted = rows
            .iter()
            .map(|r| DeletedEntry {
                name: r.name.clone(),
                folder: r.folder.clone(),
                entry_type: r.entry_type.clone(),
            })
            .collect();
        return Ok(DeleteResult {
            deleted,
            dry_run: true,
        });
    }

    let mut tx = pool.begin().await?;

    // Lock the matching rows so the match set cannot change mid-operation.
    let sql = format!(
        "SELECT id, version, folder, type, name, metadata, tags, notes \
         FROM entries {where_clause} AND deleted_at IS NULL ORDER BY type, name FOR UPDATE"
    );
    let mut q = sqlx::query_as::<_, FullEntryRow>(&sql);
    if let Some(uid) = user_id {
        q = q.bind(uid);
    }
    if let Some(f) = folder {
        q = q.bind(f);
    }
    if let Some(t) = entry_type {
        q = q.bind(t);
    }
    let rows = q.fetch_all(&mut *tx).await?;

    // Safety valve: abort (rollback) rather than mass-delete on broad filters.
    if rows.len() > MAX_BULK_DELETE {
        tx.rollback().await?;
        anyhow::bail!(
            "Bulk delete would affect {} entries (limit: {}). \
             Narrow your filters or delete entries individually.",
            rows.len(),
            MAX_BULK_DELETE,
        );
    }

    let mut deleted = Vec::with_capacity(rows.len());
    for row in &rows {
        let entry_row: EntryRow = EntryRow {
            id: row.id,
            version: row.version,
            folder: row.folder.clone(),
            entry_type: row.entry_type.clone(),
            tags: row.tags.clone(),
            metadata: row.metadata.clone(),
            notes: row.notes.clone(),
            name: row.name.clone(),
        };
        snapshot_and_soft_delete(
            &mut tx,
            &row.folder,
            &row.entry_type,
            &row.name,
            &entry_row,
            user_id,
        )
        .await?;
        // One audit record per entry, all in the same transaction.
        crate::audit::log_tx(
            &mut tx,
            user_id,
            "delete",
            &row.folder,
            &row.entry_type,
            &row.name,
            json!({"bulk": true}),
        )
        .await;
        deleted.push(DeletedEntry {
            name: row.name.clone(),
            folder: row.folder.clone(),
            entry_type: row.entry_type.clone(),
        });
    }

    tx.commit().await?;

    Ok(DeleteResult {
        deleted,
        dry_run: false,
    })
}
|
|
|
|
/// Snapshot an entry and its secret fields into the history tables, then
/// soft-delete the entry (set `deleted_at`).
///
/// History snapshots are best-effort: snapshot failures are logged via
/// `tracing::warn!` but do not abort the delete. The secret-field fetch and
/// the final soft-delete UPDATE do propagate errors.
async fn snapshot_and_soft_delete(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    folder: &str,
    entry_type: &str,
    name: &str,
    row: &EntryRow,
    user_id: Option<Uuid>,
) -> Result<()> {
    // Enrich metadata with a secret snapshot for history; on failure, fall
    // back to the plain metadata rather than failing the delete.
    let history_metadata = match db::metadata_with_secret_snapshot(tx, row.id, &row.metadata).await
    {
        Ok(v) => v,
        Err(e) => {
            tracing::warn!(error = %e, "failed to build secret snapshot for entry history");
            row.metadata.clone()
        }
    };

    // Best-effort entry-history snapshot (action = "delete").
    if let Err(e) = db::snapshot_entry_history(
        tx,
        db::EntrySnapshotParams {
            entry_id: row.id,
            user_id,
            folder,
            entry_type,
            name,
            version: row.version,
            action: "delete",
            tags: &row.tags,
            metadata: &history_metadata,
        },
    )
    .await
    {
        tracing::warn!(error = %e, "failed to snapshot entry history before delete");
    }

    // Load the secret fields linked to this entry so each can be snapshotted.
    let fields: Vec<SecretFieldRow> = sqlx::query_as(
        "SELECT s.id, s.name, s.encrypted \
         FROM entry_secrets es \
         JOIN secrets s ON s.id = es.secret_id \
         WHERE es.entry_id = $1",
    )
    .bind(row.id)
    .fetch_all(&mut **tx)
    .await?;

    // Best-effort secret-history snapshots, one per field.
    for f in &fields {
        if let Err(e) = db::snapshot_secret_history(
            tx,
            db::SecretSnapshotParams {
                secret_id: f.id,
                name: &f.name,
                encrypted: &f.encrypted,
                action: "delete",
            },
        )
        .await
        {
            tracing::warn!(error = %e, "failed to snapshot secret history before delete");
        }
    }

    // The actual soft delete: mark the entry as trashed.
    sqlx::query("UPDATE entries SET deleted_at = NOW(), updated_at = NOW() WHERE id = $1")
        .bind(row.id)
        .execute(&mut **tx)
        .await?;

    Ok(())
}
|
|
|
|
async fn purge_entry_record(
|
|
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
|
entry_id: Uuid,
|
|
) -> Result<()> {
|
|
let fields: Vec<SecretFieldRow> = sqlx::query_as(
|
|
"SELECT s.id, s.name, s.encrypted \
|
|
FROM entry_secrets es \
|
|
JOIN secrets s ON s.id = es.secret_id \
|
|
WHERE es.entry_id = $1",
|
|
)
|
|
.bind(entry_id)
|
|
.fetch_all(&mut **tx)
|
|
.await?;
|
|
|
|
sqlx::query("DELETE FROM entries WHERE id = $1")
|
|
.bind(entry_id)
|
|
.execute(&mut **tx)
|
|
.await?;
|
|
|
|
let secret_ids: Vec<Uuid> = fields.iter().map(|f| f.id).collect();
|
|
if !secret_ids.is_empty() {
|
|
sqlx::query(
|
|
"DELETE FROM secrets s \
|
|
WHERE s.id = ANY($1) \
|
|
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
|
|
)
|
|
.bind(&secret_ids)
|
|
.execute(&mut **tx)
|
|
.await?;
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::PgPool;

    /// Integration-test helper: returns a migrated pool, or None (skipping
    /// the test with a message) when SECRETS_DATABASE_URL is unset/unreachable.
    async fn maybe_test_pool() -> Option<PgPool> {
        let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
            eprintln!("skip delete tests: SECRETS_DATABASE_URL is not set");
            return None;
        };
        let Ok(pool) = PgPool::connect(&url).await else {
            eprintln!("skip delete tests: cannot connect to database");
            return None;
        };
        if let Err(e) = crate::db::migrate(&pool).await {
            eprintln!("skip delete tests: migrate failed: {e}");
            return None;
        }
        Some(pool)
    }

    /// Remove single-user (user_id IS NULL) fixture rows whose name or folder
    /// contains `marker`; run before and after each test for isolation.
    async fn cleanup_single_user_rows(pool: &PgPool, marker: &str) -> Result<()> {
        sqlx::query(
            "DELETE FROM entries WHERE user_id IS NULL AND (name LIKE $1 OR folder LIKE $1)",
        )
        .bind(format!("%{marker}%"))
        .execute(pool)
        .await?;
        // Only drop secrets no longer referenced by any entry_secrets row.
        sqlx::query(
            "DELETE FROM secrets WHERE user_id IS NULL AND name LIKE $1 \
             AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = secrets.id)",
        )
        .bind(format!("%{marker}%"))
        .execute(pool)
        .await?;
        Ok(())
    }

    #[tokio::test]
    async fn delete_dry_run_reports_matching_entry_without_writes() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        // Unique marker keeps fixture rows isolated from concurrent tests.
        let suffix = Uuid::from_u128(rand::random()).to_string();
        let marker = format!("delete_dry_{}", &suffix[..8]);
        let entry_name = format!("{}_entry", marker);

        cleanup_single_user_rows(&pool, &marker).await?;

        sqlx::query(
            "INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata) \
             VALUES (NULL, $1, 'service', $2, '', '{}', '{}')",
        )
        .bind(&marker)
        .bind(&entry_name)
        .execute(&pool)
        .await?;

        let result = run(
            &pool,
            DeleteParams {
                name: Some(&entry_name),
                folder: Some(&marker),
                entry_type: None,
                dry_run: true,
                user_id: None,
            },
        )
        .await?;

        // Dry run must report exactly the matching entry...
        assert!(result.dry_run);
        assert_eq!(result.deleted.len(), 1);
        assert_eq!(result.deleted[0].name, entry_name);

        // ...and must not have written anything: the row still exists.
        let still_exists: bool = sqlx::query_scalar(
            "SELECT EXISTS(SELECT 1 FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2)",
        )
        .bind(&marker)
        .bind(&entry_name)
        .fetch_one(&pool)
        .await?;
        assert!(still_exists);

        cleanup_single_user_rows(&pool, &marker).await?;
        Ok(())
    }

    #[tokio::test]
    async fn delete_by_id_removes_entry_and_orphan_secret() -> Result<()> {
        let Some(pool) = maybe_test_pool().await else {
            return Ok(());
        };
        let suffix = Uuid::from_u128(rand::random()).to_string();
        let marker = format!("delete_id_{}", &suffix[..8]);
        let user_id = Uuid::from_u128(rand::random());
        let entry_name = format!("{}_entry", marker);
        let secret_name = format!("{}_secret", marker);

        // Pre-clean any leftovers from earlier runs with the same marker.
        sqlx::query("DELETE FROM entries WHERE user_id = $1 AND folder = $2")
            .bind(user_id)
            .bind(&marker)
            .execute(&pool)
            .await?;
        sqlx::query("DELETE FROM secrets WHERE user_id = $1 AND name = $2")
            .bind(user_id)
            .bind(&secret_name)
            .execute(&pool)
            .await?;

        // Fixture: one entry linked to one secret via entry_secrets.
        let entry_id: Uuid = sqlx::query_scalar(
            "INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata) \
             VALUES ($1, $2, 'service', $3, '', '{}', '{}') RETURNING id",
        )
        .bind(user_id)
        .bind(&marker)
        .bind(&entry_name)
        .fetch_one(&pool)
        .await?;
        let secret_id: Uuid = sqlx::query_scalar(
            "INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, 'text', $3) RETURNING id",
        )
        .bind(user_id)
        .bind(&secret_name)
        .bind(vec![1_u8, 2, 3])
        .fetch_one(&pool)
        .await?;
        sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
            .bind(entry_id)
            .bind(secret_id)
            .execute(&pool)
            .await?;

        let result = delete_by_id(&pool, entry_id, user_id).await?;
        assert!(!result.dry_run);
        assert_eq!(result.deleted.len(), 1);
        assert_eq!(result.deleted[0].name, entry_name);

        // NOTE(review): these assertions expect the entry row (and its orphan
        // secret) to be gone entirely — verify against the schema, since
        // delete_by_id itself performs a soft delete (sets deleted_at).
        let entry_exists: bool =
            sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1)")
                .bind(entry_id)
                .fetch_one(&pool)
                .await?;
        let secret_exists: bool =
            sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM secrets WHERE id = $1)")
                .bind(secret_id)
                .fetch_one(&pool)
                .await?;
        assert!(!entry_exists);
        assert!(!secret_exists);

        Ok(())
    }
}
|