325 lines
9.9 KiB
Rust
325 lines
9.9 KiB
Rust
use std::collections::{BTreeSet, HashMap};
|
|
|
|
use anyhow::Result;
|
|
use sqlx::PgPool;
|
|
use uuid::Uuid;
|
|
|
|
use crate::error::AppError;
|
|
|
|
/// Minimal projection of an `entries` row (`id`, `folder`, `type`, `name`)
/// used when listing an entry's parents or children.
#[derive(Debug, Clone, serde::Serialize, sqlx::FromRow)]
pub struct RelationEntrySummary {
    pub id: Uuid,
    pub folder: String,
    /// The entry's `type` column; renamed to `entry_type` in Rust because
    /// `type` is a reserved keyword. Serialized back out as `"type"`.
    #[serde(rename = "type")]
    #[sqlx(rename = "type")]
    pub entry_type: String,
    pub name: String,
}
|
|
|
|
/// The direct parents and children of a single entry, as assembled by
/// `get_relations_for_entries`. Defaults to two empty lists.
#[derive(Debug, Clone, Default, serde::Serialize)]
pub struct EntryRelations {
    /// Direct parent entries (rows where this entry is the child).
    pub parents: Vec<RelationEntrySummary>,
    /// Direct child entries (rows where this entry is the parent).
    pub children: Vec<RelationEntrySummary>,
}
|
|
|
|
/// Add a single parent→child relation between two entries, inside one
/// transaction.
///
/// Both entries must exist, be live (not soft-deleted), and belong to the
/// caller's scope (`user_id`, or ownerless entries when `None`) — enforced by
/// `validate_live_entries`. Inserting an already-existing relation is a no-op
/// (`ON CONFLICT DO NOTHING`).
///
/// # Errors
/// - `AppError::Validation` if parent and child are the same entry, or if the
///   new edge would create a cycle in the relation graph.
/// - `AppError::NotFoundEntry` (via `validate_live_entries`) if either entry
///   is missing, deleted, or out of scope.
pub async fn add_parent_relation(
    pool: &PgPool,
    parent_entry_id: Uuid,
    child_entry_id: Uuid,
    user_id: Option<Uuid>,
) -> Result<()> {
    // Reject self-loops up front, before opening a transaction.
    if parent_entry_id == child_entry_id {
        return Err(AppError::Validation {
            message: "entry cannot reference itself".to_string(),
        }
        .into());
    }

    let mut tx = pool.begin().await?;
    validate_live_entries(&mut tx, &[parent_entry_id, child_entry_id], user_id).await?;

    // Walk every descendant of the child ($1 = child). If the would-be parent
    // ($2) appears among them, adding parent→child would close a loop.
    let cycle_exists: bool = sqlx::query_scalar(
        "WITH RECURSIVE descendants AS ( \
         SELECT child_entry_id FROM entry_relations WHERE parent_entry_id = $1 \
         UNION \
         SELECT er.child_entry_id \
         FROM entry_relations er \
         JOIN descendants d ON d.child_entry_id = er.parent_entry_id \
         ) \
         SELECT EXISTS(SELECT 1 FROM descendants WHERE child_entry_id = $2)",
    )
    .bind(child_entry_id)
    .bind(parent_entry_id)
    .fetch_one(&mut *tx)
    .await?;
    if cycle_exists {
        tx.rollback().await?;
        return Err(AppError::Validation {
            message: "adding this relation would create a cycle".to_string(),
        }
        .into());
    }

    // Idempotent insert: a duplicate (parent, child) pair is silently ignored.
    sqlx::query(
        "INSERT INTO entry_relations (parent_entry_id, child_entry_id) \
         VALUES ($1, $2) ON CONFLICT DO NOTHING",
    )
    .bind(parent_entry_id)
    .bind(child_entry_id)
    .execute(&mut *tx)
    .await?;
    tx.commit().await?;
    Ok(())
}
|
|
|
|
pub async fn remove_parent_relation(
|
|
pool: &PgPool,
|
|
parent_entry_id: Uuid,
|
|
child_entry_id: Uuid,
|
|
user_id: Option<Uuid>,
|
|
) -> Result<()> {
|
|
let mut tx = pool.begin().await?;
|
|
validate_live_entries(&mut tx, &[parent_entry_id, child_entry_id], user_id).await?;
|
|
sqlx::query("DELETE FROM entry_relations WHERE parent_entry_id = $1 AND child_entry_id = $2")
|
|
.bind(parent_entry_id)
|
|
.bind(child_entry_id)
|
|
.execute(&mut *tx)
|
|
.await?;
|
|
tx.commit().await?;
|
|
Ok(())
|
|
}
|
|
|
|
pub async fn set_parent_relations(
|
|
pool: &PgPool,
|
|
child_entry_id: Uuid,
|
|
parent_entry_ids: &[Uuid],
|
|
user_id: Option<Uuid>,
|
|
) -> Result<()> {
|
|
let deduped: Vec<Uuid> = parent_entry_ids
|
|
.iter()
|
|
.copied()
|
|
.collect::<BTreeSet<_>>()
|
|
.into_iter()
|
|
.collect();
|
|
if deduped.contains(&child_entry_id) {
|
|
return Err(AppError::Validation {
|
|
message: "entry cannot reference itself".to_string(),
|
|
}
|
|
.into());
|
|
}
|
|
|
|
let mut tx = pool.begin().await?;
|
|
let mut validate_ids = Vec::with_capacity(deduped.len() + 1);
|
|
validate_ids.push(child_entry_id);
|
|
validate_ids.extend(deduped.iter().copied());
|
|
validate_live_entries(&mut tx, &validate_ids, user_id).await?;
|
|
|
|
let current_parent_ids: Vec<Uuid> =
|
|
sqlx::query_scalar("SELECT parent_entry_id FROM entry_relations WHERE child_entry_id = $1")
|
|
.bind(child_entry_id)
|
|
.fetch_all(&mut *tx)
|
|
.await?;
|
|
let current: BTreeSet<Uuid> = current_parent_ids.into_iter().collect();
|
|
let target: BTreeSet<Uuid> = deduped.iter().copied().collect();
|
|
|
|
for parent_id in current.difference(&target) {
|
|
sqlx::query(
|
|
"DELETE FROM entry_relations WHERE parent_entry_id = $1 AND child_entry_id = $2",
|
|
)
|
|
.bind(*parent_id)
|
|
.bind(child_entry_id)
|
|
.execute(&mut *tx)
|
|
.await?;
|
|
}
|
|
|
|
for parent_id in target.difference(¤t) {
|
|
let cycle_exists: bool = sqlx::query_scalar(
|
|
"WITH RECURSIVE descendants AS ( \
|
|
SELECT child_entry_id FROM entry_relations WHERE parent_entry_id = $1 \
|
|
UNION \
|
|
SELECT er.child_entry_id \
|
|
FROM entry_relations er \
|
|
JOIN descendants d ON d.child_entry_id = er.parent_entry_id \
|
|
) \
|
|
SELECT EXISTS(SELECT 1 FROM descendants WHERE child_entry_id = $2)",
|
|
)
|
|
.bind(child_entry_id)
|
|
.bind(*parent_id)
|
|
.fetch_one(&mut *tx)
|
|
.await?;
|
|
if cycle_exists {
|
|
tx.rollback().await?;
|
|
return Err(AppError::Validation {
|
|
message: "adding this relation would create a cycle".to_string(),
|
|
}
|
|
.into());
|
|
}
|
|
|
|
sqlx::query(
|
|
"INSERT INTO entry_relations (parent_entry_id, child_entry_id) VALUES ($1, $2) \
|
|
ON CONFLICT DO NOTHING",
|
|
)
|
|
.bind(*parent_id)
|
|
.bind(child_entry_id)
|
|
.execute(&mut *tx)
|
|
.await?;
|
|
}
|
|
|
|
tx.commit().await?;
|
|
Ok(())
|
|
}
|
|
|
|
pub async fn get_relations_for_entries(
|
|
pool: &PgPool,
|
|
entry_ids: &[Uuid],
|
|
user_id: Option<Uuid>,
|
|
) -> Result<HashMap<Uuid, EntryRelations>> {
|
|
if entry_ids.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
|
|
#[derive(sqlx::FromRow)]
|
|
struct ParentRow {
|
|
owner_entry_id: Uuid,
|
|
id: Uuid,
|
|
folder: String,
|
|
#[sqlx(rename = "type")]
|
|
entry_type: String,
|
|
name: String,
|
|
}
|
|
|
|
#[derive(sqlx::FromRow)]
|
|
struct ChildRow {
|
|
owner_entry_id: Uuid,
|
|
id: Uuid,
|
|
folder: String,
|
|
#[sqlx(rename = "type")]
|
|
entry_type: String,
|
|
name: String,
|
|
}
|
|
|
|
let (parents, children): (Vec<ParentRow>, Vec<ChildRow>) = if let Some(uid) = user_id {
|
|
let parents = sqlx::query_as(
|
|
"SELECT er.child_entry_id AS owner_entry_id, p.id, p.folder, p.type, p.name \
|
|
FROM entry_relations er \
|
|
JOIN entries p ON p.id = er.parent_entry_id \
|
|
JOIN entries c ON c.id = er.child_entry_id \
|
|
WHERE er.child_entry_id = ANY($1) \
|
|
AND p.user_id = $2 AND c.user_id = $2 \
|
|
AND p.deleted_at IS NULL AND c.deleted_at IS NULL \
|
|
ORDER BY er.child_entry_id, p.name ASC",
|
|
)
|
|
.bind(entry_ids)
|
|
.bind(uid)
|
|
.fetch_all(pool);
|
|
let children = sqlx::query_as(
|
|
"SELECT er.parent_entry_id AS owner_entry_id, c.id, c.folder, c.type, c.name \
|
|
FROM entry_relations er \
|
|
JOIN entries c ON c.id = er.child_entry_id \
|
|
JOIN entries p ON p.id = er.parent_entry_id \
|
|
WHERE er.parent_entry_id = ANY($1) \
|
|
AND p.user_id = $2 AND c.user_id = $2 \
|
|
AND p.deleted_at IS NULL AND c.deleted_at IS NULL \
|
|
ORDER BY er.parent_entry_id, c.name ASC",
|
|
)
|
|
.bind(entry_ids)
|
|
.bind(uid)
|
|
.fetch_all(pool);
|
|
(parents.await?, children.await?)
|
|
} else {
|
|
let parents = sqlx::query_as(
|
|
"SELECT er.child_entry_id AS owner_entry_id, p.id, p.folder, p.type, p.name \
|
|
FROM entry_relations er \
|
|
JOIN entries p ON p.id = er.parent_entry_id \
|
|
JOIN entries c ON c.id = er.child_entry_id \
|
|
WHERE er.child_entry_id = ANY($1) \
|
|
AND p.user_id IS NULL AND c.user_id IS NULL \
|
|
AND p.deleted_at IS NULL AND c.deleted_at IS NULL \
|
|
ORDER BY er.child_entry_id, p.name ASC",
|
|
)
|
|
.bind(entry_ids)
|
|
.fetch_all(pool);
|
|
let children = sqlx::query_as(
|
|
"SELECT er.parent_entry_id AS owner_entry_id, c.id, c.folder, c.type, c.name \
|
|
FROM entry_relations er \
|
|
JOIN entries c ON c.id = er.child_entry_id \
|
|
JOIN entries p ON p.id = er.parent_entry_id \
|
|
WHERE er.parent_entry_id = ANY($1) \
|
|
AND p.user_id IS NULL AND c.user_id IS NULL \
|
|
AND p.deleted_at IS NULL AND c.deleted_at IS NULL \
|
|
ORDER BY er.parent_entry_id, c.name ASC",
|
|
)
|
|
.bind(entry_ids)
|
|
.fetch_all(pool);
|
|
(parents.await?, children.await?)
|
|
};
|
|
|
|
let mut map: HashMap<Uuid, EntryRelations> = entry_ids
|
|
.iter()
|
|
.copied()
|
|
.map(|id| (id, EntryRelations::default()))
|
|
.collect();
|
|
|
|
for row in parents {
|
|
map.entry(row.owner_entry_id)
|
|
.or_default()
|
|
.parents
|
|
.push(RelationEntrySummary {
|
|
id: row.id,
|
|
folder: row.folder,
|
|
entry_type: row.entry_type,
|
|
name: row.name,
|
|
});
|
|
}
|
|
|
|
for row in children {
|
|
map.entry(row.owner_entry_id)
|
|
.or_default()
|
|
.children
|
|
.push(RelationEntrySummary {
|
|
id: row.id,
|
|
folder: row.folder,
|
|
entry_type: row.entry_type,
|
|
name: row.name,
|
|
});
|
|
}
|
|
|
|
Ok(map)
|
|
}
|
|
|
|
async fn validate_live_entries(
|
|
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
|
entry_ids: &[Uuid],
|
|
user_id: Option<Uuid>,
|
|
) -> Result<()> {
|
|
let unique_ids: Vec<Uuid> = entry_ids
|
|
.iter()
|
|
.copied()
|
|
.collect::<BTreeSet<_>>()
|
|
.into_iter()
|
|
.collect();
|
|
let live_count: i64 = if let Some(uid) = user_id {
|
|
sqlx::query_scalar(
|
|
"SELECT COUNT(*)::bigint FROM entries \
|
|
WHERE id = ANY($1) AND user_id = $2 AND deleted_at IS NULL",
|
|
)
|
|
.bind(&unique_ids)
|
|
.bind(uid)
|
|
.fetch_one(&mut **tx)
|
|
.await?
|
|
} else {
|
|
sqlx::query_scalar(
|
|
"SELECT COUNT(*)::bigint FROM entries \
|
|
WHERE id = ANY($1) AND user_id IS NULL AND deleted_at IS NULL",
|
|
)
|
|
.bind(&unique_ids)
|
|
.fetch_one(&mut **tx)
|
|
.await?
|
|
};
|
|
|
|
if live_count != unique_ids.len() as i64 {
|
|
return Err(AppError::NotFoundEntry.into());
|
|
}
|
|
Ok(())
|
|
}
|