use anyhow::Result;
use serde_json::Value;
use sqlx::PgPool;
use std::collections::HashMap;
use uuid::Uuid;

use crate::models::{Entry, SecretField};

/// Hard cap on rows returned by the "fetch everything" path ([`fetch_entries`]).
pub const FETCH_ALL_LIMIT: u32 = 100_000;

/// Build an ILIKE pattern for fuzzy matching, escaping `%` and `_` literals.
///
/// Backslash is escaped first so user-supplied `\` cannot defeat the
/// `ESCAPE '\'` clause used by the search queries; `%`/`_` are then escaped
/// so they match literally instead of acting as wildcards.
pub fn ilike_pattern(value: &str) -> String {
    format!(
        "%{}%",
        value
            .replace('\\', "\\\\")
            .replace('%', "\\%")
            .replace('_', "\\_")
    )
}

/// Filters, ordering and paging for entry searches.
pub struct SearchParams<'a> {
    pub folder: Option<&'a str>,
    pub entry_type: Option<&'a str>,
    pub name: Option<&'a str>,
    /// Fuzzy match on `entries.name` only (ILIKE with escaped `%`/`_`).
    pub name_query: Option<&'a str>,
    pub tags: &'a [String],
    pub query: Option<&'a str>,
    pub sort: &'a str,
    pub limit: u32,
    pub offset: u32,
    /// Multi-user: filter by this user_id. None = single-user / no filter.
    pub user_id: Option<Uuid>,
}

/// Entries plus, for each entry id, the secret fields attached to it.
#[derive(Debug, serde::Serialize)]
pub struct SearchResult {
    pub entries: Vec<Entry>,
    pub secret_schemas: HashMap<Uuid, Vec<SecretField>>,
}

/// List `entries` rows matching params (paged, ordered per `params.sort`).
/// Does not read the `secrets` table.
pub async fn list_entries(pool: &PgPool, params: SearchParams<'_>) -> Result<Vec<Entry>> {
    fetch_entries_paged(pool, &params).await
}

/// Count `entries` rows matching the same filters as [`list_entries`] (ignores `sort` / `limit` / `offset`).
/// Does not read the `secrets` table.
pub async fn count_entries(pool: &PgPool, a: &SearchParams<'_>) -> Result { let (where_clause, _) = entry_where_clause_and_next_idx(a); let sql = format!("SELECT COUNT(*)::bigint FROM entries {where_clause}"); let mut q = sqlx::query_scalar::<_, i64>(&sql); if let Some(uid) = a.user_id { q = q.bind(uid); } if let Some(v) = a.folder { q = q.bind(v); } if let Some(v) = a.entry_type { q = q.bind(v); } if let Some(v) = a.name { q = q.bind(v); } if let Some(v) = a.name_query { let pattern = ilike_pattern(v); q = q.bind(pattern); } for tag in a.tags { q = q.bind(tag); } if let Some(v) = a.query { let pattern = ilike_pattern(v); q = q.bind(pattern); } let n = q.fetch_one(pool).await?; Ok(n) } /// Shared WHERE clause and the next `$n` index (for LIMIT/OFFSET in paged queries). fn entry_where_clause_and_next_idx(a: &SearchParams<'_>) -> (String, i32) { let mut conditions: Vec = Vec::new(); let mut idx: i32 = 1; if a.user_id.is_some() { conditions.push(format!("user_id = ${}", idx)); idx += 1; } else { conditions.push("user_id IS NULL".to_string()); } if a.folder.is_some() { conditions.push(format!("folder = ${}", idx)); idx += 1; } if a.entry_type.is_some() { conditions.push(format!("type = ${}", idx)); idx += 1; } if a.name.is_some() { conditions.push(format!("name = ${}", idx)); idx += 1; } if a.name_query.is_some() { conditions.push(format!("name ILIKE ${} ESCAPE '\\'", idx)); idx += 1; } if !a.tags.is_empty() { let placeholders: Vec = a .tags .iter() .map(|_| { let p = format!("${}", idx); idx += 1; p }) .collect(); conditions.push(format!( "tags @> ARRAY[{}]::text[]", placeholders.join(", ") )); } if a.query.is_some() { conditions.push(format!( "(name ILIKE ${i} ESCAPE '\\' OR folder ILIKE ${i} ESCAPE '\\' \ OR type ILIKE ${i} ESCAPE '\\' OR notes ILIKE ${i} ESCAPE '\\' \ OR metadata::text ILIKE ${i} ESCAPE '\\' \ OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))", i = idx )); idx += 1; } let where_clause = if conditions.is_empty() { 
String::new() } else { format!("WHERE {}", conditions.join(" AND ")) }; (where_clause, idx) } pub async fn run(pool: &PgPool, params: SearchParams<'_>) -> Result { let entries = fetch_entries_paged(pool, ¶ms).await?; let entry_ids: Vec = entries.iter().map(|e| e.id).collect(); let secret_schemas = if !entry_ids.is_empty() { fetch_secret_schemas(pool, &entry_ids).await? } else { HashMap::new() }; Ok(SearchResult { entries, secret_schemas, }) } /// Fetch entries matching the given filters — returns all matching entries up to FETCH_ALL_LIMIT. #[allow(clippy::too_many_arguments)] pub async fn fetch_entries( pool: &PgPool, folder: Option<&str>, entry_type: Option<&str>, name: Option<&str>, tags: &[String], query: Option<&str>, user_id: Option, ) -> Result> { let params = SearchParams { folder, entry_type, name, name_query: None, tags, query, sort: "name", limit: FETCH_ALL_LIMIT, offset: 0, user_id, }; list_entries(pool, params).await } async fn fetch_entries_paged(pool: &PgPool, a: &SearchParams<'_>) -> Result> { let (where_clause, idx) = entry_where_clause_and_next_idx(a); let order = match a.sort { "updated" => "updated_at DESC", "created" => "created_at DESC", _ => "name ASC", }; let limit_idx = idx; let offset_idx = idx + 1; let sql = format!( "SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \ created_at, updated_at \ FROM entries {where_clause} ORDER BY {order} LIMIT ${limit_idx} OFFSET ${offset_idx}" ); let mut q = sqlx::query_as::<_, EntryRaw>(&sql); if let Some(uid) = a.user_id { q = q.bind(uid); } if let Some(v) = a.folder { q = q.bind(v); } if let Some(v) = a.entry_type { q = q.bind(v); } if let Some(v) = a.name { q = q.bind(v); } if let Some(v) = a.name_query { let pattern = ilike_pattern(v); q = q.bind(pattern); } for tag in a.tags { q = q.bind(tag); } if let Some(v) = a.query { let pattern = ilike_pattern(v); q = q.bind(pattern); } q = q.bind(a.limit as i64).bind(a.offset as i64); let rows = q.fetch_all(pool).await?; 
Ok(rows.into_iter().map(Entry::from).collect()) } /// Fetch secret field names for a set of entry ids (no decryption). pub async fn fetch_secret_schemas( pool: &PgPool, entry_ids: &[Uuid], ) -> Result>> { if entry_ids.is_empty() { return Ok(HashMap::new()); } let fields: Vec = sqlx::query_as( "SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \ FROM entry_secrets es \ JOIN secrets s ON s.id = es.secret_id \ WHERE es.entry_id = ANY($1) \ ORDER BY es.entry_id, es.sort_order, s.name", ) .bind(entry_ids) .fetch_all(pool) .await?; let mut map: HashMap> = HashMap::new(); for f in fields { let entry_id = f.entry_id; map.entry(entry_id).or_default().push(f.secret()); } Ok(map) } /// Fetch all secret fields (including encrypted bytes) for a set of entry ids. pub async fn fetch_secrets_for_entries( pool: &PgPool, entry_ids: &[Uuid], ) -> Result>> { if entry_ids.is_empty() { return Ok(HashMap::new()); } let fields: Vec = sqlx::query_as( "SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \ FROM entry_secrets es \ JOIN secrets s ON s.id = es.secret_id \ WHERE es.entry_id = ANY($1) \ ORDER BY es.entry_id, es.sort_order, s.name", ) .bind(entry_ids) .fetch_all(pool) .await?; let mut map: HashMap> = HashMap::new(); for f in fields { let entry_id = f.entry_id; map.entry(entry_id).or_default().push(f.secret()); } Ok(map) } /// Resolve exactly one entry by its UUID primary key. /// /// Returns an error if the entry does not exist or does not belong to the given user. pub async fn resolve_entry_by_id( pool: &PgPool, id: Uuid, user_id: Option, ) -> Result { let row: Option = if let Some(uid) = user_id { sqlx::query_as( "SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \ created_at, updated_at FROM entries WHERE id = $1 AND user_id = $2", ) .bind(id) .bind(uid) .fetch_optional(pool) .await? 
} else { sqlx::query_as( "SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \ created_at, updated_at FROM entries WHERE id = $1 AND user_id IS NULL", ) .bind(id) .fetch_optional(pool) .await? }; row.map(Entry::from) .ok_or_else(|| anyhow::anyhow!("Entry with id '{}' not found", id)) } /// Resolve exactly one entry by name, with optional folder for disambiguation. /// /// - If `folder` is provided: exact `(folder, name)` match. /// - If `folder` is None and exactly one entry matches: returns it. /// - If `folder` is None and multiple entries match: returns an error listing /// the folders and asking the caller to specify one. pub async fn resolve_entry( pool: &PgPool, name: &str, folder: Option<&str>, user_id: Option, ) -> Result { let entries = fetch_entries(pool, folder, None, Some(name), &[], None, user_id).await?; match entries.len() { 0 => { if let Some(f) = folder { anyhow::bail!("Not found: '{}' in folder '{}'", name, f) } else { anyhow::bail!("Not found: '{}'", name) } } 1 => Ok(entries.into_iter().next().unwrap()), _ => { let folders: Vec<&str> = entries.iter().map(|e| e.folder.as_str()).collect(); anyhow::bail!( "Ambiguous: {} entries named '{}' found in folders: [{}]. 
\ Specify 'folder' to disambiguate.", entries.len(), name, folders.join(", ") ) } } } // ── Internal raw row (because user_id is nullable in DB) ───────────────────── #[derive(sqlx::FromRow)] struct EntryRaw { id: Uuid, user_id: Option, folder: String, #[sqlx(rename = "type")] entry_type: String, name: String, notes: String, tags: Vec, metadata: Value, version: i64, created_at: chrono::DateTime, updated_at: chrono::DateTime, } impl From for Entry { fn from(r: EntryRaw) -> Self { Entry { id: r.id, user_id: r.user_id, folder: r.folder, entry_type: r.entry_type, name: r.name, notes: r.notes, tags: r.tags, metadata: r.metadata, version: r.version, created_at: r.created_at, updated_at: r.updated_at, } } } #[derive(sqlx::FromRow)] struct EntrySecretRow { entry_id: Uuid, id: Uuid, user_id: Option, name: String, #[sqlx(rename = "type")] secret_type: String, encrypted: Vec, version: i64, created_at: chrono::DateTime, updated_at: chrono::DateTime, } impl EntrySecretRow { fn secret(self) -> SecretField { SecretField { id: self.id, user_id: self.user_id, name: self.name, secret_type: self.secret_type, encrypted: self.encrypted, version: self.version, created_at: self.created_at, updated_at: self.updated_at, } } } #[cfg(test)] mod tests { use super::*; #[test] fn ilike_pattern_escapes_backslash_percent_and_underscore() { assert_eq!(ilike_pattern(r"hello\_100%"), r"%hello\\\_100\%%"); } }