//! Entry search and secret lookup helpers (Postgres via sqlx).
//!
//! Import/change notes: MCP `secrets_find`, `secrets_overview`; `secrets_get`
//! id-only; id on update/delete/history/rollback; added meta_obj/secrets_obj,
//! delete guard, env_map/instructions updates. Core: `resolve_entry_by_id`;
//! `get_*_by_id` validates entry + tenant before decrypt.
use anyhow::Result;
|
|
use serde_json::Value;
|
|
use sqlx::PgPool;
|
|
use std::collections::HashMap;
|
|
use uuid::Uuid;
|
|
|
|
use crate::models::{Entry, SecretField};
|
|
|
|
/// Page-size cap used by `fetch_entries` when callers want "all" matching
/// rows in one call — effectively unpaged, but still bounded.
pub const FETCH_ALL_LIMIT: u32 = 100_000;
|
/// Filter, sort and paging arguments for an entry search.
pub struct SearchParams<'a> {
    /// Exact folder match; `None` = any folder.
    pub folder: Option<&'a str>,
    /// Exact entry type match; `None` = any type.
    pub entry_type: Option<&'a str>,
    /// Exact name match; `None` = any name.
    pub name: Option<&'a str>,
    /// Entry must contain ALL of these tags (`tags @>` containment).
    pub tags: &'a [String],
    /// Free-text substring (ILIKE) search across name, folder, type, notes,
    /// metadata text and tags.
    pub query: Option<&'a str>,
    /// Sort key: "updated" / "created" (both DESC); anything else = name ASC.
    pub sort: &'a str,
    /// Maximum number of rows returned.
    pub limit: u32,
    /// Number of rows skipped before the first returned row.
    pub offset: u32,
    /// Multi-user: filter by this user_id. `None` = single-user mode —
    /// note this still filters: only rows whose `user_id` IS NULL match.
    pub user_id: Option<Uuid>,
}
|
|
|
|
/// Result of a search: the matching entries plus, per entry id, the list of
/// secret-field rows associated with that entry.
#[derive(Debug, serde::Serialize)]
pub struct SearchResult {
    pub entries: Vec<Entry>,
    /// Secret fields keyed by entry id, as fetched by `fetch_secret_schemas`.
    pub secret_schemas: HashMap<Uuid, Vec<SecretField>>,
}
|
|
|
|
pub async fn run(pool: &PgPool, params: SearchParams<'_>) -> Result<SearchResult> {
|
|
let entries = fetch_entries_paged(pool, ¶ms).await?;
|
|
let entry_ids: Vec<Uuid> = entries.iter().map(|e| e.id).collect();
|
|
let secret_schemas = if !entry_ids.is_empty() {
|
|
fetch_secret_schemas(pool, &entry_ids).await?
|
|
} else {
|
|
HashMap::new()
|
|
};
|
|
Ok(SearchResult {
|
|
entries,
|
|
secret_schemas,
|
|
})
|
|
}
|
|
|
|
/// Fetch entries matching the given filters — returns all matching entries up to FETCH_ALL_LIMIT.
|
|
pub async fn fetch_entries(
|
|
pool: &PgPool,
|
|
folder: Option<&str>,
|
|
entry_type: Option<&str>,
|
|
name: Option<&str>,
|
|
tags: &[String],
|
|
query: Option<&str>,
|
|
user_id: Option<Uuid>,
|
|
) -> Result<Vec<Entry>> {
|
|
let params = SearchParams {
|
|
folder,
|
|
entry_type,
|
|
name,
|
|
tags,
|
|
query,
|
|
sort: "name",
|
|
limit: FETCH_ALL_LIMIT,
|
|
offset: 0,
|
|
user_id,
|
|
};
|
|
fetch_entries_paged(pool, ¶ms).await
|
|
}
|
|
|
|
async fn fetch_entries_paged(pool: &PgPool, a: &SearchParams<'_>) -> Result<Vec<Entry>> {
|
|
let mut conditions: Vec<String> = Vec::new();
|
|
let mut idx: i32 = 1;
|
|
|
|
// user_id filtering — always comes first when present
|
|
if a.user_id.is_some() {
|
|
conditions.push(format!("user_id = ${}", idx));
|
|
idx += 1;
|
|
} else {
|
|
conditions.push("user_id IS NULL".to_string());
|
|
}
|
|
|
|
if a.folder.is_some() {
|
|
conditions.push(format!("folder = ${}", idx));
|
|
idx += 1;
|
|
}
|
|
if a.entry_type.is_some() {
|
|
conditions.push(format!("type = ${}", idx));
|
|
idx += 1;
|
|
}
|
|
if a.name.is_some() {
|
|
conditions.push(format!("name = ${}", idx));
|
|
idx += 1;
|
|
}
|
|
if !a.tags.is_empty() {
|
|
let placeholders: Vec<String> = a
|
|
.tags
|
|
.iter()
|
|
.map(|_| {
|
|
let p = format!("${}", idx);
|
|
idx += 1;
|
|
p
|
|
})
|
|
.collect();
|
|
conditions.push(format!(
|
|
"tags @> ARRAY[{}]::text[]",
|
|
placeholders.join(", ")
|
|
));
|
|
}
|
|
if a.query.is_some() {
|
|
conditions.push(format!(
|
|
"(name ILIKE ${i} ESCAPE '\\' OR folder ILIKE ${i} ESCAPE '\\' \
|
|
OR type ILIKE ${i} ESCAPE '\\' OR notes ILIKE ${i} ESCAPE '\\' \
|
|
OR metadata::text ILIKE ${i} ESCAPE '\\' \
|
|
OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))",
|
|
i = idx
|
|
));
|
|
idx += 1;
|
|
}
|
|
|
|
let order = match a.sort {
|
|
"updated" => "updated_at DESC",
|
|
"created" => "created_at DESC",
|
|
_ => "name ASC",
|
|
};
|
|
|
|
let limit_idx = idx;
|
|
idx += 1;
|
|
let offset_idx = idx;
|
|
|
|
let where_clause = if conditions.is_empty() {
|
|
String::new()
|
|
} else {
|
|
format!("WHERE {}", conditions.join(" AND "))
|
|
};
|
|
|
|
let sql = format!(
|
|
"SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \
|
|
created_at, updated_at \
|
|
FROM entries {where_clause} ORDER BY {order} LIMIT ${limit_idx} OFFSET ${offset_idx}"
|
|
);
|
|
|
|
let mut q = sqlx::query_as::<_, EntryRaw>(&sql);
|
|
|
|
if let Some(uid) = a.user_id {
|
|
q = q.bind(uid);
|
|
}
|
|
if let Some(v) = a.folder {
|
|
q = q.bind(v);
|
|
}
|
|
if let Some(v) = a.entry_type {
|
|
q = q.bind(v);
|
|
}
|
|
if let Some(v) = a.name {
|
|
q = q.bind(v);
|
|
}
|
|
for tag in a.tags {
|
|
q = q.bind(tag);
|
|
}
|
|
if let Some(v) = a.query {
|
|
let pattern = format!("%{}%", v.replace('%', "\\%").replace('_', "\\_"));
|
|
q = q.bind(pattern);
|
|
}
|
|
q = q.bind(a.limit as i64).bind(a.offset as i64);
|
|
|
|
let rows = q.fetch_all(pool).await?;
|
|
Ok(rows.into_iter().map(Entry::from).collect())
|
|
}
|
|
|
|
/// Fetch secret field names for a set of entry ids (no decryption).
|
|
pub async fn fetch_secret_schemas(
|
|
pool: &PgPool,
|
|
entry_ids: &[Uuid],
|
|
) -> Result<HashMap<Uuid, Vec<SecretField>>> {
|
|
if entry_ids.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
let fields: Vec<SecretField> = sqlx::query_as(
|
|
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
|
|
)
|
|
.bind(entry_ids)
|
|
.fetch_all(pool)
|
|
.await?;
|
|
|
|
let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
|
|
for f in fields {
|
|
map.entry(f.entry_id).or_default().push(f);
|
|
}
|
|
Ok(map)
|
|
}
|
|
|
|
/// Fetch all secret fields (including encrypted bytes) for a set of entry ids.
|
|
pub async fn fetch_secrets_for_entries(
|
|
pool: &PgPool,
|
|
entry_ids: &[Uuid],
|
|
) -> Result<HashMap<Uuid, Vec<SecretField>>> {
|
|
if entry_ids.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
let fields: Vec<SecretField> = sqlx::query_as(
|
|
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
|
|
)
|
|
.bind(entry_ids)
|
|
.fetch_all(pool)
|
|
.await?;
|
|
|
|
let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
|
|
for f in fields {
|
|
map.entry(f.entry_id).or_default().push(f);
|
|
}
|
|
Ok(map)
|
|
}
|
|
|
|
/// Resolve exactly one entry by its UUID primary key.
|
|
///
|
|
/// Returns an error if the entry does not exist or does not belong to the given user.
|
|
pub async fn resolve_entry_by_id(
|
|
pool: &PgPool,
|
|
id: Uuid,
|
|
user_id: Option<Uuid>,
|
|
) -> Result<crate::models::Entry> {
|
|
let row: Option<EntryRaw> = if let Some(uid) = user_id {
|
|
sqlx::query_as(
|
|
"SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \
|
|
created_at, updated_at FROM entries WHERE id = $1 AND user_id = $2",
|
|
)
|
|
.bind(id)
|
|
.bind(uid)
|
|
.fetch_optional(pool)
|
|
.await?
|
|
} else {
|
|
sqlx::query_as(
|
|
"SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \
|
|
created_at, updated_at FROM entries WHERE id = $1 AND user_id IS NULL",
|
|
)
|
|
.bind(id)
|
|
.fetch_optional(pool)
|
|
.await?
|
|
};
|
|
row.map(Entry::from)
|
|
.ok_or_else(|| anyhow::anyhow!("Entry with id '{}' not found", id))
|
|
}
|
|
|
|
/// Resolve exactly one entry by name, with optional folder for disambiguation.
|
|
///
|
|
/// - If `folder` is provided: exact `(folder, name)` match.
|
|
/// - If `folder` is None and exactly one entry matches: returns it.
|
|
/// - If `folder` is None and multiple entries match: returns an error listing
|
|
/// the folders and asking the caller to specify one.
|
|
pub async fn resolve_entry(
|
|
pool: &PgPool,
|
|
name: &str,
|
|
folder: Option<&str>,
|
|
user_id: Option<Uuid>,
|
|
) -> Result<crate::models::Entry> {
|
|
let entries = fetch_entries(pool, folder, None, Some(name), &[], None, user_id).await?;
|
|
match entries.len() {
|
|
0 => {
|
|
if let Some(f) = folder {
|
|
anyhow::bail!("Not found: '{}' in folder '{}'", name, f)
|
|
} else {
|
|
anyhow::bail!("Not found: '{}'", name)
|
|
}
|
|
}
|
|
1 => Ok(entries.into_iter().next().unwrap()),
|
|
_ => {
|
|
let folders: Vec<&str> = entries.iter().map(|e| e.folder.as_str()).collect();
|
|
anyhow::bail!(
|
|
"Ambiguous: {} entries named '{}' found in folders: [{}]. \
|
|
Specify 'folder' to disambiguate.",
|
|
entries.len(),
|
|
name,
|
|
folders.join(", ")
|
|
)
|
|
}
|
|
}
|
|
}
|
|
|
|
// ── Internal raw row (because user_id is nullable in DB) ─────────────────────
/// Private sqlx row type mirroring the `entries` table; converted into the
/// public `Entry` model via `From<EntryRaw>`.
#[derive(sqlx::FromRow)]
struct EntryRaw {
    id: Uuid,
    // NULL in single-user mode; set when the entry belongs to a user.
    user_id: Option<Uuid>,
    folder: String,
    // `type` is a Rust keyword, so the column is renamed onto this field.
    #[sqlx(rename = "type")]
    entry_type: String,
    name: String,
    notes: String,
    tags: Vec<String>,
    // Arbitrary JSON metadata column.
    metadata: Value,
    version: i64,
    created_at: chrono::DateTime<chrono::Utc>,
    updated_at: chrono::DateTime<chrono::Utc>,
}
|
|
|
|
impl From<EntryRaw> for Entry {
|
|
fn from(r: EntryRaw) -> Self {
|
|
Entry {
|
|
id: r.id,
|
|
user_id: r.user_id,
|
|
folder: r.folder,
|
|
entry_type: r.entry_type,
|
|
name: r.name,
|
|
notes: r.notes,
|
|
tags: r.tags,
|
|
metadata: r.metadata,
|
|
version: r.version,
|
|
created_at: r.created_at,
|
|
updated_at: r.updated_at,
|
|
}
|
|
}
|
|
}
|