Some checks failed
Secrets CLI - Build & Release / 版本 & Release (push) Successful in 3s
Secrets CLI - Build & Release / 质量检查 (fmt / clippy / test) (push) Successful in 2m20s
Secrets CLI - Build & Release / Build (macOS aarch64 + x86_64) (push) Successful in 1m4s
Secrets CLI - Build & Release / Build (x86_64-unknown-linux-musl) (push) Successful in 1m13s
Secrets CLI - Build & Release / Build (x86_64-pc-windows-msvc) (push) Has been cancelled
Secrets CLI - Build & Release / 发布草稿 Release (push) Has been cancelled
- run 新增 -s/--secret 字段过滤,只注入指定字段到子进程(最小权限) - run 新增 --dry-run 模式,输出变量名与来源映射,不执行命令、不暴露值 - run 新增 -o 参数,dry-run 默认 JSON 输出 - 默认输出格式改为始终 json,移除 TTY 自动切换逻辑,-o text 供人类使用 - build_injected_env_map 签名从 &[SecretField] 改为 &[&SecretField] - 更新 AGENTS.md、README.md、.vscode/tasks.json - version: 0.9.5 → 0.9.6 Made-with: Cursor
569 lines
17 KiB
Rust
569 lines
17 KiB
Rust
use anyhow::Result;
|
|
use serde_json::{Value, json};
|
|
use sqlx::PgPool;
|
|
use std::collections::HashMap;
|
|
|
|
use crate::crypto;
|
|
use crate::models::{Entry, SecretField};
|
|
use crate::output::{OutputMode, format_local_time};
|
|
|
|
/// Arguments for the `search` subcommand.
pub struct SearchArgs<'a> {
    /// Exact-match filter on the entry namespace.
    pub namespace: Option<&'a str>,
    /// Exact-match filter on the entry kind.
    pub kind: Option<&'a str>,
    /// Exact-match filter on the entry name.
    pub name: Option<&'a str>,
    /// Entries must contain *all* of these tags (array-contains filter).
    pub tags: &'a [String],
    /// Free-text query, ILIKE-matched against name/namespace/kind/metadata/tags.
    pub query: Option<&'a str>,
    /// `-f/--field` metadata paths (e.g. `metadata.url`) to print directly
    /// instead of full records. Secret paths are rejected up front.
    pub fields: &'a [String],
    /// Summary output: omit id, metadata, and the secret-field schema.
    pub summary: bool,
    /// Page size (max rows returned).
    pub limit: u32,
    /// Page offset (rows skipped).
    pub offset: u32,
    /// Sort key: "updated", "created", or anything else for name ordering.
    pub sort: &'a str,
    /// Output format (pretty JSON, compact JSON, or human-readable text).
    pub output: OutputMode,
}
|
|
|
|
pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> {
|
|
validate_safe_search_args(args.fields)?;
|
|
|
|
let rows = fetch_entries_paged(
|
|
pool,
|
|
PagedFetchArgs {
|
|
namespace: args.namespace,
|
|
kind: args.kind,
|
|
name: args.name,
|
|
tags: args.tags,
|
|
query: args.query,
|
|
sort: args.sort,
|
|
limit: args.limit,
|
|
offset: args.offset,
|
|
},
|
|
)
|
|
.await?;
|
|
|
|
// -f/--field: extract specific metadata field values directly
|
|
if !args.fields.is_empty() {
|
|
return print_fields(&rows, args.fields);
|
|
}
|
|
|
|
// Fetch secret schemas for all returned entries (no master key needed).
|
|
let entry_ids: Vec<uuid::Uuid> = rows.iter().map(|r| r.id).collect();
|
|
let schema_map = if !args.summary && !entry_ids.is_empty() {
|
|
fetch_secret_schemas(pool, &entry_ids).await?
|
|
} else {
|
|
HashMap::new()
|
|
};
|
|
|
|
match args.output {
|
|
OutputMode::Json | OutputMode::JsonCompact => {
|
|
let arr: Vec<Value> = rows
|
|
.iter()
|
|
.map(|r| to_json(r, args.summary, schema_map.get(&r.id).map(Vec::as_slice)))
|
|
.collect();
|
|
let out = if args.output == OutputMode::Json {
|
|
serde_json::to_string_pretty(&arr)?
|
|
} else {
|
|
serde_json::to_string(&arr)?
|
|
};
|
|
println!("{}", out);
|
|
}
|
|
OutputMode::Text => {
|
|
if rows.is_empty() {
|
|
println!("No records found.");
|
|
return Ok(());
|
|
}
|
|
for row in &rows {
|
|
print_text(
|
|
row,
|
|
args.summary,
|
|
schema_map.get(&row.id).map(Vec::as_slice),
|
|
)?;
|
|
}
|
|
println!("{} record(s) found.", rows.len());
|
|
if rows.len() == args.limit as usize {
|
|
println!(
|
|
" (showing up to {}; use --offset {} to see more)",
|
|
args.limit,
|
|
args.offset + args.limit
|
|
);
|
|
}
|
|
}
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
fn validate_safe_search_args(fields: &[String]) -> Result<()> {
|
|
if let Some(field) = fields.iter().find(|field| is_secret_field(field)) {
|
|
anyhow::bail!(
|
|
"Field '{}' is sensitive. `search -f` only supports metadata.* fields; use `secrets run` for secrets.",
|
|
field
|
|
);
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
/// True when a dotted field path starts with a secret-bearing section
/// ("secret", "secrets", or "encrypted"). Paths without a dot never match.
fn is_secret_field(field: &str) -> bool {
    field
        .split_once('.')
        .is_some_and(|(section, _)| matches!(section, "secret" | "secrets" | "encrypted"))
}
|
|
|
|
// ── Entry fetching ────────────────────────────────────────────────────────────
|
|
|
|
/// Internal filter + paging arguments for [`fetch_entries_paged`].
struct PagedFetchArgs<'a> {
    /// Exact-match namespace filter.
    namespace: Option<&'a str>,
    /// Exact-match kind filter.
    kind: Option<&'a str>,
    /// Exact-match name filter.
    name: Option<&'a str>,
    /// Entries must contain all of these tags.
    tags: &'a [String],
    /// Free-text ILIKE query across several columns.
    query: Option<&'a str>,
    /// Sort key: "updated", "created", or default (namespace/kind/name).
    sort: &'a str,
    /// Maximum rows to return.
    limit: u32,
    /// Rows to skip before returning results.
    offset: u32,
}
|
|
|
|
/// A very large limit used when callers need all matching records (export, run).
/// Postgres will stop scanning when this many rows are found; adjust if needed.
pub const FETCH_ALL_LIMIT: u32 = 100_000;
|
|
/// Fetch entries matching the given filters (used by search, run).
|
|
/// `limit` caps the result set; pass `FETCH_ALL_LIMIT` when you need all matching records.
|
|
pub async fn fetch_entries(
|
|
pool: &PgPool,
|
|
namespace: Option<&str>,
|
|
kind: Option<&str>,
|
|
name: Option<&str>,
|
|
tags: &[String],
|
|
query: Option<&str>,
|
|
) -> Result<Vec<Entry>> {
|
|
fetch_entries_with_limit(pool, namespace, kind, name, tags, query, FETCH_ALL_LIMIT).await
|
|
}
|
|
|
|
/// Like `fetch_entries` but with an explicit limit. Used internally by `search`.
|
|
pub(crate) async fn fetch_entries_with_limit(
|
|
pool: &PgPool,
|
|
namespace: Option<&str>,
|
|
kind: Option<&str>,
|
|
name: Option<&str>,
|
|
tags: &[String],
|
|
query: Option<&str>,
|
|
limit: u32,
|
|
) -> Result<Vec<Entry>> {
|
|
fetch_entries_paged(
|
|
pool,
|
|
PagedFetchArgs {
|
|
namespace,
|
|
kind,
|
|
name,
|
|
tags,
|
|
query,
|
|
sort: "name",
|
|
limit,
|
|
offset: 0,
|
|
},
|
|
)
|
|
.await
|
|
}
|
|
|
|
/// Build and run the dynamic filtered/paged SELECT over `entries`.
///
/// INVARIANT: Postgres placeholders (`$n`) are numbered in the exact order the
/// values are bound further down (namespace, kind, name, tags..., query,
/// limit, offset). The two sequences must stay in sync when editing.
async fn fetch_entries_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Entry>> {
    let mut conditions: Vec<String> = Vec::new();
    // Next placeholder number ($1-based).
    let mut idx: i32 = 1;

    if a.namespace.is_some() {
        conditions.push(format!("namespace = ${}", idx));
        idx += 1;
    }
    if a.kind.is_some() {
        conditions.push(format!("kind = ${}", idx));
        idx += 1;
    }
    if a.name.is_some() {
        conditions.push(format!("name = ${}", idx));
        idx += 1;
    }
    if !a.tags.is_empty() {
        // One placeholder per tag; `@>` requires the tags array to contain all of them.
        let placeholders: Vec<String> = a
            .tags
            .iter()
            .map(|_| {
                let p = format!("${}", idx);
                idx += 1;
                p
            })
            .collect();
        conditions.push(format!("tags @> ARRAY[{}]", placeholders.join(", ")));
    }
    if a.query.is_some() {
        // A single placeholder reused for every ILIKE target; the bound pattern
        // is escaped below so user input cannot inject `%`/`_` wildcards.
        conditions.push(format!(
            "(name ILIKE ${i} ESCAPE '\\' OR namespace ILIKE ${i} ESCAPE '\\' OR kind ILIKE ${i} ESCAPE '\\' OR metadata::text ILIKE ${i} ESCAPE '\\' OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))",
            i = idx
        ));
        idx += 1;
    }

    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };

    // `sort` is mapped through this fixed whitelist, so interpolating `order`
    // into the SQL string below is injection-safe.
    let order = match a.sort {
        "updated" => "updated_at DESC",
        "created" => "created_at DESC",
        _ => "namespace, kind, name",
    };

    let sql = format!(
        "SELECT * FROM entries {} ORDER BY {} LIMIT ${} OFFSET ${}",
        where_clause,
        order,
        idx,
        idx + 1
    );

    tracing::debug!(sql, "executing search query");

    // Bind values in the same order the placeholders were allocated above.
    let mut q = sqlx::query_as::<_, Entry>(&sql);
    if let Some(v) = a.namespace {
        q = q.bind(v);
    }
    if let Some(v) = a.kind {
        q = q.bind(v);
    }
    if let Some(v) = a.name {
        q = q.bind(v);
    }
    for v in a.tags {
        q = q.bind(v.as_str());
    }
    if let Some(v) = a.query {
        // Substring match; escape backslash first, then the LIKE wildcards.
        q = q.bind(format!(
            "%{}%",
            v.replace('\\', "\\\\")
                .replace('%', "\\%")
                .replace('_', "\\_")
        ));
    }
    q = q.bind(a.limit as i64).bind(a.offset as i64);

    Ok(q.fetch_all(pool).await?)
}
|
|
|
|
// ── Secret schema fetching (no master key) ───────────────────────────────────
|
|
|
|
/// Fetch secret field names for a set of entry ids.
|
|
/// Returns a map from entry_id to list of SecretField.
|
|
async fn fetch_secret_schemas(
|
|
pool: &PgPool,
|
|
entry_ids: &[uuid::Uuid],
|
|
) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
|
|
if entry_ids.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
|
|
let fields: Vec<SecretField> = sqlx::query_as(
|
|
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
|
|
)
|
|
.bind(entry_ids)
|
|
.fetch_all(pool)
|
|
.await?;
|
|
|
|
let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
|
|
for f in fields {
|
|
map.entry(f.entry_id).or_default().push(f);
|
|
}
|
|
Ok(map)
|
|
}
|
|
|
|
/// Fetch all secret fields (including encrypted bytes) for a set of entry ids.
|
|
pub async fn fetch_secrets_for_entries(
|
|
pool: &PgPool,
|
|
entry_ids: &[uuid::Uuid],
|
|
) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
|
|
if entry_ids.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
|
|
let fields: Vec<SecretField> = sqlx::query_as(
|
|
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
|
|
)
|
|
.bind(entry_ids)
|
|
.fetch_all(pool)
|
|
.await?;
|
|
|
|
let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
|
|
for f in fields {
|
|
map.entry(f.entry_id).or_default().push(f);
|
|
}
|
|
Ok(map)
|
|
}
|
|
|
|
// ── Display helpers ───────────────────────────────────────────────────────────
|
|
|
|
fn env_prefix(entry: &Entry, prefix: &str) -> String {
|
|
let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_");
|
|
if prefix.is_empty() {
|
|
name_part
|
|
} else {
|
|
format!(
|
|
"{}_{}",
|
|
prefix.to_uppercase().replace(['-', '.', ' '], "_"),
|
|
name_part
|
|
)
|
|
}
|
|
}
|
|
|
|
/// Build a flat KEY=VALUE map from decrypted secret fields only.
/// Resolves key_ref: if metadata.key_ref is set, merges secret fields from that key entry.
///
/// * `entry`      - entry whose secret `fields` are decrypted and injected.
/// * `prefix`     - optional user prefix, combined with the entry name by `env_prefix`.
/// * `master_key` - 32-byte key handed to `crypto::decrypt_json` for each field.
/// * `fields`     - the subset of this entry's secret fields to inject
///                  (callers may pre-filter, e.g. `run -s/--secret`).
///
/// Errors if any field fails to decrypt or the key_ref lookup query fails.
pub async fn build_injected_env_map(
    pool: &PgPool,
    entry: &Entry,
    prefix: &str,
    master_key: &[u8; 32],
    fields: &[&SecretField],
) -> Result<HashMap<String, String>> {
    let effective_prefix = env_prefix(entry, prefix);
    let mut map = HashMap::new();

    // Decrypt each secret field and add to env map.
    for f in fields {
        let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
        // Variable name: <PREFIX>_<FIELD> with '-' and '.' mapped to '_'.
        // NOTE(review): unlike env_prefix, spaces in field names are NOT
        // replaced here — confirm whether that asymmetry is intended.
        let key = format!(
            "{}_{}",
            effective_prefix,
            f.field_name.to_uppercase().replace(['-', '.'], "_")
        );
        map.insert(key, json_value_to_env_string(&decrypted));
    }

    // Resolve key_ref: merge secrets from the referenced key entry.
    // The referenced entry is looked up in the same namespace with kind "key".
    if let Some(key_ref) = entry.metadata.get("key_ref").and_then(|v| v.as_str()) {
        let key_entries = fetch_entries(
            pool,
            Some(&entry.namespace),
            Some("key"),
            Some(key_ref),
            &[],
            None,
        )
        .await?;

        if let Some(key_entry) = key_entries.first() {
            let key_ids = vec![key_entry.id];
            let key_fields_map = fetch_secrets_for_entries(pool, &key_ids).await?;
            let empty = vec![];
            let key_fields = key_fields_map.get(&key_entry.id).unwrap_or(&empty);

            // Key-entry variables use the key entry's own name as prefix.
            // A name collision with the main entry's variables would overwrite
            // the earlier value (HashMap::insert semantics).
            let key_prefix = env_prefix(key_entry, prefix);
            for f in key_fields {
                let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
                let key_var = format!(
                    "{}_{}",
                    key_prefix,
                    f.field_name.to_uppercase().replace(['-', '.'], "_")
                );
                map.insert(key_var, json_value_to_env_string(&decrypted));
            }
        } else {
            // Missing reference is logged, not fatal: injection proceeds
            // with the entry's own fields only.
            tracing::warn!(key_ref, "key_ref target not found");
        }
    }

    Ok(map)
}
|
|
|
|
fn json_value_to_env_string(v: &Value) -> String {
|
|
match v {
|
|
Value::String(s) => s.clone(),
|
|
Value::Null => String::new(),
|
|
other => other.to_string(),
|
|
}
|
|
}
|
|
|
|
fn to_json(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Value {
|
|
if summary {
|
|
let desc = entry
|
|
.metadata
|
|
.get("desc")
|
|
.or_else(|| entry.metadata.get("url"))
|
|
.and_then(|v| v.as_str())
|
|
.unwrap_or("")
|
|
.to_string();
|
|
return json!({
|
|
"namespace": entry.namespace,
|
|
"kind": entry.kind,
|
|
"name": entry.name,
|
|
"tags": entry.tags,
|
|
"desc": desc,
|
|
"updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
|
});
|
|
}
|
|
|
|
let secrets_val: Value = match schema {
|
|
Some(fields) if !fields.is_empty() => {
|
|
let schema_arr: Vec<Value> = fields
|
|
.iter()
|
|
.map(|f| {
|
|
json!({
|
|
"field_name": f.field_name,
|
|
})
|
|
})
|
|
.collect();
|
|
Value::Array(schema_arr)
|
|
}
|
|
_ => Value::Array(vec![]),
|
|
};
|
|
|
|
json!({
|
|
"id": entry.id,
|
|
"namespace": entry.namespace,
|
|
"kind": entry.kind,
|
|
"name": entry.name,
|
|
"tags": entry.tags,
|
|
"metadata": entry.metadata,
|
|
"secrets": secrets_val,
|
|
"version": entry.version,
|
|
"created_at": entry.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
|
"updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
|
})
|
|
}
|
|
|
|
/// Print one entry in human-readable text form.
/// Summary mode shows tags/desc/updated; full mode adds id, metadata,
/// the secret-field name schema, version, and created time.
/// A trailing blank line separates consecutive records.
fn print_text(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Result<()> {
    println!("[{}/{}] {}", entry.namespace, entry.kind, entry.name);
    if summary {
        // Short description: metadata `desc`, falling back to `url`, else "-".
        let desc = entry
            .metadata
            .get("desc")
            .or_else(|| entry.metadata.get("url"))
            .and_then(|v| v.as_str())
            .unwrap_or("-");
        if !entry.tags.is_empty() {
            println!("  tags: [{}]", entry.tags.join(", "));
        }
        println!("  desc: {}", desc);
        println!("  updated: {}", format_local_time(entry.updated_at));
    } else {
        println!("  id: {}", entry.id);
        if !entry.tags.is_empty() {
            println!("  tags: [{}]", entry.tags.join(", "));
        }
        // Only print metadata when it is a non-empty JSON object.
        if entry.metadata.as_object().is_some_and(|m| !m.is_empty()) {
            println!(
                "  metadata: {}",
                serde_json::to_string_pretty(&entry.metadata)?
            );
        }
        // Secret field *names* only — values require `secrets run`.
        match schema {
            Some(fields) if !fields.is_empty() => {
                let schema_str: Vec<String> = fields.iter().map(|f| f.field_name.clone()).collect();
                println!("  secrets: {}", schema_str.join(", "));
                println!("    (use `secrets run` to get values)");
            }
            _ => {}
        }
        println!("  version: {}", entry.version);
        println!("  created: {}", format_local_time(entry.created_at));
    }
    println!();
    Ok(())
}
|
|
|
|
/// Extract one or more metadata field paths like `metadata.url`.
|
|
fn print_fields(rows: &[Entry], fields: &[String]) -> Result<()> {
|
|
for row in rows {
|
|
for field in fields {
|
|
let val = extract_field(row, field)?;
|
|
println!("{}", val);
|
|
}
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
fn extract_field(entry: &Entry, field: &str) -> Result<String> {
|
|
let (section, key) = field
|
|
.split_once('.')
|
|
.ok_or_else(|| anyhow::anyhow!("Invalid field path '{}'. Use metadata.<key>.", field))?;
|
|
|
|
let obj = match section {
|
|
"metadata" | "meta" => &entry.metadata,
|
|
other => anyhow::bail!("Unknown field section '{}'. Use 'metadata'.", other),
|
|
};
|
|
|
|
obj.get(key)
|
|
.and_then(|v| {
|
|
v.as_str()
|
|
.map(|s| s.to_string())
|
|
.or_else(|| Some(v.to_string()))
|
|
})
|
|
.ok_or_else(|| {
|
|
anyhow::anyhow!(
|
|
"Field '{}' not found in record [{}/{}/{}]",
|
|
field,
|
|
entry.namespace,
|
|
entry.kind,
|
|
entry.name
|
|
)
|
|
})
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use serde_json::json;
    use uuid::Uuid;

    // Minimal entry fixture with one tag and a two-key metadata object.
    fn sample_entry() -> Entry {
        Entry {
            id: Uuid::nil(),
            namespace: "refining".to_string(),
            kind: "service".to_string(),
            name: "gitea.main".to_string(),
            tags: vec!["prod".to_string()],
            metadata: json!({"url": "https://code.example.com", "enabled": true}),
            version: 1,
            created_at: Utc::now(),
            updated_at: Utc::now(),
        }
    }

    // One secret field ("token") encrypted with a fixed throwaway key.
    fn sample_fields() -> Vec<SecretField> {
        let key = [0x42u8; 32];
        let enc = crypto::encrypt_json(&key, &json!("abc123")).unwrap();
        vec![SecretField {
            id: Uuid::nil(),
            entry_id: Uuid::nil(),
            field_name: "token".to_string(),
            encrypted: enc,
            version: 1,
            created_at: Utc::now(),
            updated_at: Utc::now(),
        }]
    }

    // `search -f secret.*` must be refused before touching the database.
    #[test]
    fn rejects_secret_field_extraction() {
        let fields = vec!["secret.token".to_string()];
        let err = validate_safe_search_args(&fields).unwrap_err();
        assert!(err.to_string().contains("sensitive"));
    }

    // Full JSON output exposes the secret-field *names* (schema only).
    #[test]
    fn to_json_full_includes_secrets_schema() {
        let entry = sample_entry();
        let fields = sample_fields();
        let v = to_json(&entry, false, Some(&fields));

        let secrets = v.get("secrets").unwrap().as_array().unwrap();
        assert_eq!(secrets.len(), 1);
        assert_eq!(secrets[0]["field_name"], "token");
    }

    // Summary output must not mention secrets at all.
    #[test]
    fn to_json_summary_omits_secrets_schema() {
        let entry = sample_entry();
        let fields = sample_fields();
        let v = to_json(&entry, true, Some(&fields));
        assert!(v.get("secrets").is_none());
    }
}
|