Some checks failed
Secrets CLI - Build & Release / 版本 & Release (push) Successful in 3s
Secrets CLI - Build & Release / 质量检查 (fmt / clippy / test) (push) Successful in 2m46s
Secrets CLI - Build & Release / Build (macOS aarch64 + x86_64) (push) Successful in 1m27s
Secrets CLI - Build & Release / Build (x86_64-unknown-linux-musl) (push) Successful in 2m0s
Secrets CLI - Build & Release / 发布草稿 Release (push) Has been cancelled
Secrets CLI - Build & Release / Build (x86_64-pc-windows-msvc) (push) Has been cancelled
- 提取 EntryRow/SecretFieldRow 到 models.rs - 提取 current_actor()、print_json() 公共函数 - ExportFormat::from_extension 复用 from_str - fetch_entries 默认 limit 100k(export/inject/run 不再截断) - history 独立为 history.rs 模块 - delete 改用 DeleteArgs 结构体 - config_dir 改为 Result,Argon2id 参数提取常量 - Cargo 依赖 ^ 前缀、tokio 精简 features - 更新 AGENTS.md 项目结构 Made-with: Cursor
508 lines
16 KiB
Rust
508 lines
16 KiB
Rust
use anyhow::Result;
|
|
use serde_json::{Map, Value, json};
|
|
use sqlx::PgPool;
|
|
use std::fs;
|
|
|
|
use crate::crypto;
|
|
use crate::db;
|
|
use crate::models::EntryRow;
|
|
use crate::output::{OutputMode, print_json};
|
|
|
|
// ── Key/value parsing helpers (shared with update.rs) ───────────────────────
|
|
|
|
/// Parse secret / metadata entries into a nested key path and JSON value.
|
|
/// - `key=value` → stores the literal string `value`
|
|
/// - `key:=<json>` → parses `<json>` as a typed JSON value
|
|
/// - `key=@file` → reads the file content as a string
|
|
/// - `a:b=value` → writes nested fields: `{ "a": { "b": "value" } }`
|
|
/// - `a:b@./file.txt` → shorthand for nested file reads without manual JSON escaping
|
|
pub(crate) fn parse_kv(entry: &str) -> Result<(Vec<String>, Value)> {
|
|
// Typed JSON form: key:=<json>
|
|
if let Some((key, json_str)) = entry.split_once(":=") {
|
|
let val: Value = serde_json::from_str(json_str).map_err(|e| {
|
|
anyhow::anyhow!(
|
|
"Invalid JSON value for key '{}': {} (use key=value for plain strings)",
|
|
key,
|
|
e
|
|
)
|
|
})?;
|
|
return Ok((parse_key_path(key)?, val));
|
|
}
|
|
|
|
// Plain string form: key=value or key=@file
|
|
if let Some((key, raw_val)) = entry.split_once('=') {
|
|
let value = if let Some(path) = raw_val.strip_prefix('@') {
|
|
fs::read_to_string(path)
|
|
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?
|
|
} else {
|
|
raw_val.to_string()
|
|
};
|
|
|
|
return Ok((parse_key_path(key)?, Value::String(value)));
|
|
}
|
|
|
|
// Shorthand file form: nested:key@file
|
|
if let Some((key, path)) = entry.split_once('@') {
|
|
let value = fs::read_to_string(path)
|
|
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?;
|
|
return Ok((parse_key_path(key)?, Value::String(value)));
|
|
}
|
|
|
|
anyhow::bail!(
|
|
"Invalid format '{}'. Expected: key=value, key=@file, nested:key@file, or key:=<json>",
|
|
entry
|
|
)
|
|
}
|
|
|
|
pub(crate) fn build_json(entries: &[String]) -> Result<Value> {
|
|
let mut map = Map::new();
|
|
for entry in entries {
|
|
let (path, value) = parse_kv(entry)?;
|
|
insert_path(&mut map, &path, value)?;
|
|
}
|
|
Ok(Value::Object(map))
|
|
}
|
|
|
|
/// Render a parsed key path back into its canonical colon-separated form,
/// e.g. `["credentials", "content"]` → `"credentials:content"`.
pub(crate) fn key_path_to_string(path: &[String]) -> String {
    path.join(":")
}
|
|
|
|
pub(crate) fn collect_key_paths(entries: &[String]) -> Result<Vec<String>> {
|
|
entries
|
|
.iter()
|
|
.map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path)))
|
|
.collect()
|
|
}
|
|
|
|
pub(crate) fn collect_field_paths(entries: &[String]) -> Result<Vec<String>> {
|
|
entries
|
|
.iter()
|
|
.map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path)))
|
|
.collect()
|
|
}
|
|
|
|
pub(crate) fn parse_key_path(key: &str) -> Result<Vec<String>> {
|
|
let path: Vec<String> = key
|
|
.split(':')
|
|
.map(str::trim)
|
|
.map(ToOwned::to_owned)
|
|
.collect();
|
|
|
|
if path.is_empty() || path.iter().any(|part| part.is_empty()) {
|
|
anyhow::bail!(
|
|
"Invalid key path '{}'. Use non-empty segments like 'credentials:content'.",
|
|
key
|
|
);
|
|
}
|
|
|
|
Ok(path)
|
|
}
|
|
|
|
pub(crate) fn insert_path(
|
|
map: &mut Map<String, Value>,
|
|
path: &[String],
|
|
value: Value,
|
|
) -> Result<()> {
|
|
if path.is_empty() {
|
|
anyhow::bail!("Key path cannot be empty");
|
|
}
|
|
|
|
if path.len() == 1 {
|
|
map.insert(path[0].clone(), value);
|
|
return Ok(());
|
|
}
|
|
|
|
let head = path[0].clone();
|
|
let tail = &path[1..];
|
|
|
|
match map.entry(head.clone()) {
|
|
serde_json::map::Entry::Vacant(entry) => {
|
|
let mut child = Map::new();
|
|
insert_path(&mut child, tail, value)?;
|
|
entry.insert(Value::Object(child));
|
|
}
|
|
serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() {
|
|
Value::Object(child) => insert_path(child, tail, value)?,
|
|
_ => {
|
|
anyhow::bail!(
|
|
"Cannot set nested key '{}' because '{}' is already a non-object value",
|
|
key_path_to_string(path),
|
|
head
|
|
);
|
|
}
|
|
},
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
pub(crate) fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Result<bool> {
|
|
if path.is_empty() {
|
|
anyhow::bail!("Key path cannot be empty");
|
|
}
|
|
|
|
if path.len() == 1 {
|
|
return Ok(map.remove(&path[0]).is_some());
|
|
}
|
|
|
|
let Some(value) = map.get_mut(&path[0]) else {
|
|
return Ok(false);
|
|
};
|
|
|
|
let Value::Object(child) = value else {
|
|
return Ok(false);
|
|
};
|
|
|
|
let removed = remove_path(child, &path[1..])?;
|
|
if child.is_empty() {
|
|
map.remove(&path[0]);
|
|
}
|
|
|
|
Ok(removed)
|
|
}
|
|
|
|
// ── field_type inference and value_len ──────────────────────────────────────
|
|
|
|
/// Infer the field type string from a JSON value.
|
|
pub(crate) fn infer_field_type(v: &Value) -> &'static str {
|
|
match v {
|
|
Value::String(_) => "string",
|
|
Value::Number(_) => "number",
|
|
Value::Bool(_) => "boolean",
|
|
Value::Null => "string",
|
|
Value::Array(_) | Value::Object(_) => "json",
|
|
}
|
|
}
|
|
|
|
/// Compute the plaintext length of a JSON value (chars for string, serialized length otherwise).
|
|
pub(crate) fn compute_value_len(v: &Value) -> i32 {
|
|
match v {
|
|
Value::String(s) => s.chars().count() as i32,
|
|
Value::Null => 0,
|
|
other => other.to_string().chars().count() as i32,
|
|
}
|
|
}
|
|
|
|
/// Flatten a (potentially nested) JSON object into dot-separated field entries.
|
|
/// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` →
|
|
/// `[("credentials.type", "ssh"), ("credentials.content", "...")]`
|
|
/// Top-level non-object values are emitted directly.
|
|
pub(crate) fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> {
|
|
match value {
|
|
Value::Object(map) => {
|
|
let mut out = Vec::new();
|
|
for (k, v) in map {
|
|
let full_key = if prefix.is_empty() {
|
|
k.clone()
|
|
} else {
|
|
format!("{}.{}", prefix, k)
|
|
};
|
|
out.extend(flatten_json_fields(&full_key, v));
|
|
}
|
|
out
|
|
}
|
|
other => vec![(prefix.to_string(), other.clone())],
|
|
}
|
|
}
|
|
|
|
// ── Add command ──────────────────────────────────────────────────────────────
|
|
|
|
/// Borrowed arguments for the `add` command (consumed by [`run`]).
pub struct AddArgs<'a> {
    /// Entry namespace — first component of the entry's unique identity.
    pub namespace: &'a str,
    /// Entry kind — second component of the entry's unique identity.
    pub kind: &'a str,
    /// Entry name — third component of the entry's unique identity.
    pub name: &'a str,
    /// Tags stored verbatim on the entry row.
    pub tags: &'a [String],
    /// Plaintext metadata entries in `key=value` / `key:=<json>` / `key=@file` form.
    pub meta_entries: &'a [String],
    /// Secret entries in the same form; values are encrypted before storage.
    pub secret_entries: &'a [String],
    /// Output rendering mode (JSON variants or human-readable text).
    pub output: OutputMode,
}
|
|
|
|
/// Add (upsert) an entry with tags, metadata, and encrypted secret fields.
///
/// All database work happens in a single transaction:
/// 1. snapshot the previous entry row into history (best-effort),
/// 2. upsert the entry row, bumping `version` on conflict,
/// 3. snapshot then replace all secret fields with freshly encrypted values,
/// 4. write an audit log row (key names only — never secret values).
///
/// # Errors
/// Returns an error on malformed entries, file-read failures, encryption
/// failures, or any non-snapshot database error (history snapshot failures
/// are logged and tolerated).
pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    let metadata = build_json(args.meta_entries)?;
    let secret_json = build_json(args.secret_entries)?;

    tracing::debug!(args.namespace, args.kind, args.name, "upserting entry");

    // Key paths are reported in the audit payload and command output;
    // plaintext secret values never leave this function.
    let meta_keys = collect_key_paths(args.meta_entries)?;
    let secret_keys = collect_key_paths(args.secret_entries)?;

    let mut tx = pool.begin().await?;

    // Upsert the entry row (tags + metadata).
    let existing: Option<EntryRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata FROM entries \
         WHERE namespace = $1 AND kind = $2 AND name = $3",
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .fetch_optional(&mut *tx)
    .await?;

    // Snapshot the current entry state before overwriting.
    // Best-effort: a failed snapshot is logged, not fatal.
    if let Some(ref ex) = existing
        && let Err(e) = db::snapshot_entry_history(
            &mut tx,
            db::EntrySnapshotParams {
                entry_id: ex.id,
                namespace: args.namespace,
                kind: args.kind,
                name: args.name,
                version: ex.version,
                action: "add",
                tags: &ex.tags,
                metadata: &ex.metadata,
            },
        )
        .await
    {
        tracing::warn!(error = %e, "failed to snapshot entry history before upsert");
    }

    let entry_id: uuid::Uuid = sqlx::query_scalar(
        r#"
        INSERT INTO entries (namespace, kind, name, tags, metadata, version, updated_at)
        VALUES ($1, $2, $3, $4, $5, 1, NOW())
        ON CONFLICT (namespace, kind, name)
        DO UPDATE SET
            tags = EXCLUDED.tags,
            metadata = EXCLUDED.metadata,
            version = entries.version + 1,
            updated_at = NOW()
        RETURNING id
        "#,
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .bind(args.tags)
    .bind(&metadata)
    .fetch_one(&mut *tx)
    .await?;

    // Re-read the post-upsert version; secret history below records the
    // version being replaced (i.e. new version - 1).
    let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
        .bind(entry_id)
        .fetch_one(&mut *tx)
        .await?;

    // Snapshot existing secret fields before replacing.
    if existing.is_some() {
        // Local row shape for the (still-encrypted) stored secret fields.
        #[derive(sqlx::FromRow)]
        struct ExistingField {
            id: uuid::Uuid,
            field_name: String,
            field_type: String,
            value_len: i32,
            encrypted: Vec<u8>,
        }
        let existing_fields: Vec<ExistingField> = sqlx::query_as(
            "SELECT id, field_name, field_type, value_len, encrypted \
             FROM secrets WHERE entry_id = $1",
        )
        .bind(entry_id)
        .fetch_all(&mut *tx)
        .await?;

        for f in &existing_fields {
            // Best-effort, same as the entry snapshot above.
            if let Err(e) = db::snapshot_secret_history(
                &mut tx,
                db::SecretSnapshotParams {
                    entry_id,
                    secret_id: f.id,
                    entry_version: new_entry_version - 1,
                    field_name: &f.field_name,
                    field_type: &f.field_type,
                    value_len: f.value_len,
                    encrypted: &f.encrypted,
                    action: "add",
                },
            )
            .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field history");
            }
        }

        // Delete existing secret fields so we can re-insert the full set.
        sqlx::query("DELETE FROM secrets WHERE entry_id = $1")
            .bind(entry_id)
            .execute(&mut *tx)
            .await?;
    }

    // Insert new secret fields.
    let flat_fields = flatten_json_fields("", &secret_json);
    for (field_name, field_value) in &flat_fields {
        let field_type = infer_field_type(field_value);
        let value_len = compute_value_len(field_value);
        // Each field value is encrypted individually with the master key.
        let encrypted = crypto::encrypt_json(master_key, field_value)?;

        sqlx::query(
            "INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \
             VALUES ($1, $2, $3, $4, $5)",
        )
        .bind(entry_id)
        .bind(field_name)
        .bind(field_type)
        .bind(value_len)
        .bind(&encrypted)
        .execute(&mut *tx)
        .await?;
    }

    // Audit payload carries key names only, never secret values.
    crate::audit::log_tx(
        &mut tx,
        "add",
        args.namespace,
        args.kind,
        args.name,
        json!({
            "tags": args.tags,
            "meta_keys": meta_keys,
            "secret_keys": secret_keys,
        }),
    )
    .await;

    tx.commit().await?;

    let result_json = json!({
        "action": "added",
        "namespace": args.namespace,
        "kind": args.kind,
        "name": args.name,
        "tags": args.tags,
        "meta_keys": meta_keys,
        "secret_keys": secret_keys,
    });

    match args.output {
        OutputMode::Json | OutputMode::JsonCompact => {
            print_json(&result_json, &args.output)?;
        }
        _ => {
            println!("Added: [{}/{}] {}", args.namespace, args.kind, args.name);
            if !args.tags.is_empty() {
                println!(" tags: {}", args.tags.join(", "));
            }
            if !args.meta_entries.is_empty() {
                println!(" metadata: {}", meta_keys.join(", "));
            }
            if !args.secret_entries.is_empty() {
                println!(" secrets: {}", secret_keys.join(", "));
            }
        }
    }

    Ok(())
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::{
        build_json, compute_value_len, flatten_json_fields, infer_field_type, key_path_to_string,
        parse_kv, remove_path,
    };
    use serde_json::Value;
    use std::fs;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};

    /// Build a unique temp-file path; the nanosecond suffix avoids collisions
    /// between concurrently running tests.
    fn temp_file_path(name: &str) -> PathBuf {
        let nanos = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock should be after unix epoch")
            .as_nanos();
        std::env::temp_dir().join(format!("secrets-{name}-{nanos}.txt"))
    }

    // `nested:key@file` should read the file verbatim (trailing newline kept).
    #[test]
    fn parse_nested_file_shorthand() {
        let path = temp_file_path("ssh-key");
        fs::write(&path, "line1\nline2\n").expect("should write temp file");

        let entry = format!("credentials:content@{}", path.display());
        let (path_parts, value) = parse_kv(&entry).expect("should parse nested file shorthand");

        assert_eq!(key_path_to_string(&path_parts), "credentials:content");
        assert_eq!(value, serde_json::Value::String("line1\nline2\n".into()));

        fs::remove_file(path).expect("should remove temp file");
    }

    // Mixed string (`=`), typed (`:=`), and top-level entries merge into one
    // nested object.
    #[test]
    fn build_nested_json_from_mixed_entries() {
        let payload = vec![
            "credentials:type=ssh".to_string(),
            "credentials:enabled:=true".to_string(),
            "username=root".to_string(),
        ];

        let value = build_json(&payload).expect("should build nested json");

        assert_eq!(
            value,
            serde_json::json!({
                "credentials": {
                    "type": "ssh",
                    "enabled": true
                },
                "username": "root"
            })
        );
    }

    // Removing the last field of a nested object should also drop the
    // now-empty parent object.
    #[test]
    fn remove_nested_path_prunes_empty_parents() {
        let mut value = serde_json::json!({
            "credentials": {
                "content": "pem-data"
            },
            "username": "root"
        });

        let map = match &mut value {
            Value::Object(map) => map,
            _ => panic!("expected object"),
        };

        let removed = remove_path(map, &["credentials".to_string(), "content".to_string()])
            .expect("should remove nested field");

        assert!(removed);
        assert_eq!(value, serde_json::json!({ "username": "root" }));
    }

    // Nested objects flatten to dot-separated leaf entries.
    #[test]
    fn flatten_json_fields_nested() {
        let v = serde_json::json!({
            "username": "root",
            "credentials": {
                "type": "ssh",
                "content": "pem-data"
            }
        });
        let mut fields = flatten_json_fields("", &v);
        // Sort for a deterministic order before asserting.
        fields.sort_by(|a, b| a.0.cmp(&b.0));

        assert_eq!(fields[0].0, "credentials.content");
        assert_eq!(fields[1].0, "credentials.type");
        assert_eq!(fields[2].0, "username");
    }

    // One representative value per field-type bucket.
    #[test]
    fn infer_field_types() {
        assert_eq!(infer_field_type(&Value::String("x".into())), "string");
        assert_eq!(infer_field_type(&serde_json::json!(42)), "number");
        assert_eq!(infer_field_type(&Value::Bool(true)), "boolean");
        assert_eq!(infer_field_type(&serde_json::json!(["a"])), "json");
    }

    // Strings count chars, null is 0, other values measure serialized text.
    #[test]
    fn compute_value_len_string() {
        assert_eq!(compute_value_len(&Value::String("root".into())), 4);
        assert_eq!(compute_value_len(&Value::Null), 0);
        assert_eq!(compute_value_len(&serde_json::json!(1234)), 4);
    }
}
|