chore: remove field_type and value_len from secrets schema
Some checks failed
Secrets CLI - Build & Release / 版本 & Release (push) Successful in 3s
Secrets CLI - Build & Release / 质量检查 (fmt / clippy / test) (push) Successful in 2m34s
Secrets CLI - Build & Release / Build (macOS aarch64 + x86_64) (push) Successful in 1m3s
Secrets CLI - Build & Release / Build (x86_64-unknown-linux-musl) (push) Successful in 1m15s
Secrets CLI - Build & Release / 发布草稿 Release (push) Has been cancelled
Secrets CLI - Build & Release / Build (x86_64-pc-windows-msvc) (push) Has been cancelled

- Drop field_type, value_len from secrets and secrets_history tables
- Remove infer_field_type, compute_value_len from add.rs
- Simplify search output to field names only
- Update AGENTS.md, README.md documentation

Bump version to 0.9.4

Made-with: Cursor
This commit is contained in:
voson
2026-03-19 16:48:23 +08:00
parent 62a1df316b
commit 854720f10c
12 changed files with 48 additions and 137 deletions

View File

@@ -71,8 +71,6 @@ secrets (
id UUID PRIMARY KEY DEFAULT uuidv7(), id UUID PRIMARY KEY DEFAULT uuidv7(),
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
field_name VARCHAR(256) NOT NULL, -- 明文字段名: "username", "token", "ssh_key" field_name VARCHAR(256) NOT NULL, -- 明文字段名: "username", "token", "ssh_key"
field_type VARCHAR(32) NOT NULL DEFAULT 'string', -- 明文类型: "string"|"number"|"boolean"|"json"
value_len INT NOT NULL DEFAULT 0, -- 明文原始值字符数(PEM≈4096,token≈40)
encrypted BYTEA NOT NULL DEFAULT '\x', -- 仅加密值本身:nonce(12B)||ciphertext+tag encrypted BYTEA NOT NULL DEFAULT '\x', -- 仅加密值本身:nonce(12B)||ciphertext+tag
version BIGINT NOT NULL DEFAULT 1, version BIGINT NOT NULL DEFAULT 1,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
@@ -130,8 +128,6 @@ secrets_history (
secret_id UUID NOT NULL, -- 对应 secrets.id secret_id UUID NOT NULL, -- 对应 secrets.id
entry_version BIGINT NOT NULL, -- 关联 entries_history 的版本号 entry_version BIGINT NOT NULL, -- 关联 entries_history 的版本号
field_name VARCHAR(256) NOT NULL, field_name VARCHAR(256) NOT NULL,
field_type VARCHAR(32) NOT NULL DEFAULT 'string',
value_len INT NOT NULL DEFAULT 0,
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback'
actor VARCHAR(128) NOT NULL DEFAULT '', actor VARCHAR(128) NOT NULL DEFAULT '',
@@ -149,8 +145,6 @@ secrets_history (
| `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` | | `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` |
| `metadata` | 明文非敏感信息 | `{"ip":"192.0.2.1","desc":"Grafana","key_ref":"my-shared-key"}` | | `metadata` | 明文非敏感信息 | `{"ip":"192.0.2.1","desc":"Grafana","key_ref":"my-shared-key"}` |
| `secrets.field_name` | 加密字段名(明文) | `"username"`, `"token"`, `"ssh_key"` | | `secrets.field_name` | 加密字段名(明文) | `"username"`, `"token"`, `"ssh_key"` |
| `secrets.field_type` | 值类型(明文) | `"string"`, `"number"`, `"boolean"`, `"json"` |
| `secrets.value_len` | 原始值字符数(明文) | `4`(root)、`40`(token)、`4096`(PEM) |
| `secrets.encrypted` | 仅加密值本身 | AES-256-GCM 密文 | | `secrets.encrypted` | 仅加密值本身 | AES-256-GCM 密文 |
### PEM 共享机制(key_ref) ### PEM 共享机制(key_ref)

2
Cargo.lock generated
View File

@@ -1836,7 +1836,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]] [[package]]
name = "secrets" name = "secrets"
version = "0.9.3" version = "0.9.4"
dependencies = [ dependencies = [
"aes-gcm", "aes-gcm",
"anyhow", "anyhow",

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "secrets" name = "secrets"
version = "0.9.3" version = "0.9.4"
edition = "2024" edition = "2024"
[dependencies] [dependencies]

View File

@@ -54,7 +54,7 @@ secrets search --sort updated --limit 10 --summary
# 精确定位namespace + kind + name 三元组) # 精确定位namespace + kind + name 三元组)
secrets search -n refining --kind service --name gitea secrets search -n refining --kind service --name gitea
# 获取完整记录(含 secrets 字段 schema:field_name、field_type、value_len),无需 master_key # 获取完整记录(含 secrets 字段),无需 master_key
secrets search -n refining --kind service --name gitea -o json secrets search -n refining --kind service --name gitea -o json
# 直接提取单个 metadata 字段值(最短路径) # 直接提取单个 metadata 字段值(最短路径)
@@ -69,7 +69,7 @@ secrets inject -n refining --kind service --name gitea
secrets run -n refining --kind service --name gitea -- printenv secrets run -n refining --kind service --name gitea -- printenv
``` ```
`search` 展示 metadata 与 secrets 的字段 schema(字段名、类型、长度),不展示 secret 值本身;需要值时用 `inject` / `run` `search` 展示 metadata 与 secrets 的字段,不展示 secret 值本身;需要值时用 `inject` / `run`
### 输出格式 ### 输出格式
@@ -184,7 +184,7 @@ RUST_LOG=secrets=trace secrets search
## 数据模型 ## 数据模型
主表 `entries`(namespace、kind、name、tags、metadata)+ 子表 `secrets`(每个加密字段一行,含 field_name、field_type、value_len、encrypted)。首次连接自动建表,同时创建 `audit_log`、`entries_history`、`secrets_history` 等表。 主表 `entries`(namespace、kind、name、tags、metadata)+ 子表 `secrets`(每个加密字段一行,含 field_name、encrypted)。首次连接自动建表,同时创建 `audit_log`、`entries_history`、`secrets_history` 等表。
| 位置 | 字段 | 说明 | | 位置 | 字段 | 说明 |
|------|------|------| |------|------|------|
@@ -193,7 +193,7 @@ RUST_LOG=secrets=trace secrets search
| entries | name | 人类可读唯一标识 | | entries | name | 人类可读唯一标识 |
| entries | tags | 多维标签,如 `["aliyun","hongkong"]` | | entries | tags | 多维标签,如 `["aliyun","hongkong"]` |
| entries | metadata | 明文描述(ip、desc、domains、key_ref 等) | | entries | metadata | 明文描述(ip、desc、domains、key_ref 等) |
| secrets | field_name / field_type / value_len | 明文search 可见AI 可推断 inject 会生成什么变量 | | secrets | field_name | 明文search 可见AI 可推断 inject 会生成什么变量 |
| secrets | encrypted | 仅加密值本身AES-256-GCM | | secrets | encrypted | 仅加密值本身AES-256-GCM |
`-m` / `--meta` 写入 `metadata``-s` / `--secret` 写入 `secrets` 表的独立行。支持 `key=value``key=@file``key:=<json>`,也支持 `credentials:content@./key.pem` 这类嵌套字段文件写入;删除时支持 `--remove-secret credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。 `-m` / `--meta` 写入 `metadata``-s` / `--secret` 写入 `secrets` 表的独立行。支持 `key=value``key=@file``key:=<json>`,也支持 `credentials:content@./key.pem` 这类嵌套字段文件写入;删除时支持 `--remove-secret credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。

View File

@@ -161,28 +161,6 @@ pub(crate) fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Resu
Ok(removed) Ok(removed)
} }
// ── field_type inference and value_len ──────────────────────────────────────
/// Infer the field type string from a JSON value.
pub(crate) fn infer_field_type(v: &Value) -> &'static str {
match v {
Value::String(_) => "string",
Value::Number(_) => "number",
Value::Bool(_) => "boolean",
Value::Null => "string",
Value::Array(_) | Value::Object(_) => "json",
}
}
/// Compute the plaintext length of a JSON value (chars for string, serialized length otherwise).
pub(crate) fn compute_value_len(v: &Value) -> i32 {
match v {
Value::String(s) => s.chars().count() as i32,
Value::Null => 0,
other => other.to_string().chars().count() as i32,
}
}
/// Flatten a (potentially nested) JSON object into dot-separated field entries. /// Flatten a (potentially nested) JSON object into dot-separated field entries.
/// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` → /// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` →
/// `[("credentials.type", "ssh"), ("credentials.content", "...")]` /// `[("credentials.type", "ssh"), ("credentials.content", "...")]`
@@ -291,12 +269,10 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
struct ExistingField { struct ExistingField {
id: uuid::Uuid, id: uuid::Uuid,
field_name: String, field_name: String,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let existing_fields: Vec<ExistingField> = sqlx::query_as( let existing_fields: Vec<ExistingField> = sqlx::query_as(
"SELECT id, field_name, field_type, value_len, encrypted \ "SELECT id, field_name, encrypted \
FROM secrets WHERE entry_id = $1", FROM secrets WHERE entry_id = $1",
) )
.bind(entry_id) .bind(entry_id)
@@ -311,8 +287,6 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
secret_id: f.id, secret_id: f.id,
entry_version: new_entry_version - 1, entry_version: new_entry_version - 1,
field_name: &f.field_name, field_name: &f.field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "add", action: "add",
}, },
@@ -333,18 +307,14 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
// Insert new secret fields. // Insert new secret fields.
let flat_fields = flatten_json_fields("", &secret_json); let flat_fields = flatten_json_fields("", &secret_json);
for (field_name, field_value) in &flat_fields { for (field_name, field_value) in &flat_fields {
let field_type = infer_field_type(field_value);
let value_len = compute_value_len(field_value);
let encrypted = crypto::encrypt_json(master_key, field_value)?; let encrypted = crypto::encrypt_json(master_key, field_value)?;
sqlx::query( sqlx::query(
"INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \ "INSERT INTO secrets (entry_id, field_name, encrypted) \
VALUES ($1, $2, $3, $4, $5)", VALUES ($1, $2, $3)",
) )
.bind(entry_id) .bind(entry_id)
.bind(field_name) .bind(field_name)
.bind(field_type)
.bind(value_len)
.bind(&encrypted) .bind(&encrypted)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
@@ -399,10 +369,7 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{ use super::{build_json, flatten_json_fields, key_path_to_string, parse_kv, remove_path};
build_json, compute_value_len, flatten_json_fields, infer_field_type, key_path_to_string,
parse_kv, remove_path,
};
use serde_json::Value; use serde_json::Value;
use std::fs; use std::fs;
use std::path::PathBuf; use std::path::PathBuf;
@@ -489,19 +456,4 @@ mod tests {
assert_eq!(fields[1].0, "credentials.type"); assert_eq!(fields[1].0, "credentials.type");
assert_eq!(fields[2].0, "username"); assert_eq!(fields[2].0, "username");
} }
#[test]
fn infer_field_types() {
assert_eq!(infer_field_type(&Value::String("x".into())), "string");
assert_eq!(infer_field_type(&serde_json::json!(42)), "number");
assert_eq!(infer_field_type(&Value::Bool(true)), "boolean");
assert_eq!(infer_field_type(&serde_json::json!(["a"])), "json");
}
#[test]
fn compute_value_len_string() {
assert_eq!(compute_value_len(&Value::String("root".into())), 4);
assert_eq!(compute_value_len(&Value::Null), 0);
assert_eq!(compute_value_len(&serde_json::json!(1234)), 4);
}
} }

View File

@@ -257,7 +257,7 @@ async fn snapshot_and_delete(
} }
let fields: Vec<SecretFieldRow> = sqlx::query_as( let fields: Vec<SecretFieldRow> = sqlx::query_as(
"SELECT id, field_name, field_type, value_len, encrypted \ "SELECT id, field_name, encrypted \
FROM secrets WHERE entry_id = $1", FROM secrets WHERE entry_id = $1",
) )
.bind(row.id) .bind(row.id)
@@ -272,8 +272,6 @@ async fn snapshot_and_delete(
secret_id: f.id, secret_id: f.id,
entry_version: row.version, entry_version: row.version,
field_name: &f.field_name, field_name: &f.field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "delete", action: "delete",
}, },

View File

@@ -71,14 +71,12 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
struct SecretHistoryRow { struct SecretHistoryRow {
secret_id: Uuid, secret_id: Uuid,
field_name: String, field_name: String,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
action: String, action: String,
} }
let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as( let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as(
"SELECT secret_id, field_name, field_type, value_len, encrypted, action \ "SELECT secret_id, field_name, encrypted, action \
FROM secrets_history \ FROM secrets_history \
WHERE entry_id = $1 AND entry_version = $2 \ WHERE entry_id = $1 AND entry_version = $2 \
ORDER BY field_name", ORDER BY field_name",
@@ -145,12 +143,10 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
struct LiveField { struct LiveField {
id: Uuid, id: Uuid,
field_name: String, field_name: String,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let live_fields: Vec<LiveField> = sqlx::query_as( let live_fields: Vec<LiveField> = sqlx::query_as(
"SELECT id, field_name, field_type, value_len, encrypted \ "SELECT id, field_name, encrypted \
FROM secrets WHERE entry_id = $1", FROM secrets WHERE entry_id = $1",
) )
.bind(lr.id) .bind(lr.id)
@@ -165,8 +161,6 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
secret_id: f.id, secret_id: f.id,
entry_version: lr.version, entry_version: lr.version,
field_name: &f.field_name, field_name: &f.field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "rollback", action: "rollback",
}, },
@@ -212,11 +206,9 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
continue; continue;
} }
sqlx::query( sqlx::query(
"INSERT INTO secrets (id, entry_id, field_name, field_type, value_len, encrypted) \ "INSERT INTO secrets (id, entry_id, field_name, encrypted) \
VALUES ($1, $2, $3, $4, $5, $6) \ VALUES ($1, $2, $3, $4) \
ON CONFLICT (entry_id, field_name) DO UPDATE SET \ ON CONFLICT (entry_id, field_name) DO UPDATE SET \
field_type = EXCLUDED.field_type, \
value_len = EXCLUDED.value_len, \
encrypted = EXCLUDED.encrypted, \ encrypted = EXCLUDED.encrypted, \
version = secrets.version + 1, \ version = secrets.version + 1, \
updated_at = NOW()", updated_at = NOW()",
@@ -224,8 +216,6 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
.bind(f.secret_id) .bind(f.secret_id)
.bind(snap.entry_id) .bind(snap.entry_id)
.bind(&f.field_name) .bind(&f.field_name)
.bind(&f.field_type)
.bind(f.value_len)
.bind(&f.encrypted) .bind(&f.encrypted)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;

View File

@@ -250,8 +250,8 @@ async fn fetch_entries_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec
// ── Secret schema fetching (no master key) ─────────────────────────────────── // ── Secret schema fetching (no master key) ───────────────────────────────────
/// Fetch secret field schemas (field_name, field_type, value_len) for a set of entry ids. /// Fetch secret field names for a set of entry ids.
/// Returns a map from entry_id to list of SecretField (encrypted field not used here). /// Returns a map from entry_id to list of SecretField.
async fn fetch_secret_schemas( async fn fetch_secret_schemas(
pool: &PgPool, pool: &PgPool,
entry_ids: &[uuid::Uuid], entry_ids: &[uuid::Uuid],
@@ -423,8 +423,6 @@ fn to_json(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Valu
.map(|f| { .map(|f| {
json!({ json!({
"field_name": f.field_name, "field_name": f.field_name,
"field_type": f.field_type,
"value_len": f.value_len,
}) })
}) })
.collect(); .collect();
@@ -474,10 +472,7 @@ fn print_text(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> R
} }
match schema { match schema {
Some(fields) if !fields.is_empty() => { Some(fields) if !fields.is_empty() => {
let schema_str: Vec<String> = fields let schema_str: Vec<String> = fields.iter().map(|f| f.field_name.clone()).collect();
.iter()
.map(|f| format!("{}: {}({})", f.field_name, f.field_type, f.value_len))
.collect();
println!(" secrets: {}", schema_str.join(", ")); println!(" secrets: {}", schema_str.join(", "));
println!(" (use `secrets inject` or `secrets run` to get values)"); println!(" (use `secrets inject` or `secrets run` to get values)");
} }
@@ -556,8 +551,6 @@ mod tests {
id: Uuid::nil(), id: Uuid::nil(),
entry_id: Uuid::nil(), entry_id: Uuid::nil(),
field_name: "token".to_string(), field_name: "token".to_string(),
field_type: "string".to_string(),
value_len: 6,
encrypted: enc, encrypted: enc,
version: 1, version: 1,
created_at: Utc::now(), created_at: Utc::now(),
@@ -597,8 +590,6 @@ mod tests {
let secrets = v.get("secrets").unwrap().as_array().unwrap(); let secrets = v.get("secrets").unwrap().as_array().unwrap();
assert_eq!(secrets.len(), 1); assert_eq!(secrets.len(), 1);
assert_eq!(secrets[0]["field_name"], "token"); assert_eq!(secrets[0]["field_name"], "token");
assert_eq!(secrets[0]["field_type"], "string");
assert_eq!(secrets[0]["value_len"], 6);
} }
#[test] #[test]

View File

@@ -4,8 +4,8 @@ use sqlx::PgPool;
use uuid::Uuid; use uuid::Uuid;
use super::add::{ use super::add::{
collect_field_paths, collect_key_paths, compute_value_len, flatten_json_fields, collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path,
infer_field_type, insert_path, parse_key_path, parse_kv, remove_path, parse_kv, remove_path,
}; };
use crate::crypto; use crate::crypto;
use crate::db; use crate::db;
@@ -130,20 +130,16 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
}); });
for (field_name, fv) in &flat { for (field_name, fv) in &flat {
let field_type = infer_field_type(fv);
let value_len = compute_value_len(fv);
let encrypted = crypto::encrypt_json(master_key, fv)?; let encrypted = crypto::encrypt_json(master_key, fv)?;
// Snapshot existing field before replacing. // Snapshot existing field before replacing.
#[derive(sqlx::FromRow)] #[derive(sqlx::FromRow)]
struct ExistingField { struct ExistingField {
id: Uuid, id: Uuid,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let existing_field: Option<ExistingField> = sqlx::query_as( let existing_field: Option<ExistingField> = sqlx::query_as(
"SELECT id, field_type, value_len, encrypted \ "SELECT id, encrypted \
FROM secrets WHERE entry_id = $1 AND field_name = $2", FROM secrets WHERE entry_id = $1 AND field_name = $2",
) )
.bind(row.id) .bind(row.id)
@@ -159,8 +155,6 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
secret_id: ef.id, secret_id: ef.id,
entry_version: row.version, entry_version: row.version,
field_name, field_name,
field_type: &ef.field_type,
value_len: ef.value_len,
encrypted: &ef.encrypted, encrypted: &ef.encrypted,
action: "update", action: "update",
}, },
@@ -171,19 +165,15 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
} }
sqlx::query( sqlx::query(
"INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \ "INSERT INTO secrets (entry_id, field_name, encrypted) \
VALUES ($1, $2, $3, $4, $5) \ VALUES ($1, $2, $3) \
ON CONFLICT (entry_id, field_name) DO UPDATE SET \ ON CONFLICT (entry_id, field_name) DO UPDATE SET \
field_type = EXCLUDED.field_type, \
value_len = EXCLUDED.value_len, \
encrypted = EXCLUDED.encrypted, \ encrypted = EXCLUDED.encrypted, \
version = secrets.version + 1, \ version = secrets.version + 1, \
updated_at = NOW()", updated_at = NOW()",
) )
.bind(row.id) .bind(row.id)
.bind(field_name) .bind(field_name)
.bind(field_type)
.bind(value_len)
.bind(&encrypted) .bind(&encrypted)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
@@ -200,12 +190,10 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
#[derive(sqlx::FromRow)] #[derive(sqlx::FromRow)]
struct FieldToDelete { struct FieldToDelete {
id: Uuid, id: Uuid,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let field: Option<FieldToDelete> = sqlx::query_as( let field: Option<FieldToDelete> = sqlx::query_as(
"SELECT id, field_type, value_len, encrypted \ "SELECT id, encrypted \
FROM secrets WHERE entry_id = $1 AND field_name = $2", FROM secrets WHERE entry_id = $1 AND field_name = $2",
) )
.bind(row.id) .bind(row.id)
@@ -221,8 +209,6 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
secret_id: f.id, secret_id: f.id,
entry_version: new_version, entry_version: new_version,
field_name: &field_name, field_name: &field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "delete", action: "delete",
}, },

View File

@@ -44,8 +44,6 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
id UUID PRIMARY KEY DEFAULT uuidv7(), id UUID PRIMARY KEY DEFAULT uuidv7(),
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
field_name VARCHAR(256) NOT NULL, field_name VARCHAR(256) NOT NULL,
field_type VARCHAR(32) NOT NULL DEFAULT 'string',
value_len INT NOT NULL DEFAULT 0,
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
version BIGINT NOT NULL DEFAULT 1, version BIGINT NOT NULL DEFAULT 1,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
@@ -103,8 +101,6 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
secret_id UUID NOT NULL, secret_id UUID NOT NULL,
entry_version BIGINT NOT NULL, entry_version BIGINT NOT NULL,
field_name VARCHAR(256) NOT NULL, field_name VARCHAR(256) NOT NULL,
field_type VARCHAR(32) NOT NULL DEFAULT 'string',
value_len INT NOT NULL DEFAULT 0,
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
action VARCHAR(16) NOT NULL, action VARCHAR(16) NOT NULL,
actor VARCHAR(128) NOT NULL DEFAULT '', actor VARCHAR(128) NOT NULL DEFAULT '',
@@ -168,8 +164,6 @@ pub struct SecretSnapshotParams<'a> {
pub secret_id: uuid::Uuid, pub secret_id: uuid::Uuid,
pub entry_version: i64, pub entry_version: i64,
pub field_name: &'a str, pub field_name: &'a str,
pub field_type: &'a str,
pub value_len: i32,
pub encrypted: &'a [u8], pub encrypted: &'a [u8],
pub action: &'a str, pub action: &'a str,
} }
@@ -182,15 +176,13 @@ pub async fn snapshot_secret_history(
let actor = current_actor(); let actor = current_actor();
sqlx::query( sqlx::query(
"INSERT INTO secrets_history \ "INSERT INTO secrets_history \
(entry_id, secret_id, entry_version, field_name, field_type, value_len, encrypted, action, actor) \ (entry_id, secret_id, entry_version, field_name, encrypted, action, actor) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)", VALUES ($1, $2, $3, $4, $5, $6, $7)",
) )
.bind(p.entry_id) .bind(p.entry_id)
.bind(p.secret_id) .bind(p.secret_id)
.bind(p.entry_version) .bind(p.entry_version)
.bind(p.field_name) .bind(p.field_name)
.bind(p.field_type)
.bind(p.value_len)
.bind(p.encrypted) .bind(p.encrypted)
.bind(p.action) .bind(p.action)
.bind(&actor) .bind(&actor)

View File

@@ -111,7 +111,13 @@ EXAMPLES:
# Write a multiline file into a nested secret field # Write a multiline file into a nested secret field
secrets add -n refining --kind server --name my-server \\ secrets add -n refining --kind server --name my-server \\
-s credentials:content@./keys/server.pem")] -s credentials:content@./keys/server.pem
# Shared PEM (key_ref): store key once, reference from multiple servers
secrets add -n refining --kind key --name my-shared-key \\
--tag aliyun -s content=@./keys/shared.pem
secrets add -n refining --kind server --name i-abc123 \\
-m ip=10.0.0.1 -m key_ref=my-shared-key -s username=ecs-user")]
Add { Add {
/// Namespace, e.g. refining, ricnsmart /// Namespace, e.g. refining, ricnsmart
#[arg(short, long)] #[arg(short, long)]
@@ -125,7 +131,8 @@ EXAMPLES:
/// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong /// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong
#[arg(long = "tag")] #[arg(long = "tag")]
tags: Vec<String>, tags: Vec<String>,
/// Plaintext metadata: key=value, key:=<json>, key=@file, or nested:path@file /// Plaintext metadata: key=value, key:=<json>, key=@file, or nested:path@file.
/// Use key_ref=<name> to reference a shared key entry (kind=key); inject/run merge its secrets.
#[arg(long = "meta", short = 'm')] #[arg(long = "meta", short = 'm')]
meta: Vec<String>, meta: Vec<String>,
/// Secret entry: key=value, key:=<json>, key=@file, or nested:path@file /// Secret entry: key=value, key:=<json>, key=@file, or nested:path@file
@@ -287,7 +294,11 @@ EXAMPLES:
# Update nested typed JSON fields # Update nested typed JSON fields
secrets update -n refining --kind service --name deploy-bot \\ secrets update -n refining --kind service --name deploy-bot \\
-s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\ -s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\
-s auth:retry:=5")] -s auth:retry:=5
# Rotate shared PEM (all servers with key_ref=my-shared-key get the new key)
secrets update -n refining --kind key --name my-shared-key \\
-s content=@./keys/new-shared.pem")]
Update { Update {
/// Namespace, e.g. refining, ricnsmart /// Namespace, e.g. refining, ricnsmart
#[arg(short, long)] #[arg(short, long)]
@@ -304,7 +315,8 @@ EXAMPLES:
/// Remove a tag (repeatable) /// Remove a tag (repeatable)
#[arg(long = "remove-tag")] #[arg(long = "remove-tag")]
remove_tags: Vec<String>, remove_tags: Vec<String>,
/// Set or overwrite a metadata field: key=value, key:=<json>, key=@file, or nested:path@file /// Set or overwrite a metadata field: key=value, key:=<json>, key=@file, or nested:path@file.
/// Use key_ref=<name> to reference a shared key entry (kind=key).
#[arg(long = "meta", short = 'm')] #[arg(long = "meta", short = 'm')]
meta: Vec<String>, meta: Vec<String>,
/// Delete a metadata field by key or nested path, e.g. old_port or credentials:content /// Delete a metadata field by key or nested path, e.g. old_port or credentials:content
@@ -394,7 +406,9 @@ EXAMPLES:
secrets inject -n refining --kind service --name gitea -o json secrets inject -n refining --kind service --name gitea -o json
# Eval into current shell (use with caution) # Eval into current shell (use with caution)
eval $(secrets inject -n refining --kind service --name gitea)")] eval $(secrets inject -n refining --kind service --name gitea)
# For entries with metadata.key_ref, referenced key's secrets are merged automatically")]
Inject { Inject {
#[arg(short, long)] #[arg(short, long)]
namespace: Option<String>, namespace: Option<String>,
@@ -424,7 +438,9 @@ EXAMPLES:
secrets run --tag production -- env | grep GITEA secrets run --tag production -- env | grep GITEA
# With prefix # With prefix
secrets run -n refining --kind service --name gitea --prefix GITEA -- printenv")] secrets run -n refining --kind service --name gitea --prefix GITEA -- printenv
# metadata.key_ref entries get key secrets merged (e.g. server + shared PEM)")]
Run { Run {
#[arg(short, long)] #[arg(short, long)]
namespace: Option<String>, namespace: Option<String>,

View File

@@ -20,17 +20,11 @@ pub struct Entry {
} }
/// A single encrypted field belonging to an Entry. /// A single encrypted field belonging to an Entry.
/// field_name, field_type, and value_len are stored in plaintext so that
/// `search` can show the schema without requiring the master key.
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
pub struct SecretField { pub struct SecretField {
pub id: Uuid, pub id: Uuid,
pub entry_id: Uuid, pub entry_id: Uuid,
pub field_name: String, pub field_name: String,
/// Inferred type: "string", "number", "boolean", "json"
pub field_type: String,
/// Length of the plaintext value in characters (0 for binary-like PEM)
pub value_len: i32,
/// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag /// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag
pub encrypted: Vec<u8>, pub encrypted: Vec<u8>,
pub version: i64, pub version: i64,
@@ -54,8 +48,6 @@ pub struct EntryRow {
pub struct SecretFieldRow { pub struct SecretFieldRow {
pub id: Uuid, pub id: Uuid,
pub field_name: String, pub field_name: String,
pub field_type: String,
pub value_len: i32,
pub encrypted: Vec<u8>, pub encrypted: Vec<u8>,
} }