From e1cd6e736c9d9b69a63555da354d955a58b689fa Mon Sep 17 00:00:00 2001 From: voson Date: Thu, 19 Mar 2026 15:18:12 +0800 Subject: [PATCH] =?UTF-8?q?refactor:=20entries=20+=20secrets=20=E5=8F=8C?= =?UTF-8?q?=E8=A1=A8=EF=BC=8Csearch=20=E5=B1=95=E7=A4=BA=20field=20schema?= =?UTF-8?q?=EF=BC=8Ckey=5Fref=20PEM=20=E5=85=B1=E4=BA=AB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - secrets 表拆为 entries(主表)+ secrets(每字段一行) - search 无需 master_key 即可展示 secrets 字段名、类型、长度 - inject/run 支持 metadata.key_ref 引用 kind=key 记录,PEM 轮换 O(1) - entries_history + secrets_history 字段级历史,rollback 按 version 恢复 - 移除迁移用 DROP 语句,migrate 幂等 - v0.8.0 Made-with: Cursor --- AGENTS.md | 124 +++++++++---- Cargo.lock | 2 +- Cargo.toml | 2 +- README.md | 45 ++--- src/commands/add.rs | 194 +++++++++++++++++--- src/commands/delete.rs | 61 +++++-- src/commands/rollback.rs | 179 ++++++++++++++----- src/commands/run.rs | 26 +-- src/commands/search.rs | 369 ++++++++++++++++++++++++++------------- src/commands/update.rs | 188 +++++++++++++++----- src/db.rs | 164 ++++++++++------- src/main.rs | 5 - src/models.rs | 22 ++- 13 files changed, 1000 insertions(+), 381 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 879bfc5..045f3cb 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -7,7 +7,7 @@ 3. 若当前版本对应 tag 已存在,必须先 bump `Cargo.toml` 的 `version`,再执行 `cargo build` 同步 `Cargo.lock`,然后才能提交。 4. 
提交前优先运行 `./scripts/release-check.sh`;该脚本会检查重复版本并执行 `cargo fmt -- --check && cargo clippy --locked -- -D warnings && cargo test --locked`。 -跨设备密钥与配置管理 CLI 工具,将 refining / ricnsmart 两个项目的服务器信息、服务凭据存储到 PostgreSQL 18,供 AI 工具读取上下文。敏感数据(encrypted 字段)使用 AES-256-GCM 加密,主密钥由 Argon2id 从主密码派生并存入平台安全存储(macOS Keychain / Windows Credential Manager / Linux keyutils)。 +跨设备密钥与配置管理 CLI 工具,将服务器信息、服务凭据等存储到 PostgreSQL 18,供 AI 工具读取上下文。每个加密字段单独行存储(`secrets` 子表),字段名、类型、长度以明文保存,主密钥由 Argon2id 从主密码派生并存入平台安全存储(macOS Keychain / Windows Credential Manager / Linux keyutils)。 ## 项目结构 @@ -17,19 +17,19 @@ secrets/ main.rs # CLI 入口,clap 命令定义,auto-migrate,--verbose 全局参数 output.rs # OutputMode 枚举 + TTY 检测(TTY→text,非 TTY→json-compact) config.rs # 配置读写:~/.config/secrets/config.toml(database_url) - db.rs # PgPool 创建 + 建表/索引(幂等,含 audit_log + kv_config + secrets_history) + db.rs # PgPool 创建 + 建表/索引(幂等,含所有表) crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串 - models.rs # Secret 结构体(sqlx::FromRow + serde,含 version 字段) - audit.rs # 审计写入:log_tx(事务内)/ log(池,保留备用) + models.rs # Entry + SecretField 结构体(sqlx::FromRow + serde) + audit.rs # 审计写入:log_tx(事务内) commands/ init.rs # init 命令:主密钥初始化(每台设备一次) - add.rs # add 命令:upsert,事务化,含历史快照,支持 key:=json 类型化值与嵌套路径写入 + add.rs # add 命令:upsert entries + 逐字段写入 secrets,含历史快照 config.rs # config 命令:set-db / show / path(持久化 database_url) - search.rs # search 命令:多条件查询,公开 fetch_rows / build_env_map - delete.rs # delete 命令:事务化,含历史快照 - update.rs # update 命令:增量更新,CAS 并发保护,含历史快照 - rollback.rs # rollback / history 命令:版本回滚与历史查看 - run.rs # inject / run 命令:临时环境变量注入 + search.rs # search 命令:多条件查询,展示 secrets 字段 schema(无需 master_key) + delete.rs # delete 命令:事务化,CASCADE 删除 secrets,含历史快照 + update.rs # update 命令:增量更新,secrets 行级 UPSERT/DELETE,CAS 并发保护 + rollback.rs # rollback / history 命令:按 entry_version 恢复 entry + secrets + run.rs # inject / run 命令:逐字段解密 + key_ref 引用解析 upgrade.rs # upgrade 命令:检查、校验摘要并下载最新版本,自动替换二进制 scripts/ release-check.sh # 发版前检查版本号/tag 是否重复,并执行 fmt/clippy/test @@ -44,19 
+44,18 @@ secrets/ - **Host**: `:` - **Database**: `secrets` - **连接串**: `postgres://postgres:@:/secrets` -- **表**: `secrets`(主表)+ `audit_log`(审计表)+ `kv_config`(Argon2 salt 等),首次连接自动建表(auto-migrate) +- **表**: `entries`(主表)+ `secrets`(加密字段子表)+ `entries_history` + `secrets_history` + `audit_log` + `kv_config`,首次连接自动建表(auto-migrate) ### 表结构 ```sql -secrets ( +entries ( id UUID PRIMARY KEY DEFAULT uuidv7(), -- PG18 时间有序 UUID namespace VARCHAR(64) NOT NULL, -- 一级隔离: "refining" | "ricnsmart" - kind VARCHAR(64) NOT NULL, -- 类型: "server" | "service"(可扩展) + kind VARCHAR(64) NOT NULL, -- 类型: "server" | "service" | "key"(可扩展) name VARCHAR(256) NOT NULL, -- 人类可读标识 tags TEXT[] NOT NULL DEFAULT '{}', -- 灵活标签: ["aliyun","hongkong"] metadata JSONB NOT NULL DEFAULT '{}', -- 明文描述: ip, desc, domains, location... - encrypted BYTEA NOT NULL DEFAULT '\x', -- AES-256-GCM 密文: nonce(12B)||ciphertext+tag version BIGINT NOT NULL DEFAULT 1, -- 乐观锁版本号,每次写操作自增 created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), @@ -65,26 +64,24 @@ secrets ( ``` ```sql -secrets_history ( - id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - secret_id UUID NOT NULL, -- 对应 secrets.id - namespace VARCHAR(64) NOT NULL, - kind VARCHAR(64) NOT NULL, - name VARCHAR(256) NOT NULL, - version BIGINT NOT NULL, -- 被快照时的版本号 - action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' - tags TEXT[] NOT NULL DEFAULT '{}', - metadata JSONB NOT NULL DEFAULT '{}', - encrypted BYTEA NOT NULL DEFAULT '\x', -- 快照时的加密密文 - actor VARCHAR(128) NOT NULL DEFAULT '', - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +secrets ( + id UUID PRIMARY KEY DEFAULT uuidv7(), + entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, + field_name VARCHAR(256) NOT NULL, -- 明文字段名: "username", "token", "ssh_key" + field_type VARCHAR(32) NOT NULL DEFAULT 'string', -- 明文类型: "string"|"number"|"boolean"|"json" + value_len INT NOT NULL DEFAULT 0, -- 明文原始值字符数(PEM≈4096,token≈40) + encrypted BYTEA 
NOT NULL DEFAULT '\x', -- 仅加密值本身:nonce(12B)||ciphertext+tag + version BIGINT NOT NULL DEFAULT 1, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(entry_id, field_name) ) ``` ```sql kv_config ( key TEXT PRIMARY KEY, -- 如 'argon2_salt' - value BYTEA NOT NULL -- Argon2id salt,首台设备 init 时生成 + value BYTEA NOT NULL -- Argon2id salt,首台设备 init 时生成 ) ``` @@ -93,26 +90,85 @@ kv_config ( ```sql audit_log ( id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - action VARCHAR(32) NOT NULL, -- 'add' | 'update' | 'delete' + action VARCHAR(32) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' namespace VARCHAR(64) NOT NULL, kind VARCHAR(64) NOT NULL, name VARCHAR(256) NOT NULL, detail JSONB NOT NULL DEFAULT '{}', -- 变更摘要(tags/meta keys/secret keys,不含 value) - actor VARCHAR(128) NOT NULL DEFAULT '', -- 操作者($USER 环境变量) + actor VARCHAR(128) NOT NULL DEFAULT '', -- 操作者($USER 环境变量) created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ) ``` +### entries_history 表结构 + +```sql +entries_history ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + entry_id UUID NOT NULL, + namespace VARCHAR(64) NOT NULL, + kind VARCHAR(64) NOT NULL, + name VARCHAR(256) NOT NULL, + version BIGINT NOT NULL, -- 被快照时的版本号 + action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' + tags TEXT[] NOT NULL DEFAULT '{}', + metadata JSONB NOT NULL DEFAULT '{}', + actor VARCHAR(128) NOT NULL DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +) +``` + +### secrets_history 表结构 + +```sql +secrets_history ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + entry_id UUID NOT NULL, + secret_id UUID NOT NULL, -- 对应 secrets.id + entry_version BIGINT NOT NULL, -- 关联 entries_history 的版本号 + field_name VARCHAR(256) NOT NULL, + field_type VARCHAR(32) NOT NULL DEFAULT 'string', + value_len INT NOT NULL DEFAULT 0, + encrypted BYTEA NOT NULL DEFAULT '\x', + action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' + actor 
VARCHAR(128) NOT NULL DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +) +``` + ### 字段职责划分 | 字段 | 存什么 | 示例 | |------|--------|------| | `namespace` | 项目/团队隔离 | `refining`, `ricnsmart` | -| `kind` | 记录类型 | `server`, `service` | +| `kind` | 记录类型 | `server`, `service`, `key` | | `name` | 唯一标识名 | `i-uf63f2uookgs5uxmrdyc`, `gitea` | | `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` | -| `metadata` | 明文非敏感信息 | `{"ip":"47.243.154.187","desc":"Grafana","domains":["..."]}` | -| `encrypted` | 敏感凭据,AES-256-GCM 加密存储 | 二进制密文,解密后为 `{"ssh_key":"...","password":"..."}` | +| `metadata` | 明文非敏感信息 | `{"ip":"47.243.154.187","desc":"Grafana","key_ref":"ricn-hk-260127"}` | +| `secrets.field_name` | 加密字段名(明文) | `"username"`, `"token"`, `"ssh_key"` | +| `secrets.field_type` | 值类型(明文) | `"string"`, `"number"`, `"boolean"`, `"json"` | +| `secrets.value_len` | 原始值字符数(明文) | `4`(root),`40`(token),`4096`(PEM) | +| `secrets.encrypted` | 仅加密值本身 | AES-256-GCM 密文 | + +### PEM 共享机制(key_ref) + +同一 PEM 被多台服务器共享时,将 PEM 存为独立的 `kind=key` 记录,服务器通过 `metadata.key_ref` 引用: + +```bash +# 1. 存共享 PEM +secrets add -n refining --kind key --name ricn-hk-260127 \ + --tag aliyun --tag hongkong \ + -s content=@./keys/ricn-hk-260127.pem + +# 2. 服务器通过 metadata.key_ref 引用(inject/run 时自动合并 key 的 secrets) +secrets add -n refining --kind server --name i-j6c39dmtkr26vztii0ox \ + -m ip=47.243.154.187 -m key_ref=ricn-hk-260127 \ + -s username=ecs-user + +# 3. 
轮换只需更新 key 记录,所有引用服务器自动生效 +secrets update -n refining --kind key --name ricn-hk-260127 \ + -s content=@./keys/new-key.pem +``` ## 数据库配置 @@ -175,7 +231,7 @@ secrets init # --name gitea | i-uf63f2uookgs5uxmrdyc | mqtt # --tag aliyun | hongkong | production # -q / --query mqtt | grafana | gitea (模糊匹配 name/namespace/kind/tags/metadata) -# --show-secrets 已弃用;search 不再直接展示 secrets +# secrets schema search 默认展示 secrets 字段名、类型与长度(无需 master_key) # -f / --field metadata.ip | metadata.url | metadata.default_org # --summary 不带值的 flag,仅返回摘要(name/tags/desc/updated_at) # --limit 20 | 50(默认 50) diff --git a/Cargo.lock b/Cargo.lock index 90a85d6..ca1cce2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1836,7 +1836,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "secrets" -version = "0.7.5" +version = "0.8.0" dependencies = [ "aes-gcm", "anyhow", diff --git a/Cargo.toml b/Cargo.toml index 28d4f70..fac46cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "secrets" -version = "0.7.5" +version = "0.8.0" edition = "2024" [dependencies] diff --git a/README.md b/README.md index 4510793..54038c5 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ 跨设备密钥与配置管理 CLI,基于 Rust + PostgreSQL 18。 -将服务器信息、服务凭据统一存入数据库,供本地工具和 AI 读取上下文。敏感数据(`encrypted` 字段)使用 AES-256-GCM 加密存储,主密钥由 Argon2id 从主密码派生并存入系统钥匙串。 +将服务器信息、服务凭据统一存入数据库,供本地工具和 AI 读取上下文。每个敏感字段单独行存储(`secrets` 子表),字段名、类型、长度以明文保存便于 AI 理解,仅值本身使用 AES-256-GCM 加密;主密钥由 Argon2id 从主密码派生并存入系统钥匙串。 ## 安装 @@ -54,7 +54,7 @@ secrets search --sort updated --limit 10 --summary # 精确定位(namespace + kind + name 三元组) secrets search -n refining --kind service --name gitea -# 获取完整记录(secrets 保持加密占位) +# 获取完整记录(含 secrets 字段 schema:field_name、field_type、value_len,无需 master_key) secrets search -n refining --kind service --name gitea -o json # 直接提取单个 metadata 字段值(最短路径) @@ -69,7 +69,7 @@ secrets inject -n refining --kind service --name gitea secrets run -n refining --kind service --name gitea -- printenv 
``` -`search` 只负责发现、定位和读取 metadata,不直接展示 secrets。 +`search` 展示 metadata 与 secrets 的字段 schema(字段名、类型、长度),不展示 secret 值本身;需要值时用 `inject` / `run`。 ### 输出格式 @@ -111,7 +111,7 @@ secrets search -n refining --kind service --name gitea # 精确查找 secrets search -q mqtt # 关键词模糊搜索 secrets search --tag hongkong # 按 tag 过滤 secrets search -n refining --kind service --name gitea -f metadata.url # 提取 metadata 字段 -secrets search -n refining --kind service --name gitea -o json # 完整记录(secrets 保持占位) +secrets search -n refining --kind service --name gitea -o json # 完整记录(含 secrets schema) secrets search --sort updated --limit 10 --summary # 最近改动 secrets search -n refining --summary --limit 10 --offset 10 # 翻页 @@ -165,18 +165,21 @@ RUST_LOG=secrets=trace secrets search ## 数据模型 -单张 `secrets` 表,首次连接自动建表;同时自动创建 `audit_log` 表,记录所有写操作。 +主表 `entries`(namespace、kind、name、tags、metadata)+ 子表 `secrets`(每个加密字段一行,含 field_name、field_type、value_len、encrypted)。首次连接自动建表;同时创建 `audit_log`、`entries_history`、`secrets_history` 等表。 -| 字段 | 说明 | -|------|------| -| `namespace` | 一级隔离,如 `refining`、`ricnsmart` | -| `kind` | 记录类型,如 `server`、`service`(可自由扩展) | -| `name` | 人类可读唯一标识 | -| `tags` | 多维标签,如 `["aliyun","hongkong"]` | -| `metadata` | 明文描述信息(ip、desc、domains 等) | -| `encrypted` | 敏感凭据(ssh_key、password、token 等),AES-256-GCM 加密存储 | +| 位置 | 字段 | 说明 | +|------|------|------| +| entries | namespace | 一级隔离,如 `refining`、`ricnsmart` | +| entries | kind | 记录类型,如 `server`、`service`、`key`(可自由扩展) | +| entries | name | 人类可读唯一标识 | +| entries | tags | 多维标签,如 `["aliyun","hongkong"]` | +| entries | metadata | 明文描述(ip、desc、domains、key_ref 等) | +| secrets | field_name / field_type / value_len | 明文,search 可见,AI 可推断 inject 会生成什么变量 | +| secrets | encrypted | 仅加密值本身,AES-256-GCM | -`-m` / `--meta` 写入 `metadata`,`-s` / `--secret` 写入 `encrypted`。支持 `key=value`、`key=@file`、`key:=`,也支持 `credentials:content@./key.pem` 这种嵌套字段文件写入语法,避免手动转义多行文本;删除时也支持 `--remove-secret credentials:content` 和 `--remove-meta credentials:content`。加解密使用主密钥(由 
`secrets init` 设置)。 +`-m` / `--meta` 写入 `metadata`,`-s` / `--secret` 写入 `secrets` 表的独立行。支持 `key=value`、`key=@file`、`key:=`,也支持 `credentials:content@./key.pem` 这类嵌套字段文件写入;删除时支持 `--remove-secret credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。 + +**PEM 共享**:同一 PEM 被多台服务器共享时,可存为 `kind=key` 记录,服务器通过 `metadata.key_ref` 引用;轮换只需 update 一条 key 记录,所有引用自动生效。详见 [AGENTS.md](AGENTS.md)。 ### `-m` / `--meta` JSON 语法速查 @@ -280,17 +283,19 @@ src/ main.rs # CLI 入口(clap),含各子命令 after_help 示例 output.rs # OutputMode 枚举 + TTY 检测 config.rs # 配置读写(~/.config/secrets/config.toml) - db.rs # 连接池 + auto-migrate(secrets + audit_log + kv_config) + db.rs # 连接池 + auto-migrate(entries + secrets + entries_history + secrets_history + audit_log + kv_config) crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串 - models.rs # Secret 结构体 + models.rs # Entry + SecretField 结构体 audit.rs # 审计日志写入(audit_log 表) commands/ init.rs # 主密钥初始化(首次/新设备) - add.rs # upsert,支持 -o json + add.rs # upsert entries + secrets 行,支持 -o json config.rs # config set-db/show/path - search.rs # 多条件查询,支持 -f/-o/--summary/--limit/--offset/--sort - delete.rs # 删除 - update.rs # 增量更新(合并 tags/metadata/encrypted) + search.rs # 多条件查询,展示 secrets schema(-f/-o/--summary/--limit/--offset/--sort) + delete.rs # 删除(CASCADE 删除 secrets) + update.rs # 增量更新(tags/metadata + secrets 行级 UPSERT/DELETE) + rollback.rs # rollback / history:按 entry_version 恢复 + run.rs # inject / run,逐字段解密 + key_ref 引用解析 upgrade.rs # 从 Gitea Release 自更新 scripts/ setup-gitea-actions.sh # 配置 Gitea Actions 变量与 Secrets diff --git a/src/commands/add.rs b/src/commands/add.rs index 9b1022b..71a6ca5 100644 --- a/src/commands/add.rs +++ b/src/commands/add.rs @@ -7,6 +7,8 @@ use crate::crypto; use crate::db; use crate::output::OutputMode; +// ── Key/value parsing helpers (shared with update.rs) ─────────────────────── + /// Parse secret / metadata entries into a nested key path and JSON value. 
/// - `key=value` → stores the literal string `value` /// - `key:=` → parses `` as a typed JSON value @@ -158,6 +160,52 @@ pub(crate) fn remove_path(map: &mut Map, path: &[String]) -> Resu Ok(removed) } +// ── field_type inference and value_len ────────────────────────────────────── + +/// Infer the field type string from a JSON value. +pub(crate) fn infer_field_type(v: &Value) -> &'static str { + match v { + Value::String(_) => "string", + Value::Number(_) => "number", + Value::Bool(_) => "boolean", + Value::Null => "string", + Value::Array(_) | Value::Object(_) => "json", + } +} + +/// Compute the plaintext length of a JSON value (chars for string, serialized length otherwise). +pub(crate) fn compute_value_len(v: &Value) -> i32 { + match v { + Value::String(s) => s.chars().count() as i32, + Value::Null => 0, + other => other.to_string().chars().count() as i32, + } +} + +/// Flatten a (potentially nested) JSON object into dot-separated field entries. +/// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` → +/// `[("credentials.type", "ssh"), ("credentials.content", "...")]` +/// Top-level non-object values are emitted directly. 
+pub(crate) fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> { + match value { + Value::Object(map) => { + let mut out = Vec::new(); + for (k, v) in map { + let full_key = if prefix.is_empty() { + k.clone() + } else { + format!("{}.{}", prefix, k) + }; + out.extend(flatten_json_fields(&full_key, v)); + } + out + } + other => vec![(prefix.to_string(), other.clone())], + } +} + +// ── Add command ────────────────────────────────────────────────────────────── + pub struct AddArgs<'a> { pub namespace: &'a str, pub kind: &'a str, @@ -171,26 +219,24 @@ pub struct AddArgs<'a> { pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Result<()> { let metadata = build_json(args.meta_entries)?; let secret_json = build_json(args.secret_entries)?; - let encrypted_bytes = crypto::encrypt_json(master_key, &secret_json)?; - tracing::debug!(args.namespace, args.kind, args.name, "upserting record"); + tracing::debug!(args.namespace, args.kind, args.name, "upserting entry"); let meta_keys = collect_key_paths(args.meta_entries)?; let secret_keys = collect_key_paths(args.secret_entries)?; let mut tx = pool.begin().await?; - // Snapshot existing row into history before overwriting (if it exists). + // Upsert the entry row (tags + metadata). 
#[derive(sqlx::FromRow)] - struct ExistingRow { + struct EntryRow { id: uuid::Uuid, version: i64, tags: Vec, - metadata: serde_json::Value, - encrypted: Vec, + metadata: Value, } - let existing: Option = sqlx::query_as( - "SELECT id, version, tags, metadata, encrypted FROM secrets \ + let existing: Option = sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ WHERE namespace = $1 AND kind = $2 AND name = $3", ) .bind(args.namespace) @@ -199,11 +245,12 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res .fetch_optional(&mut *tx) .await?; - if let Some(ex) = existing - && let Err(e) = db::snapshot_history( + // Snapshot the current entry state before overwriting. + if let Some(ref ex) = existing + && let Err(e) = db::snapshot_entry_history( &mut tx, - db::SnapshotParams { - secret_id: ex.id, + db::EntrySnapshotParams { + entry_id: ex.id, namespace: args.namespace, kind: args.kind, name: args.name, @@ -211,25 +258,24 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res action: "add", tags: &ex.tags, metadata: &ex.metadata, - encrypted: &ex.encrypted, }, ) .await { - tracing::warn!(error = %e, "failed to snapshot history before upsert"); + tracing::warn!(error = %e, "failed to snapshot entry history before upsert"); } - sqlx::query( + let entry_id: uuid::Uuid = sqlx::query_scalar( r#" - INSERT INTO secrets (namespace, kind, name, tags, metadata, encrypted, version, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, 1, NOW()) + INSERT INTO entries (namespace, kind, name, tags, metadata, version, updated_at) + VALUES ($1, $2, $3, $4, $5, 1, NOW()) ON CONFLICT (namespace, kind, name) DO UPDATE SET tags = EXCLUDED.tags, metadata = EXCLUDED.metadata, - encrypted = EXCLUDED.encrypted, - version = secrets.version + 1, + version = entries.version + 1, updated_at = NOW() + RETURNING id "#, ) .bind(args.namespace) @@ -237,10 +283,79 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 
32]) -> Res .bind(args.name) .bind(args.tags) .bind(&metadata) - .bind(&encrypted_bytes) - .execute(&mut *tx) + .fetch_one(&mut *tx) .await?; + let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1") + .bind(entry_id) + .fetch_one(&mut *tx) + .await?; + + // Snapshot existing secret fields before replacing. + if existing.is_some() { + #[derive(sqlx::FromRow)] + struct ExistingField { + id: uuid::Uuid, + field_name: String, + field_type: String, + value_len: i32, + encrypted: Vec, + } + let existing_fields: Vec = sqlx::query_as( + "SELECT id, field_name, field_type, value_len, encrypted \ + FROM secrets WHERE entry_id = $1", + ) + .bind(entry_id) + .fetch_all(&mut *tx) + .await?; + + for f in &existing_fields { + if let Err(e) = db::snapshot_secret_history( + &mut tx, + db::SecretSnapshotParams { + entry_id, + secret_id: f.id, + entry_version: new_entry_version - 1, + field_name: &f.field_name, + field_type: &f.field_type, + value_len: f.value_len, + encrypted: &f.encrypted, + action: "add", + }, + ) + .await + { + tracing::warn!(error = %e, "failed to snapshot secret field history"); + } + } + + // Delete existing secret fields so we can re-insert the full set. + sqlx::query("DELETE FROM secrets WHERE entry_id = $1") + .bind(entry_id) + .execute(&mut *tx) + .await?; + } + + // Insert new secret fields. 
+ let flat_fields = flatten_json_fields("", &secret_json); + for (field_name, field_value) in &flat_fields { + let field_type = infer_field_type(field_value); + let value_len = compute_value_len(field_value); + let encrypted = crypto::encrypt_json(master_key, field_value)?; + + sqlx::query( + "INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \ + VALUES ($1, $2, $3, $4, $5)", + ) + .bind(entry_id) + .bind(field_name) + .bind(field_type) + .bind(value_len) + .bind(&encrypted) + .execute(&mut *tx) + .await?; + } + crate::audit::log_tx( &mut tx, "add", @@ -293,7 +408,10 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res #[cfg(test)] mod tests { - use super::{build_json, key_path_to_string, parse_kv, remove_path}; + use super::{ + build_json, compute_value_len, flatten_json_fields, infer_field_type, key_path_to_string, + parse_kv, remove_path, + }; use serde_json::Value; use std::fs; use std::path::PathBuf; @@ -363,4 +481,36 @@ mod tests { assert!(removed); assert_eq!(value, serde_json::json!({ "username": "root" })); } + + #[test] + fn flatten_json_fields_nested() { + let v = serde_json::json!({ + "username": "root", + "credentials": { + "type": "ssh", + "content": "pem-data" + } + }); + let mut fields = flatten_json_fields("", &v); + fields.sort_by(|a, b| a.0.cmp(&b.0)); + + assert_eq!(fields[0].0, "credentials.content"); + assert_eq!(fields[1].0, "credentials.type"); + assert_eq!(fields[2].0, "username"); + } + + #[test] + fn infer_field_types() { + assert_eq!(infer_field_type(&Value::String("x".into())), "string"); + assert_eq!(infer_field_type(&serde_json::json!(42)), "number"); + assert_eq!(infer_field_type(&Value::Bool(true)), "boolean"); + assert_eq!(infer_field_type(&serde_json::json!(["a"])), "json"); + } + + #[test] + fn compute_value_len_string() { + assert_eq!(compute_value_len(&Value::String("root".into())), 4); + assert_eq!(compute_value_len(&Value::Null), 0); + 
assert_eq!(compute_value_len(&serde_json::json!(1234)), 4); + } } diff --git a/src/commands/delete.rs b/src/commands/delete.rs index d97b21b..2d9c3f9 100644 --- a/src/commands/delete.rs +++ b/src/commands/delete.rs @@ -7,11 +7,19 @@ use crate::db; use crate::output::OutputMode; #[derive(FromRow)] -struct DeleteRow { +struct EntryRow { id: Uuid, version: i64, tags: Vec, metadata: Value, +} + +#[derive(FromRow)] +struct SecretFieldRow { + id: Uuid, + field_name: String, + field_type: String, + value_len: i32, encrypted: Vec, } @@ -22,12 +30,12 @@ pub async fn run( name: &str, output: OutputMode, ) -> Result<()> { - tracing::debug!(namespace, kind, name, "deleting record"); + tracing::debug!(namespace, kind, name, "deleting entry"); let mut tx = pool.begin().await?; - let row: Option = sqlx::query_as( - "SELECT id, version, tags, metadata, encrypted FROM secrets \ + let row: Option = sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ WHERE namespace = $1 AND kind = $2 AND name = $3 \ FOR UPDATE", ) @@ -39,7 +47,7 @@ pub async fn run( let Some(row) = row else { tx.rollback().await?; - tracing::warn!(namespace, kind, name, "record not found for deletion"); + tracing::warn!(namespace, kind, name, "entry not found for deletion"); match output { OutputMode::Json => println!( "{}", @@ -58,11 +66,11 @@ pub async fn run( return Ok(()); }; - // Snapshot before physical delete so the row can be restored via rollback. - if let Err(e) = db::snapshot_history( + // Snapshot entry history before deleting. 
+ if let Err(e) = db::snapshot_entry_history( &mut tx, - db::SnapshotParams { - secret_id: row.id, + db::EntrySnapshotParams { + entry_id: row.id, namespace, kind, name, @@ -70,15 +78,44 @@ pub async fn run( action: "delete", tags: &row.tags, metadata: &row.metadata, - encrypted: &row.encrypted, }, ) .await { - tracing::warn!(error = %e, "failed to snapshot history before delete"); + tracing::warn!(error = %e, "failed to snapshot entry history before delete"); } - sqlx::query("DELETE FROM secrets WHERE id = $1") + // Snapshot all secret fields before cascade delete. + let fields: Vec = sqlx::query_as( + "SELECT id, field_name, field_type, value_len, encrypted \ + FROM secrets WHERE entry_id = $1", + ) + .bind(row.id) + .fetch_all(&mut *tx) + .await?; + + for f in &fields { + if let Err(e) = db::snapshot_secret_history( + &mut tx, + db::SecretSnapshotParams { + entry_id: row.id, + secret_id: f.id, + entry_version: row.version, + field_name: &f.field_name, + field_type: &f.field_type, + value_len: f.value_len, + encrypted: &f.encrypted, + action: "delete", + }, + ) + .await + { + tracing::warn!(error = %e, "failed to snapshot secret field history before delete"); + } + } + + // Delete the entry — secrets rows are removed via ON DELETE CASCADE. + sqlx::query("DELETE FROM entries WHERE id = $1") .bind(row.id) .execute(&mut *tx) .await?; diff --git a/src/commands/rollback.rs b/src/commands/rollback.rs index 67d22d5..8ba3522 100644 --- a/src/commands/rollback.rs +++ b/src/commands/rollback.rs @@ -3,32 +3,34 @@ use serde_json::{Value, json}; use sqlx::{FromRow, PgPool}; use uuid::Uuid; +use crate::crypto; +use crate::db; use crate::output::{OutputMode, format_local_time}; -#[derive(FromRow)] -struct HistoryRow { - secret_id: Uuid, - version: i64, - action: String, - tags: Vec, - metadata: Value, - encrypted: Vec, -} - pub struct RollbackArgs<'a> { pub namespace: &'a str, pub kind: &'a str, pub name: &'a str, - /// Target version to restore. 
None → restore the most recent history entry. + /// Target entry version to restore. None → restore the most recent history entry. pub to_version: Option, pub output: OutputMode, } pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -> Result<()> { - let snap: Option = if let Some(ver) = args.to_version { + // ── Find the target entry history snapshot ──────────────────────────────── + #[derive(FromRow)] + struct EntryHistoryRow { + entry_id: Uuid, + version: i64, + action: String, + tags: Vec, + metadata: Value, + } + + let snap: Option = if let Some(ver) = args.to_version { sqlx::query_as( - "SELECT secret_id, version, action, tags, metadata, encrypted \ - FROM secrets_history \ + "SELECT entry_id, version, action, tags, metadata \ + FROM entries_history \ WHERE namespace = $1 AND kind = $2 AND name = $3 AND version = $4 \ ORDER BY id DESC LIMIT 1", ) @@ -40,8 +42,8 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - .await? } else { sqlx::query_as( - "SELECT secret_id, version, action, tags, metadata, encrypted \ - FROM secrets_history \ + "SELECT entry_id, version, action, tags, metadata \ + FROM entries_history \ WHERE namespace = $1 AND kind = $2 AND name = $3 \ ORDER BY id DESC LIMIT 1", ) @@ -64,25 +66,53 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - ) })?; - // Validate encrypted blob is non-trivial (re-encrypt guard). - if !snap.encrypted.is_empty() { - // Probe decrypt to ensure the blob is valid before restoring. 
- crate::crypto::decrypt_json(master_key, &snap.encrypted)?; + // ── Find the matching secret field snapshots ────────────────────────────── + #[derive(FromRow)] + struct SecretHistoryRow { + secret_id: Uuid, + field_name: String, + field_type: String, + value_len: i32, + encrypted: Vec, + action: String, + } + + let field_snaps: Vec = sqlx::query_as( + "SELECT secret_id, field_name, field_type, value_len, encrypted, action \ + FROM secrets_history \ + WHERE entry_id = $1 AND entry_version = $2 \ + ORDER BY field_name", + ) + .bind(snap.entry_id) + .bind(snap.version) + .fetch_all(pool) + .await?; + + // Validate: try decrypting all encrypted fields before writing anything. + for f in &field_snaps { + if f.action != "delete" && !f.encrypted.is_empty() { + crypto::decrypt_json(master_key, &f.encrypted).map_err(|e| { + anyhow::anyhow!( + "Cannot decrypt snapshot for field '{}': {}", + f.field_name, + e + ) + })?; + } } let mut tx = pool.begin().await?; - // Snapshot current live row (if it exists) before overwriting. 
+ // ── Snapshot the current live state before overwriting ──────────────────── #[derive(sqlx::FromRow)] - struct LiveRow { + struct LiveEntry { id: Uuid, version: i64, tags: Vec, metadata: Value, - encrypted: Vec, } - let live: Option = sqlx::query_as( - "SELECT id, version, tags, metadata, encrypted FROM secrets \ + let live: Option = sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ WHERE namespace = $1 AND kind = $2 AND name = $3 FOR UPDATE", ) .bind(args.namespace) @@ -91,11 +121,11 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - .fetch_optional(&mut *tx) .await?; - if let Some(lr) = live - && let Err(e) = crate::db::snapshot_history( + if let Some(ref lr) = live { + if let Err(e) = db::snapshot_entry_history( &mut tx, - crate::db::SnapshotParams { - secret_id: lr.id, + db::EntrySnapshotParams { + entry_id: lr.id, namespace: args.namespace, kind: args.kind, name: args.name, @@ -103,35 +133,104 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - action: "rollback", tags: &lr.tags, metadata: &lr.metadata, - encrypted: &lr.encrypted, }, ) .await - { - tracing::warn!(error = %e, "failed to snapshot current row before rollback"); + { + tracing::warn!(error = %e, "failed to snapshot entry before rollback"); + } + + // Snapshot existing secret fields. 
+ #[derive(sqlx::FromRow)] + struct LiveField { + id: Uuid, + field_name: String, + field_type: String, + value_len: i32, + encrypted: Vec, + } + let live_fields: Vec = sqlx::query_as( + "SELECT id, field_name, field_type, value_len, encrypted \ + FROM secrets WHERE entry_id = $1", + ) + .bind(lr.id) + .fetch_all(&mut *tx) + .await?; + + for f in &live_fields { + if let Err(e) = db::snapshot_secret_history( + &mut tx, + db::SecretSnapshotParams { + entry_id: lr.id, + secret_id: f.id, + entry_version: lr.version, + field_name: &f.field_name, + field_type: &f.field_type, + value_len: f.value_len, + encrypted: &f.encrypted, + action: "rollback", + }, + ) + .await + { + tracing::warn!(error = %e, "failed to snapshot secret field before rollback"); + } + } } + // ── Restore entry row ───────────────────────────────────────────────────── sqlx::query( - "INSERT INTO secrets (id, namespace, kind, name, tags, metadata, encrypted, version, updated_at) \ - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW()) \ + "INSERT INTO entries (id, namespace, kind, name, tags, metadata, version, updated_at) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, NOW()) \ ON CONFLICT (namespace, kind, name) DO UPDATE SET \ tags = EXCLUDED.tags, \ metadata = EXCLUDED.metadata, \ - encrypted = EXCLUDED.encrypted, \ - version = secrets.version + 1, \ + version = entries.version + 1, \ updated_at = NOW()", ) - .bind(snap.secret_id) + .bind(snap.entry_id) .bind(args.namespace) .bind(args.kind) .bind(args.name) .bind(&snap.tags) .bind(&snap.metadata) - .bind(&snap.encrypted) .bind(snap.version) .execute(&mut *tx) .await?; + // ── Restore secret fields ───────────────────────────────────────────────── + // Delete all current fields and re-insert from snapshot + // (only non-deleted fields from the snapshot are restored). 
+ sqlx::query("DELETE FROM secrets WHERE entry_id = $1") + .bind(snap.entry_id) + .execute(&mut *tx) + .await?; + + for f in &field_snaps { + if f.action == "delete" { + // Field was deleted at this snapshot point — don't restore it. + continue; + } + sqlx::query( + "INSERT INTO secrets (id, entry_id, field_name, field_type, value_len, encrypted) \ + VALUES ($1, $2, $3, $4, $5, $6) \ + ON CONFLICT (entry_id, field_name) DO UPDATE SET \ + field_type = EXCLUDED.field_type, \ + value_len = EXCLUDED.value_len, \ + encrypted = EXCLUDED.encrypted, \ + version = secrets.version + 1, \ + updated_at = NOW()", + ) + .bind(f.secret_id) + .bind(snap.entry_id) + .bind(&f.field_name) + .bind(&f.field_type) + .bind(f.value_len) + .bind(&f.encrypted) + .execute(&mut *tx) + .await?; + } + crate::audit::log_tx( &mut tx, "rollback", @@ -167,7 +266,7 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - Ok(()) } -/// List history entries for a record. +/// List history entries for an entry. pub async fn list_history( pool: &PgPool, namespace: &str, @@ -185,7 +284,7 @@ pub async fn list_history( } let rows: Vec = sqlx::query_as( - "SELECT version, action, actor, created_at FROM secrets_history \ + "SELECT version, action, actor, created_at FROM entries_history \ WHERE namespace = $1 AND kind = $2 AND name = $3 \ ORDER BY id DESC LIMIT $4", ) diff --git a/src/commands/run.rs b/src/commands/run.rs index 46aadd5..4bbf17e 100644 --- a/src/commands/run.rs +++ b/src/commands/run.rs @@ -3,7 +3,7 @@ use serde_json::Value; use sqlx::PgPool; use std::collections::HashMap; -use crate::commands::search::build_injected_env_map; +use crate::commands::search::{build_injected_env_map, fetch_entries, fetch_secrets_for_entries}; use crate::output::OutputMode; pub struct InjectArgs<'a> { @@ -11,7 +11,6 @@ pub struct InjectArgs<'a> { pub kind: Option<&'a str>, pub name: Option<&'a str>, pub tags: &'a [String], - /// Prefix to prepend to every variable name. 
Empty string means no prefix. pub prefix: &'a str, pub output: OutputMode, } @@ -22,12 +21,10 @@ pub struct RunArgs<'a> { pub name: Option<&'a str>, pub tags: &'a [String], pub prefix: &'a str, - /// The command and its arguments to execute with injected secrets. pub command: &'a [String], } -/// Fetch secrets matching the filter and build a flat env map. -/// Metadata and secret fields are merged; naming: `_` (uppercased). +/// Fetch entries matching the filter and build a flat env map (metadata + decrypted secrets). pub async fn collect_env_map( pool: &PgPool, namespace: Option<&str>, @@ -42,13 +39,19 @@ pub async fn collect_env_map( "At least one filter (--namespace, --kind, --name, or --tag) is required for inject/run" ); } - let rows = crate::commands::search::fetch_rows(pool, namespace, kind, name, tags, None).await?; - if rows.is_empty() { + let entries = fetch_entries(pool, namespace, kind, name, tags, None).await?; + if entries.is_empty() { anyhow::bail!("No records matched the given filters."); } + + let entry_ids: Vec = entries.iter().map(|e| e.id).collect(); + let fields_map = fetch_secrets_for_entries(pool, &entry_ids).await?; + let mut map = HashMap::new(); - for row in &rows { - let row_map = build_injected_env_map(row, prefix, master_key)?; + for entry in &entries { + let empty = vec![]; + let fields = fields_map.get(&entry.id).unwrap_or(&empty); + let row_map = build_injected_env_map(pool, entry, prefix, master_key, fields).await?; for (k, v) in row_map { map.insert(k, v); } @@ -56,7 +59,7 @@ pub async fn collect_env_map( Ok(map) } -/// `inject` command: print env vars to stdout (suitable for `eval $(...)` or export). +/// `inject` command: print env vars to stdout. 
pub async fn run_inject(pool: &PgPool, args: InjectArgs<'_>, master_key: &[u8; 32]) -> Result<()> { let env_map = collect_env_map( pool, @@ -85,7 +88,6 @@ pub async fn run_inject(pool: &PgPool, args: InjectArgs<'_>, master_key: &[u8; 3 println!("{}", serde_json::to_string(&Value::Object(obj))?); } _ => { - // Shell-safe KEY=VALUE output, one per line. let mut pairs: Vec<(String, String)> = env_map.into_iter().collect(); pairs.sort_by(|a, b| a.0.cmp(&b.0)); for (k, v) in pairs { @@ -136,8 +138,6 @@ pub async fn run_exec(pool: &PgPool, args: RunArgs<'_>, master_key: &[u8; 32]) - Ok(()) } -/// Quote a value for safe shell output. Wraps the value in single quotes, -/// escaping any single quotes within the value. fn shell_quote(s: &str) -> String { format!("'{}'", s.replace('\'', "'\\''")) } diff --git a/src/commands/search.rs b/src/commands/search.rs index 45fabdb..0a20da2 100644 --- a/src/commands/search.rs +++ b/src/commands/search.rs @@ -4,7 +4,7 @@ use sqlx::PgPool; use std::collections::HashMap; use crate::crypto; -use crate::models::Secret; +use crate::models::{Entry, SecretField}; use crate::output::{OutputMode, format_local_time}; pub struct SearchArgs<'a> { @@ -13,7 +13,6 @@ pub struct SearchArgs<'a> { pub name: Option<&'a str>, pub tags: &'a [String], pub query: Option<&'a str>, - pub show_secrets: bool, pub fields: &'a [String], pub summary: bool, pub limit: u32, @@ -23,9 +22,9 @@ pub struct SearchArgs<'a> { } pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> { - validate_safe_search_args(args.show_secrets, args.fields)?; + validate_safe_search_args(args.fields)?; - let rows = fetch_rows_paged( + let rows = fetch_entries_paged( pool, PagedFetchArgs { namespace: args.namespace, @@ -40,14 +39,25 @@ pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> { ) .await?; - // -f/--field: extract specific field values directly + // -f/--field: extract specific metadata field values directly if !args.fields.is_empty() { return 
print_fields(&rows, args.fields); } + // Fetch secret schemas for all returned entries (no master key needed). + let entry_ids: Vec = rows.iter().map(|r| r.id).collect(); + let schema_map = if !args.summary && !entry_ids.is_empty() { + fetch_secret_schemas(pool, &entry_ids).await? + } else { + HashMap::new() + }; + match args.output { OutputMode::Json | OutputMode::JsonCompact => { - let arr: Vec = rows.iter().map(|r| to_json(r, args.summary)).collect(); + let arr: Vec = rows + .iter() + .map(|r| to_json(r, args.summary, schema_map.get(&r.id).map(Vec::as_slice))) + .collect(); let out = if args.output == OutputMode::Json { serde_json::to_string_pretty(&arr)? } else { @@ -61,7 +71,11 @@ pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> { return Ok(()); } for row in &rows { - print_text(row, args.summary)?; + print_text( + row, + args.summary, + schema_map.get(&row.id).map(Vec::as_slice), + )?; } println!("{} record(s) found.", rows.len()); if rows.len() == args.limit as usize { @@ -77,20 +91,13 @@ pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> { Ok(()) } -fn validate_safe_search_args(show_secrets: bool, fields: &[String]) -> Result<()> { - if show_secrets { - anyhow::bail!( - "`search` no longer reveals secrets. Use `secrets inject` or `secrets run` instead." - ); - } - +fn validate_safe_search_args(fields: &[String]) -> Result<()> { if let Some(field) = fields.iter().find(|field| is_secret_field(field)) { anyhow::bail!( "Field '{}' is sensitive. `search -f` only supports metadata.* fields; use `secrets inject` or `secrets run` for secrets.", field ); } - Ok(()) } @@ -101,16 +108,29 @@ fn is_secret_field(field: &str) -> bool { ) } -/// Fetch rows with simple equality/tag filters (no pagination). Used by inject/run. 
-pub async fn fetch_rows( +// ── Entry fetching ──────────────────────────────────────────────────────────── + +struct PagedFetchArgs<'a> { + namespace: Option<&'a str>, + kind: Option<&'a str>, + name: Option<&'a str>, + tags: &'a [String], + query: Option<&'a str>, + sort: &'a str, + limit: u32, + offset: u32, +} + +/// Fetch entries matching the given filters (used by search, inject, run). +pub async fn fetch_entries( pool: &PgPool, namespace: Option<&str>, kind: Option<&str>, name: Option<&str>, tags: &[String], query: Option<&str>, -) -> Result> { - fetch_rows_paged( +) -> Result> { + fetch_entries_paged( pool, PagedFetchArgs { namespace, @@ -126,19 +146,7 @@ pub async fn fetch_rows( .await } -/// Arguments for the internal paged fetch. Grouped to avoid too-many-arguments lint. -struct PagedFetchArgs<'a> { - namespace: Option<&'a str>, - kind: Option<&'a str>, - name: Option<&'a str>, - tags: &'a [String], - query: Option<&'a str>, - sort: &'a str, - limit: u32, - offset: u32, -} - -async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result> { +async fn fetch_entries_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result> { let mut conditions: Vec = Vec::new(); let mut idx: i32 = 1; @@ -187,7 +195,7 @@ async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result) -> Result(&sql); + let mut q = sqlx::query_as::<_, Entry>(&sql); if let Some(v) = a.namespace { q = q.bind(v); } @@ -219,12 +227,62 @@ async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result String { - let name_part = row.name.to_uppercase().replace(['-', '.', ' '], "_"); +// ── Secret schema fetching (no master key) ─────────────────────────────────── + +/// Fetch secret field schemas (field_name, field_type, value_len) for a set of entry ids. +/// Returns a map from entry_id to list of SecretField (encrypted field not used here). 
+async fn fetch_secret_schemas(
+    pool: &PgPool,
+    entry_ids: &[uuid::Uuid],
+) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
+    if entry_ids.is_empty() {
+        return Ok(HashMap::new());
+    }
+
+    let fields: Vec<SecretField> = sqlx::query_as(
+        "SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
+    )
+    .bind(entry_ids)
+    .fetch_all(pool)
+    .await?;
+
+    let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
+    for f in fields {
+        map.entry(f.entry_id).or_default().push(f);
+    }
+    Ok(map)
+}
+
+/// Fetch all secret fields (including encrypted bytes) for a set of entry ids.
+pub async fn fetch_secrets_for_entries(
+    pool: &PgPool,
+    entry_ids: &[uuid::Uuid],
+) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
+    if entry_ids.is_empty() {
+        return Ok(HashMap::new());
+    }
+
+    let fields: Vec<SecretField> = sqlx::query_as(
+        "SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
+    )
+    .bind(entry_ids)
+    .fetch_all(pool)
+    .await?;
+
+    let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
+    for f in fields {
+        map.entry(f.entry_id).or_default().push(f);
+    }
+    Ok(map)
+}
+
+// ── Display helpers ───────────────────────────────────────────────────────────
+
+fn env_prefix(entry: &Entry, prefix: &str) -> String {
+    let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_");
     if prefix.is_empty() {
         name_part
     } else {
@@ -236,15 +294,12 @@ fn env_prefix(row: &Secret, prefix: &str) -> String {
     }
 }
-/// Build a flat `KEY=VALUE` map from metadata only.
-/// Variable names: `<NAME>_<KEY>` (all uppercased, hyphens/dots → underscores).
-/// If `prefix` is empty, the name segment alone is used as the prefix.
-pub fn build_metadata_env_map(row: &Secret, prefix: &str) -> HashMap<String, String> {
-    let effective_prefix = env_prefix(row, prefix);
-
+/// Build a flat KEY=VALUE map from metadata only (no master key required).
+pub fn build_metadata_env_map(entry: &Entry, prefix: &str) -> HashMap { + let effective_prefix = env_prefix(entry, prefix); let mut map = HashMap::new(); - if let Some(meta) = row.metadata.as_object() { + if let Some(meta) = entry.metadata.as_object() { for (k, v) in meta { let key = format!( "{}_{}", @@ -254,37 +309,68 @@ pub fn build_metadata_env_map(row: &Secret, prefix: &str) -> HashMap Result> { - let effective_prefix = env_prefix(row, prefix); - let mut map = build_metadata_env_map(row, prefix); + let effective_prefix = env_prefix(entry, prefix); + let mut map = build_metadata_env_map(entry, prefix); - if !row.encrypted.is_empty() { - let decrypted = crypto::decrypt_json(master_key, &row.encrypted)?; - if let Some(enc) = decrypted.as_object() { - for (k, v) in enc { - let key = format!( + // Decrypt each secret field and add to env map. + for f in fields { + let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; + let key = format!( + "{}_{}", + effective_prefix, + f.field_name.to_uppercase().replace(['-', '.'], "_") + ); + map.insert(key, json_value_to_env_string(&decrypted)); + } + + // Resolve key_ref: merge secrets from the referenced key entry. 
+ if let Some(key_ref) = entry.metadata.get("key_ref").and_then(|v| v.as_str()) { + let key_entries = fetch_entries( + pool, + Some(&entry.namespace), + Some("key"), + Some(key_ref), + &[], + None, + ) + .await?; + + if let Some(key_entry) = key_entries.first() { + let key_ids = vec![key_entry.id]; + let key_fields_map = fetch_secrets_for_entries(pool, &key_ids).await?; + let empty = vec![]; + let key_fields = key_fields_map.get(&key_entry.id).unwrap_or(&empty); + + let key_prefix = env_prefix(key_entry, prefix); + for f in key_fields { + let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; + let key_var = format!( "{}_{}", - effective_prefix, - k.to_uppercase().replace(['-', '.'], "_") + key_prefix, + f.field_name.to_uppercase().replace(['-', '.'], "_") ); - map.insert(key, json_value_to_env_string(v)); + map.insert(key_var, json_value_to_env_string(&decrypted)); } + } else { + tracing::warn!(key_ref, "key_ref target not found"); } } Ok(map) } -/// Convert a JSON value to its string representation suitable for env vars. 
fn json_value_to_env_string(v: &Value) -> String { match v { Value::String(s) => s.clone(), @@ -293,81 +379,101 @@ fn json_value_to_env_string(v: &Value) -> String { } } -fn to_json(row: &Secret, summary: bool) -> Value { +fn to_json(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Value { if summary { - let desc = row + let desc = entry .metadata .get("desc") - .or_else(|| row.metadata.get("url")) + .or_else(|| entry.metadata.get("url")) .and_then(|v| v.as_str()) .unwrap_or("") .to_string(); return json!({ - "namespace": row.namespace, - "kind": row.kind, - "name": row.name, - "tags": row.tags, + "namespace": entry.namespace, + "kind": entry.kind, + "name": entry.name, + "tags": entry.tags, "desc": desc, - "updated_at": row.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), + "updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), }); } - let secrets_val = if row.encrypted.is_empty() { - Value::Object(Default::default()) - } else { - json!({"_encrypted": true}) + let secrets_val: Value = match schema { + Some(fields) if !fields.is_empty() => { + let schema_arr: Vec = fields + .iter() + .map(|f| { + json!({ + "field_name": f.field_name, + "field_type": f.field_type, + "value_len": f.value_len, + }) + }) + .collect(); + Value::Array(schema_arr) + } + _ => Value::Array(vec![]), }; json!({ - "id": row.id, - "namespace": row.namespace, - "kind": row.kind, - "name": row.name, - "tags": row.tags, - "metadata": row.metadata, + "id": entry.id, + "namespace": entry.namespace, + "kind": entry.kind, + "name": entry.name, + "tags": entry.tags, + "metadata": entry.metadata, "secrets": secrets_val, - "version": row.version, - "created_at": row.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), - "updated_at": row.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), + "version": entry.version, + "created_at": entry.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), + "updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), }) 
} -fn print_text(row: &Secret, summary: bool) -> Result<()> { - println!("[{}/{}] {}", row.namespace, row.kind, row.name); +fn print_text(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Result<()> { + println!("[{}/{}] {}", entry.namespace, entry.kind, entry.name); if summary { - let desc = row + let desc = entry .metadata .get("desc") - .or_else(|| row.metadata.get("url")) + .or_else(|| entry.metadata.get("url")) .and_then(|v| v.as_str()) .unwrap_or("-"); - if !row.tags.is_empty() { - println!(" tags: [{}]", row.tags.join(", ")); + if !entry.tags.is_empty() { + println!(" tags: [{}]", entry.tags.join(", ")); } println!(" desc: {}", desc); - println!(" updated: {}", format_local_time(row.updated_at)); + println!(" updated: {}", format_local_time(entry.updated_at)); } else { - println!(" id: {}", row.id); - if !row.tags.is_empty() { - println!(" tags: [{}]", row.tags.join(", ")); + println!(" id: {}", entry.id); + if !entry.tags.is_empty() { + println!(" tags: [{}]", entry.tags.join(", ")); } - if row.metadata.as_object().is_some_and(|m| !m.is_empty()) { + if entry.metadata.as_object().is_some_and(|m| !m.is_empty()) { println!( " metadata: {}", - serde_json::to_string_pretty(&row.metadata)? + serde_json::to_string_pretty(&entry.metadata)? ); } - if !row.encrypted.is_empty() { - println!(" secrets: [encrypted] (use `secrets inject` or `secrets run`)"); + match schema { + Some(fields) if !fields.is_empty() => { + let schema_str: Vec = fields + .iter() + .map(|f| format!("{}: {}({})", f.field_name, f.field_type, f.value_len)) + .collect(); + println!(" secrets: {}", schema_str.join(", ")); + println!(" (use `secrets inject` or `secrets run` to get values)"); + } + _ => {} } - println!(" created: {}", format_local_time(row.created_at)); + println!(" version: {}", entry.version); + println!(" created: {}", format_local_time(entry.created_at)); } println!(); Ok(()) } -/// Extract one or more field paths like `metadata.url`. 
-fn print_fields(rows: &[Secret], fields: &[String]) -> Result<()> { +/// Extract one or more metadata field paths like `metadata.url`. +fn print_fields(rows: &[Entry], fields: &[String]) -> Result<()> { for row in rows { for field in fields { let val = extract_field(row, field)?; @@ -377,13 +483,13 @@ fn print_fields(rows: &[Secret], fields: &[String]) -> Result<()> { Ok(()) } -fn extract_field(row: &Secret, field: &str) -> Result { +fn extract_field(entry: &Entry, field: &str) -> Result { let (section, key) = field .split_once('.') .ok_or_else(|| anyhow::anyhow!("Invalid field path '{}'. Use metadata..", field))?; let obj = match section { - "metadata" | "meta" => &row.metadata, + "metadata" | "meta" => &entry.metadata, other => anyhow::bail!("Unknown field section '{}'. Use 'metadata'.", other), }; @@ -397,9 +503,9 @@ fn extract_field(row: &Secret, field: &str) -> Result { anyhow::anyhow!( "Field '{}' not found in record [{}/{}/{}]", field, - row.namespace, - row.kind, - row.name + entry.namespace, + entry.kind, + entry.name ) }) } @@ -411,41 +517,47 @@ mod tests { use serde_json::json; use uuid::Uuid; - fn sample_secret() -> Secret { - let key = [0x42u8; 32]; - let encrypted = crypto::encrypt_json(&key, &json!({"token": "abc123"})).unwrap(); - - Secret { + fn sample_entry() -> Entry { + Entry { id: Uuid::nil(), namespace: "refining".to_string(), kind: "service".to_string(), name: "gitea.main".to_string(), tags: vec!["prod".to_string()], metadata: json!({"url": "https://gitea.refining.dev", "enabled": true}), - encrypted, version: 1, created_at: Utc::now(), updated_at: Utc::now(), } } - #[test] - fn rejects_show_secrets_flag() { - let err = validate_safe_search_args(true, &[]).unwrap_err(); - assert!(err.to_string().contains("no longer reveals secrets")); + fn sample_fields() -> Vec { + let key = [0x42u8; 32]; + let enc = crypto::encrypt_json(&key, &json!("abc123")).unwrap(); + vec![SecretField { + id: Uuid::nil(), + entry_id: Uuid::nil(), + field_name: 
"token".to_string(), + field_type: "string".to_string(), + value_len: 6, + encrypted: enc, + version: 1, + created_at: Utc::now(), + updated_at: Utc::now(), + }] } #[test] fn rejects_secret_field_extraction() { let fields = vec!["secret.token".to_string()]; - let err = validate_safe_search_args(false, &fields).unwrap_err(); + let err = validate_safe_search_args(&fields).unwrap_err(); assert!(err.to_string().contains("sensitive")); } #[test] fn metadata_env_map_excludes_secret_values() { - let row = sample_secret(); - let map = build_metadata_env_map(&row, ""); + let entry = sample_entry(); + let map = build_metadata_env_map(&entry, ""); assert_eq!( map.get("GITEA_MAIN_URL").map(String::as_str), @@ -459,14 +571,23 @@ mod tests { } #[test] - fn injected_env_map_includes_secret_values() { - let row = sample_secret(); - let key = [0x42u8; 32]; - let map = build_injected_env_map(&row, "", &key).unwrap(); + fn to_json_full_includes_secrets_schema() { + let entry = sample_entry(); + let fields = sample_fields(); + let v = to_json(&entry, false, Some(&fields)); - assert_eq!( - map.get("GITEA_MAIN_TOKEN").map(String::as_str), - Some("abc123") - ); + let secrets = v.get("secrets").unwrap().as_array().unwrap(); + assert_eq!(secrets.len(), 1); + assert_eq!(secrets[0]["field_name"], "token"); + assert_eq!(secrets[0]["field_type"], "string"); + assert_eq!(secrets[0]["value_len"], 6); + } + + #[test] + fn to_json_summary_omits_secrets_schema() { + let entry = sample_entry(); + let fields = sample_fields(); + let v = to_json(&entry, true, Some(&fields)); + assert!(v.get("secrets").is_none()); } } diff --git a/src/commands/update.rs b/src/commands/update.rs index 772a26b..cfa5c46 100644 --- a/src/commands/update.rs +++ b/src/commands/update.rs @@ -4,19 +4,19 @@ use sqlx::{FromRow, PgPool}; use uuid::Uuid; use super::add::{ - collect_field_paths, collect_key_paths, insert_path, parse_key_path, parse_kv, remove_path, + collect_field_paths, collect_key_paths, compute_value_len, 
flatten_json_fields, + infer_field_type, insert_path, parse_key_path, parse_kv, remove_path, }; use crate::crypto; use crate::db; use crate::output::OutputMode; #[derive(FromRow)] -struct UpdateRow { +struct EntryRow { id: Uuid, version: i64, tags: Vec, metadata: Value, - encrypted: Vec, } pub struct UpdateArgs<'a> { @@ -35,9 +35,9 @@ pub struct UpdateArgs<'a> { pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> Result<()> { let mut tx = pool.begin().await?; - let row: Option = sqlx::query_as( - "SELECT id, version, tags, metadata, encrypted \ - FROM secrets \ + let row: Option = sqlx::query_as( + "SELECT id, version, tags, metadata \ + FROM entries \ WHERE namespace = $1 AND kind = $2 AND name = $3 \ FOR UPDATE", ) @@ -56,11 +56,11 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> ) })?; - // Snapshot current state before modifying. - if let Err(e) = db::snapshot_history( + // Snapshot current entry state before modifying. + if let Err(e) = db::snapshot_entry_history( &mut tx, - db::SnapshotParams { - secret_id: row.id, + db::EntrySnapshotParams { + entry_id: row.id, namespace: args.namespace, kind: args.kind, name: args.name, @@ -68,15 +68,14 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> action: "update", tags: &row.tags, metadata: &row.metadata, - encrypted: &row.encrypted, }, ) .await { - tracing::warn!(error = %e, "failed to snapshot history before update"); + tracing::warn!(error = %e, "failed to snapshot entry history before update"); } - // Merge tags + // ── Merge tags ──────────────────────────────────────────────────────────── let mut tags: Vec = row.tags; for t in args.add_tags { if !tags.contains(t) { @@ -85,7 +84,7 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> } tags.retain(|t| !args.remove_tags.contains(t)); - // Merge metadata + // ── Merge metadata ──────────────────────────────────────────────────────── let mut 
meta_map: Map = match row.metadata { Value::Object(m) => m, _ => Map::new(), @@ -100,43 +99,14 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> } let metadata = Value::Object(meta_map); - // Decrypt existing encrypted blob, merge changes, re-encrypt - let existing_json = if row.encrypted.is_empty() { - Value::Object(Map::new()) - } else { - crypto::decrypt_json(master_key, &row.encrypted)? - }; - let mut enc_map: Map = match existing_json { - Value::Object(m) => m, - _ => Map::new(), - }; - for entry in args.secret_entries { - let (path, value) = parse_kv(entry)?; - insert_path(&mut enc_map, &path, value)?; - } - for key in args.remove_secrets { - let path = parse_key_path(key)?; - remove_path(&mut enc_map, &path)?; - } - let secret_json = Value::Object(enc_map); - let encrypted_bytes = crypto::encrypt_json(master_key, &secret_json)?; - - tracing::debug!( - namespace = args.namespace, - kind = args.kind, - name = args.name, - "updating record" - ); - - // CAS: update only if version hasn't changed (FOR UPDATE lock ensures this). + // CAS update of the entry row. let result = sqlx::query( - "UPDATE secrets \ - SET tags = $1, metadata = $2, encrypted = $3, version = version + 1, updated_at = NOW() \ - WHERE id = $4 AND version = $5", + "UPDATE entries \ + SET tags = $1, metadata = $2, version = version + 1, updated_at = NOW() \ + WHERE id = $3 AND version = $4", ) .bind(&tags) .bind(&metadata) - .bind(&encrypted_bytes) .bind(row.id) .bind(row.version) .execute(&mut *tx) @@ -152,6 +122,130 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> ); } + let new_version = row.version + 1; + + // ── Update secret fields ────────────────────────────────────────────────── + for entry in args.secret_entries { + let (path, field_value) = parse_kv(entry)?; + + // For nested paths (e.g. credentials:type), flatten into dot-separated names + // and treat the sub-value as the individual field to store. 
+        let flat = flatten_json_fields("", &{
+            let mut m = Map::new();
+            insert_path(&mut m, &path, field_value)?;
+            Value::Object(m)
+        });
+
+        for (field_name, fv) in &flat {
+            let field_type = infer_field_type(fv);
+            let value_len = compute_value_len(fv);
+            let encrypted = crypto::encrypt_json(master_key, fv)?;
+
+            // Snapshot existing field before replacing.
+            #[derive(sqlx::FromRow)]
+            struct ExistingField {
+                id: Uuid,
+                field_type: String,
+                value_len: i32,
+                encrypted: Vec<u8>,
+            }
+            let existing_field: Option<ExistingField> = sqlx::query_as(
+                "SELECT id, field_type, value_len, encrypted \
+                 FROM secrets WHERE entry_id = $1 AND field_name = $2",
+            )
+            .bind(row.id)
+            .bind(field_name)
+            .fetch_optional(&mut *tx)
+            .await?;
+
+            if let Some(ef) = &existing_field
+                && let Err(e) = db::snapshot_secret_history(
+                    &mut tx,
+                    db::SecretSnapshotParams {
+                        entry_id: row.id,
+                        secret_id: ef.id,
+                        entry_version: row.version,
+                        field_name,
+                        field_type: &ef.field_type,
+                        value_len: ef.value_len,
+                        encrypted: &ef.encrypted,
+                        action: "update",
+                    },
+                )
+                .await
+            {
+                tracing::warn!(error = %e, "failed to snapshot secret field history");
+            }
+
+            sqlx::query(
+                "INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \
+                 VALUES ($1, $2, $3, $4, $5) \
+                 ON CONFLICT (entry_id, field_name) DO UPDATE SET \
+                 field_type = EXCLUDED.field_type, \
+                 value_len = EXCLUDED.value_len, \
+                 encrypted = EXCLUDED.encrypted, \
+                 version = secrets.version + 1, \
+                 updated_at = NOW()",
+            )
+            .bind(row.id)
+            .bind(field_name)
+            .bind(field_type)
+            .bind(value_len)
+            .bind(&encrypted)
+            .execute(&mut *tx)
+            .await?;
+        }
+    }
+
+    // ── Remove secret fields ────────────────────────────────────────────────
+    for key in args.remove_secrets {
+        let path = parse_key_path(key)?;
+        // Dot-join the path to match flattened field_name storage.
+        let field_name = path.join(".");
+
+        // Snapshot before delete.
+        #[derive(sqlx::FromRow)]
+        struct FieldToDelete {
+            id: Uuid,
+            field_type: String,
+            value_len: i32,
+            encrypted: Vec<u8>,
+        }
+        let field: Option<FieldToDelete> = sqlx::query_as(
+            "SELECT id, field_type, value_len, encrypted \
+             FROM secrets WHERE entry_id = $1 AND field_name = $2",
+        )
+        .bind(row.id)
+        .bind(&field_name)
+        .fetch_optional(&mut *tx)
+        .await?;
+
+        if let Some(f) = field {
+            if let Err(e) = db::snapshot_secret_history(
+                &mut tx,
+                db::SecretSnapshotParams {
+                    entry_id: row.id,
+                    secret_id: f.id,
+                    entry_version: new_version,
+                    field_name: &field_name,
+                    field_type: &f.field_type,
+                    value_len: f.value_len,
+                    encrypted: &f.encrypted,
+                    action: "delete",
+                },
+            )
+            .await
+            {
+                tracing::warn!(error = %e, "failed to snapshot secret field history before delete");
+            }
+
+            sqlx::query("DELETE FROM secrets WHERE id = $1")
+                .bind(f.id)
+                .execute(&mut *tx)
+                .await?;
+        }
+    }
+
     let meta_keys = collect_key_paths(args.meta_entries)?;
     let remove_meta_keys = collect_field_paths(args.remove_meta)?;
     let secret_keys = collect_key_paths(args.secret_entries)?;
diff --git a/src/db.rs b/src/db.rs
index 2573b69..d57ff84 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,4 +1,5 @@
 use anyhow::Result;
+use serde_json::Value;
 use sqlx::PgPool;
 use sqlx::postgres::PgPoolOptions;
@@ -17,61 +18,48 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
     tracing::debug!("running migrations");
     sqlx::raw_sql(
         r#"
-        CREATE TABLE IF NOT EXISTS secrets (
+        -- ── entries: top-level entities (server, service, key, …) ──────────────
+        CREATE TABLE IF NOT EXISTS entries (
             id UUID PRIMARY KEY DEFAULT uuidv7(),
             namespace VARCHAR(64) NOT NULL,
             kind VARCHAR(64) NOT NULL,
             name VARCHAR(256) NOT NULL,
             tags TEXT[] NOT NULL DEFAULT '{}',
             metadata JSONB NOT NULL DEFAULT '{}',
-            encrypted BYTEA NOT NULL DEFAULT '\x',
             version BIGINT NOT NULL DEFAULT 1,
             created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
             updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
             UNIQUE(namespace, kind, name)
         );
-        -- idempotent column add for existing
tables - DO $$ BEGIN - ALTER TABLE secrets ADD COLUMN IF NOT EXISTS metadata JSONB NOT NULL DEFAULT '{}'; - EXCEPTION WHEN OTHERS THEN NULL; - END $$; + CREATE INDEX IF NOT EXISTS idx_entries_namespace ON entries(namespace); + CREATE INDEX IF NOT EXISTS idx_entries_kind ON entries(kind); + CREATE INDEX IF NOT EXISTS idx_entries_tags ON entries USING GIN(tags); + CREATE INDEX IF NOT EXISTS idx_entries_metadata ON entries USING GIN(metadata jsonb_path_ops); - DO $$ BEGIN - ALTER TABLE secrets ADD COLUMN IF NOT EXISTS version BIGINT NOT NULL DEFAULT 1; - EXCEPTION WHEN OTHERS THEN NULL; - END $$; + -- ── secrets: one row per encrypted field, plaintext schema metadata ──── + CREATE TABLE IF NOT EXISTS secrets ( + id UUID PRIMARY KEY DEFAULT uuidv7(), + entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, + field_name VARCHAR(256) NOT NULL, + field_type VARCHAR(32) NOT NULL DEFAULT 'string', + value_len INT NOT NULL DEFAULT 0, + encrypted BYTEA NOT NULL DEFAULT '\x', + version BIGINT NOT NULL DEFAULT 1, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(entry_id, field_name) + ); - -- Migrate encrypted column from JSONB to BYTEA if still JSONB type. - -- After migration, old plaintext rows will have their JSONB data - -- stored as raw bytes (UTF-8 encoded). 
- DO $$ BEGIN - IF EXISTS ( - SELECT 1 FROM information_schema.columns - WHERE table_name = 'secrets' - AND column_name = 'encrypted' - AND data_type = 'jsonb' - ) THEN - ALTER TABLE secrets RENAME COLUMN encrypted TO encrypted_jsonb_old; - ALTER TABLE secrets ADD COLUMN encrypted BYTEA NOT NULL DEFAULT '\x'; - -- Copy existing JSONB data as raw UTF-8 bytes so nothing is lost - UPDATE secrets SET encrypted = convert_to(encrypted_jsonb_old::text, 'UTF8'); - ALTER TABLE secrets DROP COLUMN encrypted_jsonb_old; - END IF; - EXCEPTION WHEN OTHERS THEN NULL; - END $$; + CREATE INDEX IF NOT EXISTS idx_secrets_entry_id ON secrets(entry_id); - CREATE INDEX IF NOT EXISTS idx_secrets_namespace ON secrets(namespace); - CREATE INDEX IF NOT EXISTS idx_secrets_kind ON secrets(kind); - CREATE INDEX IF NOT EXISTS idx_secrets_tags ON secrets USING GIN(tags); - CREATE INDEX IF NOT EXISTS idx_secrets_metadata ON secrets USING GIN(metadata jsonb_path_ops); - - -- Key-value config table: stores Argon2id salt (shared across devices) + -- ── kv_config: global key-value store (Argon2id salt, etc.) ──────────── CREATE TABLE IF NOT EXISTS kv_config ( key TEXT PRIMARY KEY, value BYTEA NOT NULL ); + -- ── audit_log: append-only operation log ──────────────────────────────── CREATE TABLE IF NOT EXISTS audit_log ( id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, action VARCHAR(32) NOT NULL, @@ -83,14 +71,13 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ); - CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC); - CREATE INDEX IF NOT EXISTS idx_audit_log_ns_kind ON audit_log(namespace, kind); + CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC); + CREATE INDEX IF NOT EXISTS idx_audit_log_ns_kind ON audit_log(namespace, kind); - -- History table: snapshot of secrets before each write operation. - -- Supports rollback to any prior version via `secrets rollback`. 
- CREATE TABLE IF NOT EXISTS secrets_history ( + -- ── entries_history: entry-level snapshot (tags + metadata) ───────────── + CREATE TABLE IF NOT EXISTS entries_history ( id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - secret_id UUID NOT NULL, + entry_id UUID NOT NULL, namespace VARCHAR(64) NOT NULL, kind VARCHAR(64) NOT NULL, name VARCHAR(256) NOT NULL, @@ -98,13 +85,34 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { action VARCHAR(16) NOT NULL, tags TEXT[] NOT NULL DEFAULT '{}', metadata JSONB NOT NULL DEFAULT '{}', - encrypted BYTEA NOT NULL DEFAULT '\x', actor VARCHAR(128) NOT NULL DEFAULT '', created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ); - CREATE INDEX IF NOT EXISTS idx_history_secret_id ON secrets_history(secret_id, version DESC); - CREATE INDEX IF NOT EXISTS idx_history_ns_kind_name ON secrets_history(namespace, kind, name, version DESC); + CREATE INDEX IF NOT EXISTS idx_entries_history_entry_id + ON entries_history(entry_id, version DESC); + CREATE INDEX IF NOT EXISTS idx_entries_history_ns_kind_name + ON entries_history(namespace, kind, name, version DESC); + + -- ── secrets_history: field-level snapshot ─────────────────────────────── + CREATE TABLE IF NOT EXISTS secrets_history ( + id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + entry_id UUID NOT NULL, + secret_id UUID NOT NULL, + entry_version BIGINT NOT NULL, + field_name VARCHAR(256) NOT NULL, + field_type VARCHAR(32) NOT NULL DEFAULT 'string', + value_len INT NOT NULL DEFAULT 0, + encrypted BYTEA NOT NULL DEFAULT '\x', + action VARCHAR(16) NOT NULL, + actor VARCHAR(128) NOT NULL DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + ); + + CREATE INDEX IF NOT EXISTS idx_secrets_history_entry_id + ON secrets_history(entry_id, entry_version DESC); + CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id + ON secrets_history(secret_id); "#, ) .execute(pool) @@ -113,33 +121,31 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { Ok(()) } -/// Snapshot parameters grouped 
to avoid too-many-arguments lint. -pub struct SnapshotParams<'a> { - pub secret_id: uuid::Uuid, +// ── Entry-level history snapshot ──────────────────────────────────────────── + +pub struct EntrySnapshotParams<'a> { + pub entry_id: uuid::Uuid, pub namespace: &'a str, pub kind: &'a str, pub name: &'a str, pub version: i64, pub action: &'a str, pub tags: &'a [String], - pub metadata: &'a serde_json::Value, - pub encrypted: &'a [u8], + pub metadata: &'a Value, } -/// Snapshot a secrets row into `secrets_history` before a write operation. -/// `action` is one of "add", "update", "delete". -/// Failures are non-fatal (caller should warn). -pub async fn snapshot_history( +/// Snapshot an entry row into `entries_history` before a write operation. +pub async fn snapshot_entry_history( tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - p: SnapshotParams<'_>, + p: EntrySnapshotParams<'_>, ) -> Result<()> { let actor = std::env::var("USER").unwrap_or_default(); sqlx::query( - "INSERT INTO secrets_history \ - (secret_id, namespace, kind, name, version, action, tags, metadata, encrypted, actor) \ - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", + "INSERT INTO entries_history \ + (entry_id, namespace, kind, name, version, action, tags, metadata, actor) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)", ) - .bind(p.secret_id) + .bind(p.entry_id) .bind(p.namespace) .bind(p.kind) .bind(p.name) @@ -147,15 +153,53 @@ pub async fn snapshot_history( .bind(p.action) .bind(p.tags) .bind(p.metadata) - .bind(p.encrypted) .bind(&actor) .execute(&mut **tx) .await?; Ok(()) } +// ── Secret field-level history snapshot ───────────────────────────────────── + +pub struct SecretSnapshotParams<'a> { + pub entry_id: uuid::Uuid, + pub secret_id: uuid::Uuid, + pub entry_version: i64, + pub field_name: &'a str, + pub field_type: &'a str, + pub value_len: i32, + pub encrypted: &'a [u8], + pub action: &'a str, +} + +/// Snapshot a single secret field into `secrets_history`. 
+pub async fn snapshot_secret_history( + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + p: SecretSnapshotParams<'_>, +) -> Result<()> { + let actor = std::env::var("USER").unwrap_or_default(); + sqlx::query( + "INSERT INTO secrets_history \ + (entry_id, secret_id, entry_version, field_name, field_type, value_len, encrypted, action, actor) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)", + ) + .bind(p.entry_id) + .bind(p.secret_id) + .bind(p.entry_version) + .bind(p.field_name) + .bind(p.field_type) + .bind(p.value_len) + .bind(p.encrypted) + .bind(p.action) + .bind(&actor) + .execute(&mut **tx) + .await?; + Ok(()) +} + +// ── Argon2 salt helpers ────────────────────────────────────────────────────── + /// Load the Argon2id salt from the database. -/// Returns None if not yet initialized. pub async fn load_argon2_salt(pool: &PgPool) -> Result>> { let row: Option<(Vec,)> = sqlx::query_as("SELECT value FROM kv_config WHERE key = 'argon2_salt'") diff --git a/src/main.rs b/src/main.rs index a1f50da..7887a7b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -186,9 +186,6 @@ EXAMPLES: /// Fuzzy keyword (matches name, namespace, kind, tags, metadata text) #[arg(short, long)] query: Option, - /// Deprecated: search never reveals secrets; use inject/run instead - #[arg(long)] - show_secrets: bool, /// Extract metadata field value(s) directly: metadata. (repeatable) #[arg(short = 'f', long = "field")] fields: Vec, @@ -528,7 +525,6 @@ async fn main() -> Result<()> { name, tag, query, - show_secrets, fields, summary, limit, @@ -546,7 +542,6 @@ async fn main() -> Result<()> { name: name.as_deref(), tags: &tag, query: query.as_deref(), - show_secrets, fields: &fields, summary, limit, diff --git a/src/models.rs b/src/models.rs index 7a09c13..0513304 100644 --- a/src/models.rs +++ b/src/models.rs @@ -3,16 +3,34 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use uuid::Uuid; +/// A top-level entry (server, service, key, …). 
+/// Sensitive fields are stored separately in `secrets`. #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct Secret { +pub struct Entry { pub id: Uuid, pub namespace: String, pub kind: String, pub name: String, pub tags: Vec, pub metadata: Value, + pub version: i64, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +/// A single encrypted field belonging to an Entry. +/// field_name, field_type, and value_len are stored in plaintext so that +/// `search` can show the schema without requiring the master key. +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct SecretField { + pub id: Uuid, + pub entry_id: Uuid, + pub field_name: String, + /// Inferred type: "string", "number", "boolean", "json" + pub field_type: String, + /// Length of the plaintext value in characters (0 for binary-like PEM) + pub value_len: i32, /// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag - /// Decrypt with crypto::decrypt_json() before use. pub encrypted: Vec, pub version: i64, pub created_at: DateTime,