Compare commits

...

5 Commits

Author SHA1 Message Date
voson
854720f10c chore: remove field_type and value_len from secrets schema
Some checks failed
Secrets CLI - Build & Release / 版本 & Release (push) Successful in 3s
Secrets CLI - Build & Release / 质量检查 (fmt / clippy / test) (push) Successful in 2m34s
Secrets CLI - Build & Release / Build (macOS aarch64 + x86_64) (push) Successful in 1m3s
Secrets CLI - Build & Release / Build (x86_64-unknown-linux-musl) (push) Successful in 1m15s
Secrets CLI - Build & Release / 发布草稿 Release (push) Has been cancelled
Secrets CLI - Build & Release / Build (x86_64-pc-windows-msvc) (push) Has been cancelled
- Drop field_type, value_len from secrets and secrets_history tables
- Remove infer_field_type, compute_value_len from add.rs
- Simplify search output to field names only
- Update AGENTS.md, README.md documentation

Bump version to 0.9.4

Made-with: Cursor
2026-03-19 16:48:23 +08:00
voson
62a1df316b docs: README 补充 delete 批量删除与 --dry-run 示例
Some checks failed
Secrets CLI - Build & Release / 版本 & Release (push) Successful in 3s
Secrets CLI - Build & Release / 质量检查 (fmt / clippy / test) (push) Successful in 2m30s
Secrets CLI - Build & Release / Build (macOS aarch64 + x86_64) (push) Successful in 1m1s
Secrets CLI - Build & Release / Build (x86_64-unknown-linux-musl) (push) Successful in 1m17s
Secrets CLI - Build & Release / 发布草稿 Release (push) Has been cancelled
Secrets CLI - Build & Release / Build (x86_64-pc-windows-msvc) (push) Has been cancelled
Made-with: Cursor
2026-03-19 16:32:20 +08:00
voson
d0796e9c9a feat: delete 命令支持批量删除,--name 改为可选
省略 --name 时按 namespace(+ 可选 --kind)批量删除所有匹配记录;
支持 --dry-run 预览;删除前自动快照历史并写入审计日志。
移除独立的 delete-ns 子命令,合并为统一的 delete 入口。
更新 AGENTS.md 文档,版本 bump 至 0.9.3。

Made-with: Cursor
2026-03-19 16:31:18 +08:00
voson
66b6417faa feat: 开源准备与 upgrade URL 构建时配置
- upgrade: SECRETS_UPGRADE_URL 改为构建时优先(option_env!),CI 自动注入
- upgrade: 支持运行时回退(.env/export),添加 dotenvy 加载 .env
- 泛化示例:IP/实例 ID/域名/密钥名改为示例值(10.0.0.1、example.com 等)
- tasks.json: 文件 secret 测试改用 test-fixtures/example-key.pem
- 文档更新:AGENTS.md、README.md

Made-with: Cursor
2026-03-19 16:08:27 +08:00
voson
56a28e8cf7 refactor: 消除冗余、统一设计,bump 0.9.1
Some checks failed
Secrets CLI - Build & Release / 版本 & Release (push) Successful in 3s
Secrets CLI - Build & Release / 质量检查 (fmt / clippy / test) (push) Successful in 2m46s
Secrets CLI - Build & Release / Build (macOS aarch64 + x86_64) (push) Successful in 1m27s
Secrets CLI - Build & Release / Build (x86_64-unknown-linux-musl) (push) Successful in 2m0s
Secrets CLI - Build & Release / 发布草稿 Release (push) Has been cancelled
Secrets CLI - Build & Release / Build (x86_64-pc-windows-msvc) (push) Has been cancelled
- 提取 EntryRow/SecretFieldRow 到 models.rs
- 提取 current_actor()、print_json() 公共函数
- ExportFormat::from_extension 复用 from_str
- fetch_entries 默认 limit 100k(export/inject/run 不再截断)
- history 独立为 history.rs 模块
- delete 改用 DeleteArgs 结构体
- config_dir 改为 Result,Argon2id 参数提取常量
- Cargo 依赖 ^ 前缀、tokio 精简 features
- 更新 AGENTS.md 项目结构

Made-with: Cursor
2026-03-19 15:46:57 +08:00
24 changed files with 678 additions and 491 deletions

View File

@@ -17,6 +17,7 @@ permissions:
env: env:
BINARY_NAME: secrets BINARY_NAME: secrets
SECRETS_UPGRADE_URL: ${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/latest
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10 CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always

2
.vscode/tasks.json vendored
View File

@@ -142,7 +142,7 @@
{ {
"label": "test: add with file secret", "label": "test: add with file secret",
"type": "shell", "type": "shell",
"command": "echo '--- add key from file ---' && ./target/debug/secrets add -n test --kind key --name test-key --tag test -s content=@./refining/keys/Vultr && echo '--- verify metadata ---' && ./target/debug/secrets search -n test --kind key && echo '--- verify inject ---' && ./target/debug/secrets inject -n test --kind key --name test-key && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind key --name test-key", "command": "echo '--- add key from file ---' && ./target/debug/secrets add -n test --kind key --name test-key --tag test -s content=@./test-fixtures/example-key.pem && echo '--- verify metadata ---' && ./target/debug/secrets search -n test --kind key && echo '--- verify inject ---' && ./target/debug/secrets inject -n test --kind key --name test-key && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind key --name test-key",
"dependsOn": "build" "dependsOn": "build"
} }
] ]

View File

@@ -28,7 +28,8 @@ secrets/
search.rs # search 命令:多条件查询,展示 secrets 字段 schema无需 master_key search.rs # search 命令:多条件查询,展示 secrets 字段 schema无需 master_key
delete.rs # delete 命令事务化CASCADE 删除 secrets含历史快照 delete.rs # delete 命令事务化CASCADE 删除 secrets含历史快照
update.rs # update 命令增量更新secrets 行级 UPSERT/DELETECAS 并发保护 update.rs # update 命令增量更新secrets 行级 UPSERT/DELETECAS 并发保护
rollback.rs # rollback / history 命令:按 entry_version 恢复 entry + secrets rollback.rs # rollback 命令:按 entry_version 恢复 entry + secrets
history.rs # history 命令:查看 entry 变更历史列表
run.rs # inject / run 命令:逐字段解密 + key_ref 引用解析 run.rs # inject / run 命令:逐字段解密 + key_ref 引用解析
upgrade.rs # upgrade 命令:检查、校验摘要并下载最新版本,自动替换二进制 upgrade.rs # upgrade 命令:检查、校验摘要并下载最新版本,自动替换二进制
export_cmd.rs # export 命令:批量导出记录,支持 JSON/TOML/YAML含解密明文 export_cmd.rs # export 命令:批量导出记录,支持 JSON/TOML/YAML含解密明文
@@ -70,8 +71,6 @@ secrets (
id UUID PRIMARY KEY DEFAULT uuidv7(), id UUID PRIMARY KEY DEFAULT uuidv7(),
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
field_name VARCHAR(256) NOT NULL, -- 明文字段名: "username", "token", "ssh_key" field_name VARCHAR(256) NOT NULL, -- 明文字段名: "username", "token", "ssh_key"
field_type VARCHAR(32) NOT NULL DEFAULT 'string', -- 明文类型: "string"|"number"|"boolean"|"json"
value_len INT NOT NULL DEFAULT 0, -- 明文原始值字符数PEM≈4096token≈40
encrypted BYTEA NOT NULL DEFAULT '\x', -- 仅加密值本身nonce(12B)||ciphertext+tag encrypted BYTEA NOT NULL DEFAULT '\x', -- 仅加密值本身nonce(12B)||ciphertext+tag
version BIGINT NOT NULL DEFAULT 1, version BIGINT NOT NULL DEFAULT 1,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
@@ -129,8 +128,6 @@ secrets_history (
secret_id UUID NOT NULL, -- 对应 secrets.id secret_id UUID NOT NULL, -- 对应 secrets.id
entry_version BIGINT NOT NULL, -- 关联 entries_history 的版本号 entry_version BIGINT NOT NULL, -- 关联 entries_history 的版本号
field_name VARCHAR(256) NOT NULL, field_name VARCHAR(256) NOT NULL,
field_type VARCHAR(32) NOT NULL DEFAULT 'string',
value_len INT NOT NULL DEFAULT 0,
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback'
actor VARCHAR(128) NOT NULL DEFAULT '', actor VARCHAR(128) NOT NULL DEFAULT '',
@@ -144,12 +141,10 @@ secrets_history (
|------|--------|------| |------|--------|------|
| `namespace` | 项目/团队隔离 | `refining`, `ricnsmart` | | `namespace` | 项目/团队隔离 | `refining`, `ricnsmart` |
| `kind` | 记录类型 | `server`, `service`, `key` | | `kind` | 记录类型 | `server`, `service`, `key` |
| `name` | 唯一标识名 | `i-uf63f2uookgs5uxmrdyc`, `gitea` | | `name` | 唯一标识名 | `i-example0abcd1234efgh`, `gitea` |
| `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` | | `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` |
| `metadata` | 明文非敏感信息 | `{"ip":"47.243.154.187","desc":"Grafana","key_ref":"ricn-hk-260127"}` | | `metadata` | 明文非敏感信息 | `{"ip":"192.0.2.1","desc":"Grafana","key_ref":"my-shared-key"}` |
| `secrets.field_name` | 加密字段名(明文) | `"username"`, `"token"`, `"ssh_key"` | | `secrets.field_name` | 加密字段名(明文) | `"username"`, `"token"`, `"ssh_key"` |
| `secrets.field_type` | 值类型(明文) | `"string"`, `"number"`, `"boolean"`, `"json"` |
| `secrets.value_len` | 原始值字符数(明文) | `4`root`40`token`4096`PEM |
| `secrets.encrypted` | 仅加密值本身 | AES-256-GCM 密文 | | `secrets.encrypted` | 仅加密值本身 | AES-256-GCM 密文 |
### PEM 共享机制key_ref ### PEM 共享机制key_ref
@@ -158,17 +153,17 @@ secrets_history (
```bash ```bash
# 1. 存共享 PEM # 1. 存共享 PEM
secrets add -n refining --kind key --name ricn-hk-260127 \ secrets add -n refining --kind key --name my-shared-key \
--tag aliyun --tag hongkong \ --tag aliyun --tag hongkong \
-s content=@./keys/ricn-hk-260127.pem -s content=@./keys/my-shared-key.pem
# 2. 服务器通过 metadata.key_ref 引用inject/run 时自动合并 key 的 secrets # 2. 服务器通过 metadata.key_ref 引用inject/run 时自动合并 key 的 secrets
secrets add -n refining --kind server --name i-j6c39dmtkr26vztii0ox \ secrets add -n refining --kind server --name i-example0xyz789 \
-m ip=47.243.154.187 -m key_ref=ricn-hk-260127 \ -m ip=192.0.2.1 -m key_ref=my-shared-key \
-s username=ecs-user -s username=ecs-user
# 3. 轮换只需更新 key 记录,所有引用服务器自动生效 # 3. 轮换只需更新 key 记录,所有引用服务器自动生效
secrets update -n refining --kind key --name ricn-hk-260127 \ secrets update -n refining --kind key --name my-shared-key \
-s content=@./keys/new-key.pem -s content=@./keys/new-key.pem
``` ```
@@ -230,7 +225,7 @@ secrets init
# 参数说明(带典型值) # 参数说明(带典型值)
# -n / --namespace refining | ricnsmart # -n / --namespace refining | ricnsmart
# --kind server | service # --kind server | service
# --name gitea | i-uf63f2uookgs5uxmrdyc | mqtt # --name gitea | i-example0abcd1234efgh | mqtt
# --tag aliyun | hongkong | production # --tag aliyun | hongkong | production
# -q / --query mqtt | grafana | gitea (模糊匹配 name/namespace/kind/tags/metadata # -q / --query mqtt | grafana | gitea (模糊匹配 name/namespace/kind/tags/metadata
# secrets schema search 默认展示 secrets 字段名、类型与长度(无需 master_key # secrets schema search 默认展示 secrets 字段名、类型与长度(无需 master_key
@@ -248,7 +243,7 @@ secrets search --sort updated --limit 10 --summary
# 精确定位单条记录 # 精确定位单条记录
secrets search -n refining --kind service --name gitea secrets search -n refining --kind service --name gitea
secrets search -n refining --kind server --name i-uf63f2uookgs5uxmrdyc secrets search -n refining --kind server --name i-example0abcd1234efgh
# 精确定位并获取完整内容secrets 保持加密占位) # 精确定位并获取完整内容secrets 保持加密占位)
secrets search -n refining --kind service --name gitea -o json secrets search -n refining --kind service --name gitea -o json
@@ -265,7 +260,7 @@ secrets run -n refining --kind service --name gitea -- printenv
# 模糊关键词搜索 # 模糊关键词搜索
secrets search -q mqtt secrets search -q mqtt
secrets search -q grafana secrets search -q grafana
secrets search -q 47.117 secrets search -q 192.0.2
# 按条件过滤 # 按条件过滤
secrets search -n refining --kind service secrets search -n refining --kind service
@@ -289,31 +284,31 @@ secrets search -n refining --kind service | jq '.[].name'
# 参数说明(带典型值) # 参数说明(带典型值)
# -n / --namespace refining | ricnsmart # -n / --namespace refining | ricnsmart
# --kind server | service # --kind server | service
# --name gitea | i-uf63f2uookgs5uxmrdyc # --name gitea | i-example0abcd1234efgh
# --tag aliyun | hongkong可重复 # --tag aliyun | hongkong可重复
# -m / --meta ip=47.117.131.22 | desc="Aliyun ECS" | url=https://... | tls:cert@./cert.pem可重复 # -m / --meta ip=10.0.0.1 | desc="ECS" | url=https://... | tls:cert@./cert.pem可重复
# -s / --secret token=<value> | ssh_key=@./key.pem | password=secret123 | credentials:content@./key.pem可重复 # -s / --secret token=<value> | ssh_key=@./key.pem | password=secret123 | credentials:content@./key.pem可重复
# 添加服务器 # 添加服务器
secrets add -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \ secrets add -n refining --kind server --name i-example0abcd1234efgh \
--tag aliyun --tag shanghai \ --tag aliyun --tag shanghai \
-m ip=47.117.131.22 -m desc="Aliyun Shanghai ECS" \ -m ip=10.0.0.1 -m desc="Aliyun Shanghai ECS" \
-s username=root -s ssh_key=@./keys/voson_shanghai_e.pem -s username=root -s ssh_key=@./keys/deploy-key.pem
# 添加服务凭据 # 添加服务凭据
secrets add -n refining --kind service --name gitea \ secrets add -n refining --kind service --name gitea \
--tag gitea \ --tag gitea \
-m url=https://gitea.refining.dev -m default_org=refining -m username=voson \ -m url=https://code.example.com -m default_org=refining -m username=voson \
-s token=<token> -s runner_token=<runner_token> -s token=<token> -s runner_token=<runner_token>
# 从文件读取 token # 从文件读取 token
secrets add -n ricnsmart --kind service --name mqtt \ secrets add -n ricnsmart --kind service --name mqtt \
-m host=mqtt.ricnsmart.com -m port=1883 \ -m host=mqtt.example.com -m port=1883 \
-s password=@./mqtt_password.txt -s password=@./mqtt_password.txt
# 多行文件直接写入嵌套 secret 字段 # 多行文件直接写入嵌套 secret 字段
secrets add -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \ secrets add -n refining --kind server --name i-example0abcd1234efgh \
-s credentials:content@./keys/voson_shanghai_e.pem -s credentials:content@./keys/deploy-key.pem
# 使用类型化值key:=<json>)存储非字符串类型 # 使用类型化值key:=<json>)存储非字符串类型
secrets add -n refining --kind service --name prometheus \ secrets add -n refining --kind service --name prometheus \
@@ -333,7 +328,7 @@ secrets add -n refining --kind service --name prometheus \
# 参数说明(带典型值) # 参数说明(带典型值)
# -n / --namespace refining | ricnsmart # -n / --namespace refining | ricnsmart
# --kind server | service # --kind server | service
# --name gitea | i-uf63f2uookgs5uxmrdyc # --name gitea | i-example0abcd1234efgh
# --add-tag production | backup不影响已有 tag可重复 # --add-tag production | backup不影响已有 tag可重复
# --remove-tag staging | deprecated可重复 # --remove-tag staging | deprecated可重复
# -m / --meta ip=10.0.0.1 | desc="新描述" | credentials:username=root新增或覆盖可重复 # -m / --meta ip=10.0.0.1 | desc="新描述" | credentials:username=root新增或覆盖可重复
@@ -342,7 +337,7 @@ secrets add -n refining --kind service --name prometheus \
# --remove-secret old_password | deprecated_key | credentials:content删除 secret 字段,可重复) # --remove-secret old_password | deprecated_key | credentials:content删除 secret 字段,可重复)
# 更新单个 metadata 字段 # 更新单个 metadata 字段
secrets update -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \ secrets update -n refining --kind server --name i-example0abcd1234efgh \
-m ip=10.0.0.1 -m ip=10.0.0.1
# 轮换 token # 轮换 token
@@ -359,11 +354,11 @@ secrets update -n refining --kind service --name mqtt \
--remove-meta old_port --remove-secret old_password --remove-meta old_port --remove-secret old_password
# 从文件更新嵌套 secret 字段 # 从文件更新嵌套 secret 字段
secrets update -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \ secrets update -n refining --kind server --name i-example0abcd1234efgh \
-s credentials:content@./keys/voson_shanghai_e.pem -s credentials:content@./keys/deploy-key.pem
# 删除嵌套字段 # 删除嵌套字段
secrets update -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \ secrets update -n refining --kind server --name i-example0abcd1234efgh \
--remove-secret credentials:content --remove-secret credentials:content
# 移除 tag # 移除 tag
@@ -372,19 +367,34 @@ secrets update -n refining --kind service --name gitea --remove-tag staging
--- ---
### delete — 删除记录 ### delete — 删除记录(支持单条精确删除与批量删除)
删除时会自动将 entry 与所有关联 secret 字段快照到历史表,并写入审计日志,可通过 `rollback` 命令恢复。
```bash ```bash
# 参数说明(带典型值) # 参数说明(带典型值)
# -n / --namespace refining | ricnsmart # -n / --namespace refining | ricnsmart(必填)
# --kind server | service # --kind server | service(指定 --name 时必填;批量时可选)
# --name gitea | i-uf63f2uookgs5uxmrdyc必须精确匹配 # --name gitea | i-example0abcd1234efgh精确匹配省略则批量删除
# --dry-run 预览将删除的记录,不实际写入(仅批量模式有效)
# -o / --output text | json | json-compact
# 删除服务凭据 # 精确删除单条记录(--kind 必填)
secrets delete -n refining --kind service --name legacy-mqtt secrets delete -n refining --kind service --name legacy-mqtt
# 删除服务器记录
secrets delete -n ricnsmart --kind server --name i-old-server-id secrets delete -n ricnsmart --kind server --name i-old-server-id
# 预览批量删除(不写入数据库)
secrets delete -n refining --dry-run
secrets delete -n ricnsmart --kind server --dry-run
# 批量删除整个 namespace 的所有记录
secrets delete -n ricnsmart
# 批量删除 namespace 下指定 kind 的所有记录
secrets delete -n ricnsmart --kind server
# JSON 输出
secrets delete -n refining --kind service -o json
``` ```
--- ---
@@ -483,7 +493,9 @@ secrets run -n refining --kind service --name gitea -- printenv
### upgrade — 自动更新 CLI 二进制 ### upgrade — 自动更新 CLI 二进制
Gitea Release 下载最新版本,校验对应 `.sha256` 摘要后替换当前二进制,无需数据库连接或主密钥。 从 Release 服务器下载最新版本,校验对应 `.sha256` 摘要后替换当前二进制,无需数据库连接或主密钥。
**配置方式**`SECRETS_UPGRADE_URL` 必填。优先用**构建时**`SECRETS_UPGRADE_URL=https://... cargo build`CI 已自动注入。或**运行时**:写在 `.env``export` 后执行。
```bash ```bash
# 检查是否有新版本(不下载) # 检查是否有新版本(不下载)
@@ -503,7 +515,7 @@ secrets upgrade
# 参数说明 # 参数说明
# -n / --namespace refining | ricnsmart # -n / --namespace refining | ricnsmart
# --kind server | service # --kind server | service
# --name gitea | i-uf63f2uookgs5uxmrdyc # --name gitea | i-example0abcd1234efgh
# --tag aliyun | production可重复 # --tag aliyun | production可重复
# -q / --query 模糊关键词 # -q / --query 模糊关键词
# --file <path> 输出文件路径,格式由扩展名推断(.json / .toml / .yaml / .yml # --file <path> 输出文件路径,格式由扩展名推断(.json / .toml / .yaml / .yml
@@ -663,5 +675,6 @@ cargo fmt -- --check && cargo clippy -- -D warnings && cargo test
|------|------| |------|------|
| `RUST_LOG` | 日志级别,如 `secrets=debug``secrets=trace`(默认 warn | | `RUST_LOG` | 日志级别,如 `secrets=debug``secrets=trace`(默认 warn |
| `USER` | 审计日志 actor 字段来源Shell 自动设置,通常无需手动配置 | | `USER` | 审计日志 actor 字段来源Shell 自动设置,通常无需手动配置 |
| `SECRETS_UPGRADE_URL` | upgrade 的 Release API 地址。构建时cargo build或运行时.env/export |
数据库连接通过 `secrets config set-db` 持久化到 `~/.config/secrets/config.toml`,不支持环境变量。 数据库连接通过 `secrets config set-db` 持久化到 `~/.config/secrets/config.toml`,不支持环境变量。

4
Cargo.lock generated
View File

@@ -1836,7 +1836,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]] [[package]]
name = "secrets" name = "secrets"
version = "0.9.0" version = "0.9.4"
dependencies = [ dependencies = [
"aes-gcm", "aes-gcm",
"anyhow", "anyhow",
@@ -1844,6 +1844,7 @@ dependencies = [
"chrono", "chrono",
"clap", "clap",
"dirs", "dirs",
"dotenvy",
"flate2", "flate2",
"keyring", "keyring",
"rand 0.10.0", "rand 0.10.0",
@@ -2448,7 +2449,6 @@ dependencies = [
"bytes", "bytes",
"libc", "libc",
"mio", "mio",
"parking_lot",
"pin-project-lite", "pin-project-lite",
"signal-hook-registry", "signal-hook-registry",
"socket2", "socket2",

View File

@@ -1,32 +1,33 @@
[package] [package]
name = "secrets" name = "secrets"
version = "0.9.0" version = "0.9.4"
edition = "2024" edition = "2024"
[dependencies] [dependencies]
aes-gcm = "0.10.3" aes-gcm = "^0.10.3"
anyhow = "1.0.102" anyhow = "^1.0.102"
argon2 = { version = "0.5.3", features = ["std"] } argon2 = { version = "^0.5.3", features = ["std"] }
chrono = { version = "0.4.44", features = ["serde"] } chrono = { version = "^0.4.44", features = ["serde"] }
clap = { version = "4.6.0", features = ["derive"] } clap = { version = "^4.6.0", features = ["derive"] }
dirs = "6.0.0" dirs = "^6.0.0"
flate2 = "1.1.9" dotenvy = "^0.15"
keyring = { version = "3.6.3", features = ["apple-native", "windows-native", "linux-native"] } flate2 = "^1.1.9"
rand = "0.10.0" keyring = { version = "^3.6.3", features = ["apple-native", "windows-native", "linux-native"] }
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "json"] } rand = "^0.10.0"
rpassword = "7.4.0" reqwest = { version = "^0.12", default-features = false, features = ["rustls-tls", "json"] }
self-replace = "1.5.0" rpassword = "^7.4.0"
semver = "1.0.27" self-replace = "^1.5.0"
serde = { version = "1.0.228", features = ["derive"] } semver = "^1.0.27"
serde_json = "1.0.149" serde = { version = "^1.0.228", features = ["derive"] }
serde_yaml = "0.9" serde_json = "^1.0.149"
sha2 = "0.10.9" serde_yaml = "^0.9"
sqlx = { version = "0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] } sha2 = "^0.10.9"
tar = "0.4.44" sqlx = { version = "^0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] }
tempfile = "3.19" tar = "^0.4.44"
tokio = { version = "1.50.0", features = ["full"] } tempfile = "^3.19"
toml = "1.0.7" tokio = { version = "^1.50.0", features = ["rt-multi-thread", "macros", "fs", "io-util", "process", "signal"] }
tracing = "0.1" toml = "^1.0.7"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing = "^0.1"
uuid = { version = "1.22.0", features = ["serde"] } tracing-subscriber = { version = "^0.3", features = ["env-filter"] }
zip = { version = "8.2.0", default-features = false, features = ["deflate"] } uuid = { version = "^1.22.0", features = ["serde"] }
zip = { version = "^8.2.0", default-features = false, features = ["deflate"] }

View File

@@ -54,7 +54,7 @@ secrets search --sort updated --limit 10 --summary
# 精确定位namespace + kind + name 三元组) # 精确定位namespace + kind + name 三元组)
secrets search -n refining --kind service --name gitea secrets search -n refining --kind service --name gitea
# 获取完整记录(含 secrets 字段 schemafield_name、field_type、value_len,无需 master_key # 获取完整记录(含 secrets 字段,无需 master_key
secrets search -n refining --kind service --name gitea -o json secrets search -n refining --kind service --name gitea -o json
# 直接提取单个 metadata 字段值(最短路径) # 直接提取单个 metadata 字段值(最短路径)
@@ -69,7 +69,7 @@ secrets inject -n refining --kind service --name gitea
secrets run -n refining --kind service --name gitea -- printenv secrets run -n refining --kind service --name gitea -- printenv
``` ```
`search` 展示 metadata 与 secrets 的字段 schema字段名、类型、长度,不展示 secret 值本身;需要值时用 `inject` / `run` `search` 展示 metadata 与 secrets 的字段,不展示 secret 值本身;需要值时用 `inject` / `run`
### 输出格式 ### 输出格式
@@ -120,7 +120,7 @@ secrets search -n refining --summary --limit 10 --offset 10 # 翻页
# ── add ────────────────────────────────────────────────────────────────────── # ── add ──────────────────────────────────────────────────────────────────────
secrets add -n refining --kind server --name my-server \ secrets add -n refining --kind server --name my-server \
--tag aliyun --tag shanghai \ --tag aliyun --tag shanghai \
-m ip=47.117.131.22 -m desc="Aliyun Shanghai ECS" \ -m ip=10.0.0.1 -m desc="Example ECS" \
-s username=root -s ssh_key=@./keys/server.pem -s username=root -s ssh_key=@./keys/server.pem
# 多行文件直接写入嵌套 secret 字段 # 多行文件直接写入嵌套 secret 字段
@@ -136,7 +136,7 @@ secrets add -n refining --kind service --name deploy-bot \
secrets add -n refining --kind service --name gitea \ secrets add -n refining --kind service --name gitea \
--tag gitea \ --tag gitea \
-m url=https://gitea.refining.dev -m default_org=refining \ -m url=https://code.example.com -m default_org=myorg \
-s token=<token> -s token=<token>
# ── update ─────────────────────────────────────────────────────────────────── # ── update ───────────────────────────────────────────────────────────────────
@@ -146,7 +146,10 @@ secrets update -n refining --kind service --name mqtt --remove-meta old_port --r
secrets update -n refining --kind server --name my-server --remove-secret credentials:content secrets update -n refining --kind server --name my-server --remove-secret credentials:content
# ── delete ─────────────────────────────────────────────────────────────────── # ── delete ───────────────────────────────────────────────────────────────────
secrets delete -n refining --kind service --name legacy-mqtt secrets delete -n refining --kind service --name legacy-mqtt # 精确删除单条(--kind 必填)
secrets delete -n refining --dry-run # 预览批量删除(不写入)
secrets delete -n ricnsmart # 批量删除整个 namespace
secrets delete -n ricnsmart --kind server # 批量删除指定 kind
# ── init ───────────────────────────────────────────────────────────────────── # ── init ─────────────────────────────────────────────────────────────────────
secrets init # 主密钥初始化(每台设备一次,主密码至少 8 位,派生后存钥匙串) secrets init # 主密钥初始化(每台设备一次,主密码至少 8 位,派生后存钥匙串)
@@ -158,7 +161,7 @@ secrets config path # 打印配置文件路径
# ── upgrade ────────────────────────────────────────────────────────────────── # ── upgrade ──────────────────────────────────────────────────────────────────
secrets upgrade --check # 仅检查是否有新版本 secrets upgrade --check # 仅检查是否有新版本
secrets upgrade # 下载、校验 SHA-256 并安装最新版(从 Gitea Release secrets upgrade # 下载、校验 SHA-256 并安装最新版(可通过 SECRETS_UPGRADE_URL 自托管
# ── export ──────────────────────────────────────────────────────────────────── # ── export ────────────────────────────────────────────────────────────────────
secrets export --file backup.json # 全量导出到 JSON secrets export --file backup.json # 全量导出到 JSON
@@ -181,7 +184,7 @@ RUST_LOG=secrets=trace secrets search
## 数据模型 ## 数据模型
主表 `entries`namespace、kind、name、tags、metadata+ 子表 `secrets`(每个加密字段一行,含 field_name、field_type、value_len、encrypted。首次连接自动建表同时创建 `audit_log``entries_history``secrets_history` 等表。 主表 `entries`namespace、kind、name、tags、metadata+ 子表 `secrets`(每个加密字段一行,含 field_name、encrypted。首次连接自动建表同时创建 `audit_log``entries_history``secrets_history` 等表。
| 位置 | 字段 | 说明 | | 位置 | 字段 | 说明 |
|------|------|------| |------|------|------|
@@ -190,7 +193,7 @@ RUST_LOG=secrets=trace secrets search
| entries | name | 人类可读唯一标识 | | entries | name | 人类可读唯一标识 |
| entries | tags | 多维标签,如 `["aliyun","hongkong"]` | | entries | tags | 多维标签,如 `["aliyun","hongkong"]` |
| entries | metadata | 明文描述ip、desc、domains、key_ref 等) | | entries | metadata | 明文描述ip、desc、domains、key_ref 等) |
| secrets | field_name / field_type / value_len | 明文search 可见AI 可推断 inject 会生成什么变量 | | secrets | field_name | 明文search 可见AI 可推断 inject 会生成什么变量 |
| secrets | encrypted | 仅加密值本身AES-256-GCM | | secrets | encrypted | 仅加密值本身AES-256-GCM |
`-m` / `--meta` 写入 `metadata``-s` / `--secret` 写入 `secrets` 表的独立行。支持 `key=value``key=@file``key:=<json>`,也支持 `credentials:content@./key.pem` 这类嵌套字段文件写入;删除时支持 `--remove-secret credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。 `-m` / `--meta` 写入 `metadata``-s` / `--secret` 写入 `secrets` 表的独立行。支持 `key=value``key=@file``key:=<json>`,也支持 `credentials:content@./key.pem` 这类嵌套字段文件写入;删除时支持 `--remove-secret credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。
@@ -203,12 +206,12 @@ RUST_LOG=secrets=trace secrets search
| 目标值 | 写法示例 | 实际存入 | | 目标值 | 写法示例 | 实际存入 |
|------|------|------| |------|------|------|
| 普通字符串 | `-m url=https://gitea.refining.dev` | `"https://gitea.refining.dev"` | | 普通字符串 | `-m url=https://code.example.com` | `"https://code.example.com"` |
| 文件内容字符串 | `-m notes=@./service-notes.txt` | `"..."` | | 文件内容字符串 | `-m notes=@./service-notes.txt` | `"..."` |
| 布尔值 | `-m enabled:=true` | `true` | | 布尔值 | `-m enabled:=true` | `true` |
| 数字 | `-m port:=3000` | `3000` | | 数字 | `-m port:=3000` | `3000` |
| `null` | `-m deprecated_at:=null` | `null` | | `null` | `-m deprecated_at:=null` | `null` |
| 数组 | `-m domains:='["gitea.refining.dev","git.refining.dev"]'` | `["gitea.refining.dev","git.refining.dev"]` | | 数组 | `-m domains:='["code.example.com","git.example.com"]'` | `["code.example.com","git.example.com"]` |
| 对象 | `-m tls:='{"enabled":true,"redirect_http":true}'` | `{"enabled":true,"redirect_http":true}` | | 对象 | `-m tls:='{"enabled":true,"redirect_http":true}'` | `{"enabled":true,"redirect_http":true}` |
| 嵌套路径 + JSON | `-m deploy:strategy:='{"type":"rolling","batch":2}'` | `{"deploy":{"strategy":{"type":"rolling","batch":2}}}` | | 嵌套路径 + JSON | `-m deploy:strategy:='{"type":"rolling","batch":2}'` | `{"deploy":{"strategy":{"type":"rolling","batch":2}}}` |
@@ -223,10 +226,10 @@ RUST_LOG=secrets=trace secrets search
```bash ```bash
secrets add -n refining --kind service --name gitea \ secrets add -n refining --kind service --name gitea \
-m url=https://gitea.refining.dev \ -m url=https://code.example.com \
-m port:=3000 \ -m port:=3000 \
-m enabled:=true \ -m enabled:=true \
-m domains:='["gitea.refining.dev","git.refining.dev"]' \ -m domains:='["code.example.com","git.example.com"]' \
-m tls:='{"enabled":true,"redirect_http":true}' -m tls:='{"enabled":true,"redirect_http":true}'
``` ```

View File

@@ -1,6 +1,11 @@
use serde_json::Value; use serde_json::Value;
use sqlx::{Postgres, Transaction}; use sqlx::{Postgres, Transaction};
/// Return the current OS user as the audit actor (falls back to empty string).
pub fn current_actor() -> String {
std::env::var("USER").unwrap_or_default()
}
/// Write an audit entry within an existing transaction. /// Write an audit entry within an existing transaction.
pub async fn log_tx( pub async fn log_tx(
tx: &mut Transaction<'_, Postgres>, tx: &mut Transaction<'_, Postgres>,
@@ -10,7 +15,7 @@ pub async fn log_tx(
name: &str, name: &str,
detail: Value, detail: Value,
) { ) {
let actor = std::env::var("USER").unwrap_or_default(); let actor = current_actor();
let result: Result<_, sqlx::Error> = sqlx::query( let result: Result<_, sqlx::Error> = sqlx::query(
"INSERT INTO audit_log (action, namespace, kind, name, detail, actor) \ "INSERT INTO audit_log (action, namespace, kind, name, detail, actor) \
VALUES ($1, $2, $3, $4, $5, $6)", VALUES ($1, $2, $3, $4, $5, $6)",

View File

@@ -5,7 +5,8 @@ use std::fs;
use crate::crypto; use crate::crypto;
use crate::db; use crate::db;
use crate::output::OutputMode; use crate::models::EntryRow;
use crate::output::{OutputMode, print_json};
// ── Key/value parsing helpers (shared with update.rs) ─────────────────────── // ── Key/value parsing helpers (shared with update.rs) ───────────────────────
@@ -160,28 +161,6 @@ pub(crate) fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Resu
Ok(removed) Ok(removed)
} }
// ── field_type inference and value_len ──────────────────────────────────────
/// Infer the field type string from a JSON value.
pub(crate) fn infer_field_type(v: &Value) -> &'static str {
match v {
Value::String(_) => "string",
Value::Number(_) => "number",
Value::Bool(_) => "boolean",
Value::Null => "string",
Value::Array(_) | Value::Object(_) => "json",
}
}
/// Compute the plaintext length of a JSON value (chars for string, serialized length otherwise).
pub(crate) fn compute_value_len(v: &Value) -> i32 {
match v {
Value::String(s) => s.chars().count() as i32,
Value::Null => 0,
other => other.to_string().chars().count() as i32,
}
}
/// Flatten a (potentially nested) JSON object into dot-separated field entries. /// Flatten a (potentially nested) JSON object into dot-separated field entries.
/// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` → /// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` →
/// `[("credentials.type", "ssh"), ("credentials.content", "...")]` /// `[("credentials.type", "ssh"), ("credentials.content", "...")]`
@@ -228,13 +207,6 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
let mut tx = pool.begin().await?; let mut tx = pool.begin().await?;
// Upsert the entry row (tags + metadata). // Upsert the entry row (tags + metadata).
#[derive(sqlx::FromRow)]
struct EntryRow {
id: uuid::Uuid,
version: i64,
tags: Vec<String>,
metadata: Value,
}
let existing: Option<EntryRow> = sqlx::query_as( let existing: Option<EntryRow> = sqlx::query_as(
"SELECT id, version, tags, metadata FROM entries \ "SELECT id, version, tags, metadata FROM entries \
WHERE namespace = $1 AND kind = $2 AND name = $3", WHERE namespace = $1 AND kind = $2 AND name = $3",
@@ -297,12 +269,10 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
struct ExistingField { struct ExistingField {
id: uuid::Uuid, id: uuid::Uuid,
field_name: String, field_name: String,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let existing_fields: Vec<ExistingField> = sqlx::query_as( let existing_fields: Vec<ExistingField> = sqlx::query_as(
"SELECT id, field_name, field_type, value_len, encrypted \ "SELECT id, field_name, encrypted \
FROM secrets WHERE entry_id = $1", FROM secrets WHERE entry_id = $1",
) )
.bind(entry_id) .bind(entry_id)
@@ -317,8 +287,6 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
secret_id: f.id, secret_id: f.id,
entry_version: new_entry_version - 1, entry_version: new_entry_version - 1,
field_name: &f.field_name, field_name: &f.field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "add", action: "add",
}, },
@@ -339,18 +307,14 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
// Insert new secret fields. // Insert new secret fields.
let flat_fields = flatten_json_fields("", &secret_json); let flat_fields = flatten_json_fields("", &secret_json);
for (field_name, field_value) in &flat_fields { for (field_name, field_value) in &flat_fields {
let field_type = infer_field_type(field_value);
let value_len = compute_value_len(field_value);
let encrypted = crypto::encrypt_json(master_key, field_value)?; let encrypted = crypto::encrypt_json(master_key, field_value)?;
sqlx::query( sqlx::query(
"INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \ "INSERT INTO secrets (entry_id, field_name, encrypted) \
VALUES ($1, $2, $3, $4, $5)", VALUES ($1, $2, $3)",
) )
.bind(entry_id) .bind(entry_id)
.bind(field_name) .bind(field_name)
.bind(field_type)
.bind(value_len)
.bind(&encrypted) .bind(&encrypted)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
@@ -383,11 +347,8 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
}); });
match args.output { match args.output {
OutputMode::Json => { OutputMode::Json | OutputMode::JsonCompact => {
println!("{}", serde_json::to_string_pretty(&result_json)?); print_json(&result_json, &args.output)?;
}
OutputMode::JsonCompact => {
println!("{}", serde_json::to_string(&result_json)?);
} }
_ => { _ => {
println!("Added: [{}/{}] {}", args.namespace, args.kind, args.name); println!("Added: [{}/{}] {}", args.namespace, args.kind, args.name);
@@ -408,10 +369,7 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{ use super::{build_json, flatten_json_fields, key_path_to_string, parse_kv, remove_path};
build_json, compute_value_len, flatten_json_fields, infer_field_type, key_path_to_string,
parse_kv, remove_path,
};
use serde_json::Value; use serde_json::Value;
use std::fs; use std::fs;
use std::path::PathBuf; use std::path::PathBuf;
@@ -498,19 +456,4 @@ mod tests {
assert_eq!(fields[1].0, "credentials.type"); assert_eq!(fields[1].0, "credentials.type");
assert_eq!(fields[2].0, "username"); assert_eq!(fields[2].0, "username");
} }
#[test]
fn infer_field_types() {
assert_eq!(infer_field_type(&Value::String("x".into())), "string");
assert_eq!(infer_field_type(&serde_json::json!(42)), "number");
assert_eq!(infer_field_type(&Value::Bool(true)), "boolean");
assert_eq!(infer_field_type(&serde_json::json!(["a"])), "json");
}
#[test]
fn compute_value_len_string() {
assert_eq!(compute_value_len(&Value::String("root".into())), 4);
assert_eq!(compute_value_len(&Value::Null), 0);
assert_eq!(compute_value_len(&serde_json::json!(1234)), 4);
}
} }

View File

@@ -15,7 +15,7 @@ pub async fn run(action: crate::ConfigAction) -> Result<()> {
database_url: Some(url.clone()), database_url: Some(url.clone()),
}; };
config::save_config(&cfg)?; config::save_config(&cfg)?;
println!("Database URL saved to: {}", config_path().display()); println!("Database URL saved to: {}", config_path()?.display());
println!(" {}", mask_password(&url)); println!(" {}", mask_password(&url));
} }
crate::ConfigAction::Show => { crate::ConfigAction::Show => {
@@ -23,7 +23,7 @@ pub async fn run(action: crate::ConfigAction) -> Result<()> {
match cfg.database_url { match cfg.database_url {
Some(url) => { Some(url) => {
println!("database_url = {}", mask_password(&url)); println!("database_url = {}", mask_password(&url));
println!("config file: {}", config_path().display()); println!("config file: {}", config_path()?.display());
} }
None => { None => {
println!("Database URL not configured."); println!("Database URL not configured.");
@@ -32,7 +32,7 @@ pub async fn run(action: crate::ConfigAction) -> Result<()> {
} }
} }
crate::ConfigAction::Path => { crate::ConfigAction::Path => {
println!("{}", config_path().display()); println!("{}", config_path()?.display());
} }
} }
Ok(()) Ok(())

View File

@@ -1,29 +1,52 @@
use anyhow::Result; use anyhow::Result;
use serde_json::{Value, json}; use serde_json::json;
use sqlx::{FromRow, PgPool}; use sqlx::PgPool;
use uuid::Uuid; use uuid::Uuid;
use crate::db; use crate::db;
use crate::output::OutputMode; use crate::models::{EntryRow, SecretFieldRow};
use crate::output::{OutputMode, print_json};
#[derive(FromRow)] pub struct DeleteArgs<'a> {
struct EntryRow { pub namespace: &'a str,
id: Uuid, /// Kind filter. Required when --name is given; optional for bulk deletes.
version: i64, pub kind: Option<&'a str>,
tags: Vec<String>, /// Exact record name. When None, bulk-delete all matching records.
metadata: Value, pub name: Option<&'a str>,
/// Preview without writing to the database (bulk mode only).
pub dry_run: bool,
pub output: OutputMode,
} }
#[derive(FromRow)] // ── Internal row type used for bulk queries ────────────────────────────────
struct SecretFieldRow {
id: Uuid, #[derive(Debug, sqlx::FromRow)]
field_name: String, struct FullEntryRow {
field_type: String, pub id: Uuid,
value_len: i32, pub version: i64,
encrypted: Vec<u8>, pub kind: String,
pub name: String,
pub metadata: serde_json::Value,
pub tags: Vec<String>,
} }
pub async fn run( // ── Entry point ────────────────────────────────────────────────────────────
pub async fn run(pool: &PgPool, args: DeleteArgs<'_>) -> Result<()> {
match args.name {
Some(name) => {
let kind = args
.kind
.ok_or_else(|| anyhow::anyhow!("--kind is required when --name is specified"))?;
delete_one(pool, args.namespace, kind, name, args.output).await
}
None => delete_bulk(pool, args.namespace, args.kind, args.dry_run, args.output).await,
}
}
// ── Single-record delete (original behaviour) ─────────────────────────────
async fn delete_one(
pool: &PgPool, pool: &PgPool,
namespace: &str, namespace: &str,
kind: &str, kind: &str,
@@ -48,27 +71,175 @@ pub async fn run(
let Some(row) = row else { let Some(row) = row else {
tx.rollback().await?; tx.rollback().await?;
tracing::warn!(namespace, kind, name, "entry not found for deletion"); tracing::warn!(namespace, kind, name, "entry not found for deletion");
let v = json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name});
match output { match output {
OutputMode::Json => println!( OutputMode::Text => println!("Not found: [{}/{}] {}", namespace, kind, name),
"{}", ref mode => print_json(&v, mode)?,
serde_json::to_string_pretty(
&json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name})
)?
),
OutputMode::JsonCompact => println!(
"{}",
serde_json::to_string(
&json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name})
)?
),
_ => println!("Not found: [{}/{}] {}", namespace, kind, name),
} }
return Ok(()); return Ok(());
}; };
// Snapshot entry history before deleting. snapshot_and_delete(&mut tx, namespace, kind, name, &row).await?;
if let Err(e) = db::snapshot_entry_history(
crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await;
tx.commit().await?;
let v = json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name});
match output {
OutputMode::Text => println!("Deleted: [{}/{}] {}", namespace, kind, name),
ref mode => print_json(&v, mode)?,
}
Ok(())
}
// ── Bulk delete by namespace (+ optional kind filter) ─────────────────────
async fn delete_bulk(
pool: &PgPool,
namespace: &str,
kind: Option<&str>,
dry_run: bool,
output: OutputMode,
) -> Result<()> {
tracing::debug!(namespace, ?kind, dry_run, "bulk-deleting entries");
let rows: Vec<FullEntryRow> = if let Some(k) = kind {
sqlx::query_as(
"SELECT id, version, kind, name, metadata, tags FROM entries \
WHERE namespace = $1 AND kind = $2 \
ORDER BY name",
)
.bind(namespace)
.bind(k)
.fetch_all(pool)
.await?
} else {
sqlx::query_as(
"SELECT id, version, kind, name, metadata, tags FROM entries \
WHERE namespace = $1 \
ORDER BY kind, name",
)
.bind(namespace)
.fetch_all(pool)
.await?
};
if rows.is_empty() {
let v = json!({
"action": "noop",
"namespace": namespace,
"kind": kind,
"deleted": 0,
"dry_run": dry_run
});
match output {
OutputMode::Text => println!(
"No records found in namespace \"{}\"{}.",
namespace,
kind.map(|k| format!(" with kind \"{}\"", k))
.unwrap_or_default()
),
ref mode => print_json(&v, mode)?,
}
return Ok(());
}
if dry_run {
let count = rows.len();
match output {
OutputMode::Text => {
println!(
"dry-run: would delete {} record(s) in namespace \"{}\":",
count, namespace
);
for r in &rows {
println!(" [{}/{}] {}", namespace, r.kind, r.name);
}
}
ref mode => {
let items: Vec<_> = rows
.iter()
.map(|r| json!({"namespace": namespace, "kind": r.kind, "name": r.name}))
.collect();
print_json(
&json!({
"action": "dry_run",
"namespace": namespace,
"kind": kind,
"would_delete": count,
"entries": items
}),
mode,
)?;
}
}
return Ok(());
}
let mut deleted = Vec::with_capacity(rows.len());
for row in &rows {
let entry_row = EntryRow {
id: row.id,
version: row.version,
tags: row.tags.clone(),
metadata: row.metadata.clone(),
};
let mut tx = pool.begin().await?;
snapshot_and_delete(&mut tx, namespace, &row.kind, &row.name, &entry_row).await?;
crate::audit::log_tx(
&mut tx, &mut tx,
"delete",
namespace,
&row.kind,
&row.name,
json!({"bulk": true}),
)
.await;
tx.commit().await?;
deleted.push(json!({"namespace": namespace, "kind": row.kind, "name": row.name}));
tracing::info!(namespace, kind = %row.kind, name = %row.name, "bulk deleted");
}
let count = deleted.len();
match output {
OutputMode::Text => {
for item in &deleted {
println!(
"Deleted: [{}/{}] {}",
item["namespace"].as_str().unwrap_or(""),
item["kind"].as_str().unwrap_or(""),
item["name"].as_str().unwrap_or("")
);
}
println!("Total: {} record(s) deleted.", count);
}
ref mode => print_json(
&json!({
"action": "deleted",
"namespace": namespace,
"kind": kind,
"deleted": count,
"entries": deleted
}),
mode,
)?,
}
Ok(())
}
// ── Shared helper: snapshot history then DELETE ────────────────────────────
async fn snapshot_and_delete(
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
namespace: &str,
kind: &str,
name: &str,
row: &EntryRow,
) -> Result<()> {
if let Err(e) = db::snapshot_entry_history(
tx,
db::EntrySnapshotParams { db::EntrySnapshotParams {
entry_id: row.id, entry_id: row.id,
namespace, namespace,
@@ -85,60 +256,36 @@ pub async fn run(
tracing::warn!(error = %e, "failed to snapshot entry history before delete"); tracing::warn!(error = %e, "failed to snapshot entry history before delete");
} }
// Snapshot all secret fields before cascade delete.
let fields: Vec<SecretFieldRow> = sqlx::query_as( let fields: Vec<SecretFieldRow> = sqlx::query_as(
"SELECT id, field_name, field_type, value_len, encrypted \ "SELECT id, field_name, encrypted \
FROM secrets WHERE entry_id = $1", FROM secrets WHERE entry_id = $1",
) )
.bind(row.id) .bind(row.id)
.fetch_all(&mut *tx) .fetch_all(&mut **tx)
.await?; .await?;
for f in &fields { for f in &fields {
if let Err(e) = db::snapshot_secret_history( if let Err(e) = db::snapshot_secret_history(
&mut tx, tx,
db::SecretSnapshotParams { db::SecretSnapshotParams {
entry_id: row.id, entry_id: row.id,
secret_id: f.id, secret_id: f.id,
entry_version: row.version, entry_version: row.version,
field_name: &f.field_name, field_name: &f.field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "delete", action: "delete",
}, },
) )
.await .await
{ {
tracing::warn!(error = %e, "failed to snapshot secret field history before delete"); tracing::warn!(error = %e, "failed to snapshot secret history before delete");
} }
} }
// Delete the entry — secrets rows are removed via ON DELETE CASCADE.
sqlx::query("DELETE FROM entries WHERE id = $1") sqlx::query("DELETE FROM entries WHERE id = $1")
.bind(row.id) .bind(row.id)
.execute(&mut *tx) .execute(&mut **tx)
.await?; .await?;
crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await;
tx.commit().await?;
match output {
OutputMode::Json => println!(
"{}",
serde_json::to_string_pretty(
&json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name})
)?
),
OutputMode::JsonCompact => println!(
"{}",
serde_json::to_string(
&json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name})
)?
),
_ => println!("Deleted: [{}/{}] {}", namespace, kind, name),
}
Ok(()) Ok(())
} }

78
src/commands/history.rs Normal file
View File

@@ -0,0 +1,78 @@
use anyhow::Result;
use serde_json::{Value, json};
use sqlx::{FromRow, PgPool};
use crate::output::{OutputMode, format_local_time, print_json};
/// Arguments for the `history` subcommand.
pub struct HistoryArgs<'a> {
    /// Namespace of the entry whose history is listed.
    pub namespace: &'a str,
    /// Entry kind; together with namespace and name it identifies one entry.
    pub kind: &'a str,
    /// Entry name.
    pub name: &'a str,
    /// Maximum number of history rows to show (most recent first).
    pub limit: u32,
    /// Output rendering mode (text or JSON / compact JSON).
    pub output: OutputMode,
}
/// List history entries for an entry.
///
/// Queries `entries_history` for the entry identified by
/// namespace/kind/name and prints up to `args.limit` rows, newest first.
/// JSON modes emit an array of summary objects; text mode prints a
/// human-readable table followed by a rollback hint.
pub async fn run(pool: &PgPool, args: HistoryArgs<'_>) -> Result<()> {
    // Lightweight projection of an entries_history row — only audit
    // metadata is selected; encrypted payloads are never fetched here.
    #[derive(FromRow)]
    struct HistorySummary {
        version: i64,
        action: String,
        actor: String,
        created_at: chrono::DateTime<chrono::Utc>,
    }

    let records: Vec<HistorySummary> = sqlx::query_as(
        "SELECT version, action, actor, created_at FROM entries_history \
         WHERE namespace = $1 AND kind = $2 AND name = $3 \
         ORDER BY id DESC LIMIT $4",
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .bind(args.limit as i64) // u32 → i64 is lossless
    .fetch_all(pool)
    .await?;

    // Machine-readable path: serialize the rows as a JSON array and stop.
    if matches!(args.output, OutputMode::Json | OutputMode::JsonCompact) {
        let items: Vec<Value> = records
            .iter()
            .map(|row| {
                json!({
                    "version": row.version,
                    "action": row.action,
                    "actor": row.actor,
                    "created_at": row.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
                })
            })
            .collect();
        print_json(&Value::Array(items), &args.output)?;
        return Ok(());
    }

    // Human-readable path.
    if records.is_empty() {
        println!(
            "No history found for [{}/{}] {}.",
            args.namespace, args.kind, args.name
        );
        return Ok(());
    }
    println!(
        "History for [{}/{}] {}:",
        args.namespace, args.kind, args.name
    );
    for row in &records {
        println!(
            " v{:<4} {:8} {} {}",
            row.version,
            row.action,
            row.actor,
            format_local_time(row.created_at)
        );
    }
    println!(" (use `secrets rollback --to-version <N>` to restore)");
    Ok(())
}

View File

@@ -5,11 +5,13 @@ use std::collections::BTreeMap;
use crate::commands::add::{self, AddArgs}; use crate::commands::add::{self, AddArgs};
use crate::models::ExportFormat; use crate::models::ExportFormat;
use crate::output::OutputMode; use crate::output::{OutputMode, print_json};
pub struct ImportArgs<'a> { pub struct ImportArgs<'a> {
pub file: &'a str, pub file: &'a str,
/// Overwrite existing records when there is a conflict (upsert). /// Overwrite existing records when there is a conflict (upsert).
/// Without this flag, the import aborts on the first conflict.
/// A future `--skip` flag could allow silently skipping conflicts and continuing.
pub force: bool, pub force: bool,
/// Check and preview operations without writing to the database. /// Check and preview operations without writing to the database.
pub dry_run: bool, pub dry_run: bool,
@@ -48,26 +50,29 @@ pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) ->
.unwrap_or(false); .unwrap_or(false);
if exists && !args.force { if exists && !args.force {
let msg = format!(
"[{}/{}/{}] conflict — record already exists (use --force to overwrite)",
entry.namespace, entry.kind, entry.name
);
match args.output {
OutputMode::Json | OutputMode::JsonCompact => {
let v = serde_json::json!({ let v = serde_json::json!({
"action": "conflict", "action": "conflict",
"namespace": entry.namespace, "namespace": entry.namespace,
"kind": entry.kind, "kind": entry.kind,
"name": entry.name, "name": entry.name,
}); });
let s = if args.output == OutputMode::Json { match args.output {
OutputMode::Text => eprintln!(
"[{}/{}/{}] conflict — record already exists (use --force to overwrite)",
entry.namespace, entry.kind, entry.name
),
ref mode => {
// Write conflict notice to stderr so it does not mix with summary JSON.
eprint!(
"{}",
if *mode == OutputMode::Json {
serde_json::to_string_pretty(&v)? serde_json::to_string_pretty(&v)?
} else { } else {
serde_json::to_string(&v)? serde_json::to_string(&v)?
};
eprintln!("{}", s);
} }
_ => eprintln!("{}", msg), );
eprintln!();
}
} }
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
"Import aborted: conflict on [{}/{}/{}]", "Import aborted: conflict on [{}/{}/{}]",
@@ -80,8 +85,6 @@ pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) ->
let action = if exists { "upsert" } else { "insert" }; let action = if exists { "upsert" } else { "insert" };
if args.dry_run { if args.dry_run {
match args.output {
OutputMode::Json | OutputMode::JsonCompact => {
let v = serde_json::json!({ let v = serde_json::json!({
"action": action, "action": action,
"namespace": entry.namespace, "namespace": entry.namespace,
@@ -89,17 +92,12 @@ pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) ->
"name": entry.name, "name": entry.name,
"dry_run": true, "dry_run": true,
}); });
let s = if args.output == OutputMode::Json { match args.output {
serde_json::to_string_pretty(&v)? OutputMode::Text => println!(
} else {
serde_json::to_string(&v)?
};
println!("{}", s);
}
_ => println!(
"[dry-run] {} [{}/{}/{}]", "[dry-run] {} [{}/{}/{}]",
action, entry.namespace, entry.kind, entry.name action, entry.namespace, entry.kind, entry.name
), ),
ref mode => print_json(&v, mode)?,
} }
if exists { if exists {
skipped += 1; skipped += 1;
@@ -131,25 +129,18 @@ pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) ->
.await .await
{ {
Ok(()) => { Ok(()) => {
match args.output {
OutputMode::Json | OutputMode::JsonCompact => {
let v = serde_json::json!({ let v = serde_json::json!({
"action": action, "action": action,
"namespace": entry.namespace, "namespace": entry.namespace,
"kind": entry.kind, "kind": entry.kind,
"name": entry.name, "name": entry.name,
}); });
let s = if args.output == OutputMode::Json { match args.output {
serde_json::to_string_pretty(&v)? OutputMode::Text => println!(
} else {
serde_json::to_string(&v)?
};
println!("{}", s);
}
_ => println!(
"Imported [{}/{}/{}]", "Imported [{}/{}/{}]",
entry.namespace, entry.kind, entry.name entry.namespace, entry.kind, entry.name
), ),
ref mode => print_json(&v, mode)?,
} }
inserted += 1; inserted += 1;
} }
@@ -163,23 +154,15 @@ pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) ->
} }
} }
match args.output { let summary = serde_json::json!({
OutputMode::Json | OutputMode::JsonCompact => {
let v = serde_json::json!({
"total": total, "total": total,
"inserted": inserted, "inserted": inserted,
"skipped": skipped, "skipped": skipped,
"failed": failed, "failed": failed,
"dry_run": args.dry_run, "dry_run": args.dry_run,
}); });
let s = if args.output == OutputMode::Json { match args.output {
serde_json::to_string_pretty(&v)? OutputMode::Text => {
} else {
serde_json::to_string(&v)?
};
println!("{}", s);
}
_ => {
if args.dry_run { if args.dry_run {
println!( println!(
"\n[dry-run] {} total: {} would insert, {} would skip, {} would fail", "\n[dry-run] {} total: {} would insert, {} would skip, {} would fail",
@@ -192,6 +175,7 @@ pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) ->
); );
} }
} }
ref mode => print_json(&summary, mode)?,
} }
if failed > 0 { if failed > 0 {

View File

@@ -2,6 +2,7 @@ pub mod add;
pub mod config; pub mod config;
pub mod delete; pub mod delete;
pub mod export_cmd; pub mod export_cmd;
pub mod history;
pub mod import_cmd; pub mod import_cmd;
pub mod init; pub mod init;
pub mod rollback; pub mod rollback;

View File

@@ -5,7 +5,7 @@ use uuid::Uuid;
use crate::crypto; use crate::crypto;
use crate::db; use crate::db;
use crate::output::{OutputMode, format_local_time}; use crate::output::{OutputMode, print_json};
pub struct RollbackArgs<'a> { pub struct RollbackArgs<'a> {
pub namespace: &'a str, pub namespace: &'a str,
@@ -71,14 +71,12 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
struct SecretHistoryRow { struct SecretHistoryRow {
secret_id: Uuid, secret_id: Uuid,
field_name: String, field_name: String,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
action: String, action: String,
} }
let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as( let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as(
"SELECT secret_id, field_name, field_type, value_len, encrypted, action \ "SELECT secret_id, field_name, encrypted, action \
FROM secrets_history \ FROM secrets_history \
WHERE entry_id = $1 AND entry_version = $2 \ WHERE entry_id = $1 AND entry_version = $2 \
ORDER BY field_name", ORDER BY field_name",
@@ -145,12 +143,10 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
struct LiveField { struct LiveField {
id: Uuid, id: Uuid,
field_name: String, field_name: String,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let live_fields: Vec<LiveField> = sqlx::query_as( let live_fields: Vec<LiveField> = sqlx::query_as(
"SELECT id, field_name, field_type, value_len, encrypted \ "SELECT id, field_name, encrypted \
FROM secrets WHERE entry_id = $1", FROM secrets WHERE entry_id = $1",
) )
.bind(lr.id) .bind(lr.id)
@@ -165,8 +161,6 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
secret_id: f.id, secret_id: f.id,
entry_version: lr.version, entry_version: lr.version,
field_name: &f.field_name, field_name: &f.field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "rollback", action: "rollback",
}, },
@@ -212,11 +206,9 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
continue; continue;
} }
sqlx::query( sqlx::query(
"INSERT INTO secrets (id, entry_id, field_name, field_type, value_len, encrypted) \ "INSERT INTO secrets (id, entry_id, field_name, encrypted) \
VALUES ($1, $2, $3, $4, $5, $6) \ VALUES ($1, $2, $3, $4) \
ON CONFLICT (entry_id, field_name) DO UPDATE SET \ ON CONFLICT (entry_id, field_name) DO UPDATE SET \
field_type = EXCLUDED.field_type, \
value_len = EXCLUDED.value_len, \
encrypted = EXCLUDED.encrypted, \ encrypted = EXCLUDED.encrypted, \
version = secrets.version + 1, \ version = secrets.version + 1, \
updated_at = NOW()", updated_at = NOW()",
@@ -224,8 +216,6 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
.bind(f.secret_id) .bind(f.secret_id)
.bind(snap.entry_id) .bind(snap.entry_id)
.bind(&f.field_name) .bind(&f.field_name)
.bind(&f.field_type)
.bind(f.value_len)
.bind(&f.encrypted) .bind(&f.encrypted)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
@@ -255,83 +245,11 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
}); });
match args.output { match args.output {
OutputMode::Json => println!("{}", serde_json::to_string_pretty(&result_json)?), OutputMode::Text => println!(
OutputMode::JsonCompact => println!("{}", serde_json::to_string(&result_json)?),
_ => println!(
"Rolled back: [{}/{}] {} → version {}", "Rolled back: [{}/{}] {} → version {}",
args.namespace, args.kind, args.name, snap.version args.namespace, args.kind, args.name, snap.version
), ),
} ref mode => print_json(&result_json, mode)?,
Ok(())
}
/// List history entries for an entry.
pub async fn list_history(
pool: &PgPool,
namespace: &str,
kind: &str,
name: &str,
limit: u32,
output: OutputMode,
) -> Result<()> {
#[derive(FromRow)]
struct HistorySummary {
version: i64,
action: String,
actor: String,
created_at: chrono::DateTime<chrono::Utc>,
}
let rows: Vec<HistorySummary> = sqlx::query_as(
"SELECT version, action, actor, created_at FROM entries_history \
WHERE namespace = $1 AND kind = $2 AND name = $3 \
ORDER BY id DESC LIMIT $4",
)
.bind(namespace)
.bind(kind)
.bind(name)
.bind(limit as i64)
.fetch_all(pool)
.await?;
match output {
OutputMode::Json | OutputMode::JsonCompact => {
let arr: Vec<Value> = rows
.iter()
.map(|r| {
json!({
"version": r.version,
"action": r.action,
"actor": r.actor,
"created_at": r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
})
})
.collect();
let out = if output == OutputMode::Json {
serde_json::to_string_pretty(&arr)?
} else {
serde_json::to_string(&arr)?
};
println!("{}", out);
}
_ => {
if rows.is_empty() {
println!("No history found for [{}/{}] {}.", namespace, kind, name);
return Ok(());
}
println!("History for [{}/{}] {}:", namespace, kind, name);
for r in &rows {
println!(
" v{:<4} {:8} {} {}",
r.version,
r.action,
r.actor,
format_local_time(r.created_at)
);
}
println!(" (use `secrets rollback --to-version <N>` to restore)");
}
} }
Ok(()) Ok(())

View File

@@ -121,7 +121,12 @@ struct PagedFetchArgs<'a> {
offset: u32, offset: u32,
} }
/// A very large limit used when callers need all matching records (export, inject, run).
/// Postgres will stop scanning when this many rows are found; adjust if needed.
pub const FETCH_ALL_LIMIT: u32 = 100_000;
/// Fetch entries matching the given filters (used by search, inject, run). /// Fetch entries matching the given filters (used by search, inject, run).
/// `limit` caps the result set; pass `FETCH_ALL_LIMIT` when you need all matching records.
pub async fn fetch_entries( pub async fn fetch_entries(
pool: &PgPool, pool: &PgPool,
namespace: Option<&str>, namespace: Option<&str>,
@@ -129,6 +134,19 @@ pub async fn fetch_entries(
name: Option<&str>, name: Option<&str>,
tags: &[String], tags: &[String],
query: Option<&str>, query: Option<&str>,
) -> Result<Vec<Entry>> {
fetch_entries_with_limit(pool, namespace, kind, name, tags, query, FETCH_ALL_LIMIT).await
}
/// Like `fetch_entries` but with an explicit limit. Used internally by `search`.
pub(crate) async fn fetch_entries_with_limit(
pool: &PgPool,
namespace: Option<&str>,
kind: Option<&str>,
name: Option<&str>,
tags: &[String],
query: Option<&str>,
limit: u32,
) -> Result<Vec<Entry>> { ) -> Result<Vec<Entry>> {
fetch_entries_paged( fetch_entries_paged(
pool, pool,
@@ -139,7 +157,7 @@ pub async fn fetch_entries(
tags, tags,
query, query,
sort: "name", sort: "name",
limit: 200, limit,
offset: 0, offset: 0,
}, },
) )
@@ -232,8 +250,8 @@ async fn fetch_entries_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec
// ── Secret schema fetching (no master key) ─────────────────────────────────── // ── Secret schema fetching (no master key) ───────────────────────────────────
/// Fetch secret field schemas (field_name, field_type, value_len) for a set of entry ids. /// Fetch secret field names for a set of entry ids.
/// Returns a map from entry_id to list of SecretField (encrypted field not used here). /// Returns a map from entry_id to list of SecretField.
async fn fetch_secret_schemas( async fn fetch_secret_schemas(
pool: &PgPool, pool: &PgPool,
entry_ids: &[uuid::Uuid], entry_ids: &[uuid::Uuid],
@@ -405,8 +423,6 @@ fn to_json(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Valu
.map(|f| { .map(|f| {
json!({ json!({
"field_name": f.field_name, "field_name": f.field_name,
"field_type": f.field_type,
"value_len": f.value_len,
}) })
}) })
.collect(); .collect();
@@ -456,10 +472,7 @@ fn print_text(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> R
} }
match schema { match schema {
Some(fields) if !fields.is_empty() => { Some(fields) if !fields.is_empty() => {
let schema_str: Vec<String> = fields let schema_str: Vec<String> = fields.iter().map(|f| f.field_name.clone()).collect();
.iter()
.map(|f| format!("{}: {}({})", f.field_name, f.field_type, f.value_len))
.collect();
println!(" secrets: {}", schema_str.join(", ")); println!(" secrets: {}", schema_str.join(", "));
println!(" (use `secrets inject` or `secrets run` to get values)"); println!(" (use `secrets inject` or `secrets run` to get values)");
} }
@@ -524,7 +537,7 @@ mod tests {
kind: "service".to_string(), kind: "service".to_string(),
name: "gitea.main".to_string(), name: "gitea.main".to_string(),
tags: vec!["prod".to_string()], tags: vec!["prod".to_string()],
metadata: json!({"url": "https://gitea.refining.dev", "enabled": true}), metadata: json!({"url": "https://code.example.com", "enabled": true}),
version: 1, version: 1,
created_at: Utc::now(), created_at: Utc::now(),
updated_at: Utc::now(), updated_at: Utc::now(),
@@ -538,8 +551,6 @@ mod tests {
id: Uuid::nil(), id: Uuid::nil(),
entry_id: Uuid::nil(), entry_id: Uuid::nil(),
field_name: "token".to_string(), field_name: "token".to_string(),
field_type: "string".to_string(),
value_len: 6,
encrypted: enc, encrypted: enc,
version: 1, version: 1,
created_at: Utc::now(), created_at: Utc::now(),
@@ -561,7 +572,7 @@ mod tests {
assert_eq!( assert_eq!(
map.get("GITEA_MAIN_URL").map(String::as_str), map.get("GITEA_MAIN_URL").map(String::as_str),
Some("https://gitea.refining.dev") Some("https://code.example.com")
); );
assert_eq!( assert_eq!(
map.get("GITEA_MAIN_ENABLED").map(String::as_str), map.get("GITEA_MAIN_ENABLED").map(String::as_str),
@@ -579,8 +590,6 @@ mod tests {
let secrets = v.get("secrets").unwrap().as_array().unwrap(); let secrets = v.get("secrets").unwrap().as_array().unwrap();
assert_eq!(secrets.len(), 1); assert_eq!(secrets.len(), 1);
assert_eq!(secrets[0]["field_name"], "token"); assert_eq!(secrets[0]["field_name"], "token");
assert_eq!(secrets[0]["field_type"], "string");
assert_eq!(secrets[0]["value_len"], 6);
} }
#[test] #[test]

View File

@@ -1,23 +1,16 @@
use anyhow::Result; use anyhow::Result;
use serde_json::{Map, Value, json}; use serde_json::{Map, Value, json};
use sqlx::{FromRow, PgPool}; use sqlx::PgPool;
use uuid::Uuid; use uuid::Uuid;
use super::add::{ use super::add::{
collect_field_paths, collect_key_paths, compute_value_len, flatten_json_fields, collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path,
infer_field_type, insert_path, parse_key_path, parse_kv, remove_path, parse_kv, remove_path,
}; };
use crate::crypto; use crate::crypto;
use crate::db; use crate::db;
use crate::output::OutputMode; use crate::models::EntryRow;
use crate::output::{OutputMode, print_json};
#[derive(FromRow)]
struct EntryRow {
id: Uuid,
version: i64,
tags: Vec<String>,
metadata: Value,
}
pub struct UpdateArgs<'a> { pub struct UpdateArgs<'a> {
pub namespace: &'a str, pub namespace: &'a str,
@@ -137,20 +130,16 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
}); });
for (field_name, fv) in &flat { for (field_name, fv) in &flat {
let field_type = infer_field_type(fv);
let value_len = compute_value_len(fv);
let encrypted = crypto::encrypt_json(master_key, fv)?; let encrypted = crypto::encrypt_json(master_key, fv)?;
// Snapshot existing field before replacing. // Snapshot existing field before replacing.
#[derive(sqlx::FromRow)] #[derive(sqlx::FromRow)]
struct ExistingField { struct ExistingField {
id: Uuid, id: Uuid,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let existing_field: Option<ExistingField> = sqlx::query_as( let existing_field: Option<ExistingField> = sqlx::query_as(
"SELECT id, field_type, value_len, encrypted \ "SELECT id, encrypted \
FROM secrets WHERE entry_id = $1 AND field_name = $2", FROM secrets WHERE entry_id = $1 AND field_name = $2",
) )
.bind(row.id) .bind(row.id)
@@ -166,8 +155,6 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
secret_id: ef.id, secret_id: ef.id,
entry_version: row.version, entry_version: row.version,
field_name, field_name,
field_type: &ef.field_type,
value_len: ef.value_len,
encrypted: &ef.encrypted, encrypted: &ef.encrypted,
action: "update", action: "update",
}, },
@@ -178,19 +165,15 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
} }
sqlx::query( sqlx::query(
"INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \ "INSERT INTO secrets (entry_id, field_name, encrypted) \
VALUES ($1, $2, $3, $4, $5) \ VALUES ($1, $2, $3) \
ON CONFLICT (entry_id, field_name) DO UPDATE SET \ ON CONFLICT (entry_id, field_name) DO UPDATE SET \
field_type = EXCLUDED.field_type, \
value_len = EXCLUDED.value_len, \
encrypted = EXCLUDED.encrypted, \ encrypted = EXCLUDED.encrypted, \
version = secrets.version + 1, \ version = secrets.version + 1, \
updated_at = NOW()", updated_at = NOW()",
) )
.bind(row.id) .bind(row.id)
.bind(field_name) .bind(field_name)
.bind(field_type)
.bind(value_len)
.bind(&encrypted) .bind(&encrypted)
.execute(&mut *tx) .execute(&mut *tx)
.await?; .await?;
@@ -207,12 +190,10 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
#[derive(sqlx::FromRow)] #[derive(sqlx::FromRow)]
struct FieldToDelete { struct FieldToDelete {
id: Uuid, id: Uuid,
field_type: String,
value_len: i32,
encrypted: Vec<u8>, encrypted: Vec<u8>,
} }
let field: Option<FieldToDelete> = sqlx::query_as( let field: Option<FieldToDelete> = sqlx::query_as(
"SELECT id, field_type, value_len, encrypted \ "SELECT id, encrypted \
FROM secrets WHERE entry_id = $1 AND field_name = $2", FROM secrets WHERE entry_id = $1 AND field_name = $2",
) )
.bind(row.id) .bind(row.id)
@@ -228,8 +209,6 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
secret_id: f.id, secret_id: f.id,
entry_version: new_version, entry_version: new_version,
field_name: &field_name, field_name: &field_name,
field_type: &f.field_type,
value_len: f.value_len,
encrypted: &f.encrypted, encrypted: &f.encrypted,
action: "delete", action: "delete",
}, },
@@ -284,11 +263,8 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
}); });
match args.output { match args.output {
OutputMode::Json => { OutputMode::Json | OutputMode::JsonCompact => {
println!("{}", serde_json::to_string_pretty(&result_json)?); print_json(&result_json, &args.output)?;
}
OutputMode::JsonCompact => {
println!("{}", serde_json::to_string(&result_json)?);
} }
_ => { _ => {
println!("Updated: [{}/{}] {}", args.namespace, args.kind, args.name); println!("Updated: [{}/{}] {}", args.namespace, args.kind, args.name);

View File

@@ -5,10 +5,26 @@ use sha2::{Digest, Sha256};
use std::io::{Cursor, Read, Write}; use std::io::{Cursor, Read, Write};
use std::time::Duration; use std::time::Duration;
const GITEA_API: &str = "https://gitea.refining.dev/api/v1/repos/refining/secrets/releases/latest";
const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION"); const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION");
/// Build-time config via `option_env!("SECRETS_UPGRADE_URL")`. Set during `cargo build`, e.g.:
/// SECRETS_UPGRADE_URL=https://... cargo build --release
const BUILD_UPGRADE_URL: Option<&'static str> = option_env!("SECRETS_UPGRADE_URL");
fn upgrade_api_url() -> Result<String> {
if let Some(url) = BUILD_UPGRADE_URL.filter(|s| !s.trim().is_empty()) {
return Ok(url.to_string());
}
let url = std::env::var("SECRETS_UPGRADE_URL").context(
"SECRETS_UPGRADE_URL is not set at build or runtime. Set it when building: \
SECRETS_UPGRADE_URL=https://... cargo build, or export before running secrets upgrade.",
)?;
if url.trim().is_empty() {
anyhow::bail!("SECRETS_UPGRADE_URL is empty.");
}
Ok(url)
}
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
struct Release { struct Release {
tag_name: String, tag_name: String,
@@ -186,13 +202,14 @@ pub async fn run(check_only: bool) -> Result<()> {
.build() .build()
.context("failed to build HTTP client")?; .context("failed to build HTTP client")?;
let api_url = upgrade_api_url()?;
let release: Release = client let release: Release = client
.get(GITEA_API) .get(&api_url)
.send() .send()
.await .await
.context("failed to fetch release info from Gitea")? .context("failed to fetch release info")?
.error_for_status() .error_for_status()
.context("Gitea API returned an error")? .context("release API returned an error")?
.json() .json()
.await .await
.context("failed to parse release JSON")?; .context("failed to parse release JSON")?;

View File

@@ -8,19 +8,23 @@ pub struct Config {
pub database_url: Option<String>, pub database_url: Option<String>,
} }
pub fn config_dir() -> PathBuf { pub fn config_dir() -> Result<PathBuf> {
dirs::config_dir() let dir = dirs::config_dir()
.or_else(|| dirs::home_dir().map(|h| h.join(".config"))) .or_else(|| dirs::home_dir().map(|h| h.join(".config")))
.unwrap_or_else(|| PathBuf::from(".config")) .context(
.join("secrets") "Cannot determine config directory: \
neither XDG_CONFIG_HOME nor HOME is set",
)?
.join("secrets");
Ok(dir)
} }
pub fn config_path() -> PathBuf { pub fn config_path() -> Result<PathBuf> {
config_dir().join("config.toml") Ok(config_dir()?.join("config.toml"))
} }
pub fn load_config() -> Result<Config> { pub fn load_config() -> Result<Config> {
let path = config_path(); let path = config_path()?;
if !path.exists() { if !path.exists() {
return Ok(Config::default()); return Ok(Config::default());
} }
@@ -32,11 +36,11 @@ pub fn load_config() -> Result<Config> {
} }
pub fn save_config(config: &Config) -> Result<()> { pub fn save_config(config: &Config) -> Result<()> {
let dir = config_dir(); let dir = config_dir()?;
fs::create_dir_all(&dir) fs::create_dir_all(&dir)
.with_context(|| format!("failed to create config dir: {}", dir.display()))?; .with_context(|| format!("failed to create config dir: {}", dir.display()))?;
let path = config_path(); let path = dir.join("config.toml");
let content = toml::to_string_pretty(config).context("failed to serialize config")?; let content = toml::to_string_pretty(config).context("failed to serialize config")?;
fs::write(&path, &content) fs::write(&path, &content)
.with_context(|| format!("failed to write config file: {}", path.display()))?; .with_context(|| format!("failed to write config file: {}", path.display()))?;

View File

@@ -10,12 +10,24 @@ const KEYRING_SERVICE: &str = "secrets-cli";
const KEYRING_USER: &str = "master-key"; const KEYRING_USER: &str = "master-key";
const NONCE_LEN: usize = 12; const NONCE_LEN: usize = 12;
// Argon2id parameters — OWASP recommended (m=64 MiB, t=3 iterations, p=4 threads, key=32 B)
const ARGON2_M_COST: u32 = 65_536;
const ARGON2_T_COST: u32 = 3;
const ARGON2_P_COST: u32 = 4;
const ARGON2_KEY_LEN: usize = 32;
// ─── Argon2id key derivation ───────────────────────────────────────────────── // ─── Argon2id key derivation ─────────────────────────────────────────────────
/// Derive a 32-byte Master Key from a password and salt using Argon2id. /// Derive a 32-byte Master Key from a password and salt using Argon2id.
/// Parameters: m=65536 KiB (64 MB), t=3, p=4 — OWASP recommended. /// Parameters: m=65536 KiB (64 MB), t=3, p=4 — OWASP recommended.
pub fn derive_master_key(password: &str, salt: &[u8]) -> Result<[u8; 32]> { pub fn derive_master_key(password: &str, salt: &[u8]) -> Result<[u8; 32]> {
let params = Params::new(65536, 3, 4, Some(32)).context("invalid Argon2id params")?; let params = Params::new(
ARGON2_M_COST,
ARGON2_T_COST,
ARGON2_P_COST,
Some(ARGON2_KEY_LEN),
)
.context("invalid Argon2id params")?;
let argon2 = Argon2::new(argon2::Algorithm::Argon2id, Version::V0x13, params); let argon2 = Argon2::new(argon2::Algorithm::Argon2id, Version::V0x13, params);
let mut key = [0u8; 32]; let mut key = [0u8; 32];
argon2 argon2

View File

@@ -3,6 +3,8 @@ use serde_json::Value;
use sqlx::PgPool; use sqlx::PgPool;
use sqlx::postgres::PgPoolOptions; use sqlx::postgres::PgPoolOptions;
use crate::audit::current_actor;
pub async fn create_pool(database_url: &str) -> Result<PgPool> { pub async fn create_pool(database_url: &str) -> Result<PgPool> {
tracing::debug!("connecting to database"); tracing::debug!("connecting to database");
let pool = PgPoolOptions::new() let pool = PgPoolOptions::new()
@@ -42,8 +44,6 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
id UUID PRIMARY KEY DEFAULT uuidv7(), id UUID PRIMARY KEY DEFAULT uuidv7(),
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
field_name VARCHAR(256) NOT NULL, field_name VARCHAR(256) NOT NULL,
field_type VARCHAR(32) NOT NULL DEFAULT 'string',
value_len INT NOT NULL DEFAULT 0,
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
version BIGINT NOT NULL DEFAULT 1, version BIGINT NOT NULL DEFAULT 1,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
@@ -101,8 +101,6 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
secret_id UUID NOT NULL, secret_id UUID NOT NULL,
entry_version BIGINT NOT NULL, entry_version BIGINT NOT NULL,
field_name VARCHAR(256) NOT NULL, field_name VARCHAR(256) NOT NULL,
field_type VARCHAR(32) NOT NULL DEFAULT 'string',
value_len INT NOT NULL DEFAULT 0,
encrypted BYTEA NOT NULL DEFAULT '\x', encrypted BYTEA NOT NULL DEFAULT '\x',
action VARCHAR(16) NOT NULL, action VARCHAR(16) NOT NULL,
actor VARCHAR(128) NOT NULL DEFAULT '', actor VARCHAR(128) NOT NULL DEFAULT '',
@@ -139,7 +137,7 @@ pub async fn snapshot_entry_history(
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
p: EntrySnapshotParams<'_>, p: EntrySnapshotParams<'_>,
) -> Result<()> { ) -> Result<()> {
let actor = std::env::var("USER").unwrap_or_default(); let actor = current_actor();
sqlx::query( sqlx::query(
"INSERT INTO entries_history \ "INSERT INTO entries_history \
(entry_id, namespace, kind, name, version, action, tags, metadata, actor) \ (entry_id, namespace, kind, name, version, action, tags, metadata, actor) \
@@ -166,8 +164,6 @@ pub struct SecretSnapshotParams<'a> {
pub secret_id: uuid::Uuid, pub secret_id: uuid::Uuid,
pub entry_version: i64, pub entry_version: i64,
pub field_name: &'a str, pub field_name: &'a str,
pub field_type: &'a str,
pub value_len: i32,
pub encrypted: &'a [u8], pub encrypted: &'a [u8],
pub action: &'a str, pub action: &'a str,
} }
@@ -177,18 +173,16 @@ pub async fn snapshot_secret_history(
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
p: SecretSnapshotParams<'_>, p: SecretSnapshotParams<'_>,
) -> Result<()> { ) -> Result<()> {
let actor = std::env::var("USER").unwrap_or_default(); let actor = current_actor();
sqlx::query( sqlx::query(
"INSERT INTO secrets_history \ "INSERT INTO secrets_history \
(entry_id, secret_id, entry_version, field_name, field_type, value_len, encrypted, action, actor) \ (entry_id, secret_id, entry_version, field_name, encrypted, action, actor) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)", VALUES ($1, $2, $3, $4, $5, $6, $7)",
) )
.bind(p.entry_id) .bind(p.entry_id)
.bind(p.secret_id) .bind(p.secret_id)
.bind(p.entry_version) .bind(p.entry_version)
.bind(p.field_name) .bind(p.field_name)
.bind(p.field_type)
.bind(p.value_len)
.bind(p.encrypted) .bind(p.encrypted)
.bind(p.action) .bind(p.action)
.bind(&actor) .bind(&actor)

View File

@@ -7,6 +7,11 @@ mod models;
mod output; mod output;
use anyhow::Result; use anyhow::Result;
/// Load .env from current or parent directories (best-effort, no error if missing).
fn load_dotenv() {
let _ = dotenvy::dotenv();
}
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
use tracing_subscriber::EnvFilter; use tracing_subscriber::EnvFilter;
@@ -76,25 +81,25 @@ EXAMPLES:
# Add a server # Add a server
secrets add -n refining --kind server --name my-server \\ secrets add -n refining --kind server --name my-server \\
--tag aliyun --tag shanghai \\ --tag aliyun --tag shanghai \\
-m ip=47.117.131.22 -m desc=\"Aliyun Shanghai ECS\" \\ -m ip=10.0.0.1 -m desc=\"Example ECS\" \\
-s username=root -s ssh_key=@./keys/server.pem -s username=root -s ssh_key=@./keys/server.pem
# Add a service credential # Add a service credential
secrets add -n refining --kind service --name gitea \\ secrets add -n refining --kind service --name gitea \\
--tag gitea \\ --tag gitea \\
-m url=https://gitea.refining.dev -m default_org=refining \\ -m url=https://code.example.com -m default_org=myorg \\
-s token=<token> -s token=<token>
# Add typed JSON metadata # Add typed JSON metadata
secrets add -n refining --kind service --name gitea \\ secrets add -n refining --kind service --name gitea \\
-m port:=3000 \\ -m port:=3000 \\
-m enabled:=true \\ -m enabled:=true \\
-m domains:='[\"gitea.refining.dev\",\"git.refining.dev\"]' \\ -m domains:='[\"code.example.com\",\"git.example.com\"]' \\
-m tls:='{\"enabled\":true,\"redirect_http\":true}' -m tls:='{\"enabled\":true,\"redirect_http\":true}'
# Add with token read from a file # Add with token read from a file
secrets add -n ricnsmart --kind service --name mqtt \\ secrets add -n ricnsmart --kind service --name mqtt \\
-m host=mqtt.ricnsmart.com -m port=1883 \\ -m host=mqtt.example.com -m port=1883 \\
-s password=@./mqtt_password.txt -s password=@./mqtt_password.txt
# Add typed JSON secrets # Add typed JSON secrets
@@ -106,7 +111,13 @@ EXAMPLES:
# Write a multiline file into a nested secret field # Write a multiline file into a nested secret field
secrets add -n refining --kind server --name my-server \\ secrets add -n refining --kind server --name my-server \\
-s credentials:content@./keys/server.pem")] -s credentials:content@./keys/server.pem
# Shared PEM (key_ref): store key once, reference from multiple servers
secrets add -n refining --kind key --name my-shared-key \\
--tag aliyun -s content=@./keys/shared.pem
secrets add -n refining --kind server --name i-abc123 \\
-m ip=10.0.0.1 -m key_ref=my-shared-key -s username=ecs-user")]
Add { Add {
/// Namespace, e.g. refining, ricnsmart /// Namespace, e.g. refining, ricnsmart
#[arg(short, long)] #[arg(short, long)]
@@ -114,13 +125,14 @@ EXAMPLES:
/// Kind of record: server, service, key, ... /// Kind of record: server, service, key, ...
#[arg(long)] #[arg(long)]
kind: String, kind: String,
/// Human-readable unique name, e.g. gitea, i-uf63f2uookgs5uxmrdyc /// Human-readable unique name, e.g. gitea, i-example0abcd1234efgh
#[arg(long)] #[arg(long)]
name: String, name: String,
/// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong /// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong
#[arg(long = "tag")] #[arg(long = "tag")]
tags: Vec<String>, tags: Vec<String>,
/// Plaintext metadata: key=value, key:=<json>, key=@file, or nested:path@file /// Plaintext metadata: key=value, key:=<json>, key=@file, or nested:path@file.
/// Use key_ref=<name> to reference a shared key entry (kind=key); inject/run merge its secrets.
#[arg(long = "meta", short = 'm')] #[arg(long = "meta", short = 'm')]
meta: Vec<String>, meta: Vec<String>,
/// Secret entry: key=value, key:=<json>, key=@file, or nested:path@file /// Secret entry: key=value, key:=<json>, key=@file, or nested:path@file
@@ -177,7 +189,7 @@ EXAMPLES:
/// Filter by kind, e.g. server, service /// Filter by kind, e.g. server, service
#[arg(long)] #[arg(long)]
kind: Option<String>, kind: Option<String>,
/// Exact name filter, e.g. gitea, i-uf63f2uookgs5uxmrdyc /// Exact name filter, e.g. gitea, i-example0abcd1234efgh
#[arg(long)] #[arg(long)]
name: Option<String>, name: Option<String>,
/// Filter by tag, e.g. --tag aliyun (repeatable for AND intersection) /// Filter by tag, e.g. --tag aliyun (repeatable for AND intersection)
@@ -206,23 +218,39 @@ EXAMPLES:
output: Option<String>, output: Option<String>,
}, },
/// Delete a record permanently. Requires exact namespace + kind + name. /// Delete one record precisely, or bulk-delete by namespace.
///
/// With --name: deletes exactly that record (--kind also required).
/// Without --name: bulk-deletes all records matching namespace + optional --kind.
/// Use --dry-run to preview bulk deletes before committing.
#[command(after_help = "EXAMPLES: #[command(after_help = "EXAMPLES:
# Delete a service credential # Delete a single record (exact match)
secrets delete -n refining --kind service --name legacy-mqtt secrets delete -n refining --kind service --name legacy-mqtt
# Delete a server record # Preview what a bulk delete would remove (no writes)
secrets delete -n ricnsmart --kind server --name i-old-server-id")] secrets delete -n refining --dry-run
# Bulk-delete all records in a namespace
secrets delete -n ricnsmart
# Bulk-delete only server records in a namespace
secrets delete -n ricnsmart --kind server
# JSON output
secrets delete -n refining --kind service -o json")]
Delete { Delete {
/// Namespace, e.g. refining /// Namespace, e.g. refining
#[arg(short, long)] #[arg(short, long)]
namespace: String, namespace: String,
/// Kind, e.g. server, service /// Kind filter, e.g. server, service (required with --name; optional for bulk)
#[arg(long)] #[arg(long)]
kind: String, kind: Option<String>,
/// Exact name of the record to delete /// Exact name of the record to delete (omit for bulk delete)
#[arg(long)] #[arg(long)]
name: String, name: Option<String>,
/// Preview what would be deleted without making any changes (bulk mode only)
#[arg(long)]
dry_run: bool,
/// Output format: text (default on TTY), json, json-compact /// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")] #[arg(short, long = "output")]
output: Option<String>, output: Option<String>,
@@ -266,7 +294,11 @@ EXAMPLES:
# Update nested typed JSON fields # Update nested typed JSON fields
secrets update -n refining --kind service --name deploy-bot \\ secrets update -n refining --kind service --name deploy-bot \\
-s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\ -s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\
-s auth:retry:=5")] -s auth:retry:=5
# Rotate shared PEM (all servers with key_ref=my-shared-key get the new key)
secrets update -n refining --kind key --name my-shared-key \\
-s content=@./keys/new-shared.pem")]
Update { Update {
/// Namespace, e.g. refining, ricnsmart /// Namespace, e.g. refining, ricnsmart
#[arg(short, long)] #[arg(short, long)]
@@ -283,7 +315,8 @@ EXAMPLES:
/// Remove a tag (repeatable) /// Remove a tag (repeatable)
#[arg(long = "remove-tag")] #[arg(long = "remove-tag")]
remove_tags: Vec<String>, remove_tags: Vec<String>,
/// Set or overwrite a metadata field: key=value, key:=<json>, key=@file, or nested:path@file /// Set or overwrite a metadata field: key=value, key:=<json>, key=@file, or nested:path@file.
/// Use key_ref=<name> to reference a shared key entry (kind=key).
#[arg(long = "meta", short = 'm')] #[arg(long = "meta", short = 'm')]
meta: Vec<String>, meta: Vec<String>,
/// Delete a metadata field by key or nested path, e.g. old_port or credentials:content /// Delete a metadata field by key or nested path, e.g. old_port or credentials:content
@@ -373,7 +406,9 @@ EXAMPLES:
secrets inject -n refining --kind service --name gitea -o json secrets inject -n refining --kind service --name gitea -o json
# Eval into current shell (use with caution) # Eval into current shell (use with caution)
eval $(secrets inject -n refining --kind service --name gitea)")] eval $(secrets inject -n refining --kind service --name gitea)
# For entries with metadata.key_ref, referenced key's secrets are merged automatically")]
Inject { Inject {
#[arg(short, long)] #[arg(short, long)]
namespace: Option<String>, namespace: Option<String>,
@@ -403,7 +438,9 @@ EXAMPLES:
secrets run --tag production -- env | grep GITEA secrets run --tag production -- env | grep GITEA
# With prefix # With prefix
secrets run -n refining --kind service --name gitea --prefix GITEA -- printenv")] secrets run -n refining --kind service --name gitea --prefix GITEA -- printenv
# metadata.key_ref entries get key secrets merged (e.g. server + shared PEM)")]
Run { Run {
#[arg(short, long)] #[arg(short, long)]
namespace: Option<String>, namespace: Option<String>,
@@ -423,8 +460,8 @@ EXAMPLES:
/// Check for a newer version and update the binary in-place. /// Check for a newer version and update the binary in-place.
/// ///
/// Downloads the latest release from Gitea and replaces the current binary. /// Downloads the latest release and replaces the current binary. No database connection or master key required.
/// No database connection or master key required. /// Release URL defaults to the upstream server; override via SECRETS_UPGRADE_URL for self-hosted or fork.
#[command(after_help = "EXAMPLES: #[command(after_help = "EXAMPLES:
# Check for updates only (no download) # Check for updates only (no download)
secrets upgrade --check secrets upgrade --check
@@ -530,6 +567,7 @@ enum ConfigAction {
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
load_dotenv();
let cli = Cli::parse(); let cli = Cli::parse();
let filter = if cli.verbose { let filter = if cli.verbose {
@@ -634,12 +672,23 @@ async fn main() -> Result<()> {
namespace, namespace,
kind, kind,
name, name,
dry_run,
output, output,
} => { } => {
let _span = let _span =
tracing::info_span!("cmd", command = "delete", %namespace, %kind, %name).entered(); tracing::info_span!("cmd", command = "delete", %namespace, ?kind, ?name).entered();
let out = resolve_output_mode(output.as_deref())?; let out = resolve_output_mode(output.as_deref())?;
commands::delete::run(&pool, &namespace, &kind, &name, out).await?; commands::delete::run(
&pool,
commands::delete::DeleteArgs {
namespace: &namespace,
kind: kind.as_deref(),
name: name.as_deref(),
dry_run,
output: out,
},
)
.await?;
} }
Commands::Update { Commands::Update {
@@ -685,7 +734,17 @@ async fn main() -> Result<()> {
output, output,
} => { } => {
let out = resolve_output_mode(output.as_deref())?; let out = resolve_output_mode(output.as_deref())?;
commands::rollback::list_history(&pool, &namespace, &kind, &name, limit, out).await?; commands::history::run(
&pool,
commands::history::HistoryArgs {
namespace: &namespace,
kind: &kind,
name: &name,
limit,
output: out,
},
)
.await?;
} }
Commands::Rollback { Commands::Rollback {

View File

@@ -20,17 +20,11 @@ pub struct Entry {
} }
/// A single encrypted field belonging to an Entry. /// A single encrypted field belonging to an Entry.
/// field_name, field_type, and value_len are stored in plaintext so that
/// `search` can show the schema without requiring the master key.
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
pub struct SecretField { pub struct SecretField {
pub id: Uuid, pub id: Uuid,
pub entry_id: Uuid, pub entry_id: Uuid,
pub field_name: String, pub field_name: String,
/// Inferred type: "string", "number", "boolean", "json"
pub field_type: String,
/// Length of the plaintext value in characters (0 for binary-like PEM)
pub value_len: i32,
/// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag /// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag
pub encrypted: Vec<u8>, pub encrypted: Vec<u8>,
pub version: i64, pub version: i64,
@@ -38,6 +32,25 @@ pub struct SecretField {
pub updated_at: DateTime<Utc>, pub updated_at: DateTime<Utc>,
} }
// ── Internal query row types (shared across commands) ─────────────────────────
/// Minimal entry row fetched for write operations (add / update / delete / rollback).
#[derive(Debug, sqlx::FromRow)]
pub struct EntryRow {
pub id: Uuid,
pub version: i64,
pub tags: Vec<String>,
pub metadata: Value,
}
/// Minimal secret field row fetched before snapshots or cascade deletes.
#[derive(Debug, sqlx::FromRow)]
pub struct SecretFieldRow {
pub id: Uuid,
pub field_name: String,
pub encrypted: Vec<u8>,
}
// ── Export / Import types ────────────────────────────────────────────────────── // ── Export / Import types ──────────────────────────────────────────────────────
/// Supported file formats for export/import. /// Supported file formats for export/import.
@@ -52,15 +65,12 @@ impl ExportFormat {
/// Infer format from file extension (.json / .toml / .yaml / .yml). /// Infer format from file extension (.json / .toml / .yaml / .yml).
pub fn from_extension(path: &str) -> anyhow::Result<Self> { pub fn from_extension(path: &str) -> anyhow::Result<Self> {
let ext = path.rsplit('.').next().unwrap_or("").to_lowercase(); let ext = path.rsplit('.').next().unwrap_or("").to_lowercase();
match ext.as_str() { Self::from_str(&ext).map_err(|_| {
"json" => Ok(Self::Json), anyhow::anyhow!(
"toml" => Ok(Self::Toml),
"yaml" | "yml" => Ok(Self::Yaml),
other => anyhow::bail!(
"Cannot infer format from extension '.{}'. Use --format json|toml|yaml", "Cannot infer format from extension '.{}'. Use --format json|toml|yaml",
other ext
), )
} })
} }
/// Parse from --format CLI value. /// Parse from --format CLI value.
@@ -146,16 +156,12 @@ pub fn json_to_toml_value(v: &Value) -> anyhow::Result<toml::Value> {
} }
Value::String(s) => Ok(toml::Value::String(s.clone())), Value::String(s) => Ok(toml::Value::String(s.clone())),
Value::Array(arr) => { Value::Array(arr) => {
// Check for uniform scalar type (TOML requires homogeneous arrays at the value level,
// though arrays of tables are handled separately via TOML's [[table]] syntax).
// For simplicity we convert each element; if types are mixed, toml crate will
// handle it gracefully or we fall back to a JSON string.
let items: anyhow::Result<Vec<toml::Value>> = let items: anyhow::Result<Vec<toml::Value>> =
arr.iter().map(json_to_toml_value).collect(); arr.iter().map(json_to_toml_value).collect();
match items { match items {
Ok(vals) => Ok(toml::Value::Array(vals)), Ok(vals) => Ok(toml::Value::Array(vals)),
Err(_) => { Err(e) => {
// Fallback: serialise as JSON string tracing::debug!(error = %e, "mixed-type array; falling back to JSON string");
Ok(toml::Value::String(serde_json::to_string(v)?)) Ok(toml::Value::String(serde_json::to_string(v)?))
} }
} }
@@ -171,8 +177,8 @@ pub fn json_to_toml_value(v: &Value) -> anyhow::Result<toml::Value> {
Ok(tv) => { Ok(tv) => {
toml_map.insert(k.clone(), tv); toml_map.insert(k.clone(), tv);
} }
Err(_) => { Err(e) => {
// Fallback: serialise as JSON string tracing::debug!(key = %k, error = %e, "field not representable in TOML; falling back to JSON string");
toml_map toml_map
.insert(k.clone(), toml::Value::String(serde_json::to_string(val)?)); .insert(k.clone(), toml::Value::String(serde_json::to_string(val)?));
} }

View File

@@ -50,3 +50,16 @@ pub fn format_local_time(dt: DateTime<Utc>) -> String {
.format("%Y-%m-%d %H:%M:%S %:z") .format("%Y-%m-%d %H:%M:%S %:z")
.to_string() .to_string()
} }
/// Print a JSON value to stdout in the requested output mode.
/// - `Json` → pretty-printed
/// - `JsonCompact` → single line
/// - `Text` → no-op (caller is responsible for the text branch)
pub fn print_json(value: &serde_json::Value, mode: &OutputMode) -> anyhow::Result<()> {
match mode {
OutputMode::Json => println!("{}", serde_json::to_string_pretty(value)?),
OutputMode::JsonCompact => println!("{}", serde_json::to_string(value)?),
OutputMode::Text => {}
}
Ok(())
}

View File

@@ -0,0 +1,3 @@
-----BEGIN EXAMPLE KEY PLACEHOLDER-----
This file is for local dev/testing. Replace with a real key when needed.
-----END EXAMPLE KEY PLACEHOLDER-----