Compare commits

12 Commits · secrets-0. ... secrets-0.

| SHA1 |
|---|
| 62a1df316b |
| d0796e9c9a |
| 66b6417faa |
| 56a28e8cf7 |
| 12aec6675a |
| e1cd6e736c |
| 0a5317e477 |
| efa76cae55 |
| 5a5867adc1 |
| 4ddafbe4b6 |
| 6ea9f0861b |
| 3973295d6a |
.gitea/workflows/secrets.yml

@@ -7,7 +7,6 @@ on:
       - 'src/**'
       - 'Cargo.toml'
       - 'Cargo.lock'
       - '.gitea/workflows/secrets.yml'
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -18,6 +17,7 @@ permissions:
 
 env:
   BINARY_NAME: secrets
+  SECRETS_UPGRADE_URL: ${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/latest
   CARGO_INCREMENTAL: 0
   CARGO_NET_RETRY: 10
   CARGO_TERM_COLOR: always
@@ -56,6 +56,13 @@ jobs:
             echo "Will create new version ${tag}"
           fi
 
+      - name: Strictly block duplicate versions
+        if: steps.ver.outputs.tag_exists == 'true'
+        run: |
+          echo "Error: version ${{ steps.ver.outputs.tag }} already exists; releasing it again is forbidden."
+          echo "Bump the version in Cargo.toml first, then run cargo build to sync Cargo.lock."
+          exit 1
+
       - name: Create tag
         if: steps.ver.outputs.tag_exists == 'false'
         run: |
@@ -327,11 +334,14 @@ jobs:
       - name: Install dependencies
         shell: pwsh
         run: |
+          $cargoBin = Join-Path $env:USERPROFILE ".cargo\bin"
           if (-not (Get-Command cargo -ErrorAction SilentlyContinue)) {
             Invoke-WebRequest -Uri "https://win.rustup.rs/x86_64" -OutFile rustup-init.exe
             .\rustup-init.exe -y --default-toolchain stable
             Remove-Item rustup-init.exe
           }
+          $env:Path = "$cargoBin;$env:Path"
+          Add-Content -Path $env:GITHUB_PATH -Value $cargoBin
          rustup target add x86_64-pc-windows-msvc
 
       - uses: actions/checkout@v4
.vscode/tasks.json (vendored) — 8 changes

@@ -104,9 +104,9 @@
         "dependsOn": "build"
       },
       {
-        "label": "test: search with secrets revealed",
+        "label": "test: inject service secrets",
         "type": "shell",
-        "command": "./target/debug/secrets search -n refining --kind service --show-secrets",
+        "command": "./target/debug/secrets inject -n refining --kind service --name gitea",
         "dependsOn": "build"
       },
       {
@@ -118,7 +118,7 @@
       {
         "label": "test: add + delete roundtrip",
         "type": "shell",
-        "command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name roundtrip-test --tag test -m foo=bar -s password=secret123 && echo '--- search ---' && ./target/debug/secrets search -n test --show-secrets && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name roundtrip-test && echo '--- verify deleted ---' && ./target/debug/secrets search -n test",
+        "command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name roundtrip-test --tag test -m foo=bar -s password=secret123 && echo '--- search metadata ---' && ./target/debug/secrets search -n test && echo '--- inject secrets ---' && ./target/debug/secrets inject -n test --kind demo --name roundtrip-test && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name roundtrip-test && echo '--- verify deleted ---' && ./target/debug/secrets search -n test",
         "dependsOn": "build"
       },
       {
@@ -142,7 +142,7 @@
       {
         "label": "test: add with file secret",
         "type": "shell",
-        "command": "echo '--- add key from file ---' && ./target/debug/secrets add -n test --kind key --name test-key --tag test -s content=@./refining/keys/Vultr && echo '--- verify ---' && ./target/debug/secrets search -n test --kind key --show-secrets && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind key --name test-key",
+        "command": "echo '--- add key from file ---' && ./target/debug/secrets add -n test --kind key --name test-key --tag test -s content=@./test-fixtures/example-key.pem && echo '--- verify metadata ---' && ./target/debug/secrets search -n test --kind key && echo '--- verify inject ---' && ./target/debug/secrets inject -n test --kind key --name test-key && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind key --name test-key",
         "dependsOn": "build"
       }
   ]
AGENTS.md — 319 changes

@@ -1,6 +1,13 @@
 # Secrets CLI — AGENTS.md
 
-Cross-device secret and config management CLI that stores server information and service credentials for the refining / ricnsmart projects in PostgreSQL 18, so AI tools can read them as context. Sensitive data (the `encrypted` field) is encrypted with AES-256-GCM; the master key is derived from the master password via Argon2id and stored in the platform's secure store (macOS Keychain / Windows Credential Manager / Linux keyutils).
+## Hard rules for commits / releases (take precedence over everything below)
+
+1. Commits touching `src/**`, `Cargo.toml`, `Cargo.lock`, or CLI behavior are by default treated as **requiring a release**, unless the user explicitly says "no release this time".
+2. Before releasing, first check `version` in `Cargo.toml`, then check whether the matching tag already exists: `git tag -l 'secrets-*'`.
+3. If the tag for the current version already exists, bump `version` in `Cargo.toml` first, run `cargo build` to sync `Cargo.lock`, and only then commit.
+4. Prefer running `./scripts/release-check.sh` before committing; the script checks for duplicate versions and runs `cargo fmt -- --check && cargo clippy --locked -- -D warnings && cargo test --locked`.
+
+Cross-device secret and config management CLI that stores server information, service credentials, and so on in PostgreSQL 18, so AI tools can read them as context. Each encrypted field is stored as its own row (the `secrets` child table) with field name, type, and length kept in plaintext; the master key is derived from the master password via Argon2id and stored in the platform's secure store (macOS Keychain / Windows Credential Manager / Linux keyutils).
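The derive-then-encrypt flow described above can be sketched with the `argon2` and `aes-gcm` crates pinned in Cargo.toml. This is an illustrative sketch, not the repository's actual `crypto.rs`; the function names and error handling are assumptions:

```rust
use aes_gcm::aead::{Aead, AeadCore, KeyInit, OsRng};
use aes_gcm::Aes256Gcm;
use argon2::Argon2;

/// Derive the 32-byte master key from the master password and the shared
/// salt stored in kv_config ('argon2_salt').
fn derive_master_key(password: &[u8], salt: &[u8]) -> anyhow::Result<[u8; 32]> {
    let mut key = [0u8; 32];
    Argon2::default()
        .hash_password_into(password, salt, &mut key)
        .map_err(|e| anyhow::anyhow!("argon2id derivation failed: {e}"))?;
    Ok(key)
}

/// Encrypt one value; the output layout matches the schema comment below:
/// nonce(12B) || ciphertext+tag.
fn encrypt_value(key: &[u8; 32], plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
    let cipher = Aes256Gcm::new_from_slice(key)
        .map_err(|e| anyhow::anyhow!("bad key length: {e}"))?;
    let nonce = Aes256Gcm::generate_nonce(&mut OsRng); // fresh random 12-byte nonce
    let ciphertext = cipher
        .encrypt(&nonce, plaintext)
        .map_err(|e| anyhow::anyhow!("encryption failed: {e}"))?;
    Ok([nonce.as_slice(), ciphertext.as_slice()].concat())
}
```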
 ## Project structure
 
@@ -10,21 +17,25 @@ secrets/
     main.rs      # CLI entry point, clap command definitions, auto-migrate, --verbose global flag
     output.rs    # OutputMode enum + TTY detection (TTY → text, non-TTY → json-compact)
     config.rs    # config read/write: ~/.config/secrets/config.toml (database_url)
-    db.rs        # PgPool creation + table/index setup (idempotent, incl. audit_log + kv_config + secrets_history)
+    db.rs        # PgPool creation + table/index setup (DROP+CREATE, all tables)
     crypto.rs    # AES-256-GCM encryption/decryption, Argon2id derivation, OS keychain
-    models.rs    # Secret struct (sqlx::FromRow + serde, incl. version field)
-    audit.rs     # audit writes: log_tx (in-transaction) / log (pool, kept in reserve)
+    models.rs    # Entry + SecretField structs (sqlx::FromRow + serde)
+    audit.rs     # audit writes: log_tx (in-transaction)
     commands/
       init.rs       # init command: master-key initialization (once per device)
-      add.rs        # add command: upsert, transactional, with history snapshot, supports key:=json typed values
+      add.rs        # add command: upserts entries + writes secrets field by field, with history snapshot
       config.rs     # config command: set-db / show / path (persists database_url)
-      search.rs     # search command: multi-criteria queries, exposes fetch_rows / build_env_map
-      delete.rs     # delete command: transactional, with history snapshot
-      update.rs     # update command: incremental updates, CAS concurrency protection, with history snapshot
-      rollback.rs   # rollback / history commands: version rollback and history viewing
-      run.rs        # inject / run commands: temporary env-var injection
+      search.rs     # search command: multi-criteria queries, shows the secrets field schema (no master_key needed)
+      delete.rs     # delete command: transactional, CASCADE-deletes secrets, with history snapshot
+      update.rs     # update command: incremental updates, row-level UPSERT/DELETE on secrets, CAS concurrency protection
+      rollback.rs   # rollback command: restores entry + secrets by entry_version
+      history.rs    # history command: lists an entry's change history
+      run.rs        # inject / run commands: per-field decryption + key_ref reference resolution
       upgrade.rs    # upgrade command: checks for, verifies the digest of, and downloads the latest version, then swaps the binary automatically
+      export_cmd.rs # export command: bulk export, JSON/TOML/YAML, includes decrypted plaintext
+      import_cmd.rs # import command: bulk import, conflict detection, dry-run, re-encrypts on write
   scripts/
+    release-check.sh       # pre-release check for duplicate version/tag, then fmt/clippy/test
     setup-gitea-actions.sh # configures Gitea Actions variables and secrets
   .gitea/workflows/
     secrets.yml    # CI: fmt + clippy + musl build + release upload + Feishu notification
@@ -36,19 +47,18 @@ secrets/
 - **Host**: `<host>:<port>`
 - **Database**: `secrets`
 - **Connection string**: `postgres://postgres:<password>@<host>:<port>/secrets`
-- **Tables**: `secrets` (main) + `audit_log` (audit) + `kv_config` (Argon2 salt etc.), auto-created on first connection (auto-migrate)
+- **Tables**: `entries` (main) + `secrets` (encrypted-field child table) + `entries_history` + `secrets_history` + `audit_log` + `kv_config`, auto-created on first connection (auto-migrate)
 
 ### Table schema
 
 ```sql
-secrets (
+entries (
   id UUID PRIMARY KEY DEFAULT uuidv7(),        -- PG18 time-ordered UUID
   namespace VARCHAR(64) NOT NULL,              -- first-level isolation: "refining" | "ricnsmart"
-  kind VARCHAR(64) NOT NULL,                   -- type: "server" | "service" (extensible)
+  kind VARCHAR(64) NOT NULL,                   -- type: "server" | "service" | "key" (extensible)
   name VARCHAR(256) NOT NULL,                  -- human-readable identifier
   tags TEXT[] NOT NULL DEFAULT '{}',           -- flexible tags: ["aliyun","hongkong"]
   metadata JSONB NOT NULL DEFAULT '{}',        -- plaintext description: ip, desc, domains, location...
-  encrypted BYTEA NOT NULL DEFAULT '\x',       -- AES-256-GCM ciphertext: nonce(12B)||ciphertext+tag
   version BIGINT NOT NULL DEFAULT 1,           -- optimistic-lock version, incremented on every write
   created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
   updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
@@ -57,26 +67,24 @@ secrets (
 ```
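The `version` column above backs the CAS (compare-and-swap) concurrency protection that `update.rs` is described as using. A minimal sqlx sketch of such a guarded write — illustrative only, with an invented helper name:

```rust
use sqlx::PgPool;
use uuid::Uuid;

/// Compare-and-swap update: succeeds only if nobody bumped `version` since
/// we read `expected_version`. Returns false when the race was lost.
async fn cas_touch_entry(
    pool: &PgPool,
    id: Uuid,
    expected_version: i64,
) -> sqlx::Result<bool> {
    let res = sqlx::query(
        "UPDATE entries \
         SET version = version + 1, updated_at = NOW() \
         WHERE id = $1 AND version = $2",
    )
    .bind(id)
    .bind(expected_version)
    .execute(pool)
    .await?;
    Ok(res.rows_affected() == 1)
}
```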
 ```sql
-secrets_history (
-  id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
-  secret_id UUID NOT NULL,                     -- references secrets.id
-  namespace VARCHAR(64) NOT NULL,
-  kind VARCHAR(64) NOT NULL,
-  name VARCHAR(256) NOT NULL,
-  version BIGINT NOT NULL,                     -- version at snapshot time
-  action VARCHAR(16) NOT NULL,                 -- 'add' | 'update' | 'delete' | 'rollback'
-  tags TEXT[] NOT NULL DEFAULT '{}',
-  metadata JSONB NOT NULL DEFAULT '{}',
-  encrypted BYTEA NOT NULL DEFAULT '\x',       -- ciphertext at snapshot time
-  actor VARCHAR(128) NOT NULL DEFAULT '',
-  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+secrets (
+  id UUID PRIMARY KEY DEFAULT uuidv7(),
+  entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
+  field_name VARCHAR(256) NOT NULL,                 -- plaintext field name: "username", "token", "ssh_key"
+  field_type VARCHAR(32) NOT NULL DEFAULT 'string', -- plaintext type: "string"|"number"|"boolean"|"json"
+  value_len INT NOT NULL DEFAULT 0,                 -- plaintext length of the original value in chars (PEM≈4096, token≈40)
+  encrypted BYTEA NOT NULL DEFAULT '\x',            -- only the value itself is encrypted: nonce(12B)||ciphertext+tag
+  version BIGINT NOT NULL DEFAULT 1,
+  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+  UNIQUE(entry_id, field_name)
 )
 ```
 
 ```sql
 kv_config (
   key TEXT PRIMARY KEY,      -- e.g. 'argon2_salt'
   value BYTEA NOT NULL       -- Argon2id salt, generated when the first device runs init
 )
 ```
 
@@ -85,26 +93,85 @@ kv_config (
 ```sql
 audit_log (
   id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
-  action VARCHAR(32) NOT NULL,             -- 'add' | 'update' | 'delete'
+  action VARCHAR(32) NOT NULL,             -- 'add' | 'update' | 'delete' | 'rollback'
   namespace VARCHAR(64) NOT NULL,
   kind VARCHAR(64) NOT NULL,
   name VARCHAR(256) NOT NULL,
   detail JSONB NOT NULL DEFAULT '{}',      -- change summary (tags / meta keys / secret keys, no values)
   actor VARCHAR(128) NOT NULL DEFAULT '',  -- operator ($USER environment variable)
   created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
 )
 ```
 
+### entries_history schema
+
+```sql
+entries_history (
+  id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+  entry_id UUID NOT NULL,
+  namespace VARCHAR(64) NOT NULL,
+  kind VARCHAR(64) NOT NULL,
+  name VARCHAR(256) NOT NULL,
+  version BIGINT NOT NULL,      -- version at snapshot time
+  action VARCHAR(16) NOT NULL,  -- 'add' | 'update' | 'delete' | 'rollback'
+  tags TEXT[] NOT NULL DEFAULT '{}',
+  metadata JSONB NOT NULL DEFAULT '{}',
+  actor VARCHAR(128) NOT NULL DEFAULT '',
+  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+)
+```
+
+### secrets_history schema
+
+```sql
+secrets_history (
+  id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+  entry_id UUID NOT NULL,
+  secret_id UUID NOT NULL,          -- references secrets.id
+  entry_version BIGINT NOT NULL,    -- links to the entries_history version
+  field_name VARCHAR(256) NOT NULL,
+  field_type VARCHAR(32) NOT NULL DEFAULT 'string',
+  value_len INT NOT NULL DEFAULT 0,
+  encrypted BYTEA NOT NULL DEFAULT '\x',
+  action VARCHAR(16) NOT NULL,      -- 'add' | 'update' | 'delete' | 'rollback'
+  actor VARCHAR(128) NOT NULL DEFAULT '',
+  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+)
+```
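A pre-write snapshot into `entries_history`, as the history tables above imply, might look like this inside a transaction (a hedged sketch: the column list follows the schema above, but the helper body is assumed, not the repository's `db::snapshot_entry_history`):

```rust
use sqlx::{Postgres, Transaction};
use uuid::Uuid;

/// Copy the current entry state into entries_history before overwriting it,
/// so `rollback` can restore this version later.
async fn snapshot_entry(
    tx: &mut Transaction<'_, Postgres>,
    entry_id: Uuid,
    action: &str,
    actor: &str,
) -> sqlx::Result<()> {
    sqlx::query(
        "INSERT INTO entries_history \
           (entry_id, namespace, kind, name, version, action, tags, metadata, actor) \
         SELECT id, namespace, kind, name, version, $2, tags, metadata, $3 \
         FROM entries WHERE id = $1",
    )
    .bind(entry_id)
    .bind(action)
    .bind(actor)
    .execute(&mut **tx)
    .await?;
    Ok(())
}
```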
 ### Field responsibilities
 
 | Field | What it stores | Example |
 |------|--------|------|
 | `namespace` | project/team isolation | `refining`, `ricnsmart` |
-| `kind` | record type | `server`, `service` |
-| `name` | unique identifying name | `i-uf63f2uookgs5uxmrdyc`, `gitea` |
+| `kind` | record type | `server`, `service`, `key` |
+| `name` | unique identifying name | `i-example0abcd1234efgh`, `gitea` |
 | `tags` | multi-dimensional classification tags | `["aliyun","hongkong","ricn"]` |
-| `metadata` | plaintext non-sensitive info | `{"ip":"47.243.154.187","desc":"Grafana","domains":["..."]}` |
-| `encrypted` | sensitive credentials, stored AES-256-GCM encrypted | binary ciphertext; decrypts to `{"ssh_key":"...","password":"..."}` |
+| `metadata` | plaintext non-sensitive info | `{"ip":"192.0.2.1","desc":"Grafana","key_ref":"my-shared-key"}` |
+| `secrets.field_name` | encrypted field's name (plaintext) | `"username"`, `"token"`, `"ssh_key"` |
+| `secrets.field_type` | value type (plaintext) | `"string"`, `"number"`, `"boolean"`, `"json"` |
+| `secrets.value_len` | original value length in characters (plaintext) | `4` (root), `40` (token), `4096` (PEM) |
+| `secrets.encrypted` | only the value itself, encrypted | AES-256-GCM ciphertext |
 
+### PEM sharing mechanism (key_ref)
+
+When several servers share the same PEM, store the PEM as a standalone `kind=key` record and have each server reference it via `metadata.key_ref`:
+
+```bash
+# 1. Store the shared PEM
+secrets add -n refining --kind key --name my-shared-key \
+  --tag aliyun --tag hongkong \
+  -s content=@./keys/my-shared-key.pem
+
+# 2. A server references it via metadata.key_ref (inject/run automatically merge the key's secrets)
+secrets add -n refining --kind server --name i-example0xyz789 \
+  -m ip=192.0.2.1 -m key_ref=my-shared-key \
+  -s username=ecs-user
+
+# 3. Rotation only needs to update the key record; every referencing server picks it up automatically
+secrets update -n refining --kind key --name my-shared-key \
+  -s content=@./keys/new-key.pem
+```
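How `inject`/`run` might resolve a `metadata.key_ref` reference per the mechanism above (an illustrative sketch — the real logic lives in `run.rs`; the query shape and merging strategy here are assumptions):

```rust
use serde_json::Value;
use sqlx::PgPool;

/// If the entry's metadata carries key_ref, fetch the referenced kind=key
/// entry's secret field names so they can be merged into the injected env.
async fn resolve_key_ref(
    pool: &PgPool,
    namespace: &str,
    metadata: &Value,
) -> sqlx::Result<Vec<String>> {
    let Some(key_name) = metadata.get("key_ref").and_then(Value::as_str) else {
        return Ok(Vec::new()); // no reference → nothing extra to merge
    };
    // Look up the shared key entry and list its secret field names.
    sqlx::query_scalar(
        "SELECT s.field_name FROM secrets s \
         JOIN entries e ON e.id = s.entry_id \
         WHERE e.namespace = $1 AND e.kind = 'key' AND e.name = $2",
    )
    .bind(namespace)
    .bind(key_name)
    .fetch_all(pool)
    .await
}
```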
 ## Database configuration
 
@@ -145,7 +212,6 @@ secrets init  # prompts for the master password, derives the master key via Argon2id and stores it in the OS
 - TTY (run directly in a terminal) → defaults to `text`
 - non-TTY (pipe / redirect / AI invocation) → automatically `json-compact`
 - explicit `-o json` → pretty-printed JSON
-- explicit `-o env` → KEY=VALUE (sourceable)
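The TTY detection behind these defaults can be sketched with the standard library alone (illustrative; the actual `output.rs` may differ):

```rust
use std::io::{stdout, IsTerminal};

#[derive(Clone, Copy, Debug)]
enum OutputMode {
    Text,
    Json,
    JsonCompact,
}

/// TTY → human-readable text; pipe/redirect/AI caller → compact JSON.
fn default_output_mode() -> OutputMode {
    if stdout().is_terminal() {
        OutputMode::Text
    } else {
        OutputMode::JsonCompact
    }
}
```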
 ---
 
@@ -165,16 +231,16 @@ secrets init
 # Parameter reference (with typical values)
 # -n / --namespace   refining | ricnsmart
 # --kind             server | service
-# --name             gitea | i-uf63f2uookgs5uxmrdyc | mqtt
+# --name             gitea | i-example0abcd1234efgh | mqtt
 # --tag              aliyun | hongkong | production
 # -q / --query       mqtt | grafana | gitea (fuzzy match against name/namespace/kind/tags/metadata)
-# --show-secrets     valueless flag, reveals the encrypted field's content
-# -f / --field       metadata.ip | metadata.url | secret.token | secret.ssh_key
+# secrets schema     search shows secrets field names, types, and lengths by default (no master_key needed)
+# -f / --field       metadata.ip | metadata.url | metadata.default_org
 # --summary          valueless flag, returns only a summary (name/tags/desc/updated_at)
 # --limit            20 | 50 (default 50)
 # --offset           0 | 10 | 20 (pagination offset)
 # --sort             name (default) | updated | created
-# -o / --output      text | json | json-compact | env
+# -o / --output      text | json | json-compact
 
 # Discovery overview (recommended starting point)
 secrets search --summary --limit 20
@@ -183,21 +249,24 @@ secrets search --sort updated --limit 10 --summary
 
 # Pinpoint a single record
 secrets search -n refining --kind service --name gitea
-secrets search -n refining --kind server --name i-uf63f2uookgs5uxmrdyc
+secrets search -n refining --kind server --name i-example0abcd1234efgh
 
-# Pinpoint and fetch the full content (including secrets)
-secrets search -n refining --kind service --name gitea -o json --show-secrets
+# Pinpoint and fetch the full content (secrets stay as encrypted placeholders)
+secrets search -n refining --kind service --name gitea -o json
 
-# Extract field values directly (shortest path; -f secret.* auto-unlocks secrets)
-secrets search -n refining --kind service --name gitea -f secret.token
+# Extract metadata field values directly (shortest path)
+secrets search -n refining --kind service --name gitea -f metadata.url
 secrets search -n refining --kind service --name gitea \
-  -f metadata.url -f metadata.default_org -f secret.token
+  -f metadata.url -f metadata.default_org
 
+# When you need the secrets, switch to inject / run
+secrets inject -n refining --kind service --name gitea
+secrets run -n refining --kind service --name gitea -- printenv
 
 # Fuzzy keyword search
 secrets search -q mqtt
 secrets search -q grafana
-secrets search -q 47.117
+secrets search -q 192.0.2
 
 # Filter by criteria
 secrets search -n refining --kind service
@@ -211,11 +280,6 @@ secrets search -n refining --summary --limit 10 --offset 10
 
 # Pipe / AI invocation (non-TTY auto json-compact)
 secrets search -n refining --kind service | jq '.[].name'
-secrets search -n refining --kind service --name gitea --show-secrets | jq '.secrets.token'
-
-# Export as an env file (single record)
-secrets search -n refining --kind service --name gitea -o env --show-secrets \
-  > ~/.config/gitea/config.env
 ```
 
 ---
 
@@ -226,28 +290,32 @@ secrets search -n refining --kind service --name gitea -o env --show-secrets \
 # Parameter reference (with typical values)
 # -n / --namespace   refining | ricnsmart
 # --kind             server | service
-# --name             gitea | i-uf63f2uookgs5uxmrdyc
+# --name             gitea | i-example0abcd1234efgh
 # --tag              aliyun | hongkong (repeatable)
-# -m / --meta        ip=47.117.131.22 | desc="Aliyun ECS" | url=https://... (repeatable)
-# -s / --secret      token=<value> | ssh_key=@./key.pem | password=secret123 (repeatable)
+# -m / --meta        ip=10.0.0.1 | desc="ECS" | url=https://... | tls:cert@./cert.pem (repeatable)
+# -s / --secret      token=<value> | ssh_key=@./key.pem | password=secret123 | credentials:content@./key.pem (repeatable)
 
 # Add a server
-secrets add -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \
+secrets add -n refining --kind server --name i-example0abcd1234efgh \
   --tag aliyun --tag shanghai \
-  -m ip=47.117.131.22 -m desc="Aliyun Shanghai ECS" \
-  -s username=root -s ssh_key=@./keys/voson_shanghai_e.pem
+  -m ip=10.0.0.1 -m desc="Aliyun Shanghai ECS" \
+  -s username=root -s ssh_key=@./keys/deploy-key.pem
 
 # Add service credentials
 secrets add -n refining --kind service --name gitea \
   --tag gitea \
-  -m url=https://gitea.refining.dev -m default_org=refining -m username=voson \
+  -m url=https://code.example.com -m default_org=refining -m username=voson \
   -s token=<token> -s runner_token=<runner_token>
 
 # Read a token from a file
 secrets add -n ricnsmart --kind service --name mqtt \
-  -m host=mqtt.ricnsmart.com -m port=1883 \
+  -m host=mqtt.example.com -m port=1883 \
   -s password=@./mqtt_password.txt
 
+# Write a multi-line file straight into a nested secret field
+secrets add -n refining --kind server --name i-example0abcd1234efgh \
+  -s credentials:content@./keys/deploy-key.pem
 
 # Use typed values (key:=<json>) to store non-string types
 secrets add -n refining --kind service --name prometheus \
   -m scrape_interval:=15 \
@@ -266,16 +334,16 @@ secrets add -n refining --kind service --name prometheus \
 # Parameter reference (with typical values)
 # -n / --namespace   refining | ricnsmart
 # --kind             server | service
-# --name             gitea | i-uf63f2uookgs5uxmrdyc
+# --name             gitea | i-example0abcd1234efgh
 # --add-tag          production | backup (leaves existing tags intact, repeatable)
 # --remove-tag       staging | deprecated (repeatable)
-# -m / --meta        ip=10.0.0.1 | desc="new description" (adds or overwrites, repeatable)
-# --remove-meta      old_port | legacy_key (removes metadata fields, repeatable)
-# -s / --secret      token=<new> | ssh_key=@./new.pem (adds or overwrites, repeatable)
-# --remove-secret    old_password | deprecated_key (removes secret fields, repeatable)
+# -m / --meta        ip=10.0.0.1 | desc="new description" | credentials:username=root (adds or overwrites, repeatable)
+# --remove-meta      old_port | legacy_key | credentials:content (removes metadata fields, repeatable)
+# -s / --secret      token=<new> | ssh_key=@./new.pem | credentials:content@./new.pem (adds or overwrites, repeatable)
+# --remove-secret    old_password | deprecated_key | credentials:content (removes secret fields, repeatable)
 
 # Update a single metadata field
-secrets update -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \
+secrets update -n refining --kind server --name i-example0abcd1234efgh \
   -m ip=10.0.0.1
 
 # Rotate a token
@@ -291,25 +359,48 @@ secrets update -n refining --kind service --name gitea \
 secrets update -n refining --kind service --name mqtt \
   --remove-meta old_port --remove-secret old_password
 
+# Update a nested secret field from a file
+secrets update -n refining --kind server --name i-example0abcd1234efgh \
+  -s credentials:content@./keys/deploy-key.pem
+
+# Remove a nested field
+secrets update -n refining --kind server --name i-example0abcd1234efgh \
+  --remove-secret credentials:content
 
 # Remove a tag
 secrets update -n refining --kind service --name gitea --remove-tag staging
 ```
 
 ---
 
-### delete — delete records
+### delete — delete records (supports precise single-record and bulk deletion)
+
+Deletion automatically snapshots the entry and all associated secret fields into the history tables and writes an audit log entry; records can be restored via the `rollback` command.
 
 ```bash
 # Parameter reference (with typical values)
-# -n / --namespace   refining | ricnsmart
-# --kind             server | service
-# --name             gitea | i-uf63f2uookgs5uxmrdyc (must match exactly)
+# -n / --namespace   refining | ricnsmart (required)
+# --kind             server | service (required with --name; optional for bulk)
+# --name             gitea | i-example0abcd1234efgh (exact match; omit for bulk deletion)
+# --dry-run          previews what would be deleted without writing (bulk mode only)
 # -o / --output      text | json | json-compact
 
-# Delete service credentials
+# Precisely delete a single record (--kind required)
 secrets delete -n refining --kind service --name legacy-mqtt
 
 # Delete a server record
 secrets delete -n ricnsmart --kind server --name i-old-server-id
 
+# Preview a bulk deletion (no database writes)
+secrets delete -n refining --dry-run
+secrets delete -n ricnsmart --kind server --dry-run
+
+# Bulk-delete every record in a namespace
+secrets delete -n ricnsmart
+
+# Bulk-delete all records of a given kind within a namespace
+secrets delete -n ricnsmart --kind server
 
 # JSON output
 secrets delete -n refining --kind service -o json
 ```
 
 ---
 
@@ -408,7 +499,9 @@ secrets run -n refining --kind service --name gitea -- printenv
 
 ### upgrade — self-update the CLI binary
 
-Downloads the latest version from the Gitea Release, verifies the matching `.sha256` digest, and replaces the current binary; needs neither a database connection nor the master key.
+Downloads the latest version from the release server, verifies the matching `.sha256` digest, and replaces the current binary; needs neither a database connection nor the master key.
+
+**Configuration**: `SECRETS_UPGRADE_URL` is required. Prefer setting it at **build time**: `SECRETS_UPGRADE_URL=https://... cargo build` (CI injects it automatically). Alternatively at **runtime**: put it in `.env` or `export` it before running.
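Digest verification of the downloaded binary could use the `sha2` crate already in Cargo.toml, roughly like this (a hedged sketch; `upgrade.rs` itself is not shown in this diff):

```rust
use sha2::{Digest, Sha256};

/// Verify a downloaded artifact against the hex digest from its `.sha256` file.
/// Returns an error instead of installing when the digests disagree.
fn verify_sha256(artifact: &[u8], expected_hex: &str) -> anyhow::Result<()> {
    let digest = Sha256::digest(artifact);
    let actual: String = digest.iter().map(|b| format!("{b:02x}")).collect();
    // .sha256 files often contain "<hex>  <filename>"; compare the hex part only.
    let expected = expected_hex.split_whitespace().next().unwrap_or("");
    anyhow::ensure!(
        actual.eq_ignore_ascii_case(expected),
        "sha256 mismatch: expected {expected}, got {actual}"
    );
    Ok(())
}
```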
 ```bash
 # Check whether a new version exists (without downloading)
@@ -420,6 +513,75 @@ secrets upgrade
 
 ---
 
+### export — bulk-export records
+
+Exports matching records (including decrypted plaintext secrets) to a file or stdout. Supports JSON, TOML, and YAML; the file format is inferred automatically from the extension. With `--no-secrets` no master key is needed.
+
+```bash
+# Parameter reference
+# -n / --namespace   refining | ricnsmart
+# --kind             server | service
+# --name             gitea | i-example0abcd1234efgh
+# --tag              aliyun | production (repeatable)
+# -q / --query       fuzzy keyword
+# --file <path>      output file path; format inferred from the extension (.json / .toml / .yaml / .yml)
+# --format           json | toml | yaml — explicit format (required when writing to stdout)
+# --no-secrets       skip secrets; no master key needed
+
+# Export everything to a JSON file
+secrets export --file backup.json
+
+# Export a namespace as TOML
+secrets export -n refining --file refining.toml
+
+# Export a kind as YAML
+secrets export -n refining --kind service --file services.yaml
+
+# Export filtered by tag
+secrets export --tag production --file prod.json
+
+# Export by fuzzy keyword
+secrets export -q mqtt --file mqtt.json
+
+# Export only the schema (no secrets, no master key needed)
+secrets export --no-secrets --file schema.json
+
+# Write to stdout (--format is required)
+secrets export -n refining --format yaml
+secrets export --format json | jq '.'
+```
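Extension-based format inference, as described above, could be as small as the following (an assumed sketch; the enum and function names are illustrative, not `export_cmd.rs` itself):

```rust
use std::path::Path;

#[derive(Clone, Copy, Debug, PartialEq)]
enum ExportFormat {
    Json,
    Toml,
    Yaml,
}

/// Infer the export format from the --file extension; stdout needs --format.
fn infer_format(path: &Path) -> Option<ExportFormat> {
    match path.extension()?.to_str()? {
        "json" => Some(ExportFormat::Json),
        "toml" => Some(ExportFormat::Toml),
        "yaml" | "yml" => Some(ExportFormat::Yaml),
        _ => None,
    }
}
```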
 ---
 
+### import — bulk-import records
+
+Reads records from an export file and writes them to the database, re-encrypting the secrets automatically. Supports JSON, TOML, and YAML; the file format is inferred automatically from the extension.
+
+```bash
+# Parameter reference
+# <file>          required input file path (format inferred from the extension)
+# --force         overwrite existing records on conflict (default: error out and stop)
+# --dry-run       preview the operations without writing to the database
+# -o / --output   text | json | json-compact
+
+# Import a JSON file (errors if a record already exists)
+secrets import backup.json
+
+# Import a TOML file, overwriting on conflict
+secrets import --force refining.toml
+
+# Import a YAML file, overwriting on conflict
+secrets import --force services.yaml
+
+# Preview the operations (no writes)
+secrets import --dry-run backup.json
+
+# Print the import summary as JSON
+secrets import backup.json -o json
+```
+
 ---
 
 ### config — configuration management (no master key needed)
 
 ```bash
@@ -458,7 +620,7 @@ secrets --db-url "postgres://..." search -n refining
 - Adding a new `kind`: just pass it to `add`; no code changes needed
 - Field naming: CLI short flags `-n`=namespace, `-m`=meta, `-s`=secret, `-q`=query, `-v`=verbose, `-f`=field, `-o`=output
 - Logging: user-visible output uses `println!`; debug/ops information uses `tracing::debug!`/`info!`/`warn!`/`error!`
-- Auditing: after `add`/`update`/`delete` succeed, call `audit::log()` to write to the `audit_log` table; failures only warn and never abort
+- Auditing: after `add`/`update`/`delete` succeed, call `audit::log_tx` to write to the `audit_log` table; failures only warn and never abort
 - Encryption: the `encrypted` column stores AES-256-GCM ciphertext; `add`/`update`/`search`/`delete` need the master key (loaded from the OS keychain after `secrets init`)
 - Output: read commands support text/json/json-compact/env via `OutputMode`; the write command `add` also supports `-o json`
 
@@ -466,6 +628,14 @@ secrets --db-url "postgres://..." search -n refining
 
 Before each commit, run the following checks locally and push only once **all of them pass**:
 
+Prefer:
+
+```bash
+./scripts/release-check.sh
+```
+
+It is equivalent to first checking the version number / tag, then running the formatting, lint, and test steps below.
+
 ### 1. Version number (as needed)
 
 If this change needs a release, first confirm that `version` in `Cargo.toml` has been bumped, so the tag CI creates does not collide with an existing version. **After bumping, also update `Cargo.lock`** (running `cargo build` syncs it automatically); otherwise `cargo clippy --locked` in CI fails because the lock file and manifest disagree. Check with git tag:
@@ -511,5 +681,6 @@ cargo fmt -- --check && cargo clippy -- -D warnings && cargo test
 |------|------|
 | `RUST_LOG` | log level, e.g. `secrets=debug`, `secrets=trace` (default warn) |
 | `USER` | source of the audit-log actor field; set automatically by the shell, usually needs no manual configuration |
+| `SECRETS_UPGRADE_URL` | release API address for `upgrade`; set at build time (cargo build) or at runtime (.env/export) |
 
 The database connection is persisted via `secrets config set-db` to `~/.config/secrets/config.toml`; configuring it through environment variables is not supported.
Cargo.lock (generated) — 24 changes

@@ -1836,7 +1836,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
 name = "secrets"
-version = "0.7.0"
+version = "0.9.3"
 dependencies = [
  "aes-gcm",
  "anyhow",
@@ -1844,6 +1844,7 @@ dependencies = [
  "chrono",
  "clap",
  "dirs",
+ "dotenvy",
  "flate2",
  "keyring",
  "rand 0.10.0",
@@ -1853,6 +1854,7 @@ dependencies = [
  "semver",
  "serde",
  "serde_json",
+ "serde_yaml",
  "sha2",
  "sqlx",
  "tar",
@@ -1982,6 +1984,19 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_yaml"
+version = "0.9.34+deprecated"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
+dependencies = [
+ "indexmap",
+ "itoa",
+ "ryu",
+ "serde",
+ "unsafe-libyaml",
+]
 
 [[package]]
 name = "sha1"
 version = "0.10.6"
@@ -2434,7 +2449,6 @@ dependencies = [
 "bytes",
 "libc",
 "mio",
- "parking_lot",
 "pin-project-lite",
 "signal-hook-registry",
 "socket2",
@@ -2681,6 +2695,12 @@ dependencies = [
 "subtle",
 ]
 
+[[package]]
+name = "unsafe-libyaml"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
 
 [[package]]
 name = "untrusted"
 version = "0.9.0"
Cargo.toml — 54 changes

@@ -1,31 +1,33 @@
 [package]
 name = "secrets"
-version = "0.7.0"
+version = "0.9.3"
 edition = "2024"
 
 [dependencies]
-aes-gcm = "0.10.3"
-anyhow = "1.0.102"
-argon2 = { version = "0.5.3", features = ["std"] }
-chrono = { version = "0.4.44", features = ["serde"] }
-clap = { version = "4.6.0", features = ["derive"] }
-dirs = "6.0.0"
-flate2 = "1.1.9"
-keyring = { version = "3.6.3", features = ["apple-native", "windows-native", "linux-native"] }
-rand = "0.10.0"
-reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "json"] }
-rpassword = "7.4.0"
-self-replace = "1.5.0"
-semver = "1.0.27"
-serde = { version = "1.0.228", features = ["derive"] }
-serde_json = "1.0.149"
-sha2 = "0.10.9"
-sqlx = { version = "0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] }
-tar = "0.4.44"
-tempfile = "3.19"
-tokio = { version = "1.50.0", features = ["full"] }
-toml = "1.0.7"
-tracing = "0.1"
-tracing-subscriber = { version = "0.3", features = ["env-filter"] }
-uuid = { version = "1.22.0", features = ["serde"] }
-zip = { version = "8.2.0", default-features = false, features = ["deflate"] }
+aes-gcm = "^0.10.3"
+anyhow = "^1.0.102"
+argon2 = { version = "^0.5.3", features = ["std"] }
+chrono = { version = "^0.4.44", features = ["serde"] }
+clap = { version = "^4.6.0", features = ["derive"] }
+dirs = "^6.0.0"
+dotenvy = "^0.15"
+flate2 = "^1.1.9"
+keyring = { version = "^3.6.3", features = ["apple-native", "windows-native", "linux-native"] }
+rand = "^0.10.0"
+reqwest = { version = "^0.12", default-features = false, features = ["rustls-tls", "json"] }
+rpassword = "^7.4.0"
+self-replace = "^1.5.0"
+semver = "^1.0.27"
+serde = { version = "^1.0.228", features = ["derive"] }
+serde_json = "^1.0.149"
+serde_yaml = "^0.9"
+sha2 = "^0.10.9"
+sqlx = { version = "^0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] }
+tar = "^0.4.44"
+tempfile = "^3.19"
+tokio = { version = "^1.50.0", features = ["rt-multi-thread", "macros", "fs", "io-util", "process", "signal"] }
+toml = "^1.0.7"
+tracing = "^0.1"
+tracing-subscriber = { version = "^0.3", features = ["env-filter"] }
+uuid = { version = "^1.22.0", features = ["serde"] }
+zip = { version = "^8.2.0", default-features = false, features = ["deflate"] }
README.md — 201 changes

@@ -2,7 +2,7 @@
 
 Cross-device secret and config management CLI, built on Rust + PostgreSQL 18.
 
-Stores server information and service credentials in one database so local tools and AI can read them as context. Sensitive data (the `encrypted` field) is stored AES-256-GCM encrypted; the master key is derived from a master password via Argon2id and kept in the system keychain.
+Stores server information and service credentials in one database so local tools and AI can read them as context. Each sensitive field is stored as its own row (the `secrets` child table); field name, type, and length are kept in plaintext so AI can make sense of them, and only the value itself is AES-256-GCM encrypted. The master key is derived from a master password via Argon2id and kept in the system keychain.
 
 ## Installation
 
@@ -19,11 +19,11 @@ cargo build --release
 # 1. Configure the database connection (verifies the connection before writing)
 secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
 
-# 2. Initialize the master key (prompts for the master password, derives it, stores it in the OS keychain)
+# 2. Initialize the master key (prompts for a master password of at least 8 characters, derives it, stores it in the OS keychain)
 secrets init
 ```
 
-The master password is never stored; it is only used to derive the master key. The same master password yields the same master key on every device (the salt lives in the database, generated by the first device).
+The master password is never stored and must be at least 8 characters; it is only used to derive the master key. The same master password yields the same master key on every device (the salt lives in the database, generated by the first device).
 
 **Master-key storage**: macOS → Keychain; Windows → Credential Manager; Linux → keyutils (session-scoped; run `secrets init` again after a reboot).
 
@@ -54,37 +54,41 @@ secrets search --sort updated --limit 10 --summary
 # Pinpoint a record (namespace + kind + name triple)
 secrets search -n refining --kind service --name gitea
 
-# Fetch the full record including secrets (JSON, easiest for AI to parse)
-secrets search -n refining --kind service --name gitea -o json --show-secrets
+# Fetch the full record (includes the secrets field schema: field_name, field_type, value_len; no master_key needed)
+secrets search -n refining --kind service --name gitea -o json
 
-# Extract a single field value directly (shortest path)
-secrets search -n refining --kind service --name gitea -f secret.token
+# Extract a single metadata field value directly (shortest path)
+secrets search -n refining --kind service --name gitea -f metadata.url
 
-# Extract several fields at once
+# Extract several metadata fields at once
 secrets search -n refining --kind service --name gitea \
-  -f metadata.url -f metadata.default_org -f secret.token
+  -f metadata.url -f metadata.default_org
 
+# When you need the secrets, switch to inject / run
+secrets inject -n refining --kind service --name gitea
+secrets run -n refining --kind service --name gitea -- printenv
 ```
 
-`-f secret.*` unlocks secrets automatically; no extra `--show-secrets` needed.
+`search` shows the field schema of metadata and secrets (field names, types, lengths) but never the secret values themselves; use `inject` / `run` when you need the values.
 
 ### Output formats
 
 | Scenario | Recommended command |
 |------|----------|
 | AI parsing / pipelines | `-o json` or `-o json-compact` |
-| Writing a `.env` file | `-o env --show-secrets` |
+| Injecting secrets into environment variables | `inject` / `run` |
 | Human reading | default `text` (enabled automatically on a TTY) |
 | Non-TTY (pipe/redirect) | automatic `json-compact` |
 
 Note: timestamps in `text` output are shown in the machine's local time zone; `json`/`json-compact` keep UTC (RFC3339 style) so scripts and AI can parse them reliably.
 
 ```bash
 # Pipe straight into jq (non-TTY auto json-compact)
 secrets search -n refining --kind service | jq '.[].name'
-secrets search -n refining --kind service --name gitea --show-secrets | jq '.secrets.token'
-
-# Export as a sourceable env file (single record)
-secrets search -n refining --kind service --name gitea -o env --show-secrets \
-  > ~/.config/gitea/config.env
+# When you need the secrets, use inject / run
+secrets inject -n refining --kind service --name gitea > ~/.config/gitea/secrets.env
+secrets run -n refining --kind service --name gitea -- ./deploy.sh
 ```
 
 ## Full command reference
 
@@ -99,6 +103,8 @@ secrets update --help
 secrets delete --help
 secrets config --help
 secrets upgrade --help   # check for and install CLI updates
+secrets export --help    # bulk export (JSON/TOML/YAML)
+secrets import --help    # bulk import (JSON/TOML/YAML)
 
 # ── search ──────────────────────────────────────────────────────────────────
 secrets search --summary --limit 20                      # discovery overview
@@ -106,32 +112,47 @@ secrets search -n refining --kind service                # by namespace + kin
 secrets search -n refining --kind service --name gitea   # exact lookup
 secrets search -q mqtt                                   # fuzzy keyword search
 secrets search --tag hongkong                            # filter by tag
-secrets search -n refining --kind service --name gitea -f secret.token        # extract a field
-secrets search -n refining --kind service --name gitea -o json --show-secrets # full JSON
+secrets search -n refining --kind service --name gitea -f metadata.url        # extract a metadata field
+secrets search -n refining --kind service --name gitea -o json                # full record (incl. secrets schema)
 secrets search --sort updated --limit 10 --summary       # recent changes
 secrets search -n refining --summary --limit 10 --offset 10   # pagination
 
 # ── add ──────────────────────────────────────────────────────────────────────
 secrets add -n refining --kind server --name my-server \
   --tag aliyun --tag shanghai \
-  -m ip=47.117.131.22 -m desc="Aliyun Shanghai ECS" \
+  -m ip=10.0.0.1 -m desc="Example ECS" \
   -s username=root -s ssh_key=@./keys/server.pem
 
+# Write a multi-line file straight into a nested secret field
+secrets add -n refining --kind server --name my-server \
+  -s credentials:content@./keys/server.pem
+
+# Write typed JSON secrets (booleans, numbers, arrays, objects)
+secrets add -n refining --kind service --name deploy-bot \
+  -s enabled:=true \
+  -s retry_count:=3 \
+  -s scopes:='["repo","workflow"]' \
+  -s extra:='{"region":"ap-east-1","verify_tls":true}'
+
 secrets add -n refining --kind service --name gitea \
   --tag gitea \
-  -m url=https://gitea.refining.dev -m default_org=refining \
+  -m url=https://code.example.com -m default_org=myorg \
   -s token=<token>
 
 # ── update ───────────────────────────────────────────────────────────────────
 secrets update -n refining --kind server --name my-server -m ip=10.0.0.1
 secrets update -n refining --kind service --name gitea --add-tag production -s token=<new>
 secrets update -n refining --kind service --name mqtt --remove-meta old_port --remove-secret old_key
+secrets update -n refining --kind server --name my-server --remove-secret credentials:content
 
 # ── delete ───────────────────────────────────────────────────────────────────
-secrets delete -n refining --kind service --name legacy-mqtt
+secrets delete -n refining --kind service --name legacy-mqtt   # delete a single record exactly (--kind required)
+secrets delete -n refining --dry-run                           # preview a bulk deletion (no writes)
+secrets delete -n ricnsmart                                    # bulk-delete an entire namespace
+secrets delete -n ricnsmart --kind server                      # bulk-delete a specific kind
 
 # ── init ─────────────────────────────────────────────────────────────────────
-secrets init   # master-key initialization (once per device; password-derived key goes to the keychain)
+secrets init   # master-key initialization (once per device; master password of at least 8 chars, derived key goes to the keychain)
 
 # ── config ───────────────────────────────────────────────────────────────────
 secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"   # verifies before writing
@@ -140,7 +161,21 @@ secrets config path   # print the config file path
 
 # ── upgrade ──────────────────────────────────────────────────────────────────
 secrets upgrade --check   # only check whether a new version exists
-secrets upgrade           # download, verify SHA-256, install the latest version (from the Gitea Release)
+secrets upgrade           # download, verify SHA-256, install the latest version (self-hostable via SECRETS_UPGRADE_URL)
 
+# ── export ────────────────────────────────────────────────────────────────────
+secrets export --file backup.json                           # full export to JSON
+secrets export -n refining --file refining.toml             # export a namespace as TOML
+secrets export -n refining --kind service --file svc.yaml   # export a kind as YAML
+secrets export --tag production --file prod.json            # filter by tag
+secrets export -q mqtt --file mqtt.json                     # fuzzy-search export
+secrets export --no-secrets --file schema.json              # schema only (no master key needed)
+secrets export -n refining --format yaml                    # write to stdout with an explicit format
+
+# ── import ────────────────────────────────────────────────────────────────────
+secrets import backup.json               # import (errors on conflict)
+secrets import --force refining.toml     # overwrite existing records on conflict
+secrets import --dry-run backup.yaml     # preview the operations (no writes)
+
 # ── debugging ─────────────────────────────────────────────────────────────────
 secrets --verbose search -q mqtt
@@ -149,18 +184,104 @@ RUST_LOG=secrets=trace secrets search
 
 ## Data model
 
-A single `secrets` table, auto-created on first connection; an `audit_log` table is created automatically as well, recording every write.
+Main table `entries` (namespace, kind, name, tags, metadata) + child table `secrets` (one row per encrypted field, with field_name, field_type, value_len, encrypted). Tables are auto-created on first connection, along with `audit_log`, `entries_history`, `secrets_history`, and the rest.
 
-| Field | Description |
-|------|------|
-| `namespace` | first-level isolation, e.g. `refining`, `ricnsmart` |
-| `kind` | record type, e.g. `server`, `service` (freely extensible) |
-| `name` | human-readable unique identifier |
-| `tags` | multi-dimensional tags, e.g. `["aliyun","hongkong"]` |
-| `metadata` | plaintext descriptive info (ip, desc, domains, ...) |
-| `encrypted` | sensitive credentials (ssh_key, password, token, ...), stored AES-256-GCM encrypted |
+| Location | Field | Description |
+|------|------|------|
+| entries | namespace | first-level isolation, e.g. `refining`, `ricnsmart` |
+| entries | kind | record type, e.g. `server`, `service`, `key` (freely extensible) |
+| entries | name | human-readable unique identifier |
+| entries | tags | multi-dimensional tags, e.g. `["aliyun","hongkong"]` |
+| entries | metadata | plaintext description (ip, desc, domains, key_ref, ...) |
+| secrets | field_name / field_type / value_len | plaintext, visible to search; lets AI infer what variables inject will produce |
+| secrets | encrypted | only the value itself, AES-256-GCM |
 
-`-m` / `--meta` writes to `metadata`, `-s` / `--secret` writes to `encrypted`, and `value=@file` reads the content from a file. Encryption and decryption use the master key (set up by `secrets init`).
+`-m` / `--meta` writes to `metadata`; `-s` / `--secret` writes individual rows in the `secrets` table. Supports `key=value`, `key=@file`, and `key:=<json>`, plus nested-field file writes like `credentials:content@./key.pem`; removal supports `--remove-secret credentials:content`. Encryption and decryption use the master key (set up by `secrets init`).
 
+**PEM sharing**: when several servers share one PEM, store it as a `kind=key` record and reference it via `metadata.key_ref`; rotation only needs to update the single key record and every reference picks it up automatically. See [AGENTS.md](AGENTS.md) for details.
+
+### `-m` / `--meta` JSON syntax cheat sheet
+
+`-m` and `-s` share the same parsing rules and differ only in where they write: `-m` goes to plaintext `metadata`, suitable for ports, switches, labels, descriptive config, and other non-sensitive information.
+
+| Target value | Example | Actually stored |
+|------|------|------|
+| plain string | `-m url=https://code.example.com` | `"https://code.example.com"` |
+| file content as string | `-m notes=@./service-notes.txt` | `"..."` |
+| boolean | `-m enabled:=true` | `true` |
+| number | `-m port:=3000` | `3000` |
+| `null` | `-m deprecated_at:=null` | `null` |
+| array | `-m domains:='["code.example.com","git.example.com"]'` | `["code.example.com","git.example.com"]` |
+| object | `-m tls:='{"enabled":true,"redirect_http":true}'` | `{"enabled":true,"redirect_http":true}` |
+| nested path + JSON | `-m deploy:strategy:='{"type":"rolling","batch":2}'` | `{"deploy":{"strategy":{"type":"rolling","batch":2}}}` |
+
+Common rules:
+
+- `=` stores the value as a string.
+- `:=` parses the value as JSON.
+- In a shell, wrap arrays and objects in single quotes as a whole.
+- Nested fields keep using colons as separators: `-m runtime:max_open_conns:=20`.
+
+Example: add a record with typed metadata
+
+```bash
+secrets add -n refining --kind service --name gitea \
+  -m url=https://code.example.com \
+  -m port:=3000 \
+  -m enabled:=true \
+  -m domains:='["code.example.com","git.example.com"]' \
+  -m tls:='{"enabled":true,"redirect_http":true}'
+```
+
+Example: update nested metadata on an existing record
+
+```bash
+secrets update -n refining --kind service --name gitea \
+  -m deploy:strategy:='{"type":"rolling","batch":2}' \
+  -m runtime:max_open_conns:=20
+```
+
+### `-s` / `--secret` JSON syntax cheat sheet
+
+When the value you want to store is not a plain string — `true`, `123`, `null`, an array, or an object — use `:=`; the right-hand side is parsed as JSON.
+
+| Target value | Example | Actually stored |
+|------|------|------|
+| plain string | `-s token=abc123` | `"abc123"` |
+| file content as string | `-s ssh_key=@./id_ed25519` | `"-----BEGIN ..."` |
+| boolean | `-s enabled:=true` | `true` |
+| number | `-s retry_count:=3` | `3` |
+| `null` | `-s deprecated_at:=null` | `null` |
+| array | `-s scopes:='["repo","workflow"]'` | `["repo","workflow"]` |
+| object | `-s extra:='{"region":"ap-east-1","verify_tls":true}'` | `{"region":"ap-east-1","verify_tls":true}` |
+| nested path + JSON | `-s auth:policy:='{"mfa":true,"ttl":3600}'` | `{"auth":{"policy":{"mfa":true,"ttl":3600}}}` |
+
+Common rules:
+
+- `=` stores the value as a string, with no JSON parsing.
+- `:=` parses the value as JSON — use it for booleans, numbers, arrays, objects, and `null`.
+- In a shell, quote objects and arrays as a whole, preferably with single quotes: `-s flags:='["a","b"]'`.
+- Nested fields keep using colons as separators: `-s credentials:enabled:=true`.
+- If you really want to store a JSON string literal, write `-s note:='"hello"'`; for most string cases plain `=` is more intuitive.
+
+Example: add a record combining string, file, boolean, array, and object values
+
+```bash
+secrets add -n refining --kind service --name deploy-bot \
+  -s token=abc123 \
+  -s ssh_key=@./keys/deploy-bot.pem \
+  -s enabled:=true \
+  -s scopes:='["repo","workflow"]' \
+  -s policy:='{"ttl":3600,"mfa":true}'
+```
+
+Example: update nested JSON fields on an existing record
+
+```bash
+secrets update -n refining --kind service --name deploy-bot \
+  -s auth:config:='{"issuer":"gitea","rotate":true}' \
+  -s auth:retry:=5
+```
 
 ## Audit log
 
@@ -181,18 +302,22 @@ src/
   main.rs     # CLI entry point (clap), with after_help examples per subcommand
   output.rs   # OutputMode enum + TTY detection
   config.rs   # config read/write (~/.config/secrets/config.toml)
-  db.rs       # connection pool + auto-migrate (secrets + audit_log + kv_config)
+  db.rs       # connection pool + auto-migrate (entries + secrets + entries_history + secrets_history + audit_log + kv_config)
   crypto.rs   # AES-256-GCM encryption/decryption, Argon2id derivation, OS keychain
-  models.rs   # Secret struct
+  models.rs   # Entry + SecretField structs
   audit.rs    # audit-log writes (audit_log table)
   commands/
     init.rs       # master-key initialization (first run / new device)
-    add.rs        # upsert, supports -o json
+    add.rs        # upserts entries + secrets rows, supports -o json
     config.rs     # config set-db/show/path
-    search.rs     # multi-criteria queries, supports -f/-o/--summary/--limit/--offset/--sort
-    delete.rs     # deletion
-    update.rs     # incremental updates (merges tags/metadata/encrypted)
+    search.rs     # multi-criteria queries, shows the secrets schema (-f/-o/--summary/--limit/--offset/--sort)
+    delete.rs     # deletion (CASCADE-deletes secrets)
+    update.rs     # incremental updates (tags/metadata + row-level UPSERT/DELETE on secrets)
+    rollback.rs   # rollback / history: restore by entry_version
+    run.rs        # inject / run, per-field decryption + key_ref resolution
     upgrade.rs    # self-update from the Gitea Release
+    export_cmd.rs # export: bulk export, JSON/TOML/YAML, includes decrypted plaintext
+    import_cmd.rs # import: bulk import, conflict detection, dry-run, re-encrypts on write
 scripts/
   setup-gitea-actions.sh # configures Gitea Actions variables and secrets
 ```
scripts/release-check.sh (new executable file) — 23 lines

@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$repo_root"
+
+version="$(grep -m1 '^version' Cargo.toml | sed 's/.*"\(.*\)".*/\1/')"
+tag="secrets-${version}"
+
+echo "==> Current version: ${version}"
+echo "==> Checking for an existing tag: ${tag}"
+
+if git rev-parse "refs/tags/${tag}" >/dev/null 2>&1; then
+  echo "Error: tag ${tag} already exists"
+  echo "Bump the version in Cargo.toml first, then run cargo build to sync Cargo.lock."
+  exit 1
+fi
+
+echo "==> No duplicate tag found; running checks"
+cargo fmt -- --check
+cargo clippy --locked -- -D warnings
+cargo test --locked
src/audit.rs — 41 changes

@@ -1,5 +1,10 @@
 use serde_json::Value;
-use sqlx::{PgPool, Postgres, Transaction};
+use sqlx::{Postgres, Transaction};
 
+/// Return the current OS user as the audit actor (falls back to empty string).
+pub fn current_actor() -> String {
+    std::env::var("USER").unwrap_or_default()
+}
 
 /// Write an audit entry within an existing transaction.
 pub async fn log_tx(
@@ -10,7 +15,7 @@ pub async fn log_tx(
     name: &str,
     detail: Value,
 ) {
-    let actor = std::env::var("USER").unwrap_or_default();
+    let actor = current_actor();
     let result: Result<_, sqlx::Error> = sqlx::query(
         "INSERT INTO audit_log (action, namespace, kind, name, detail, actor) \
          VALUES ($1, $2, $3, $4, $5, $6)",
@@ -30,35 +35,3 @@ pub async fn log_tx(
         tracing::debug!(action, namespace, kind, name, actor, "audit logged");
     }
 }
 
-/// Write an audit entry using the pool (fire-and-forget, non-fatal).
-/// Kept for future use or scenarios without an active transaction.
-#[allow(dead_code)]
-pub async fn log(
-    pool: &PgPool,
-    action: &str,
-    namespace: &str,
-    kind: &str,
-    name: &str,
-    detail: Value,
-) {
-    let actor = std::env::var("USER").unwrap_or_default();
-    let result: Result<_, sqlx::Error> = sqlx::query(
-        "INSERT INTO audit_log (action, namespace, kind, name, detail, actor) \
-         VALUES ($1, $2, $3, $4, $5, $6)",
-    )
-    .bind(action)
-    .bind(namespace)
-    .bind(kind)
-    .bind(name)
-    .bind(&detail)
-    .bind(&actor)
-    .execute(pool)
-    .await;
-
-    if let Err(e) = result {
-        tracing::warn!(error = %e, "failed to write audit log");
-    } else {
-        tracing::debug!(action, namespace, kind, name, actor, "audit logged");
-    }
-}
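Call-site shape for the surviving in-transaction variant (illustrative: the first argument is assumed to be the open transaction, and the `detail` keys are only consistent with the audit_log schema documented in AGENTS.md, not taken from real code):

```rust
// Inside a write command, after the row changes succeed but before commit:
let mut tx = pool.begin().await?;
// ... INSERT/UPDATE/DELETE on entries / secrets happens here ...
audit::log_tx(
    &mut tx,
    "update",
    "refining",
    "service",
    "gitea",
    serde_json::json!({ "meta_keys": ["url"], "secret_keys": ["token"] }),
)
.await; // non-fatal by design: log_tx warns on failure instead of returning Err
tx.commit().await?;
```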
src/commands/add.rs

@@ -5,13 +5,18 @@ use std::fs;
 
 use crate::crypto;
 use crate::db;
-use crate::output::OutputMode;
+use crate::models::EntryRow;
+use crate::output::{OutputMode, print_json};
 
-/// Parse "key=value" or "key:=<json>" entries.
-/// - `key=value` → stores the literal string `value`
-/// - `key:=<json>` → parses `<json>` as a typed JSON value (number, bool, null, array, object)
-/// - `value=@file` → reads the file content as a string (only for `=` form)
-pub(crate) fn parse_kv(entry: &str) -> Result<(String, Value)> {
+// ── Key/value parsing helpers (shared with update.rs) ───────────────────────
+
+/// Parse secret / metadata entries into a nested key path and JSON value.
+/// - `key=value` → stores the literal string `value`
+/// - `key:=<json>` → parses `<json>` as a typed JSON value
+/// - `key=@file` → reads the file content as a string
+/// - `a:b=value` → writes nested fields: `{ "a": { "b": "value" } }`
+/// - `a:b@./file.txt` → shorthand for nested file reads without manual JSON escaping
+pub(crate) fn parse_kv(entry: &str) -> Result<(Vec<String>, Value)> {
     // Typed JSON form: key:=<json>
     if let Some((key, json_str)) = entry.split_once(":=") {
         let val: Value = serde_json::from_str(json_str).map_err(|e| {
@@ -21,36 +26,187 @@ pub(crate) fn parse_kv(entry: &str) -> Result<(String, Value)> {
                 e
             )
         })?;
-        return Ok((key.to_string(), val));
+        return Ok((parse_key_path(key)?, val));
     }
 
     // Plain string form: key=value or key=@file
-    let (key, raw_val) = entry.split_once('=').ok_or_else(|| {
-        anyhow::anyhow!(
-            "Invalid format '{}'. Expected: key=value, key=@file, or key:=<json>",
-            entry
-        )
-    })?;
-
-    let value = if let Some(path) = raw_val.strip_prefix('@') {
-        fs::read_to_string(path)
-            .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?
-    } else {
-        raw_val.to_string()
-    };
-
-    Ok((key.to_string(), Value::String(value)))
+    if let Some((key, raw_val)) = entry.split_once('=') {
+        let value = if let Some(path) = raw_val.strip_prefix('@') {
+            fs::read_to_string(path)
+                .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?
+        } else {
+            raw_val.to_string()
+        };
+        return Ok((parse_key_path(key)?, Value::String(value)));
+    }
+
+    // Shorthand file form: nested:key@file
+    if let Some((key, path)) = entry.split_once('@') {
+        let value = fs::read_to_string(path)
+            .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?;
+        return Ok((parse_key_path(key)?, Value::String(value)));
+    }
+
+    anyhow::bail!(
+        "Invalid format '{}'. Expected: key=value, key=@file, nested:key@file, or key:=<json>",
+        entry
+    )
 }
 
 pub(crate) fn build_json(entries: &[String]) -> Result<Value> {
     let mut map = Map::new();
     for entry in entries {
-        let (key, value) = parse_kv(entry)?;
-        map.insert(key, value);
+        let (path, value) = parse_kv(entry)?;
+        insert_path(&mut map, &path, value)?;
     }
     Ok(Value::Object(map))
 }
 
+pub(crate) fn key_path_to_string(path: &[String]) -> String {
+    path.join(":")
+}
+
+pub(crate) fn collect_key_paths(entries: &[String]) -> Result<Vec<String>> {
+    entries
+        .iter()
+        .map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path)))
+        .collect()
+}
+
+pub(crate) fn collect_field_paths(entries: &[String]) -> Result<Vec<String>> {
+    entries
+        .iter()
+        .map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path)))
+        .collect()
+}
+
+pub(crate) fn parse_key_path(key: &str) -> Result<Vec<String>> {
+    let path: Vec<String> = key
+        .split(':')
+        .map(str::trim)
+        .map(ToOwned::to_owned)
+        .collect();
+
+    if path.is_empty() || path.iter().any(|part| part.is_empty()) {
+        anyhow::bail!(
+            "Invalid key path '{}'. Use non-empty segments like 'credentials:content'.",
+            key
+        );
+    }
+
+    Ok(path)
+}
+
+pub(crate) fn insert_path(
+    map: &mut Map<String, Value>,
+    path: &[String],
+    value: Value,
+) -> Result<()> {
+    if path.is_empty() {
+        anyhow::bail!("Key path cannot be empty");
+    }
+
+    if path.len() == 1 {
+        map.insert(path[0].clone(), value);
+        return Ok(());
+    }
+
+    let head = path[0].clone();
+    let tail = &path[1..];
+
+    match map.entry(head.clone()) {
+        serde_json::map::Entry::Vacant(entry) => {
+            let mut child = Map::new();
+            insert_path(&mut child, tail, value)?;
+            entry.insert(Value::Object(child));
+        }
+        serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() {
+            Value::Object(child) => insert_path(child, tail, value)?,
+            _ => {
+                anyhow::bail!(
+                    "Cannot set nested key '{}' because '{}' is already a non-object value",
+                    key_path_to_string(path),
+                    head
+                );
+            }
+        },
+    }
+
+    Ok(())
+}
+
+pub(crate) fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Result<bool> {
+    if path.is_empty() {
+        anyhow::bail!("Key path cannot be empty");
+    }
+
+    if path.len() == 1 {
+        return Ok(map.remove(&path[0]).is_some());
+    }
+
+    let Some(value) = map.get_mut(&path[0]) else {
+        return Ok(false);
+    };
+
+    let Value::Object(child) = value else {
+        return Ok(false);
+    };
+
+    let removed = remove_path(child, &path[1..])?;
+    if child.is_empty() {
+        map.remove(&path[0]);
+    }
+
+    Ok(removed)
+}
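To make the nested-path behavior concrete, here is an illustrative test of the helpers above, assuming exactly the signatures shown in this diff:

```rust
#[cfg(test)]
mod nested_path_tests {
    use super::*;
    use serde_json::{json, Map, Value};

    #[test]
    fn insert_and_remove_nested_paths() {
        let mut map = Map::new();
        // "credentials:content=xyz" becomes {"credentials":{"content":"xyz"}}
        let (path, value) = parse_kv("credentials:content=xyz").unwrap();
        insert_path(&mut map, &path, value).unwrap();
        assert_eq!(
            Value::Object(map.clone()),
            json!({"credentials": {"content": "xyz"}})
        );

        // Removing the leaf also prunes the now-empty parent object.
        assert!(remove_path(&mut map, &path).unwrap());
        assert!(map.is_empty());
    }
}
```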
// ── field_type inference and value_len ──────────────────────────────────────
|
||||
|
||||
/// Infer the field type string from a JSON value.
|
||||
pub(crate) fn infer_field_type(v: &Value) -> &'static str {
|
||||
match v {
|
||||
Value::String(_) => "string",
|
||||
Value::Number(_) => "number",
|
||||
Value::Bool(_) => "boolean",
|
||||
Value::Null => "string",
|
||||
Value::Array(_) | Value::Object(_) => "json",
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the plaintext length of a JSON value (chars for string, serialized length otherwise).
|
||||
pub(crate) fn compute_value_len(v: &Value) -> i32 {
|
||||
match v {
|
||||
Value::String(s) => s.chars().count() as i32,
|
||||
Value::Null => 0,
|
||||
other => other.to_string().chars().count() as i32,
|
||||
}
|
||||
}
|
||||
|
||||
/// Flatten a (potentially nested) JSON object into dot-separated field entries.
|
||||
/// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` →
|
||||
/// `[("credentials.type", "ssh"), ("credentials.content", "...")]`
|
||||
/// Top-level non-object values are emitted directly.
|
||||
pub(crate) fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> {
|
||||
match value {
|
||||
Value::Object(map) => {
|
||||
let mut out = Vec::new();
|
||||
for (k, v) in map {
|
||||
let full_key = if prefix.is_empty() {
|
||||
k.clone()
|
||||
} else {
|
||||
format!("{}.{}", prefix, k)
|
||||
};
|
||||
out.extend(flatten_json_fields(&full_key, v));
|
||||
}
|
||||
out
|
||||
}
|
||||
other => vec![(prefix.to_string(), other.clone())],
|
||||
}
|
||||
}
|
||||
|
||||
// ── Add command ──────────────────────────────────────────────────────────────

pub struct AddArgs<'a> {
    pub namespace: &'a str,
    pub kind: &'a str,
@@ -64,34 +220,17 @@ pub struct AddArgs<'a> {
pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    let metadata = build_json(args.meta_entries)?;
    let secret_json = build_json(args.secret_entries)?;
    let encrypted_bytes = crypto::encrypt_json(master_key, &secret_json)?;

    tracing::debug!(args.namespace, args.kind, args.name, "upserting record");
    tracing::debug!(args.namespace, args.kind, args.name, "upserting entry");

    let meta_keys: Vec<&str> = args
        .meta_entries
        .iter()
        .filter_map(|s| s.split_once(['=', ':']).map(|(k, _)| k))
        .collect();
    let secret_keys: Vec<&str> = args
        .secret_entries
        .iter()
        .filter_map(|s| s.split_once(['=', ':']).map(|(k, _)| k))
        .collect();
    let meta_keys = collect_key_paths(args.meta_entries)?;
    let secret_keys = collect_key_paths(args.secret_entries)?;

    let mut tx = pool.begin().await?;

    // Snapshot existing row into history before overwriting (if it exists).
    #[derive(sqlx::FromRow)]
    struct ExistingRow {
        id: uuid::Uuid,
        version: i64,
        tags: Vec<String>,
        metadata: serde_json::Value,
        encrypted: Vec<u8>,
    }
    let existing: Option<ExistingRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted FROM secrets \
    // Upsert the entry row (tags + metadata).
    let existing: Option<EntryRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata FROM entries \
         WHERE namespace = $1 AND kind = $2 AND name = $3",
    )
    .bind(args.namespace)
@@ -100,11 +239,12 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
    .fetch_optional(&mut *tx)
    .await?;

    if let Some(ex) = existing
        && let Err(e) = db::snapshot_history(
    // Snapshot the current entry state before overwriting.
    if let Some(ref ex) = existing
        && let Err(e) = db::snapshot_entry_history(
            &mut tx,
            db::SnapshotParams {
                secret_id: ex.id,
            db::EntrySnapshotParams {
                entry_id: ex.id,
                namespace: args.namespace,
                kind: args.kind,
                name: args.name,
@@ -112,25 +252,24 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
                action: "add",
                tags: &ex.tags,
                metadata: &ex.metadata,
                encrypted: &ex.encrypted,
            },
        )
        .await
    {
        tracing::warn!(error = %e, "failed to snapshot history before upsert");
        tracing::warn!(error = %e, "failed to snapshot entry history before upsert");
    }

    sqlx::query(
    let entry_id: uuid::Uuid = sqlx::query_scalar(
        r#"
        INSERT INTO secrets (namespace, kind, name, tags, metadata, encrypted, version, updated_at)
        VALUES ($1, $2, $3, $4, $5, $6, 1, NOW())
        INSERT INTO entries (namespace, kind, name, tags, metadata, version, updated_at)
        VALUES ($1, $2, $3, $4, $5, 1, NOW())
        ON CONFLICT (namespace, kind, name)
        DO UPDATE SET
            tags = EXCLUDED.tags,
            metadata = EXCLUDED.metadata,
            encrypted = EXCLUDED.encrypted,
            version = secrets.version + 1,
            version = entries.version + 1,
            updated_at = NOW()
        RETURNING id
        "#,
    )
    .bind(args.namespace)
@@ -138,10 +277,79 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
    .bind(args.name)
    .bind(args.tags)
    .bind(&metadata)
    .bind(&encrypted_bytes)
    .execute(&mut *tx)
    .fetch_one(&mut *tx)
    .await?;

    let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
        .bind(entry_id)
        .fetch_one(&mut *tx)
        .await?;

    // Snapshot existing secret fields before replacing.
    if existing.is_some() {
        #[derive(sqlx::FromRow)]
        struct ExistingField {
            id: uuid::Uuid,
            field_name: String,
            field_type: String,
            value_len: i32,
            encrypted: Vec<u8>,
        }
        let existing_fields: Vec<ExistingField> = sqlx::query_as(
            "SELECT id, field_name, field_type, value_len, encrypted \
             FROM secrets WHERE entry_id = $1",
        )
        .bind(entry_id)
        .fetch_all(&mut *tx)
        .await?;

        for f in &existing_fields {
            if let Err(e) = db::snapshot_secret_history(
                &mut tx,
                db::SecretSnapshotParams {
                    entry_id,
                    secret_id: f.id,
                    entry_version: new_entry_version - 1,
                    field_name: &f.field_name,
                    field_type: &f.field_type,
                    value_len: f.value_len,
                    encrypted: &f.encrypted,
                    action: "add",
                },
            )
            .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field history");
            }
        }

        // Delete existing secret fields so we can re-insert the full set.
        sqlx::query("DELETE FROM secrets WHERE entry_id = $1")
            .bind(entry_id)
            .execute(&mut *tx)
            .await?;
    }

    // Insert new secret fields.
    let flat_fields = flatten_json_fields("", &secret_json);
    for (field_name, field_value) in &flat_fields {
        let field_type = infer_field_type(field_value);
        let value_len = compute_value_len(field_value);
        let encrypted = crypto::encrypt_json(master_key, field_value)?;

        sqlx::query(
            "INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \
             VALUES ($1, $2, $3, $4, $5)",
        )
        .bind(entry_id)
        .bind(field_name)
        .bind(field_type)
        .bind(value_len)
        .bind(&encrypted)
        .execute(&mut *tx)
        .await?;
    }

    crate::audit::log_tx(
        &mut tx,
        "add",
@@ -169,11 +377,8 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res
    });

    match args.output {
        OutputMode::Json => {
            println!("{}", serde_json::to_string_pretty(&result_json)?);
        }
        OutputMode::JsonCompact => {
            println!("{}", serde_json::to_string(&result_json)?);
        OutputMode::Json | OutputMode::JsonCompact => {
            print_json(&result_json, &args.output)?;
        }
        _ => {
            println!("Added: [{}/{}] {}", args.namespace, args.kind, args.name);
@@ -191,3 +396,112 @@ pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Res

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::{
        build_json, compute_value_len, flatten_json_fields, infer_field_type, key_path_to_string,
        parse_kv, remove_path,
    };
    use serde_json::Value;
    use std::fs;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};

    fn temp_file_path(name: &str) -> PathBuf {
        let nanos = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock should be after unix epoch")
            .as_nanos();
        std::env::temp_dir().join(format!("secrets-{name}-{nanos}.txt"))
    }

    #[test]
    fn parse_nested_file_shorthand() {
        let path = temp_file_path("ssh-key");
        fs::write(&path, "line1\nline2\n").expect("should write temp file");

        let entry = format!("credentials:content@{}", path.display());
        let (path_parts, value) = parse_kv(&entry).expect("should parse nested file shorthand");

        assert_eq!(key_path_to_string(&path_parts), "credentials:content");
        assert_eq!(value, serde_json::Value::String("line1\nline2\n".into()));

        fs::remove_file(path).expect("should remove temp file");
    }

    #[test]
    fn build_nested_json_from_mixed_entries() {
        let payload = vec![
            "credentials:type=ssh".to_string(),
            "credentials:enabled:=true".to_string(),
            "username=root".to_string(),
        ];

        let value = build_json(&payload).expect("should build nested json");

        assert_eq!(
            value,
            serde_json::json!({
                "credentials": {
                    "type": "ssh",
                    "enabled": true
                },
                "username": "root"
            })
        );
    }

    #[test]
    fn remove_nested_path_prunes_empty_parents() {
        let mut value = serde_json::json!({
            "credentials": {
                "content": "pem-data"
            },
            "username": "root"
        });

        let map = match &mut value {
            Value::Object(map) => map,
            _ => panic!("expected object"),
        };

        let removed = remove_path(map, &["credentials".to_string(), "content".to_string()])
            .expect("should remove nested field");

        assert!(removed);
        assert_eq!(value, serde_json::json!({ "username": "root" }));
    }

    #[test]
    fn flatten_json_fields_nested() {
        let v = serde_json::json!({
            "username": "root",
            "credentials": {
                "type": "ssh",
                "content": "pem-data"
            }
        });
        let mut fields = flatten_json_fields("", &v);
        fields.sort_by(|a, b| a.0.cmp(&b.0));

        assert_eq!(fields[0].0, "credentials.content");
        assert_eq!(fields[1].0, "credentials.type");
        assert_eq!(fields[2].0, "username");
    }

    #[test]
    fn infer_field_types() {
        assert_eq!(infer_field_type(&Value::String("x".into())), "string");
        assert_eq!(infer_field_type(&serde_json::json!(42)), "number");
        assert_eq!(infer_field_type(&Value::Bool(true)), "boolean");
        assert_eq!(infer_field_type(&serde_json::json!(["a"])), "json");
    }

    #[test]
    fn compute_value_len_string() {
        assert_eq!(compute_value_len(&Value::String("root".into())), 4);
        assert_eq!(compute_value_len(&Value::Null), 0);
        assert_eq!(compute_value_len(&serde_json::json!(1234)), 4);
    }
}

@@ -15,7 +15,7 @@ pub async fn run(action: crate::ConfigAction) -> Result<()> {
                database_url: Some(url.clone()),
            };
            config::save_config(&cfg)?;
            println!("Database URL saved to: {}", config_path().display());
            println!("Database URL saved to: {}", config_path()?.display());
            println!(" {}", mask_password(&url));
        }
        crate::ConfigAction::Show => {
@@ -23,7 +23,7 @@ pub async fn run(action: crate::ConfigAction) -> Result<()> {
            match cfg.database_url {
                Some(url) => {
                    println!("database_url = {}", mask_password(&url));
                    println!("config file: {}", config_path().display());
                    println!("config file: {}", config_path()?.display());
                }
                None => {
                    println!("Database URL not configured.");
@@ -32,7 +32,7 @@ pub async fn run(action: crate::ConfigAction) -> Result<()> {
                }
            }
        }
        crate::ConfigAction::Path => {
            println!("{}", config_path().display());
            println!("{}", config_path()?.display());
        }
    }
    Ok(())

@@ -1,33 +1,64 @@
use anyhow::Result;
use serde_json::{Value, json};
use sqlx::{FromRow, PgPool};
use serde_json::json;
use sqlx::PgPool;
use uuid::Uuid;

use crate::db;
use crate::output::OutputMode;
use crate::models::{EntryRow, SecretFieldRow};
use crate::output::{OutputMode, print_json};

#[derive(FromRow)]
struct DeleteRow {
    id: Uuid,
    version: i64,
    tags: Vec<String>,
    metadata: Value,
    encrypted: Vec<u8>,
pub struct DeleteArgs<'a> {
    pub namespace: &'a str,
    /// Kind filter. Required when --name is given; optional for bulk deletes.
    pub kind: Option<&'a str>,
    /// Exact record name. When None, bulk-delete all matching records.
    pub name: Option<&'a str>,
    /// Preview without writing to the database (bulk mode only).
    pub dry_run: bool,
    pub output: OutputMode,
}

pub async fn run(
// ── Internal row type used for bulk queries ────────────────────────────────

#[derive(Debug, sqlx::FromRow)]
struct FullEntryRow {
    pub id: Uuid,
    pub version: i64,
    pub kind: String,
    pub name: String,
    pub metadata: serde_json::Value,
    pub tags: Vec<String>,
}

// ── Entry point ────────────────────────────────────────────────────────────

pub async fn run(pool: &PgPool, args: DeleteArgs<'_>) -> Result<()> {
    match args.name {
        Some(name) => {
            let kind = args
                .kind
                .ok_or_else(|| anyhow::anyhow!("--kind is required when --name is specified"))?;
            delete_one(pool, args.namespace, kind, name, args.output).await
        }
        None => delete_bulk(pool, args.namespace, args.kind, args.dry_run, args.output).await,
    }
}

// ── Single-record delete (original behaviour) ─────────────────────────────

async fn delete_one(
    pool: &PgPool,
    namespace: &str,
    kind: &str,
    name: &str,
    output: OutputMode,
) -> Result<()> {
    tracing::debug!(namespace, kind, name, "deleting record");
    tracing::debug!(namespace, kind, name, "deleting entry");

    let mut tx = pool.begin().await?;

    let row: Option<DeleteRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted FROM secrets \
    let row: Option<EntryRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata FROM entries \
         WHERE namespace = $1 AND kind = $2 AND name = $3 \
         FOR UPDATE",
    )
@@ -39,30 +70,178 @@ pub async fn run(

    let Some(row) = row else {
        tx.rollback().await?;
        tracing::warn!(namespace, kind, name, "record not found for deletion");
        tracing::warn!(namespace, kind, name, "entry not found for deletion");
        let v = json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name});
        match output {
            OutputMode::Json => println!(
                "{}",
                serde_json::to_string_pretty(
                    &json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name})
                )?
            ),
            OutputMode::JsonCompact => println!(
                "{}",
                serde_json::to_string(
                    &json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name})
                )?
            ),
            _ => println!("Not found: [{}/{}] {}", namespace, kind, name),
            OutputMode::Text => println!("Not found: [{}/{}] {}", namespace, kind, name),
            ref mode => print_json(&v, mode)?,
        }
        return Ok(());
    };

    // Snapshot before physical delete so the row can be restored via rollback.
    if let Err(e) = db::snapshot_history(
        &mut tx,
        db::SnapshotParams {
            secret_id: row.id,
    snapshot_and_delete(&mut tx, namespace, kind, name, &row).await?;

    crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await;
    tx.commit().await?;

    let v = json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name});
    match output {
        OutputMode::Text => println!("Deleted: [{}/{}] {}", namespace, kind, name),
        ref mode => print_json(&v, mode)?,
    }
    Ok(())
}

// ── Bulk delete by namespace (+ optional kind filter) ─────────────────────

async fn delete_bulk(
    pool: &PgPool,
    namespace: &str,
    kind: Option<&str>,
    dry_run: bool,
    output: OutputMode,
) -> Result<()> {
    tracing::debug!(namespace, ?kind, dry_run, "bulk-deleting entries");

    let rows: Vec<FullEntryRow> = if let Some(k) = kind {
        sqlx::query_as(
            "SELECT id, version, kind, name, metadata, tags FROM entries \
             WHERE namespace = $1 AND kind = $2 \
             ORDER BY name",
        )
        .bind(namespace)
        .bind(k)
        .fetch_all(pool)
        .await?
    } else {
        sqlx::query_as(
            "SELECT id, version, kind, name, metadata, tags FROM entries \
             WHERE namespace = $1 \
             ORDER BY kind, name",
        )
        .bind(namespace)
        .fetch_all(pool)
        .await?
    };

    if rows.is_empty() {
        let v = json!({
            "action": "noop",
            "namespace": namespace,
            "kind": kind,
            "deleted": 0,
            "dry_run": dry_run
        });
        match output {
            OutputMode::Text => println!(
                "No records found in namespace \"{}\"{}.",
                namespace,
                kind.map(|k| format!(" with kind \"{}\"", k))
                    .unwrap_or_default()
            ),
            ref mode => print_json(&v, mode)?,
        }
        return Ok(());
    }

    if dry_run {
        let count = rows.len();
        match output {
            OutputMode::Text => {
                println!(
                    "dry-run: would delete {} record(s) in namespace \"{}\":",
                    count, namespace
                );
                for r in &rows {
                    println!(" [{}/{}] {}", namespace, r.kind, r.name);
                }
            }
            ref mode => {
                let items: Vec<_> = rows
                    .iter()
                    .map(|r| json!({"namespace": namespace, "kind": r.kind, "name": r.name}))
                    .collect();
                print_json(
                    &json!({
                        "action": "dry_run",
                        "namespace": namespace,
                        "kind": kind,
                        "would_delete": count,
                        "entries": items
                    }),
                    mode,
                )?;
            }
        }
        return Ok(());
    }

    let mut deleted = Vec::with_capacity(rows.len());

    for row in &rows {
        let entry_row = EntryRow {
            id: row.id,
            version: row.version,
            tags: row.tags.clone(),
            metadata: row.metadata.clone(),
        };
        let mut tx = pool.begin().await?;
        snapshot_and_delete(&mut tx, namespace, &row.kind, &row.name, &entry_row).await?;
        crate::audit::log_tx(
            &mut tx,
            "delete",
            namespace,
            &row.kind,
            &row.name,
            json!({"bulk": true}),
        )
        .await;
        tx.commit().await?;

        deleted.push(json!({"namespace": namespace, "kind": row.kind, "name": row.name}));
        tracing::info!(namespace, kind = %row.kind, name = %row.name, "bulk deleted");
    }

    let count = deleted.len();
    match output {
        OutputMode::Text => {
            for item in &deleted {
                println!(
                    "Deleted: [{}/{}] {}",
                    item["namespace"].as_str().unwrap_or(""),
                    item["kind"].as_str().unwrap_or(""),
                    item["name"].as_str().unwrap_or("")
                );
            }
            println!("Total: {} record(s) deleted.", count);
        }
        ref mode => print_json(
            &json!({
                "action": "deleted",
                "namespace": namespace,
                "kind": kind,
                "deleted": count,
                "entries": deleted
            }),
            mode,
        )?,
    }
    Ok(())
}

// ── Shared helper: snapshot history then DELETE ────────────────────────────

async fn snapshot_and_delete(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    namespace: &str,
    kind: &str,
    name: &str,
    row: &EntryRow,
) -> Result<()> {
    if let Err(e) = db::snapshot_entry_history(
        tx,
        db::EntrySnapshotParams {
            entry_id: row.id,
            namespace,
            kind,
            name,
@@ -70,38 +249,45 @@ pub async fn run(
            action: "delete",
            tags: &row.tags,
            metadata: &row.metadata,
            encrypted: &row.encrypted,
        },
    )
    .await
    {
        tracing::warn!(error = %e, "failed to snapshot history before delete");
        tracing::warn!(error = %e, "failed to snapshot entry history before delete");
    }

    sqlx::query("DELETE FROM secrets WHERE id = $1")
    let fields: Vec<SecretFieldRow> = sqlx::query_as(
        "SELECT id, field_name, field_type, value_len, encrypted \
         FROM secrets WHERE entry_id = $1",
    )
    .bind(row.id)
    .fetch_all(&mut **tx)
    .await?;

    for f in &fields {
        if let Err(e) = db::snapshot_secret_history(
            tx,
            db::SecretSnapshotParams {
                entry_id: row.id,
                secret_id: f.id,
                entry_version: row.version,
                field_name: &f.field_name,
                field_type: &f.field_type,
                value_len: f.value_len,
                encrypted: &f.encrypted,
                action: "delete",
            },
        )
        .await
        {
            tracing::warn!(error = %e, "failed to snapshot secret history before delete");
        }
    }

    sqlx::query("DELETE FROM entries WHERE id = $1")
        .bind(row.id)
        .execute(&mut *tx)
        .execute(&mut **tx)
        .await?;

    crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await;

    tx.commit().await?;

    match output {
        OutputMode::Json => println!(
            "{}",
            serde_json::to_string_pretty(
                &json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name})
            )?
        ),
        OutputMode::JsonCompact => println!(
            "{}",
            serde_json::to_string(
                &json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name})
            )?
        ),
        _ => println!("Deleted: [{}/{}] {}", namespace, kind, name),
    }

    Ok(())
}

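// Editorial note (sketch): `snapshot_and_delete` borrows the transaction,
// which is why the queries above execute via `&mut **tx`. sqlx implements
// `Executor` on `&mut PgConnection`; an owned `Transaction` needs one deref
// (`&mut *tx`) while a borrowed `&mut Transaction` needs two (`&mut **tx`).
// Minimal illustration, assuming sqlx 0.7+:
async fn _borrowed_tx_sketch(tx: &mut sqlx::Transaction<'_, sqlx::Postgres>) -> anyhow::Result<()> {
    sqlx::query("SELECT 1").execute(&mut **tx).await?;
    Ok(())
}
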
109
src/commands/export_cmd.rs
Normal file
@@ -0,0 +1,109 @@
use anyhow::Result;
use sqlx::PgPool;
use std::collections::BTreeMap;
use std::io::Write;

use crate::commands::search::{fetch_entries, fetch_secrets_for_entries};
use crate::crypto;
use crate::models::{ExportData, ExportEntry, ExportFormat};

pub struct ExportArgs<'a> {
    pub namespace: Option<&'a str>,
    pub kind: Option<&'a str>,
    pub name: Option<&'a str>,
    pub tags: &'a [String],
    pub query: Option<&'a str>,
    /// Output file path. None means write to stdout.
    pub file: Option<&'a str>,
    /// Explicit format override (e.g. from --format flag).
    pub format: Option<&'a str>,
    /// When true, secrets are omitted and master_key is not used.
    pub no_secrets: bool,
}

pub async fn run(pool: &PgPool, args: ExportArgs<'_>, master_key: Option<&[u8; 32]>) -> Result<()> {
    // Determine output format: --format > file extension > default JSON.
    let format = if let Some(fmt_str) = args.format {
        ExportFormat::from_str(fmt_str)?
    } else if let Some(path) = args.file {
        ExportFormat::from_extension(path).unwrap_or(ExportFormat::Json)
    } else {
        ExportFormat::Json
    };

    let entries = fetch_entries(
        pool,
        args.namespace,
        args.kind,
        args.name,
        args.tags,
        args.query,
    )
    .await?;

    let entry_ids: Vec<uuid::Uuid> = entries.iter().map(|e| e.id).collect();

    let secrets_map = if !args.no_secrets && !entry_ids.is_empty() {
        fetch_secrets_for_entries(pool, &entry_ids).await?
    } else {
        std::collections::HashMap::new()
    };

    let key = if !args.no_secrets { master_key } else { None };

    let mut export_entries: Vec<ExportEntry> = Vec::with_capacity(entries.len());
    for entry in &entries {
        let secrets = if args.no_secrets {
            None
        } else {
            let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]);
            if fields.is_empty() {
                Some(BTreeMap::new())
            } else {
                let mk =
                    key.ok_or_else(|| anyhow::anyhow!("master key required to decrypt secrets"))?;
                let mut map = BTreeMap::new();
                for f in fields {
                    let decrypted = crypto::decrypt_json(mk, &f.encrypted)?;
                    map.insert(f.field_name.clone(), decrypted);
                }
                Some(map)
            }
        };

        export_entries.push(ExportEntry {
            namespace: entry.namespace.clone(),
            kind: entry.kind.clone(),
            name: entry.name.clone(),
            tags: entry.tags.clone(),
            metadata: entry.metadata.clone(),
            secrets,
        });
    }

    let data = ExportData {
        version: 1,
        exported_at: chrono::Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        entries: export_entries,
    };

    let serialized = format.serialize(&data)?;

    if let Some(path) = args.file {
        std::fs::write(path, &serialized)?;
        println!(
            "Exported {} record(s) to {} ({:?})",
            data.entries.len(),
            path,
            format
        );
    } else {
        std::io::stdout().write_all(serialized.as_bytes())?;
        // Ensure trailing newline on stdout.
        if !serialized.ends_with('\n') {
            println!();
        }
    }

    Ok(())
}
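
// Editorial sketch of the format-precedence rule at the top of `run` above
// (--format flag first, then file extension, then JSON). Plain strings stand
// in for `ExportFormat`; the real code resolves through its constructors.
fn _choose_format_sketch(flag: Option<&str>, path: Option<&str>) -> String {
    if let Some(f) = flag {
        return f.to_string(); // explicit --format always wins
    }
    path.and_then(|p| std::path::Path::new(p).extension())
        .map(|ext| ext.to_string_lossy().into_owned()) // infer from extension
        .unwrap_or_else(|| "json".to_string()) // default, e.g. when writing to stdout
}
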
78
src/commands/history.rs
Normal file
@@ -0,0 +1,78 @@
use anyhow::Result;
use serde_json::{Value, json};
use sqlx::{FromRow, PgPool};

use crate::output::{OutputMode, format_local_time, print_json};

pub struct HistoryArgs<'a> {
    pub namespace: &'a str,
    pub kind: &'a str,
    pub name: &'a str,
    pub limit: u32,
    pub output: OutputMode,
}

/// List history records for an entry.
pub async fn run(pool: &PgPool, args: HistoryArgs<'_>) -> Result<()> {
    #[derive(FromRow)]
    struct HistorySummary {
        version: i64,
        action: String,
        actor: String,
        created_at: chrono::DateTime<chrono::Utc>,
    }

    let rows: Vec<HistorySummary> = sqlx::query_as(
        "SELECT version, action, actor, created_at FROM entries_history \
         WHERE namespace = $1 AND kind = $2 AND name = $3 \
         ORDER BY id DESC LIMIT $4",
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .bind(args.limit as i64)
    .fetch_all(pool)
    .await?;

    match args.output {
        OutputMode::Json | OutputMode::JsonCompact => {
            let arr: Vec<Value> = rows
                .iter()
                .map(|r| {
                    json!({
                        "version": r.version,
                        "action": r.action,
                        "actor": r.actor,
                        "created_at": r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
                    })
                })
                .collect();
            print_json(&Value::Array(arr), &args.output)?;
        }
        _ => {
            if rows.is_empty() {
                println!(
                    "No history found for [{}/{}] {}.",
                    args.namespace, args.kind, args.name
                );
                return Ok(());
            }
            println!(
                "History for [{}/{}] {}:",
                args.namespace, args.kind, args.name
            );
            for r in &rows {
                println!(
                    " v{:<4} {:8} {} {}",
                    r.version,
                    r.action,
                    r.actor,
                    format_local_time(r.created_at)
                );
            }
            println!(" (use `secrets rollback --to-version <N>` to restore)");
        }
    }

    Ok(())
}
217
src/commands/import_cmd.rs
Normal file
@@ -0,0 +1,217 @@
use anyhow::Result;
use serde_json::Value;
use sqlx::PgPool;
use std::collections::BTreeMap;

use crate::commands::add::{self, AddArgs};
use crate::models::ExportFormat;
use crate::output::{OutputMode, print_json};

pub struct ImportArgs<'a> {
    pub file: &'a str,
    /// Overwrite existing records when there is a conflict (upsert).
    /// Without this flag, the import aborts on the first conflict.
    /// A future `--skip` flag could allow silently skipping conflicts and continuing.
    pub force: bool,
    /// Check and preview operations without writing to the database.
    pub dry_run: bool,
    pub output: OutputMode,
}

pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    let format = ExportFormat::from_extension(args.file)?;
    let content = std::fs::read_to_string(args.file)
        .map_err(|e| anyhow::anyhow!("Cannot read file '{}': {}", args.file, e))?;
    let data = format.deserialize(&content)?;

    if data.version != 1 {
        anyhow::bail!(
            "Unsupported export version {}. Only version 1 is supported.",
            data.version
        );
    }

    let total = data.entries.len();
    let mut inserted = 0usize;
    let mut skipped = 0usize;
    let mut failed = 0usize;

    for entry in &data.entries {
        // Check if record already exists.
        let exists: bool = sqlx::query_scalar(
            "SELECT EXISTS(SELECT 1 FROM entries \
             WHERE namespace = $1 AND kind = $2 AND name = $3)",
        )
        .bind(&entry.namespace)
        .bind(&entry.kind)
        .bind(&entry.name)
        .fetch_one(pool)
        .await
        .unwrap_or(false);

        if exists && !args.force {
            let v = serde_json::json!({
                "action": "conflict",
                "namespace": entry.namespace,
                "kind": entry.kind,
                "name": entry.name,
            });
            match args.output {
                OutputMode::Text => eprintln!(
                    "[{}/{}/{}] conflict — record already exists (use --force to overwrite)",
                    entry.namespace, entry.kind, entry.name
                ),
                ref mode => {
                    // Write conflict notice to stderr so it does not mix with summary JSON.
                    eprint!(
                        "{}",
                        if *mode == OutputMode::Json {
                            serde_json::to_string_pretty(&v)?
                        } else {
                            serde_json::to_string(&v)?
                        }
                    );
                    eprintln!();
                }
            }
            return Err(anyhow::anyhow!(
                "Import aborted: conflict on [{}/{}/{}]",
                entry.namespace,
                entry.kind,
                entry.name
            ));
        }

        let action = if exists { "upsert" } else { "insert" };

        if args.dry_run {
            let v = serde_json::json!({
                "action": action,
                "namespace": entry.namespace,
                "kind": entry.kind,
                "name": entry.name,
                "dry_run": true,
            });
            match args.output {
                OutputMode::Text => println!(
                    "[dry-run] {} [{}/{}/{}]",
                    action, entry.namespace, entry.kind, entry.name
                ),
                ref mode => print_json(&v, mode)?,
            }
            if exists {
                skipped += 1;
            } else {
                inserted += 1;
            }
            continue;
        }

        // Build secret_entries: convert BTreeMap<String, Value> to Vec<String> ("key:=json")
        let secret_entries = build_secret_entries(entry.secrets.as_ref());

        // Build meta_entries from metadata JSON object.
        let meta_entries = build_meta_entries(&entry.metadata);

        match add::run(
            pool,
            AddArgs {
                namespace: &entry.namespace,
                kind: &entry.kind,
                name: &entry.name,
                tags: &entry.tags,
                meta_entries: &meta_entries,
                secret_entries: &secret_entries,
                output: OutputMode::Text,
            },
            master_key,
        )
        .await
        {
            Ok(()) => {
                let v = serde_json::json!({
                    "action": action,
                    "namespace": entry.namespace,
                    "kind": entry.kind,
                    "name": entry.name,
                });
                match args.output {
                    OutputMode::Text => println!(
                        "Imported [{}/{}/{}]",
                        entry.namespace, entry.kind, entry.name
                    ),
                    ref mode => print_json(&v, mode)?,
                }
                inserted += 1;
            }
            Err(e) => {
                eprintln!(
                    "Error importing [{}/{}/{}]: {}",
                    entry.namespace, entry.kind, entry.name, e
                );
                failed += 1;
            }
        }
    }

    let summary = serde_json::json!({
        "total": total,
        "inserted": inserted,
        "skipped": skipped,
        "failed": failed,
        "dry_run": args.dry_run,
    });
    match args.output {
        OutputMode::Text => {
            if args.dry_run {
                println!(
                    "\n[dry-run] {} total: {} would insert, {} would skip, {} would fail",
                    total, inserted, skipped, failed
                );
            } else {
                println!(
                    "\nImport done: {} total — {} inserted, {} skipped, {} failed",
                    total, inserted, skipped, failed
                );
            }
        }
        ref mode => print_json(&summary, mode)?,
    }

    if failed > 0 {
        anyhow::bail!("{} record(s) failed to import", failed);
    }

    Ok(())
}

/// Convert a metadata JSON object into CLI-style entry strings (`key=value` or `key:=json`).
fn build_meta_entries(metadata: &Value) -> Vec<String> {
    let mut entries = Vec::new();
    if let Some(obj) = metadata.as_object() {
        for (k, v) in obj {
            entries.push(value_to_kv_entry(k, v));
        }
    }
    entries
}

/// Convert a BTreeMap<String, Value> of secrets into CLI-style entry strings (`key=value` or `key:=json`).
fn build_secret_entries(secrets: Option<&BTreeMap<String, Value>>) -> Vec<String> {
    let mut entries = Vec::new();
    if let Some(map) = secrets {
        for (k, v) in map {
            entries.push(value_to_kv_entry(k, v));
        }
    }
    entries
}

/// Convert a key/value pair to a CLI-style entry string.
/// Strings use `key=value`; everything else uses `key:=<json>`.
fn value_to_kv_entry(key: &str, value: &Value) -> String {
    match value {
        Value::String(s) => format!("{}={}", key, s),
        other => format!("{}:={}", key, other),
    }
}
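
// Editorial sketch: entry strings produced by `value_to_kv_entry` above.
// Strings keep `key=value`; every other JSON type is tagged with `:=` so the
// add-side parser can rebuild the original type.
#[cfg(test)]
mod kv_entry_sketch {
    use super::value_to_kv_entry;
    use serde_json::json;

    #[test]
    fn strings_use_eq_everything_else_uses_colon_eq() {
        assert_eq!(value_to_kv_entry("user", &json!("root")), "user=root");
        assert_eq!(value_to_kv_entry("port", &json!(5432)), "port:=5432");
        assert_eq!(value_to_kv_entry("flags", &json!(["a", "b"])), r#"flags:=["a","b"]"#);
    }
}
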
@@ -4,15 +4,23 @@ use sqlx::PgPool;

use crate::{crypto, db};

const MIN_MASTER_PASSWORD_LEN: usize = 8;

pub async fn run(pool: &PgPool) -> Result<()> {
    println!("Initializing secrets master key...");
    println!();

    // Read password (no echo)
    let password =
        rpassword::prompt_password("Enter master password: ").context("failed to read password")?;
    if password.is_empty() {
        anyhow::bail!("Master password must not be empty.");
    let password = rpassword::prompt_password(format!(
        "Enter master password (at least {} characters): ",
        MIN_MASTER_PASSWORD_LEN
    ))
    .context("failed to read password")?;
    if password.chars().count() < MIN_MASTER_PASSWORD_LEN {
        anyhow::bail!(
            "Master password must be at least {} characters.",
            MIN_MASTER_PASSWORD_LEN
        );
    }
    let confirm = rpassword::prompt_password("Confirm master password: ")
        .context("failed to read password confirmation")?;

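// Editorial note (sketch): the length check above uses `chars().count()`, so
// the minimum is measured in Unicode scalar values rather than bytes.
#[cfg(test)]
mod password_len_sketch {
    #[test]
    fn chars_not_bytes() {
        let pw = "pässwörd";
        assert_eq!(pw.chars().count(), 8); // satisfies an 8-char minimum
        assert_eq!(pw.len(), 10);          // byte length would over-count
    }
}
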
@@ -1,6 +1,9 @@
pub mod add;
pub mod config;
pub mod delete;
pub mod export_cmd;
pub mod history;
pub mod import_cmd;
pub mod init;
pub mod rollback;
pub mod run;

|
||||
use sqlx::{FromRow, PgPool};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::output::OutputMode;
|
||||
|
||||
#[derive(FromRow)]
|
||||
struct HistoryRow {
|
||||
secret_id: Uuid,
|
||||
#[allow(dead_code)]
|
||||
namespace: String,
|
||||
#[allow(dead_code)]
|
||||
kind: String,
|
||||
#[allow(dead_code)]
|
||||
name: String,
|
||||
version: i64,
|
||||
action: String,
|
||||
tags: Vec<String>,
|
||||
metadata: Value,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
use crate::crypto;
|
||||
use crate::db;
|
||||
use crate::output::{OutputMode, print_json};
|
||||
|
||||
pub struct RollbackArgs<'a> {
|
||||
pub namespace: &'a str,
|
||||
pub kind: &'a str,
|
||||
pub name: &'a str,
|
||||
/// Target version to restore. None → restore the most recent history entry.
|
||||
/// Target entry version to restore. None → restore the most recent history entry.
|
||||
pub to_version: Option<i64>,
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
||||
let snap: Option<HistoryRow> = if let Some(ver) = args.to_version {
|
||||
// ── Find the target entry history snapshot ────────────────────────────────
|
||||
#[derive(FromRow)]
|
||||
struct EntryHistoryRow {
|
||||
entry_id: Uuid,
|
||||
version: i64,
|
||||
action: String,
|
||||
tags: Vec<String>,
|
||||
metadata: Value,
|
||||
}
|
||||
|
||||
let snap: Option<EntryHistoryRow> = if let Some(ver) = args.to_version {
|
||||
sqlx::query_as(
|
||||
"SELECT secret_id, namespace, kind, name, version, action, tags, metadata, encrypted \
|
||||
FROM secrets_history \
|
||||
"SELECT entry_id, version, action, tags, metadata \
|
||||
FROM entries_history \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 AND version = $4 \
|
||||
ORDER BY id DESC LIMIT 1",
|
||||
)
|
||||
@@ -46,8 +42,8 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT secret_id, namespace, kind, name, version, action, tags, metadata, encrypted \
|
||||
FROM secrets_history \
|
||||
"SELECT entry_id, version, action, tags, metadata \
|
||||
FROM entries_history \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 \
|
||||
ORDER BY id DESC LIMIT 1",
|
||||
)
|
||||
@@ -70,25 +66,53 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
|
||||
)
|
||||
})?;
|
||||
|
||||
// Validate encrypted blob is non-trivial (re-encrypt guard).
|
||||
if !snap.encrypted.is_empty() {
|
||||
// Probe decrypt to ensure the blob is valid before restoring.
|
||||
crate::crypto::decrypt_json(master_key, &snap.encrypted)?;
|
||||
// ── Find the matching secret field snapshots ──────────────────────────────
|
||||
#[derive(FromRow)]
|
||||
struct SecretHistoryRow {
|
||||
secret_id: Uuid,
|
||||
field_name: String,
|
||||
field_type: String,
|
||||
value_len: i32,
|
||||
encrypted: Vec<u8>,
|
||||
action: String,
|
||||
}
|
||||
|
||||
let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as(
|
||||
"SELECT secret_id, field_name, field_type, value_len, encrypted, action \
|
||||
FROM secrets_history \
|
||||
WHERE entry_id = $1 AND entry_version = $2 \
|
||||
ORDER BY field_name",
|
||||
)
|
||||
.bind(snap.entry_id)
|
||||
.bind(snap.version)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
    // Validate: try decrypting all encrypted fields before writing anything.
    for f in &field_snaps {
        if f.action != "delete" && !f.encrypted.is_empty() {
            crypto::decrypt_json(master_key, &f.encrypted).map_err(|e| {
                anyhow::anyhow!(
                    "Cannot decrypt snapshot for field '{}': {}",
                    f.field_name,
                    e
                )
            })?;
        }
    }

    let mut tx = pool.begin().await?;

    // Snapshot current live row (if it exists) before overwriting.
    // ── Snapshot the current live state before overwriting ────────────────────
    #[derive(sqlx::FromRow)]
    struct LiveRow {
    struct LiveEntry {
        id: Uuid,
        version: i64,
        tags: Vec<String>,
        metadata: Value,
        encrypted: Vec<u8>,
    }
    let live: Option<LiveRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted FROM secrets \
    let live: Option<LiveEntry> = sqlx::query_as(
        "SELECT id, version, tags, metadata FROM entries \
         WHERE namespace = $1 AND kind = $2 AND name = $3 FOR UPDATE",
    )
    .bind(args.namespace)
@@ -97,11 +121,11 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
    .fetch_optional(&mut *tx)
    .await?;

    if let Some(lr) = live
        && let Err(e) = crate::db::snapshot_history(
    if let Some(ref lr) = live {
        if let Err(e) = db::snapshot_entry_history(
            &mut tx,
            crate::db::SnapshotParams {
                secret_id: lr.id,
            db::EntrySnapshotParams {
                entry_id: lr.id,
                namespace: args.namespace,
                kind: args.kind,
                name: args.name,
@@ -109,35 +133,104 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
                action: "rollback",
                tags: &lr.tags,
                metadata: &lr.metadata,
                encrypted: &lr.encrypted,
            },
        )
        .await
    {
        tracing::warn!(error = %e, "failed to snapshot current row before rollback");
        {
            tracing::warn!(error = %e, "failed to snapshot entry before rollback");
        }

        // Snapshot existing secret fields.
        #[derive(sqlx::FromRow)]
        struct LiveField {
            id: Uuid,
            field_name: String,
            field_type: String,
            value_len: i32,
            encrypted: Vec<u8>,
        }
        let live_fields: Vec<LiveField> = sqlx::query_as(
            "SELECT id, field_name, field_type, value_len, encrypted \
             FROM secrets WHERE entry_id = $1",
        )
        .bind(lr.id)
        .fetch_all(&mut *tx)
        .await?;

        for f in &live_fields {
            if let Err(e) = db::snapshot_secret_history(
                &mut tx,
                db::SecretSnapshotParams {
                    entry_id: lr.id,
                    secret_id: f.id,
                    entry_version: lr.version,
                    field_name: &f.field_name,
                    field_type: &f.field_type,
                    value_len: f.value_len,
                    encrypted: &f.encrypted,
                    action: "rollback",
                },
            )
            .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field before rollback");
            }
        }
    }

    // ── Restore entry row ─────────────────────────────────────────────────────
    sqlx::query(
        "INSERT INTO secrets (id, namespace, kind, name, tags, metadata, encrypted, version, updated_at) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW()) \
        "INSERT INTO entries (id, namespace, kind, name, tags, metadata, version, updated_at) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, NOW()) \
         ON CONFLICT (namespace, kind, name) DO UPDATE SET \
             tags = EXCLUDED.tags, \
             metadata = EXCLUDED.metadata, \
             encrypted = EXCLUDED.encrypted, \
             version = secrets.version + 1, \
             version = entries.version + 1, \
             updated_at = NOW()",
    )
    .bind(snap.secret_id)
    .bind(snap.entry_id)
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .bind(&snap.tags)
    .bind(&snap.metadata)
    .bind(&snap.encrypted)
    .bind(snap.version)
    .execute(&mut *tx)
    .await?;

    // ── Restore secret fields ─────────────────────────────────────────────────
    // Delete all current fields and re-insert from snapshot
    // (only non-deleted fields from the snapshot are restored).
    sqlx::query("DELETE FROM secrets WHERE entry_id = $1")
        .bind(snap.entry_id)
        .execute(&mut *tx)
        .await?;

    for f in &field_snaps {
        if f.action == "delete" {
            // Field was deleted at this snapshot point — don't restore it.
            continue;
        }
        sqlx::query(
            "INSERT INTO secrets (id, entry_id, field_name, field_type, value_len, encrypted) \
             VALUES ($1, $2, $3, $4, $5, $6) \
             ON CONFLICT (entry_id, field_name) DO UPDATE SET \
                 field_type = EXCLUDED.field_type, \
                 value_len = EXCLUDED.value_len, \
                 encrypted = EXCLUDED.encrypted, \
                 version = secrets.version + 1, \
                 updated_at = NOW()",
        )
        .bind(f.secret_id)
        .bind(snap.entry_id)
        .bind(&f.field_name)
        .bind(&f.field_type)
        .bind(f.value_len)
        .bind(&f.encrypted)
        .execute(&mut *tx)
        .await?;
    }

    crate::audit::log_tx(
        &mut tx,
        "rollback",
@@ -162,83 +255,11 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -
    });

    match args.output {
        OutputMode::Json => println!("{}", serde_json::to_string_pretty(&result_json)?),
        OutputMode::JsonCompact => println!("{}", serde_json::to_string(&result_json)?),
        _ => println!(
        OutputMode::Text => println!(
            "Rolled back: [{}/{}] {} → version {}",
            args.namespace, args.kind, args.name, snap.version
        ),
    }

    Ok(())
}

/// List history entries for a record.
pub async fn list_history(
    pool: &PgPool,
    namespace: &str,
    kind: &str,
    name: &str,
    limit: u32,
    output: OutputMode,
) -> Result<()> {
    #[derive(FromRow)]
    struct HistorySummary {
        version: i64,
        action: String,
        actor: String,
        created_at: chrono::DateTime<chrono::Utc>,
    }

    let rows: Vec<HistorySummary> = sqlx::query_as(
        "SELECT version, action, actor, created_at FROM secrets_history \
         WHERE namespace = $1 AND kind = $2 AND name = $3 \
         ORDER BY id DESC LIMIT $4",
    )
    .bind(namespace)
    .bind(kind)
    .bind(name)
    .bind(limit as i64)
    .fetch_all(pool)
    .await?;

    match output {
        OutputMode::Json | OutputMode::JsonCompact => {
            let arr: Vec<Value> = rows
                .iter()
                .map(|r| {
                    json!({
                        "version": r.version,
                        "action": r.action,
                        "actor": r.actor,
                        "created_at": r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
                    })
                })
                .collect();
            let out = if output == OutputMode::Json {
                serde_json::to_string_pretty(&arr)?
            } else {
                serde_json::to_string(&arr)?
            };
            println!("{}", out);
        }
        _ => {
            if rows.is_empty() {
                println!("No history found for [{}/{}] {}.", namespace, kind, name);
                return Ok(());
            }
            println!("History for [{}/{}] {}:", namespace, kind, name);
            for r in &rows {
                println!(
                    " v{:<4} {:8} {} {}",
                    r.version,
                    r.action,
                    r.actor,
                    r.created_at.format("%Y-%m-%d %H:%M:%S UTC")
                );
            }
            println!(" (use `secrets rollback --to-version <N>` to restore)");
        }
        ref mode => print_json(&result_json, mode)?,
    }

    Ok(())

@@ -3,7 +3,7 @@ use serde_json::Value;
use sqlx::PgPool;
use std::collections::HashMap;

use crate::commands::search::build_env_map;
use crate::commands::search::{build_injected_env_map, fetch_entries, fetch_secrets_for_entries};
use crate::output::OutputMode;

pub struct InjectArgs<'a> {
@@ -11,7 +11,6 @@ pub struct InjectArgs<'a> {
    pub kind: Option<&'a str>,
    pub name: Option<&'a str>,
    pub tags: &'a [String],
    /// Prefix to prepend to every variable name. Empty string means no prefix.
    pub prefix: &'a str,
    pub output: OutputMode,
}
@@ -22,12 +21,10 @@ pub struct RunArgs<'a> {
    pub name: Option<&'a str>,
    pub tags: &'a [String],
    pub prefix: &'a str,
    /// The command and its arguments to execute with injected secrets.
    pub command: &'a [String],
}

/// Fetch secrets matching the filter and build a flat env map.
/// Metadata and secret fields are merged; naming: `<PREFIX_><NAME>_<KEY>` (uppercased).
/// Fetch entries matching the filter and build a flat env map (metadata + decrypted secrets).
pub async fn collect_env_map(
    pool: &PgPool,
    namespace: Option<&str>,
@@ -42,13 +39,19 @@ pub async fn collect_env_map(
            "At least one filter (--namespace, --kind, --name, or --tag) is required for inject/run"
        );
    }
    let rows = crate::commands::search::fetch_rows(pool, namespace, kind, name, tags, None).await?;
    if rows.is_empty() {
    let entries = fetch_entries(pool, namespace, kind, name, tags, None).await?;
    if entries.is_empty() {
        anyhow::bail!("No records matched the given filters.");
    }

    let entry_ids: Vec<uuid::Uuid> = entries.iter().map(|e| e.id).collect();
    let fields_map = fetch_secrets_for_entries(pool, &entry_ids).await?;

    let mut map = HashMap::new();
    for row in &rows {
        let row_map = build_env_map(row, prefix, Some(master_key))?;
    for entry in &entries {
        let empty = vec![];
        let fields = fields_map.get(&entry.id).unwrap_or(&empty);
        let row_map = build_injected_env_map(pool, entry, prefix, master_key, fields).await?;
        for (k, v) in row_map {
            map.insert(k, v);
        }
@@ -56,7 +59,7 @@ pub async fn collect_env_map(
    Ok(map)
}

/// `inject` command: print env vars to stdout (suitable for `eval $(...)` or export).
/// `inject` command: print env vars to stdout.
pub async fn run_inject(pool: &PgPool, args: InjectArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    let env_map = collect_env_map(
        pool,
@@ -85,7 +88,6 @@ pub async fn run_inject(pool: &PgPool, args: InjectArgs<'_>, master_key: &[u8; 3
            println!("{}", serde_json::to_string(&Value::Object(obj))?);
        }
        _ => {
            // Shell-safe KEY=VALUE output, one per line.
            let mut pairs: Vec<(String, String)> = env_map.into_iter().collect();
            pairs.sort_by(|a, b| a.0.cmp(&b.0));
            for (k, v) in pairs {
@@ -136,8 +138,6 @@ pub async fn run_exec(pool: &PgPool, args: RunArgs<'_>, master_key: &[u8; 32]) -
    Ok(())
}

/// Quote a value for safe shell output. Wraps the value in single quotes,
/// escaping any single quotes within the value.
fn shell_quote(s: &str) -> String {
    format!("'{}'", s.replace('\'', "'\\''"))
}

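// Editorial sketch: `shell_quote` closes the quote, emits an escaped quote,
// and reopens, which is the standard POSIX single-quote trick.
#[cfg(test)]
mod shell_quote_sketch {
    use super::shell_quote;

    #[test]
    fn embedded_single_quote() {
        assert_eq!(shell_quote("it's"), r"'it'\''s'");
    }
}
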
@@ -4,8 +4,8 @@ use sqlx::PgPool;
use std::collections::HashMap;

use crate::crypto;
use crate::models::Secret;
use crate::output::OutputMode;
use crate::models::{Entry, SecretField};
use crate::output::{OutputMode, format_local_time};

pub struct SearchArgs<'a> {
    pub namespace: Option<&'a str>,
@@ -13,7 +13,6 @@ pub struct SearchArgs<'a> {
    pub name: Option<&'a str>,
    pub tags: &'a [String],
    pub query: Option<&'a str>,
    pub show_secrets: bool,
    pub fields: &'a [String],
    pub summary: bool,
    pub limit: u32,
@@ -22,8 +21,10 @@ pub struct SearchArgs<'a> {
    pub output: OutputMode,
}

pub async fn run(pool: &PgPool, args: SearchArgs<'_>, master_key: Option<&[u8; 32]>) -> Result<()> {
    let rows = fetch_rows_paged(
pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> {
    validate_safe_search_args(args.fields)?;

    let rows = fetch_entries_paged(
        pool,
        PagedFetchArgs {
            namespace: args.namespace,
@@ -38,16 +39,24 @@ pub async fn run(pool: &PgPool, args: SearchArgs<'_>, master_key: Option<&[u8; 3
    )
    .await?;

    // -f/--field: extract specific field values directly
    // -f/--field: extract specific metadata field values directly
    if !args.fields.is_empty() {
        return print_fields(&rows, args.fields, master_key);
        return print_fields(&rows, args.fields);
    }

    // Fetch secret schemas for all returned entries (no master key needed).
    let entry_ids: Vec<uuid::Uuid> = rows.iter().map(|r| r.id).collect();
    let schema_map = if !args.summary && !entry_ids.is_empty() {
        fetch_secret_schemas(pool, &entry_ids).await?
    } else {
        HashMap::new()
    };

    match args.output {
        OutputMode::Json | OutputMode::JsonCompact => {
            let arr: Vec<Value> = rows
                .iter()
                .map(|r| to_json(r, args.show_secrets, args.summary, master_key))
                .map(|r| to_json(r, args.summary, schema_map.get(&r.id).map(Vec::as_slice)))
                .collect();
            let out = if args.output == OutputMode::Json {
                serde_json::to_string_pretty(&arr)?
@@ -56,31 +65,17 @@ pub async fn run(pool: &PgPool, args: SearchArgs<'_>, master_key: Option<&[u8; 3
            };
            println!("{}", out);
        }
        OutputMode::Env => {
            if rows.len() > 1 {
                anyhow::bail!(
                    "env output requires exactly one record; got {}. Add more filters.",
                    rows.len()
                );
            }
            if let Some(row) = rows.first() {
                let map = build_env_map(row, "", master_key)?;
                let mut pairs: Vec<(String, String)> = map.into_iter().collect();
                pairs.sort_by(|a, b| a.0.cmp(&b.0));
                for (k, v) in pairs {
                    println!("{}={}", k, shell_quote(&v));
                }
            } else {
                eprintln!("No records found.");
            }
        }
        OutputMode::Text => {
            if rows.is_empty() {
                println!("No records found.");
                return Ok(());
            }
            for row in &rows {
                print_text(row, args.show_secrets, args.summary, master_key)?;
                print_text(
                    row,
                    args.summary,
                    schema_map.get(&row.id).map(Vec::as_slice),
                )?;
            }
            println!("{} record(s) found.", rows.len());
            if rows.len() == args.limit as usize {
@@ -96,32 +91,25 @@ pub async fn run(pool: &PgPool, args: SearchArgs<'_>, master_key: Option<&[u8; 3
    Ok(())
}

/// Fetch rows with simple equality/tag filters (no pagination). Used by inject/run.
pub async fn fetch_rows(
    pool: &PgPool,
    namespace: Option<&str>,
    kind: Option<&str>,
    name: Option<&str>,
    tags: &[String],
    query: Option<&str>,
) -> Result<Vec<Secret>> {
    fetch_rows_paged(
        pool,
        PagedFetchArgs {
            namespace,
            kind,
            name,
            tags,
            query,
            sort: "name",
            limit: 200,
            offset: 0,
        },
    )
    .await
fn validate_safe_search_args(fields: &[String]) -> Result<()> {
    if let Some(field) = fields.iter().find(|field| is_secret_field(field)) {
        anyhow::bail!(
            "Field '{}' is sensitive. `search -f` only supports metadata.* fields; use `secrets inject` or `secrets run` for secrets.",
            field
        );
    }
    Ok(())
}

/// Arguments for the internal paged fetch. Grouped to avoid too-many-arguments lint.
fn is_secret_field(field: &str) -> bool {
    matches!(
        field.split_once('.').map(|(section, _)| section),
        Some("secret" | "secrets" | "encrypted")
    )
}

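// Editorial sketch: only the section before the first dot decides, and a
// field without any dot is never treated as sensitive.
#[cfg(test)]
mod secret_field_sketch {
    use super::is_secret_field;

    #[test]
    fn prefix_decides() {
        assert!(is_secret_field("secret.password"));
        assert!(is_secret_field("secrets.api_key"));
        assert!(!is_secret_field("metadata.owner"));
        assert!(!is_secret_field("name"));
    }
}
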
// ── Entry fetching ────────────────────────────────────────────────────────────

struct PagedFetchArgs<'a> {
    namespace: Option<&'a str>,
    kind: Option<&'a str>,
@@ -133,7 +121,50 @@ struct PagedFetchArgs<'a> {
    offset: u32,
}

async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Secret>> {
/// A very large limit used when callers need all matching records (export, inject, run).
/// The query's LIMIT caps the result set at this many rows; adjust if needed.
pub const FETCH_ALL_LIMIT: u32 = 100_000;

/// Fetch entries matching the given filters (used by search, inject, run).
/// `limit` caps the result set; pass `FETCH_ALL_LIMIT` when you need all matching records.
pub async fn fetch_entries(
    pool: &PgPool,
    namespace: Option<&str>,
    kind: Option<&str>,
    name: Option<&str>,
    tags: &[String],
    query: Option<&str>,
) -> Result<Vec<Entry>> {
    fetch_entries_with_limit(pool, namespace, kind, name, tags, query, FETCH_ALL_LIMIT).await
}

/// Like `fetch_entries` but with an explicit limit. Used internally by `search`.
pub(crate) async fn fetch_entries_with_limit(
    pool: &PgPool,
    namespace: Option<&str>,
    kind: Option<&str>,
    name: Option<&str>,
    tags: &[String],
    query: Option<&str>,
    limit: u32,
) -> Result<Vec<Entry>> {
    fetch_entries_paged(
        pool,
        PagedFetchArgs {
            namespace,
            kind,
            name,
            tags,
            query,
            sort: "name",
            limit,
            offset: 0,
        },
    )
    .await
}

async fn fetch_entries_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Entry>> {
    let mut conditions: Vec<String> = Vec::new();
    let mut idx: i32 = 1;

@@ -182,7 +213,7 @@ async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Se
    };

    let sql = format!(
        "SELECT * FROM secrets {} ORDER BY {} LIMIT ${} OFFSET ${}",
        "SELECT * FROM entries {} ORDER BY {} LIMIT ${} OFFSET ${}",
        where_clause,
        order,
        idx,
@@ -191,7 +222,7 @@ async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Se

    tracing::debug!(sql, "executing search query");

    let mut q = sqlx::query_as::<_, Secret>(&sql);
    let mut q = sqlx::query_as::<_, Entry>(&sql);
    if let Some(v) = a.namespace {
        q = q.bind(v);
    }
@@ -214,20 +245,63 @@ async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Se
    }
    q = q.bind(a.limit as i64).bind(a.offset as i64);

    let rows = q.fetch_all(pool).await?;
    Ok(rows)
    Ok(q.fetch_all(pool).await?)
}

/// Build a flat `KEY=VALUE` map from a record's metadata and decrypted secrets.
/// Variable names: `<PREFIX><NAME>_<FIELD>` (all uppercased, hyphens/dots → underscores).
/// If `prefix` is empty, the name segment alone is used as the prefix.
pub fn build_env_map(
    row: &Secret,
    prefix: &str,
    master_key: Option<&[u8; 32]>,
) -> Result<HashMap<String, String>> {
    let name_part = row.name.to_uppercase().replace(['-', '.', ' '], "_");
    let effective_prefix = if prefix.is_empty() {
// ── Secret schema fetching (no master key) ───────────────────────────────────

/// Fetch secret field schemas (field_name, field_type, value_len) for a set of entry ids.
/// Returns a map from entry_id to list of SecretField (encrypted field not used here).
async fn fetch_secret_schemas(
    pool: &PgPool,
    entry_ids: &[uuid::Uuid],
) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
    if entry_ids.is_empty() {
        return Ok(HashMap::new());
    }

    let fields: Vec<SecretField> = sqlx::query_as(
        "SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
    )
    .bind(entry_ids)
    .fetch_all(pool)
    .await?;

    let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
    for f in fields {
        map.entry(f.entry_id).or_default().push(f);
    }
    Ok(map)
}

/// Fetch all secret fields (including encrypted bytes) for a set of entry ids.
|
||||
pub async fn fetch_secrets_for_entries(
|
||||
pool: &PgPool,
|
||||
entry_ids: &[uuid::Uuid],
|
||||
) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
|
||||
if entry_ids.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
|
||||
let fields: Vec<SecretField> = sqlx::query_as(
|
||||
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
|
||||
)
|
||||
.bind(entry_ids)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
|
||||
for f in fields {
|
||||
map.entry(f.entry_id).or_default().push(f);
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
// ── Display helpers ───────────────────────────────────────────────────────────
|
||||
|
||||
fn env_prefix(entry: &Entry, prefix: &str) -> String {
|
||||
let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_");
|
||||
if prefix.is_empty() {
|
||||
name_part
|
||||
} else {
|
||||
format!(
|
||||
@@ -235,11 +309,15 @@ pub fn build_env_map(
|
||||
prefix.to_uppercase().replace(['-', '.', ' '], "_"),
|
||||
name_part
|
||||
)
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a flat KEY=VALUE map from metadata only (no master key required).
|
||||
pub fn build_metadata_env_map(entry: &Entry, prefix: &str) -> HashMap<String, String> {
|
||||
let effective_prefix = env_prefix(entry, prefix);
|
||||
let mut map = HashMap::new();
|
||||
|
||||
if let Some(meta) = row.metadata.as_object() {
|
||||
if let Some(meta) = entry.metadata.as_object() {
|
||||
for (k, v) in meta {
|
||||
let key = format!(
|
||||
"{}_{}",
|
||||
@@ -249,33 +327,68 @@ pub fn build_env_map(
|
||||
map.insert(key, json_value_to_env_string(v));
|
||||
}
|
||||
}
|
||||
map
|
||||
}
|
||||
|
||||
if let Some(master_key) = master_key
|
||||
&& !row.encrypted.is_empty()
|
||||
{
|
||||
let decrypted = crypto::decrypt_json(master_key, &row.encrypted)?;
|
||||
if let Some(enc) = decrypted.as_object() {
|
||||
for (k, v) in enc {
|
||||
let key = format!(
|
||||
/// Build a flat KEY=VALUE map from metadata + decrypted secret fields.
|
||||
/// Resolves key_ref: if metadata.key_ref is set, merges secret fields from that key entry.
|
||||
pub async fn build_injected_env_map(
|
||||
pool: &PgPool,
|
||||
entry: &Entry,
|
||||
prefix: &str,
|
||||
master_key: &[u8; 32],
|
||||
fields: &[SecretField],
|
||||
) -> Result<HashMap<String, String>> {
|
||||
let effective_prefix = env_prefix(entry, prefix);
|
||||
let mut map = build_metadata_env_map(entry, prefix);
|
||||
|
||||
// Decrypt each secret field and add to env map.
|
||||
for f in fields {
|
||||
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
|
||||
let key = format!(
|
||||
"{}_{}",
|
||||
effective_prefix,
|
||||
f.field_name.to_uppercase().replace(['-', '.'], "_")
|
||||
);
|
||||
map.insert(key, json_value_to_env_string(&decrypted));
|
||||
}
|
||||
|
||||
// Resolve key_ref: merge secrets from the referenced key entry.
|
||||
if let Some(key_ref) = entry.metadata.get("key_ref").and_then(|v| v.as_str()) {
|
||||
let key_entries = fetch_entries(
|
||||
pool,
|
||||
Some(&entry.namespace),
|
||||
Some("key"),
|
||||
Some(key_ref),
|
||||
&[],
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if let Some(key_entry) = key_entries.first() {
|
||||
let key_ids = vec![key_entry.id];
|
||||
let key_fields_map = fetch_secrets_for_entries(pool, &key_ids).await?;
|
||||
let empty = vec![];
|
||||
let key_fields = key_fields_map.get(&key_entry.id).unwrap_or(&empty);
|
||||
|
||||
let key_prefix = env_prefix(key_entry, prefix);
|
||||
for f in key_fields {
|
||||
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
|
||||
let key_var = format!(
|
||||
"{}_{}",
|
||||
effective_prefix,
|
||||
k.to_uppercase().replace(['-', '.'], "_")
|
||||
key_prefix,
|
||||
f.field_name.to_uppercase().replace(['-', '.'], "_")
|
||||
);
|
||||
map.insert(key, json_value_to_env_string(v));
|
||||
map.insert(key_var, json_value_to_env_string(&decrypted));
|
||||
}
|
||||
} else {
|
||||
tracing::warn!(key_ref, "key_ref target not found");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
}
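
Note on naming: the env-var key derivation above is the part callers depend on. A self-contained sketch of the rule (the transform mirrors `env_prefix`; `env_var_name` and its inputs are illustrative, not project API):

fn env_var_name(prefix: &str, entry_name: &str, field: &str) -> String {
    // Uppercase and replace separators, as env_prefix does for the name part.
    let up = |s: &str| s.to_uppercase().replace(['-', '.', ' '], "_");
    let name_part = up(entry_name);
    let effective = if prefix.is_empty() {
        name_part
    } else {
        format!("{}_{}", up(prefix), name_part)
    };
    format!("{}_{}", effective, field.to_uppercase().replace(['-', '.'], "_"))
}

// env_var_name("", "gitea.main", "token")     == "GITEA_MAIN_TOKEN"
// env_var_name("ci", "gitea.main", "api-key") == "CI_GITEA_MAIN_API_KEY"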

/// Quote a value for safe shell / env output. Wraps in single quotes,
/// escaping any single quotes within the value.
fn shell_quote(s: &str) -> String {
    format!("'{}'", s.replace('\'', "'\\''"))
}

/// Convert a JSON value to its string representation suitable for env vars.
fn json_value_to_env_string(v: &Value) -> String {
    match v {
        Value::String(s) => s.clone(),
@@ -284,153 +397,118 @@ fn json_value_to_env_string(v: &Value) -> String {
    }
}

/// Decrypt the encrypted blob for a row. Returns an empty object on empty blobs.
fn try_decrypt(row: &Secret, master_key: Option<&[u8; 32]>) -> Result<Value> {
    if row.encrypted.is_empty() {
        return Ok(Value::Object(Default::default()));
    }
    let key = master_key.ok_or_else(|| {
        anyhow::anyhow!("master key required to decrypt secrets (run `secrets init`)")
    })?;
    crypto::decrypt_json(key, &row.encrypted)
}
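
Note on quoting: `shell_quote` uses the POSIX close-quote/escaped-quote/reopen trick, so values survive `eval` and `source`. A quick standalone check (same body as the function above; inputs are throwaway examples):

fn main() {
    let quote = |s: &str| format!("'{}'", s.replace('\'', "'\\''"));
    // A value containing a single quote becomes: 'it'\''s fine'
    assert_eq!(quote("it's fine"), "'it'\\''s fine'");
    // Plain values are simply wrapped.
    assert_eq!(quote("abc123"), "'abc123'");
    println!("ok");
}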

fn to_json(
    row: &Secret,
    show_secrets: bool,
    summary: bool,
    master_key: Option<&[u8; 32]>,
) -> Value {
fn to_json(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Value {
    if summary {
        let desc = row
        let desc = entry
            .metadata
            .get("desc")
            .or_else(|| row.metadata.get("url"))
            .or_else(|| entry.metadata.get("url"))
            .and_then(|v| v.as_str())
            .unwrap_or("")
            .to_string();
        return json!({
            "namespace": row.namespace,
            "kind": row.kind,
            "name": row.name,
            "tags": row.tags,
            "namespace": entry.namespace,
            "kind": entry.kind,
            "name": entry.name,
            "tags": entry.tags,
            "desc": desc,
            "updated_at": row.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
            "updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        });
    }

    let secrets_val = if show_secrets {
        match try_decrypt(row, master_key) {
            Ok(v) => v,
            Err(e) => json!({"_error": e.to_string()}),
    let secrets_val: Value = match schema {
        Some(fields) if !fields.is_empty() => {
            let schema_arr: Vec<Value> = fields
                .iter()
                .map(|f| {
                    json!({
                        "field_name": f.field_name,
                        "field_type": f.field_type,
                        "value_len": f.value_len,
                    })
                })
                .collect();
            Value::Array(schema_arr)
        }
    } else {
        json!({"_encrypted": true})
        _ => Value::Array(vec![]),
    };

    json!({
        "id": row.id,
        "namespace": row.namespace,
        "kind": row.kind,
        "name": row.name,
        "tags": row.tags,
        "metadata": row.metadata,
        "id": entry.id,
        "namespace": entry.namespace,
        "kind": entry.kind,
        "name": entry.name,
        "tags": entry.tags,
        "metadata": entry.metadata,
        "secrets": secrets_val,
        "version": row.version,
        "created_at": row.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        "updated_at": row.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        "version": entry.version,
        "created_at": entry.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        "updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
    })
}

fn print_text(
    row: &Secret,
    show_secrets: bool,
    summary: bool,
    master_key: Option<&[u8; 32]>,
) -> Result<()> {
    println!("[{}/{}] {}", row.namespace, row.kind, row.name);
fn print_text(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Result<()> {
    println!("[{}/{}] {}", entry.namespace, entry.kind, entry.name);
    if summary {
        let desc = row
        let desc = entry
            .metadata
            .get("desc")
            .or_else(|| row.metadata.get("url"))
            .or_else(|| entry.metadata.get("url"))
            .and_then(|v| v.as_str())
            .unwrap_or("-");
        if !row.tags.is_empty() {
            println!(" tags: [{}]", row.tags.join(", "));
        if !entry.tags.is_empty() {
            println!(" tags: [{}]", entry.tags.join(", "));
        }
        println!(" desc: {}", desc);
        println!(
            " updated: {}",
            row.updated_at.format("%Y-%m-%d %H:%M:%S UTC")
        );
        println!(" updated: {}", format_local_time(entry.updated_at));
    } else {
        println!(" id: {}", row.id);
        if !row.tags.is_empty() {
            println!(" tags: [{}]", row.tags.join(", "));
        println!(" id: {}", entry.id);
        if !entry.tags.is_empty() {
            println!(" tags: [{}]", entry.tags.join(", "));
        }
        if row.metadata.as_object().is_some_and(|m| !m.is_empty()) {
        if entry.metadata.as_object().is_some_and(|m| !m.is_empty()) {
            println!(
                " metadata: {}",
                serde_json::to_string_pretty(&row.metadata)?
                serde_json::to_string_pretty(&entry.metadata)?
            );
        }
        if !row.encrypted.is_empty() {
            if show_secrets {
                match try_decrypt(row, master_key) {
                    Ok(v) => println!(" secrets: {}", serde_json::to_string_pretty(&v)?),
                    Err(e) => println!(" secrets: [decrypt error: {}]", e),
                }
            } else {
                println!(" secrets: [encrypted] (--show-secrets to reveal)");
        match schema {
            Some(fields) if !fields.is_empty() => {
                let schema_str: Vec<String> = fields
                    .iter()
                    .map(|f| format!("{}: {}({})", f.field_name, f.field_type, f.value_len))
                    .collect();
                println!(" secrets: {}", schema_str.join(", "));
                println!(" (use `secrets inject` or `secrets run` to get values)");
            }
            _ => {}
        }
        println!(
            " created: {}",
            row.created_at.format("%Y-%m-%d %H:%M:%S UTC")
        );
        println!(" version: {}", entry.version);
        println!(" created: {}", format_local_time(entry.created_at));
    }
    println!();
    Ok(())
}

/// Extract one or more field paths like `metadata.url` or `secret.token`.
fn print_fields(rows: &[Secret], fields: &[String], master_key: Option<&[u8; 32]>) -> Result<()> {
/// Extract one or more metadata field paths like `metadata.url`.
fn print_fields(rows: &[Entry], fields: &[String]) -> Result<()> {
    for row in rows {
        let decrypted: Option<Value> = if fields
            .iter()
            .any(|f| f.starts_with("secret") || f.starts_with("encrypted"))
        {
            Some(try_decrypt(row, master_key)?)
        } else {
            None
        };

        for field in fields {
            let val = extract_field(row, field, decrypted.as_ref())?;
            let val = extract_field(row, field)?;
            println!("{}", val);
        }
    }
    Ok(())
}

fn extract_field(row: &Secret, field: &str, decrypted: Option<&Value>) -> Result<String> {
    let (section, key) = field.split_once('.').ok_or_else(|| {
        anyhow::anyhow!(
            "Invalid field path '{}'. Use metadata.<key> or secret.<key>",
            field
        )
    })?;
fn extract_field(entry: &Entry, field: &str) -> Result<String> {
    let (section, key) = field
        .split_once('.')
        .ok_or_else(|| anyhow::anyhow!("Invalid field path '{}'. Use metadata.<key>.", field))?;

    let obj = match section {
        "metadata" | "meta" => &row.metadata,
        "secret" | "secrets" | "encrypted" => {
            decrypted.ok_or_else(|| anyhow::anyhow!("secret field requires master key"))?
        }
        other => anyhow::bail!(
            "Unknown field section '{}'. Use 'metadata' or 'secret'",
            other
        ),
        "metadata" | "meta" => &entry.metadata,
        other => anyhow::bail!("Unknown field section '{}'. Use 'metadata'.", other),
    };

    obj.get(key)
@@ -443,9 +521,91 @@ fn extract_field(row: &Secret, field: &str, decrypted: Option<&Value>) -> Result
            anyhow::anyhow!(
                "Field '{}' not found in record [{}/{}/{}]",
                field,
                row.namespace,
                row.kind,
                row.name
                entry.namespace,
                entry.kind,
                entry.name
            )
        })
}

#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use serde_json::json;
    use uuid::Uuid;

    fn sample_entry() -> Entry {
        Entry {
            id: Uuid::nil(),
            namespace: "refining".to_string(),
            kind: "service".to_string(),
            name: "gitea.main".to_string(),
            tags: vec!["prod".to_string()],
            metadata: json!({"url": "https://code.example.com", "enabled": true}),
            version: 1,
            created_at: Utc::now(),
            updated_at: Utc::now(),
        }
    }

    fn sample_fields() -> Vec<SecretField> {
        let key = [0x42u8; 32];
        let enc = crypto::encrypt_json(&key, &json!("abc123")).unwrap();
        vec![SecretField {
            id: Uuid::nil(),
            entry_id: Uuid::nil(),
            field_name: "token".to_string(),
            field_type: "string".to_string(),
            value_len: 6,
            encrypted: enc,
            version: 1,
            created_at: Utc::now(),
            updated_at: Utc::now(),
        }]
    }

    #[test]
    fn rejects_secret_field_extraction() {
        let fields = vec!["secret.token".to_string()];
        let err = validate_safe_search_args(&fields).unwrap_err();
        assert!(err.to_string().contains("sensitive"));
    }

    #[test]
    fn metadata_env_map_excludes_secret_values() {
        let entry = sample_entry();
        let map = build_metadata_env_map(&entry, "");

        assert_eq!(
            map.get("GITEA_MAIN_URL").map(String::as_str),
            Some("https://code.example.com")
        );
        assert_eq!(
            map.get("GITEA_MAIN_ENABLED").map(String::as_str),
            Some("true")
        );
        assert!(!map.contains_key("GITEA_MAIN_TOKEN"));
    }

    #[test]
    fn to_json_full_includes_secrets_schema() {
        let entry = sample_entry();
        let fields = sample_fields();
        let v = to_json(&entry, false, Some(&fields));

        let secrets = v.get("secrets").unwrap().as_array().unwrap();
        assert_eq!(secrets.len(), 1);
        assert_eq!(secrets[0]["field_name"], "token");
        assert_eq!(secrets[0]["field_type"], "string");
        assert_eq!(secrets[0]["value_len"], 6);
    }

    #[test]
    fn to_json_summary_omits_secrets_schema() {
        let entry = sample_entry();
        let fields = sample_fields();
        let v = to_json(&entry, true, Some(&fields));
        assert!(v.get("secrets").is_none());
    }
}

@@ -1,21 +1,16 @@
use anyhow::Result;
use serde_json::{Map, Value, json};
use sqlx::{FromRow, PgPool};
use sqlx::PgPool;
use uuid::Uuid;

use super::add::parse_kv;
use super::add::{
    collect_field_paths, collect_key_paths, compute_value_len, flatten_json_fields,
    infer_field_type, insert_path, parse_key_path, parse_kv, remove_path,
};
use crate::crypto;
use crate::db;
use crate::output::OutputMode;

#[derive(FromRow)]
struct UpdateRow {
    id: Uuid,
    version: i64,
    tags: Vec<String>,
    metadata: Value,
    encrypted: Vec<u8>,
}
use crate::models::EntryRow;
use crate::output::{OutputMode, print_json};

pub struct UpdateArgs<'a> {
    pub namespace: &'a str,
@@ -33,9 +28,9 @@ pub struct UpdateArgs<'a> {
pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    let mut tx = pool.begin().await?;

    let row: Option<UpdateRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted \
         FROM secrets \
    let row: Option<EntryRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata \
         FROM entries \
         WHERE namespace = $1 AND kind = $2 AND name = $3 \
         FOR UPDATE",
    )
@@ -54,11 +49,11 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
        )
    })?;

    // Snapshot current state before modifying.
    if let Err(e) = db::snapshot_history(
    // Snapshot current entry state before modifying.
    if let Err(e) = db::snapshot_entry_history(
        &mut tx,
        db::SnapshotParams {
            secret_id: row.id,
        db::EntrySnapshotParams {
            entry_id: row.id,
            namespace: args.namespace,
            kind: args.kind,
            name: args.name,
@@ -66,15 +61,14 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
            action: "update",
            tags: &row.tags,
            metadata: &row.metadata,
            encrypted: &row.encrypted,
        },
    )
    .await
    {
        tracing::warn!(error = %e, "failed to snapshot history before update");
        tracing::warn!(error = %e, "failed to snapshot entry history before update");
    }

    // Merge tags
    // ── Merge tags ────────────────────────────────────────────────────────────
    let mut tags: Vec<String> = row.tags;
    for t in args.add_tags {
        if !tags.contains(t) {
@@ -83,56 +77,29 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
    }
    tags.retain(|t| !args.remove_tags.contains(t));

    // Merge metadata
    // ── Merge metadata ────────────────────────────────────────────────────────
    let mut meta_map: Map<String, Value> = match row.metadata {
        Value::Object(m) => m,
        _ => Map::new(),
    };
    for entry in args.meta_entries {
        let (key, value) = parse_kv(entry)?;
        meta_map.insert(key, value);
        let (path, value) = parse_kv(entry)?;
        insert_path(&mut meta_map, &path, value)?;
    }
    for key in args.remove_meta {
        meta_map.remove(key);
        let path = parse_key_path(key)?;
        remove_path(&mut meta_map, &path)?;
    }
    let metadata = Value::Object(meta_map);

    // Decrypt existing encrypted blob, merge changes, re-encrypt
    let existing_json = if row.encrypted.is_empty() {
        Value::Object(Map::new())
    } else {
        crypto::decrypt_json(master_key, &row.encrypted)?
    };
    let mut enc_map: Map<String, Value> = match existing_json {
        Value::Object(m) => m,
        _ => Map::new(),
    };
    for entry in args.secret_entries {
        let (key, value) = parse_kv(entry)?;
        enc_map.insert(key, value);
    }
    for key in args.remove_secrets {
        enc_map.remove(key);
    }
    let secret_json = Value::Object(enc_map);
    let encrypted_bytes = crypto::encrypt_json(master_key, &secret_json)?;

    tracing::debug!(
        namespace = args.namespace,
        kind = args.kind,
        name = args.name,
        "updating record"
    );

    // CAS: update only if version hasn't changed (FOR UPDATE lock ensures this).
    // CAS update of the entry row.
    let result = sqlx::query(
        "UPDATE secrets \
         SET tags = $1, metadata = $2, encrypted = $3, version = version + 1, updated_at = NOW() \
         WHERE id = $4 AND version = $5",
        "UPDATE entries \
         SET tags = $1, metadata = $2, version = version + 1, updated_at = NOW() \
         WHERE id = $3 AND version = $4",
    )
    .bind(&tags)
    .bind(&metadata)
    .bind(&encrypted_bytes)
    .bind(row.id)
    .bind(row.version)
    .execute(&mut *tx)
@@ -148,16 +115,134 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
        );
    }

    let meta_keys: Vec<&str> = args
        .meta_entries
        .iter()
        .filter_map(|s| s.split_once(['=', ':']).map(|(k, _)| k))
        .collect();
    let secret_keys: Vec<&str> = args
        .secret_entries
        .iter()
        .filter_map(|s| s.split_once(['=', ':']).map(|(k, _)| k))
        .collect();
    let new_version = row.version + 1;

    // ── Update secret fields ──────────────────────────────────────────────────
    for entry in args.secret_entries {
        let (path, field_value) = parse_kv(entry)?;

        // For nested paths (e.g. credentials:type), flatten into dot-separated names
        // and treat the sub-value as the individual field to store.
        let flat = flatten_json_fields("", &{
            let mut m = Map::new();
            insert_path(&mut m, &path, field_value)?;
            Value::Object(m)
        });

        for (field_name, fv) in &flat {
            let field_type = infer_field_type(fv);
            let value_len = compute_value_len(fv);
            let encrypted = crypto::encrypt_json(master_key, fv)?;

            // Snapshot existing field before replacing.
            #[derive(sqlx::FromRow)]
            struct ExistingField {
                id: Uuid,
                field_type: String,
                value_len: i32,
                encrypted: Vec<u8>,
            }
            let existing_field: Option<ExistingField> = sqlx::query_as(
                "SELECT id, field_type, value_len, encrypted \
                 FROM secrets WHERE entry_id = $1 AND field_name = $2",
            )
            .bind(row.id)
            .bind(field_name)
            .fetch_optional(&mut *tx)
            .await?;

            if let Some(ef) = &existing_field
                && let Err(e) = db::snapshot_secret_history(
                    &mut tx,
                    db::SecretSnapshotParams {
                        entry_id: row.id,
                        secret_id: ef.id,
                        entry_version: row.version,
                        field_name,
                        field_type: &ef.field_type,
                        value_len: ef.value_len,
                        encrypted: &ef.encrypted,
                        action: "update",
                    },
                )
                .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field history");
            }

            sqlx::query(
                "INSERT INTO secrets (entry_id, field_name, field_type, value_len, encrypted) \
                 VALUES ($1, $2, $3, $4, $5) \
                 ON CONFLICT (entry_id, field_name) DO UPDATE SET \
                 field_type = EXCLUDED.field_type, \
                 value_len = EXCLUDED.value_len, \
                 encrypted = EXCLUDED.encrypted, \
                 version = secrets.version + 1, \
                 updated_at = NOW()",
            )
            .bind(row.id)
            .bind(field_name)
            .bind(field_type)
            .bind(value_len)
            .bind(&encrypted)
            .execute(&mut *tx)
            .await?;
        }
    }

    // ── Remove secret fields ──────────────────────────────────────────────────
    for key in args.remove_secrets {
        let path = parse_key_path(key)?;
        // Dot-join the path to match flattened field_name storage.
        let field_name = path.join(".");

        // Snapshot before delete.
        #[derive(sqlx::FromRow)]
        struct FieldToDelete {
            id: Uuid,
            field_type: String,
            value_len: i32,
            encrypted: Vec<u8>,
        }
        let field: Option<FieldToDelete> = sqlx::query_as(
            "SELECT id, field_type, value_len, encrypted \
             FROM secrets WHERE entry_id = $1 AND field_name = $2",
        )
        .bind(row.id)
        .bind(&field_name)
        .fetch_optional(&mut *tx)
        .await?;

        if let Some(f) = field {
            if let Err(e) = db::snapshot_secret_history(
                &mut tx,
                db::SecretSnapshotParams {
                    entry_id: row.id,
                    secret_id: f.id,
                    entry_version: new_version,
                    field_name: &field_name,
                    field_type: &f.field_type,
                    value_len: f.value_len,
                    encrypted: &f.encrypted,
                    action: "delete",
                },
            )
            .await
            {
                tracing::warn!(error = %e, "failed to snapshot secret field history before delete");
            }

            sqlx::query("DELETE FROM secrets WHERE id = $1")
                .bind(f.id)
                .execute(&mut *tx)
                .await?;
        }
    }

    let meta_keys = collect_key_paths(args.meta_entries)?;
    let remove_meta_keys = collect_field_paths(args.remove_meta)?;
    let secret_keys = collect_key_paths(args.secret_entries)?;
    let remove_secret_keys = collect_field_paths(args.remove_secrets)?;

    crate::audit::log_tx(
        &mut tx,
@@ -169,9 +254,9 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
            "add_tags": args.add_tags,
            "remove_tags": args.remove_tags,
            "meta_keys": meta_keys,
            "remove_meta": args.remove_meta,
            "remove_meta": remove_meta_keys,
            "secret_keys": secret_keys,
            "remove_secrets": args.remove_secrets,
            "remove_secrets": remove_secret_keys,
        }),
    )
    .await;
@@ -186,17 +271,14 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
        "add_tags": args.add_tags,
        "remove_tags": args.remove_tags,
        "meta_keys": meta_keys,
        "remove_meta": args.remove_meta,
        "remove_meta": remove_meta_keys,
        "secret_keys": secret_keys,
        "remove_secrets": args.remove_secrets,
        "remove_secrets": remove_secret_keys,
    });

    match args.output {
        OutputMode::Json => {
            println!("{}", serde_json::to_string_pretty(&result_json)?);
        }
        OutputMode::JsonCompact => {
            println!("{}", serde_json::to_string(&result_json)?);
        OutputMode::Json | OutputMode::JsonCompact => {
            print_json(&result_json, &args.output)?;
        }
        _ => {
            println!("Updated: [{}/{}] {}", args.namespace, args.kind, args.name);
@@ -210,13 +292,13 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) ->
                println!(" +metadata: {}", meta_keys.join(", "));
            }
            if !args.remove_meta.is_empty() {
                println!(" -metadata: {}", args.remove_meta.join(", "));
                println!(" -metadata: {}", remove_meta_keys.join(", "));
            }
            if !args.secret_entries.is_empty() {
                println!(" +secrets: {}", secret_keys.join(", "));
            }
            if !args.remove_secrets.is_empty() {
                println!(" -secrets: {}", args.remove_secrets.join(", "));
                println!(" -secrets: {}", remove_secret_keys.join(", "));
            }
        }
    }
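
Note on nested keys: the update path stores nested secret keys as dot-joined field names, so `-s credentials:content=...` and a later `--remove-secret credentials:content` agree on the same `field_name`. A standalone sketch of the flattening idea (the real helpers live in `super::add`; `flatten` here is an illustrative stand-in, not the project function):

use serde_json::{Map, Value, json};

fn flatten(prefix: &str, v: &Value, out: &mut Vec<(String, Value)>) {
    match v {
        // Objects recurse, extending the dot-separated path.
        Value::Object(m) => {
            for (k, child) in m {
                let key = if prefix.is_empty() {
                    k.clone()
                } else {
                    format!("{prefix}.{k}")
                };
                flatten(&key, child, out);
            }
        }
        // Leaves become one stored field each.
        _ => out.push((prefix.to_string(), v.clone())),
    }
}

fn main() {
    let mut m = Map::new();
    m.insert("credentials".into(), json!({"content": "PEM...", "type": "ssh"}));
    let mut out = Vec::new();
    flatten("", &Value::Object(m), &mut out);
    // -> [("credentials.content", "PEM..."), ("credentials.type", "ssh")]
    println!("{out:?}");
}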

@@ -3,11 +3,28 @@ use flate2::read::GzDecoder;
use serde::Deserialize;
use sha2::{Digest, Sha256};
use std::io::{Cursor, Read, Write};

const GITEA_API: &str = "https://gitea.refining.dev/api/v1/repos/refining/secrets/releases/latest";
use std::time::Duration;

const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION");

/// Build-time config via `option_env!("SECRETS_UPGRADE_URL")`. Set during `cargo build`, e.g.:
/// SECRETS_UPGRADE_URL=https://... cargo build --release
const BUILD_UPGRADE_URL: Option<&'static str> = option_env!("SECRETS_UPGRADE_URL");

fn upgrade_api_url() -> Result<String> {
    if let Some(url) = BUILD_UPGRADE_URL.filter(|s| !s.trim().is_empty()) {
        return Ok(url.to_string());
    }
    let url = std::env::var("SECRETS_UPGRADE_URL").context(
        "SECRETS_UPGRADE_URL is not set at build or runtime. Set it when building: \
         SECRETS_UPGRADE_URL=https://... cargo build, or export before running secrets upgrade.",
    )?;
    if url.trim().is_empty() {
        anyhow::bail!("SECRETS_UPGRADE_URL is empty.");
    }
    Ok(url)
}
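
Note: `option_env!` is resolved at compile time, so the binary carries its release endpoint without hardcoding it in source, and the runtime env var is only a fallback. A minimal sketch of the same two-stage lookup (standalone; returns Option instead of anyhow::Result for brevity):

// Baked in at compile time if the variable was set during `cargo build`.
const BUILT_IN: Option<&'static str> = option_env!("SECRETS_UPGRADE_URL");

fn resolve_url() -> Option<String> {
    // 1. Prefer the compile-time value, ignoring blank strings.
    if let Some(url) = BUILT_IN.filter(|s| !s.trim().is_empty()) {
        return Some(url.to_string());
    }
    // 2. Otherwise consult the process environment at runtime.
    std::env::var("SECRETS_UPGRADE_URL")
        .ok()
        .filter(|s| !s.trim().is_empty())
}

fn main() {
    match resolve_url() {
        Some(url) => println!("upgrade endpoint: {url}"),
        None => eprintln!("SECRETS_UPGRADE_URL not set at build or run time"),
    }
}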

#[derive(Debug, Deserialize)]
struct Release {
    tag_name: String,
@@ -28,16 +45,17 @@ fn available_assets(assets: &[Asset]) -> String {
        .join(", ")
}

fn find_asset_by_suffix<'a>(assets: &'a [Asset], suffix: &str) -> Result<&'a Asset> {
    assets
        .iter()
        .find(|a| a.name.ends_with(suffix))
        .with_context(|| {
            format!(
                "no asset found for this platform (looking for suffix: {suffix})\navailable: {}",
                available_assets(assets)
            )
        })
fn release_asset_name(tag_name: &str, suffix: &str) -> String {
    format!("secrets-{tag_name}-{suffix}")
}

fn find_asset_by_name<'a>(assets: &'a [Asset], name: &str) -> Result<&'a Asset> {
    assets.iter().find(|a| a.name == name).with_context(|| {
        format!(
            "no matching release asset found: {name}\navailable: {}",
            available_assets(assets)
        )
    })
}

/// Detect the asset suffix for the current platform/arch at compile time.
@@ -89,6 +107,22 @@ fn sha256_hex(bytes: &[u8]) -> String {
    format!("{digest:x}")
}

fn verify_checksum(asset_name: &str, archive: &[u8], checksum_contents: &str) -> Result<String> {
    let expected_checksum = parse_checksum_file(checksum_contents)?;
    let actual_checksum = sha256_hex(archive);

    if actual_checksum != expected_checksum {
        bail!(
            "checksum verification failed for {}: expected {}, got {}",
            asset_name,
            expected_checksum,
            actual_checksum
        );
    }

    Ok(actual_checksum)
}
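
Note: `verify_checksum` recomputes SHA-256 over the downloaded archive and compares it to the published digest, so a tampered or truncated download fails before install. The core computation, runnable on its own (sha2 crate; the hex formatting below is a byte-by-byte equivalent of the file's `sha256_hex`, and "abc" is the standard test vector):

use sha2::{Digest, Sha256};

fn sha256_hex(bytes: &[u8]) -> String {
    Sha256::digest(bytes)
        .iter()
        .map(|b| format!("{b:02x}"))
        .collect()
}

fn main() {
    let expected = "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad";
    let actual = sha256_hex(b"abc");
    // A mismatch here would indicate a corrupted or substituted artifact.
    assert_eq!(actual, expected);
    println!("checksum ok: {actual}");
}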

fn parse_checksum_file(contents: &str) -> Result<String> {
    let checksum = contents
        .split_whitespace()
@@ -163,16 +197,19 @@ pub async fn run(check_only: bool) -> Result<()> {

    let client = reqwest::Client::builder()
        .user_agent(format!("secrets-cli/{CURRENT_VERSION}"))
        .connect_timeout(Duration::from_secs(10))
        .timeout(Duration::from_secs(120))
        .build()
        .context("failed to build HTTP client")?;

    let api_url = upgrade_api_url()?;
    let release: Release = client
        .get(GITEA_API)
        .get(&api_url)
        .send()
        .await
        .context("failed to fetch release info from Gitea")?
        .context("failed to fetch release info")?
        .error_for_status()
        .context("Gitea API returned an error")?
        .context("release API returned an error")?
        .json()
        .await
        .context("failed to parse release JSON")?;
@@ -192,18 +229,10 @@ pub async fn run(check_only: bool) -> Result<()> {
    }

    let suffix = platform_asset_suffix()?;
    let asset = find_asset_by_suffix(&release.assets, suffix)?;
    let asset_name = release_asset_name(&release.tag_name, suffix);
    let asset = find_asset_by_name(&release.assets, &asset_name)?;
    let checksum_name = format!("{}.sha256", asset.name);
    let checksum_asset = release
        .assets
        .iter()
        .find(|a| a.name == checksum_name)
        .with_context(|| {
            format!(
                "missing checksum asset for download: {checksum_name}\navailable: {}",
                available_assets(&release.assets)
            )
        })?;
    let checksum_asset = find_asset_by_name(&release.assets, &checksum_name)?;

    println!("Downloading {}...", asset.name);

@@ -214,19 +243,11 @@ pub async fn run(check_only: bool) -> Result<()> {
        "checksum download",
    )
    .await?;
    let expected_checksum = parse_checksum_file(
    let actual_checksum = verify_checksum(
        &asset.name,
        &archive,
        std::str::from_utf8(&checksum_contents).context("checksum file is not valid UTF-8")?,
    )?;
    let actual_checksum = sha256_hex(&archive);

    if actual_checksum != expected_checksum {
        bail!(
            "checksum verification failed for {}: expected {}, got {}",
            asset.name,
            expected_checksum,
            actual_checksum
        );
    }

    println!("Verified SHA-256: {actual_checksum}");

@@ -298,6 +319,33 @@ mod tests {
        assert!(err.to_string().contains("invalid SHA-256 checksum format"));
    }

    #[test]
    fn release_asset_name_matches_release_tag() {
        assert_eq!(
            release_asset_name("secrets-0.7.0", "x86_64-linux-musl.tar.gz"),
            "secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz"
        );
    }

    #[test]
    fn find_asset_by_name_rejects_stale_platform_match() {
        let assets = vec![
            Asset {
                name: "secrets-secrets-0.6.9-x86_64-linux-musl.tar.gz".into(),
                browser_download_url: "https://example.invalid/old".into(),
            },
            Asset {
                name: "secrets-secrets-0.7.0-aarch64-macos.tar.gz".into(),
                browser_download_url: "https://example.invalid/other".into(),
            },
        ];

        let err = find_asset_by_name(&assets, "secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz")
            .expect_err("stale asset should not match");

        assert!(err.to_string().contains("no matching release asset found"));
    }

    #[test]
    fn sha256_hex_matches_known_value() {
        assert_eq!(
@@ -306,6 +354,18 @@ mod tests {
        );
    }

    #[test]
    fn verify_checksum_rejects_mismatch() {
        let err = verify_checksum(
            "secrets.tar.gz",
            b"abc",
            "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef secrets.tar.gz",
        )
        .expect_err("checksum mismatch should fail");

        assert!(err.to_string().contains("checksum verification failed"));
    }

    #[test]
    fn extract_from_targz_reads_binary() {
        let payload = b"fake-secrets-binary";

@@ -8,19 +8,23 @@ pub struct Config {
    pub database_url: Option<String>,
}

pub fn config_dir() -> PathBuf {
    dirs::config_dir()
pub fn config_dir() -> Result<PathBuf> {
    let dir = dirs::config_dir()
        .or_else(|| dirs::home_dir().map(|h| h.join(".config")))
        .unwrap_or_else(|| PathBuf::from(".config"))
        .join("secrets")
        .context(
            "Cannot determine config directory: \
             neither XDG_CONFIG_HOME nor HOME is set",
        )?
        .join("secrets");
    Ok(dir)
}

pub fn config_path() -> PathBuf {
    config_dir().join("config.toml")
pub fn config_path() -> Result<PathBuf> {
    Ok(config_dir()?.join("config.toml"))
}

pub fn load_config() -> Result<Config> {
    let path = config_path();
    let path = config_path()?;
    if !path.exists() {
        return Ok(Config::default());
    }
@@ -32,11 +36,11 @@ pub fn load_config() -> Result<Config> {
}

pub fn save_config(config: &Config) -> Result<()> {
    let dir = config_dir();
    let dir = config_dir()?;
    fs::create_dir_all(&dir)
        .with_context(|| format!("failed to create config dir: {}", dir.display()))?;

    let path = config_path();
    let path = dir.join("config.toml");
    let content = toml::to_string_pretty(config).context("failed to serialize config")?;
    fs::write(&path, &content)
        .with_context(|| format!("failed to write config file: {}", path.display()))?;

@@ -10,12 +10,24 @@ const KEYRING_SERVICE: &str = "secrets-cli";
const KEYRING_USER: &str = "master-key";
const NONCE_LEN: usize = 12;

// Argon2id parameters — OWASP recommended (m=64 MiB, t=3 iterations, p=4 threads, key=32 B)
const ARGON2_M_COST: u32 = 65_536;
const ARGON2_T_COST: u32 = 3;
const ARGON2_P_COST: u32 = 4;
const ARGON2_KEY_LEN: usize = 32;

// ─── Argon2id key derivation ─────────────────────────────────────────────────

/// Derive a 32-byte Master Key from a password and salt using Argon2id.
/// Parameters: m=65536 KiB (64 MB), t=3, p=4 — OWASP recommended.
pub fn derive_master_key(password: &str, salt: &[u8]) -> Result<[u8; 32]> {
    let params = Params::new(65536, 3, 4, Some(32)).context("invalid Argon2id params")?;
    let params = Params::new(
        ARGON2_M_COST,
        ARGON2_T_COST,
        ARGON2_P_COST,
        Some(ARGON2_KEY_LEN),
    )
    .context("invalid Argon2id params")?;
    let argon2 = Argon2::new(argon2::Algorithm::Argon2id, Version::V0x13, params);
    let mut key = [0u8; 32];
    argon2
@@ -105,15 +117,6 @@ pub fn store_master_key(key: &[u8; 32]) -> Result<()> {
    Ok(())
}
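
Note: a runnable sketch of the same derivation, handy for checking parameter changes in isolation (argon2 crate; the password and salt are throwaway test values, and a real salt must be random, at least 8 bytes, and stored alongside the data):

use argon2::{Algorithm, Argon2, Params, Version};

fn main() -> Result<(), String> {
    // m=64 MiB, t=3, p=4, 32-byte output, matching the constants above.
    let params = Params::new(65_536, 3, 4, Some(32)).map_err(|e| e.to_string())?;
    let argon2 = Argon2::new(Algorithm::Argon2id, Version::V0x13, params);

    let mut key = [0u8; 32];
    argon2
        .hash_password_into(b"correct horse battery staple", b"0123456789abcdef", &mut key)
        .map_err(|e| e.to_string())?;

    // Same password + salt + params always yields the same 32-byte key.
    println!("derived key prefix: {:02x?}", &key[..4]);
    Ok(())
}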

/// Delete the Master Key from the OS Keychain (used by tests / reset).
#[cfg(test)]
pub fn delete_master_key() -> Result<()> {
    let entry =
        keyring::Entry::new(KEYRING_SERVICE, KEYRING_USER).context("create keychain entry")?;
    let _ = entry.delete_credential();
    Ok(())
}

// ─── Minimal hex helpers (avoid extra dep) ────────────────────────────────────

mod hex {
168 src/db.rs
@@ -1,7 +1,10 @@
use anyhow::Result;
use serde_json::Value;
use sqlx::PgPool;
use sqlx::postgres::PgPoolOptions;

use crate::audit::current_actor;

pub async fn create_pool(database_url: &str) -> Result<PgPool> {
    tracing::debug!("connecting to database");
    let pool = PgPoolOptions::new()
@@ -17,61 +20,48 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
    tracing::debug!("running migrations");
    sqlx::raw_sql(
        r#"
        CREATE TABLE IF NOT EXISTS secrets (
        -- ── entries: top-level entities (server, service, key, …) ──────────────
        CREATE TABLE IF NOT EXISTS entries (
            id UUID PRIMARY KEY DEFAULT uuidv7(),
            namespace VARCHAR(64) NOT NULL,
            kind VARCHAR(64) NOT NULL,
            name VARCHAR(256) NOT NULL,
            tags TEXT[] NOT NULL DEFAULT '{}',
            metadata JSONB NOT NULL DEFAULT '{}',
            encrypted BYTEA NOT NULL DEFAULT '\x',
            version BIGINT NOT NULL DEFAULT 1,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            UNIQUE(namespace, kind, name)
        );

        -- idempotent column add for existing tables
        DO $$ BEGIN
            ALTER TABLE secrets ADD COLUMN IF NOT EXISTS metadata JSONB NOT NULL DEFAULT '{}';
        EXCEPTION WHEN OTHERS THEN NULL;
        END $$;
        CREATE INDEX IF NOT EXISTS idx_entries_namespace ON entries(namespace);
        CREATE INDEX IF NOT EXISTS idx_entries_kind ON entries(kind);
        CREATE INDEX IF NOT EXISTS idx_entries_tags ON entries USING GIN(tags);
        CREATE INDEX IF NOT EXISTS idx_entries_metadata ON entries USING GIN(metadata jsonb_path_ops);

        DO $$ BEGIN
            ALTER TABLE secrets ADD COLUMN IF NOT EXISTS version BIGINT NOT NULL DEFAULT 1;
        EXCEPTION WHEN OTHERS THEN NULL;
        END $$;
        -- ── secrets: one row per encrypted field, plaintext schema metadata ────
        CREATE TABLE IF NOT EXISTS secrets (
            id UUID PRIMARY KEY DEFAULT uuidv7(),
            entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
            field_name VARCHAR(256) NOT NULL,
            field_type VARCHAR(32) NOT NULL DEFAULT 'string',
            value_len INT NOT NULL DEFAULT 0,
            encrypted BYTEA NOT NULL DEFAULT '\x',
            version BIGINT NOT NULL DEFAULT 1,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            UNIQUE(entry_id, field_name)
        );

        -- Migrate encrypted column from JSONB to BYTEA if still JSONB type.
        -- After migration, old plaintext rows will have their JSONB data
        -- stored as raw bytes (UTF-8 encoded).
        DO $$ BEGIN
            IF EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = 'secrets'
                AND column_name = 'encrypted'
                AND data_type = 'jsonb'
            ) THEN
                ALTER TABLE secrets RENAME COLUMN encrypted TO encrypted_jsonb_old;
                ALTER TABLE secrets ADD COLUMN encrypted BYTEA NOT NULL DEFAULT '\x';
                -- Copy existing JSONB data as raw UTF-8 bytes so nothing is lost
                UPDATE secrets SET encrypted = convert_to(encrypted_jsonb_old::text, 'UTF8');
                ALTER TABLE secrets DROP COLUMN encrypted_jsonb_old;
            END IF;
        EXCEPTION WHEN OTHERS THEN NULL;
        END $$;
        CREATE INDEX IF NOT EXISTS idx_secrets_entry_id ON secrets(entry_id);

        CREATE INDEX IF NOT EXISTS idx_secrets_namespace ON secrets(namespace);
        CREATE INDEX IF NOT EXISTS idx_secrets_kind ON secrets(kind);
        CREATE INDEX IF NOT EXISTS idx_secrets_tags ON secrets USING GIN(tags);
        CREATE INDEX IF NOT EXISTS idx_secrets_metadata ON secrets USING GIN(metadata jsonb_path_ops);

        -- Key-value config table: stores Argon2id salt (shared across devices)
        -- ── kv_config: global key-value store (Argon2id salt, etc.) ────────────
        CREATE TABLE IF NOT EXISTS kv_config (
            key TEXT PRIMARY KEY,
            value BYTEA NOT NULL
        );

        -- ── audit_log: append-only operation log ────────────────────────────────
        CREATE TABLE IF NOT EXISTS audit_log (
            id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            action VARCHAR(32) NOT NULL,
@@ -83,14 +73,13 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );

        CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC);
        CREATE INDEX IF NOT EXISTS idx_audit_log_ns_kind ON audit_log(namespace, kind);
        CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC);
        CREATE INDEX IF NOT EXISTS idx_audit_log_ns_kind ON audit_log(namespace, kind);

        -- History table: snapshot of secrets before each write operation.
        -- Supports rollback to any prior version via `secrets rollback`.
        CREATE TABLE IF NOT EXISTS secrets_history (
        -- ── entries_history: entry-level snapshot (tags + metadata) ─────────────
        CREATE TABLE IF NOT EXISTS entries_history (
            id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            secret_id UUID NOT NULL,
            entry_id UUID NOT NULL,
            namespace VARCHAR(64) NOT NULL,
            kind VARCHAR(64) NOT NULL,
            name VARCHAR(256) NOT NULL,
@@ -98,13 +87,34 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
            action VARCHAR(16) NOT NULL,
            tags TEXT[] NOT NULL DEFAULT '{}',
            metadata JSONB NOT NULL DEFAULT '{}',
            encrypted BYTEA NOT NULL DEFAULT '\x',
            actor VARCHAR(128) NOT NULL DEFAULT '',
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );

        CREATE INDEX IF NOT EXISTS idx_history_secret_id ON secrets_history(secret_id, version DESC);
        CREATE INDEX IF NOT EXISTS idx_history_ns_kind_name ON secrets_history(namespace, kind, name, version DESC);
        CREATE INDEX IF NOT EXISTS idx_entries_history_entry_id
            ON entries_history(entry_id, version DESC);
        CREATE INDEX IF NOT EXISTS idx_entries_history_ns_kind_name
            ON entries_history(namespace, kind, name, version DESC);

        -- ── secrets_history: field-level snapshot ───────────────────────────────
        CREATE TABLE IF NOT EXISTS secrets_history (
            id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            entry_id UUID NOT NULL,
            secret_id UUID NOT NULL,
            entry_version BIGINT NOT NULL,
            field_name VARCHAR(256) NOT NULL,
            field_type VARCHAR(32) NOT NULL DEFAULT 'string',
            value_len INT NOT NULL DEFAULT 0,
            encrypted BYTEA NOT NULL DEFAULT '\x',
            action VARCHAR(16) NOT NULL,
            actor VARCHAR(128) NOT NULL DEFAULT '',
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );

        CREATE INDEX IF NOT EXISTS idx_secrets_history_entry_id
            ON secrets_history(entry_id, entry_version DESC);
        CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id
            ON secrets_history(secret_id);
        "#,
    )
    .execute(pool)
@@ -113,33 +123,31 @@ pub async fn migrate(pool: &PgPool) -> Result<()> {
    Ok(())
}

/// Snapshot parameters grouped to avoid too-many-arguments lint.
pub struct SnapshotParams<'a> {
    pub secret_id: uuid::Uuid,
// ── Entry-level history snapshot ────────────────────────────────────────────

pub struct EntrySnapshotParams<'a> {
    pub entry_id: uuid::Uuid,
    pub namespace: &'a str,
    pub kind: &'a str,
    pub name: &'a str,
    pub version: i64,
    pub action: &'a str,
    pub tags: &'a [String],
    pub metadata: &'a serde_json::Value,
    pub encrypted: &'a [u8],
    pub metadata: &'a Value,
}

/// Snapshot a secrets row into `secrets_history` before a write operation.
/// `action` is one of "add", "update", "delete".
/// Failures are non-fatal (caller should warn).
pub async fn snapshot_history(
/// Snapshot an entry row into `entries_history` before a write operation.
pub async fn snapshot_entry_history(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    p: SnapshotParams<'_>,
    p: EntrySnapshotParams<'_>,
) -> Result<()> {
    let actor = std::env::var("USER").unwrap_or_default();
    let actor = current_actor();
    sqlx::query(
        "INSERT INTO secrets_history \
         (secret_id, namespace, kind, name, version, action, tags, metadata, encrypted, actor) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
        "INSERT INTO entries_history \
         (entry_id, namespace, kind, name, version, action, tags, metadata, actor) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
    )
    .bind(p.secret_id)
    .bind(p.entry_id)
    .bind(p.namespace)
    .bind(p.kind)
    .bind(p.name)
@@ -147,15 +155,53 @@ pub async fn snapshot_history(
    .bind(p.action)
    .bind(p.tags)
    .bind(p.metadata)
    .bind(p.encrypted)
    .bind(&actor)
    .execute(&mut **tx)
    .await?;
    Ok(())
}

// ── Secret field-level history snapshot ─────────────────────────────────────

pub struct SecretSnapshotParams<'a> {
    pub entry_id: uuid::Uuid,
    pub secret_id: uuid::Uuid,
    pub entry_version: i64,
    pub field_name: &'a str,
    pub field_type: &'a str,
    pub value_len: i32,
    pub encrypted: &'a [u8],
    pub action: &'a str,
}

/// Snapshot a single secret field into `secrets_history`.
pub async fn snapshot_secret_history(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    p: SecretSnapshotParams<'_>,
) -> Result<()> {
    let actor = current_actor();
    sqlx::query(
        "INSERT INTO secrets_history \
         (entry_id, secret_id, entry_version, field_name, field_type, value_len, encrypted, action, actor) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
    )
    .bind(p.entry_id)
    .bind(p.secret_id)
    .bind(p.entry_version)
    .bind(p.field_name)
    .bind(p.field_type)
    .bind(p.value_len)
    .bind(p.encrypted)
    .bind(p.action)
    .bind(&actor)
    .execute(&mut **tx)
    .await?;
    Ok(())
}

// ── Argon2 salt helpers ──────────────────────────────────────────────────────

/// Load the Argon2id salt from the database.
/// Returns None if not yet initialized.
pub async fn load_argon2_salt(pool: &PgPool) -> Result<Option<Vec<u8>>> {
    let row: Option<(Vec<u8>,)> =
        sqlx::query_as("SELECT value FROM kv_config WHERE key = 'argon2_salt'")

306 src/main.rs
@@ -7,6 +7,11 @@ mod models;
|
||||
mod output;
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
/// Load .env from current or parent directories (best-effort, no error if missing).
|
||||
fn load_dotenv() {
|
||||
let _ = dotenvy::dotenv();
|
||||
}
|
||||
use clap::{Parser, Subcommand};
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
@@ -28,13 +33,16 @@ use output::resolve_output_mode;
|
||||
secrets search --summary --limit 20
|
||||
|
||||
# Precise lookup (JSON output for easy parsing)
|
||||
secrets search -n refining --kind service --name gitea -o json --show-secrets
|
||||
secrets search -n refining --kind service --name gitea -o json
|
||||
|
||||
# Extract a single field value directly
|
||||
secrets search -n refining --kind service --name gitea -f secret.token
|
||||
# Extract a single metadata field directly
|
||||
secrets search -n refining --kind service --name gitea -f metadata.url
|
||||
|
||||
# Pipe-friendly (non-TTY defaults to json-compact automatically)
|
||||
secrets search -n refining --kind service | jq '.[].name'"
|
||||
secrets search -n refining --kind service | jq '.[].name'
|
||||
|
||||
# Inject secrets into environment variables when you really need them
|
||||
secrets inject -n refining --kind service --name gitea"
|
||||
)]
|
||||
struct Cli {
|
||||
/// Database URL, overrides saved config (one-time override)
|
||||
@@ -73,19 +81,37 @@ EXAMPLES:
|
||||
# Add a server
|
||||
secrets add -n refining --kind server --name my-server \\
|
||||
--tag aliyun --tag shanghai \\
|
||||
-m ip=47.117.131.22 -m desc=\"Aliyun Shanghai ECS\" \\
|
||||
-m ip=10.0.0.1 -m desc=\"Example ECS\" \\
|
||||
-s username=root -s ssh_key=@./keys/server.pem
|
||||
|
||||
# Add a service credential
|
||||
secrets add -n refining --kind service --name gitea \\
|
||||
--tag gitea \\
|
||||
-m url=https://gitea.refining.dev -m default_org=refining \\
|
||||
-m url=https://code.example.com -m default_org=myorg \\
|
||||
-s token=<token>
|
||||
|
||||
# Add typed JSON metadata
|
||||
secrets add -n refining --kind service --name gitea \\
|
||||
-m port:=3000 \\
|
||||
-m enabled:=true \\
|
||||
-m domains:='[\"code.example.com\",\"git.example.com\"]' \\
|
||||
-m tls:='{\"enabled\":true,\"redirect_http\":true}'
|
||||
|
||||
# Add with token read from a file
|
||||
secrets add -n ricnsmart --kind service --name mqtt \\
|
||||
-m host=mqtt.ricnsmart.com -m port=1883 \\
|
||||
-s password=@./mqtt_password.txt")]
|
||||
-m host=mqtt.example.com -m port=1883 \\
|
||||
-s password=@./mqtt_password.txt
|
||||
|
||||
# Add typed JSON secrets
|
||||
secrets add -n refining --kind service --name deploy-bot \\
|
||||
-s enabled:=true \\
|
||||
-s retry_count:=3 \\
|
||||
-s scopes:='[\"repo\",\"workflow\"]' \\
|
||||
-s extra:='{\"region\":\"ap-east-1\",\"verify_tls\":true}'
|
||||
|
||||
# Write a multiline file into a nested secret field
|
||||
secrets add -n refining --kind server --name my-server \\
|
||||
-s credentials:content@./keys/server.pem")]
|
||||
Add {
|
||||
/// Namespace, e.g. refining, ricnsmart
|
||||
#[arg(short, long)]
|
||||
@@ -93,19 +119,19 @@ EXAMPLES:
|
||||
/// Kind of record: server, service, key, ...
|
||||
#[arg(long)]
|
||||
kind: String,
|
||||
/// Human-readable unique name, e.g. gitea, i-uf63f2uookgs5uxmrdyc
|
||||
/// Human-readable unique name, e.g. gitea, i-example0abcd1234efgh
|
||||
#[arg(long)]
|
||||
name: String,
|
||||
/// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong
|
||||
#[arg(long = "tag")]
|
||||
tags: Vec<String>,
|
||||
/// Plaintext metadata: key=value (repeatable; value=@file reads from file)
|
||||
/// Plaintext metadata: key=value, key:=<json>, key=@file, or nested:path@file
|
||||
#[arg(long = "meta", short = 'm')]
|
||||
meta: Vec<String>,
|
||||
/// Secret entry: key=value (repeatable; value=@file reads from file)
|
||||
/// Secret entry: key=value, key:=<json>, key=@file, or nested:path@file
|
||||
#[arg(long = "secret", short = 's')]
|
||||
secrets: Vec<String>,
|
||||
/// Output format: text (default on TTY), json, json-compact, env
|
||||
/// Output format: text (default on TTY), json, json-compact
|
||||
#[arg(short, long = "output")]
|
||||
output: Option<String>,
|
||||
},
|
||||
@@ -114,7 +140,7 @@ EXAMPLES:
|
||||
///
|
||||
/// Supports fuzzy search (-q), exact lookup (--name), field extraction (-f),
|
||||
/// summary view (--summary), pagination (--limit / --offset), and structured
|
||||
/// output (-o json / json-compact / env). When stdout is not a TTY, output
|
||||
/// output (-o json / json-compact). When stdout is not a TTY, output
|
||||
/// defaults to json-compact automatically.
|
||||
#[command(after_help = "EXAMPLES:
|
||||
# Discover all records (summary, safe default limit)
|
||||
@@ -129,19 +155,16 @@ EXAMPLES:
|
||||
# Fuzzy keyword search (matches name, namespace, kind, tags, metadata)
|
||||
secrets search -q mqtt
|
||||
|
||||
# Extract a single field value (implies --show-secrets for secret.*)
|
||||
secrets search -n refining --kind service --name gitea -f secret.token
|
||||
# Extract a single metadata field value
|
||||
secrets search -n refining --kind service --name gitea -f metadata.url
|
||||
|
||||
# Multiple fields at once
|
||||
secrets search -n refining --kind service --name gitea \\
|
||||
-f metadata.url -f metadata.default_org -f secret.token
|
||||
-f metadata.url -f metadata.default_org
|
||||
|
||||
# Full JSON output with secrets revealed (ideal for AI parsing)
|
||||
secrets search -n refining --kind service --name gitea -o json --show-secrets
|
||||
|
||||
# Export as env vars (source-able; single record only)
|
||||
secrets search -n refining --kind service --name gitea -o env --show-secrets
|
||||
# Inject decrypted secrets only when needed
|
||||
secrets inject -n refining --kind service --name gitea
|
||||
secrets run -n refining --kind service --name gitea -- printenv
|
||||
|
||||
# Paginate large result sets
|
||||
secrets search -n refining --summary --limit 10 --offset 0
|
||||
@@ -151,8 +174,7 @@ EXAMPLES:
|
||||
secrets search --sort updated --limit 5 --summary
|
||||
|
||||
# Non-TTY / pipe: output is json-compact by default
|
||||
secrets search -n refining --kind service | jq '.[].name'
|
||||
secrets search -n refining --kind service --name gitea --show-secrets | jq '.secrets.token'")]
|
||||
secrets search -n refining --kind service | jq '.[].name'")]
|
||||
Search {
|
||||
/// Filter by namespace, e.g. refining, ricnsmart
|
||||
#[arg(short, long)]
|
||||
@@ -160,7 +182,7 @@ EXAMPLES:
|
||||
/// Filter by kind, e.g. server, service
|
||||
#[arg(long)]
|
||||
kind: Option<String>,
|
||||
/// Exact name filter, e.g. gitea, i-uf63f2uookgs5uxmrdyc
|
||||
/// Exact name filter, e.g. gitea, i-example0abcd1234efgh
|
||||
#[arg(long)]
|
||||
name: Option<String>,
|
||||
/// Filter by tag, e.g. --tag aliyun (repeatable for AND intersection)
|
||||
@@ -169,10 +191,7 @@ EXAMPLES:
|
||||
/// Fuzzy keyword (matches name, namespace, kind, tags, metadata text)
|
||||
#[arg(short, long)]
|
||||
query: Option<String>,
|
||||
/// Reveal encrypted secret values in output
|
||||
#[arg(long)]
|
||||
show_secrets: bool,
|
||||
/// Extract field value(s) directly: metadata.<key> or secret.<key> (repeatable)
|
||||
/// Extract metadata field value(s) directly: metadata.<key> (repeatable)
|
||||
#[arg(short = 'f', long = "field")]
|
||||
fields: Vec<String>,
|
||||
/// Return lightweight summary only (namespace, kind, name, tags, desc, updated_at)
|
||||
@@ -187,28 +206,44 @@ EXAMPLES:
|
||||
/// Sort order: name (default), updated, created
|
||||
#[arg(long, default_value = "name")]
|
||||
sort: String,
|
||||
/// Output format: text (default on TTY), json, json-compact, env
|
||||
/// Output format: text (default on TTY), json, json-compact
|
||||
#[arg(short, long = "output")]
|
||||
output: Option<String>,
|
||||
},

    /// Delete a record permanently. Requires exact namespace + kind + name.
    /// Delete one record precisely, or bulk-delete by namespace.
    ///
    /// With --name: deletes exactly that record (--kind also required).
    /// Without --name: bulk-deletes all records matching namespace + optional --kind.
    /// Use --dry-run to preview bulk deletes before committing.
    #[command(after_help = "EXAMPLES:
# Delete a service credential
# Delete a single record (exact match)
secrets delete -n refining --kind service --name legacy-mqtt

# Delete a server record
secrets delete -n ricnsmart --kind server --name i-old-server-id")]
# Preview what a bulk delete would remove (no writes)
secrets delete -n refining --dry-run

# Bulk-delete all records in a namespace
secrets delete -n ricnsmart

# Bulk-delete only server records in a namespace
secrets delete -n ricnsmart --kind server

# JSON output
secrets delete -n refining --kind service -o json")]
    Delete {
        /// Namespace, e.g. refining
        #[arg(short, long)]
        namespace: String,
        /// Kind, e.g. server, service
        /// Kind filter, e.g. server, service (required with --name; optional for bulk)
        #[arg(long)]
        kind: String,
        /// Exact name of the record to delete
        kind: Option<String>,
        /// Exact name of the record to delete (omit for bulk delete)
        #[arg(long)]
        name: String,
        name: Option<String>,
        /// Preview what would be deleted without making any changes (bulk mode only)
        #[arg(long)]
        dry_run: bool,
        /// Output format: text (default on TTY), json, json-compact
        #[arg(short, long = "output")]
        output: Option<String>,
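The doc comment above pins down the exact-vs-bulk rules; stripped to its decision table, the dispatch looks roughly like this (illustrative only; the real logic lives in commands::delete::run):

// Sketch of the mode selection implied by --kind/--name/--dry-run.
fn delete_mode(kind: Option<&str>, name: Option<&str>, dry_run: bool) -> anyhow::Result<&'static str> {
    match (kind, name) {
        (Some(_), Some(_)) => Ok("exact: delete that one record"),
        (None, Some(_)) => anyhow::bail!("--name requires --kind"),
        // No --name: bulk mode over the namespace, optionally narrowed by --kind.
        _ if dry_run => Ok("bulk preview: list matches, write nothing"),
        _ => Ok("bulk: delete every matching record"),
    }
}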
@@ -225,6 +260,11 @@ EXAMPLES:
# Rotate a secret token
secrets update -n refining --kind service --name gitea -s token=<new-token>

# Update typed JSON metadata
secrets update -n refining --kind service --name gitea \\
-m deploy:strategy:='{\"type\":\"rolling\",\"batch\":2}' \\
-m runtime:max_open_conns:=20

# Add a tag and rotate password at the same time
secrets update -n refining --kind service --name gitea \\
--add-tag production -s token=<new-token>
@@ -233,8 +273,21 @@ EXAMPLES:
secrets update -n refining --kind service --name mqtt \\
--remove-meta old_port --remove-secret old_password

# Remove a nested field
secrets update -n refining --kind server --name my-server \\
--remove-secret credentials:content

# Remove a tag
secrets update -n refining --kind service --name gitea --remove-tag staging")]
secrets update -n refining --kind service --name gitea --remove-tag staging

# Update a nested secret field from a file
secrets update -n refining --kind server --name my-server \\
-s credentials:content@./keys/server.pem

# Update nested typed JSON fields
secrets update -n refining --kind service --name deploy-bot \\
-s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\
-s auth:retry:=5")]
    Update {
        /// Namespace, e.g. refining, ricnsmart
        #[arg(short, long)]
@@ -251,16 +304,16 @@ EXAMPLES:
        /// Remove a tag (repeatable)
        #[arg(long = "remove-tag")]
        remove_tags: Vec<String>,
        /// Set or overwrite a metadata field: key=value (repeatable, @file supported)
        /// Set or overwrite a metadata field: key=value, key:=<json>, key=@file, or nested:path@file
        #[arg(long = "meta", short = 'm')]
        meta: Vec<String>,
        /// Delete a metadata field by key (repeatable)
        /// Delete a metadata field by key or nested path, e.g. old_port or credentials:content
        #[arg(long = "remove-meta")]
        remove_meta: Vec<String>,
        /// Set or overwrite a secret field: key=value (repeatable, @file supported)
        /// Set or overwrite a secret field: key=value, key:=<json>, key=@file, or nested:path@file
        #[arg(long = "secret", short = 's')]
        secrets: Vec<String>,
        /// Delete a secret field by key (repeatable)
        /// Delete a secret field by key or nested path, e.g. old_password or credentials:content
        #[arg(long = "remove-secret")]
        remove_secrets: Vec<String>,
        /// Output format: text (default on TTY), json, json-compact
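The value syntax these doc comments list (key=value, key:=<json>, key=@file, nested:path@file) can be handled with three ordered checks; a sketch, with parse_field_spec as an assumed name rather than the repo's actual parser:

use serde_json::Value;

fn parse_field_spec(spec: &str) -> anyhow::Result<(String, Value)> {
    // 1) Typed JSON: key:='{"type":"rolling"}' or key:=20 (checked before '=')
    if let Some((key, json)) = spec.split_once(":=") {
        return Ok((key.to_string(), serde_json::from_str(json)?));
    }
    // 2) key=value, where a value of @path means "read the payload from a file"
    if let Some((key, val)) = spec.split_once('=') {
        let value = match val.strip_prefix('@') {
            Some(path) => std::fs::read_to_string(path)?,
            None => val.to_string(),
        };
        return Ok((key.to_string(), Value::String(value)));
    }
    // 3) nested:path@file shorthand, e.g. credentials:content@./keys/server.pem
    if let Some((key, path)) = spec.split_once('@') {
        return Ok((key.to_string(), Value::String(std::fs::read_to_string(path)?)));
    }
    anyhow::bail!("expected key=value, key:=<json>, key=@file, or nested:path@file")
}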
@@ -391,8 +444,8 @@ EXAMPLES:

    /// Check for a newer version and update the binary in-place.
    ///
    /// Downloads the latest release from Gitea and replaces the current binary.
    /// No database connection or master key required.
    /// Downloads the latest release and replaces the current binary. No database connection or master key required.
    /// Release URL defaults to the upstream server; override via SECRETS_UPGRADE_URL for self-hosted or fork.
    #[command(after_help = "EXAMPLES:
# Check for updates only (no download)
secrets upgrade --check
@@ -404,6 +457,83 @@ EXAMPLES:
        #[arg(long)]
        check: bool,
    },
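A sketch of the override described in the upgrade doc comment: the SECRETS_UPGRADE_URL environment variable wins, otherwise a built-in default applies. The fallback URL below is a placeholder, not the project's actual default:

// Illustrative resolution order for the release endpoint.
fn upgrade_url() -> String {
    std::env::var("SECRETS_UPGRADE_URL").unwrap_or_else(|_| {
        "https://git.example.com/api/v1/repos/owner/secrets/releases/latest".to_string()
    })
}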

    /// Export records to a file (JSON, TOML, or YAML).
    ///
    /// Decrypts and exports all matched records. Requires master key unless --no-secrets is used.
    #[command(after_help = "EXAMPLES:
# Export everything to JSON
secrets export --file backup.json

# Export a specific namespace to TOML
secrets export -n refining --file refining.toml

# Export a specific kind
secrets export -n refining --kind service --file services.yaml

# Export by tag
secrets export --tag production --file prod.json

# Export schema only (no decryption needed)
secrets export --no-secrets --file schema.json

# Print to stdout in YAML
secrets export -n refining --format yaml")]
    Export {
        /// Filter by namespace
        #[arg(short, long)]
        namespace: Option<String>,
        /// Filter by kind, e.g. server, service
        #[arg(long)]
        kind: Option<String>,
        /// Exact name filter
        #[arg(long)]
        name: Option<String>,
        /// Filter by tag (repeatable)
        #[arg(long)]
        tag: Vec<String>,
        /// Fuzzy keyword search
        #[arg(short, long)]
        query: Option<String>,
        /// Output file path (format inferred from extension: .json / .toml / .yaml / .yml)
        #[arg(long)]
        file: Option<String>,
        /// Explicit format: json, toml, or yaml (overrides file extension; required for stdout)
        #[arg(long)]
        format: Option<String>,
        /// Omit secrets from output (no master key required)
        #[arg(long)]
        no_secrets: bool,
    },

    /// Import records from a file (JSON, TOML, or YAML).
    ///
    /// Reads an export file and inserts or updates entries. Requires master key to re-encrypt secrets.
    #[command(after_help = "EXAMPLES:
# Import a JSON backup (conflict = error by default)
secrets import backup.json

# Import and overwrite existing records
secrets import --force refining.toml

# Preview what would be imported (no writes)
secrets import --dry-run backup.yaml

# JSON output for the import summary
secrets import backup.json -o json")]
    Import {
        /// Input file path (format inferred from extension: .json / .toml / .yaml / .yml)
        file: String,
        /// Overwrite existing records on conflict (default: error and abort)
        #[arg(long)]
        force: bool,
        /// Preview operations without writing to the database
        #[arg(long)]
        dry_run: bool,
        /// Output format: text (default on TTY), json, json-compact
        #[arg(short, long = "output")]
        output: Option<String>,
    },
}

#[derive(Subcommand)]
@@ -421,6 +551,7 @@ enum ConfigAction {

#[tokio::main]
async fn main() -> Result<()> {
    load_dotenv();
    let cli = Cli::parse();

    let filter = if cli.verbose {
@@ -493,7 +624,6 @@ async fn main() -> Result<()> {
            name,
            tag,
            query,
            show_secrets,
            fields,
            summary,
            limit,
@@ -501,9 +631,7 @@ async fn main() -> Result<()> {
            sort,
            output,
        } => {
            let master_key = crypto::load_master_key()?;
            let _span = tracing::info_span!("cmd", command = "search").entered();
            let show = show_secrets || fields.iter().any(|f| f.starts_with("secret"));
            let out = resolve_output_mode(output.as_deref())?;
            commands::search::run(
                &pool,
@@ -513,7 +641,6 @@ async fn main() -> Result<()> {
                    name: name.as_deref(),
                    tags: &tag,
                    query: query.as_deref(),
                    show_secrets: show,
                    fields: &fields,
                    summary,
                    limit,
@@ -521,7 +648,6 @@ async fn main() -> Result<()> {
                    sort: &sort,
                    output: out,
                },
                Some(&master_key),
            )
            .await?;
        }
@@ -530,12 +656,23 @@ async fn main() -> Result<()> {
            namespace,
            kind,
            name,
            dry_run,
            output,
        } => {
            let _span =
                tracing::info_span!("cmd", command = "delete", %namespace, %kind, %name).entered();
                tracing::info_span!("cmd", command = "delete", %namespace, ?kind, ?name).entered();
            let out = resolve_output_mode(output.as_deref())?;
            commands::delete::run(&pool, &namespace, &kind, &name, out).await?;
            commands::delete::run(
                &pool,
                commands::delete::DeleteArgs {
                    namespace: &namespace,
                    kind: kind.as_deref(),
                    name: name.as_deref(),
                    dry_run,
                    output: out,
                },
            )
            .await?;
        }

        Commands::Update {
@@ -581,7 +718,17 @@ async fn main() -> Result<()> {
            output,
        } => {
            let out = resolve_output_mode(output.as_deref())?;
            commands::rollback::list_history(&pool, &namespace, &kind, &name, limit, out).await?;
            commands::history::run(
                &pool,
                commands::history::HistoryArgs {
                    namespace: &namespace,
                    kind: &kind,
                    name: &name,
                    limit,
                    output: out,
                },
            )
            .await?;
        }

        Commands::Rollback {
@@ -655,6 +802,61 @@ async fn main() -> Result<()> {
            )
            .await?;
        }

        Commands::Export {
            namespace,
            kind,
            name,
            tag,
            query,
            file,
            format,
            no_secrets,
        } => {
            let master_key = if no_secrets {
                None
            } else {
                Some(crypto::load_master_key()?)
            };
            let _span = tracing::info_span!("cmd", command = "export").entered();
            commands::export_cmd::run(
                &pool,
                commands::export_cmd::ExportArgs {
                    namespace: namespace.as_deref(),
                    kind: kind.as_deref(),
                    name: name.as_deref(),
                    tags: &tag,
                    query: query.as_deref(),
                    file: file.as_deref(),
                    format: format.as_deref(),
                    no_secrets,
                },
                master_key.as_ref(),
            )
            .await?;
        }

        Commands::Import {
            file,
            force,
            dry_run,
            output,
        } => {
            let master_key = crypto::load_master_key()?;
            let _span = tracing::info_span!("cmd", command = "import").entered();
            let out = resolve_output_mode(output.as_deref())?;
            commands::import_cmd::run(
                &pool,
                commands::import_cmd::ImportArgs {
                    file: &file,
                    force,
                    dry_run,
                    output: out,
                },
                &master_key,
            )
            .await?;
        }
    }

    Ok(())

203 src/models.rs
@@ -1,20 +1,219 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::BTreeMap;
use uuid::Uuid;

/// A top-level entry (server, service, key, …).
/// Sensitive fields are stored separately in `secrets`.
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
pub struct Secret {
pub struct Entry {
    pub id: Uuid,
    pub namespace: String,
    pub kind: String,
    pub name: String,
    pub tags: Vec<String>,
    pub metadata: Value,
    pub version: i64,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

/// A single encrypted field belonging to an Entry.
/// field_name, field_type, and value_len are stored in plaintext so that
/// `search` can show the schema without requiring the master key.
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
pub struct SecretField {
    pub id: Uuid,
    pub entry_id: Uuid,
    pub field_name: String,
    /// Inferred type: "string", "number", "boolean", "json"
    pub field_type: String,
    /// Length of the plaintext value in characters (0 for binary-like PEM)
    pub value_len: i32,
    /// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag
    /// Decrypt with crypto::decrypt_json() before use.
    pub encrypted: Vec<u8>,
    pub version: i64,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
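Given the layout documented on `encrypted` (nonce(12B) || ciphertext+tag), decryption splits the blob at byte 12 and authenticates the remainder. A minimal sketch of what crypto::decrypt_json() presumably does, assuming the aes-gcm crate; the function name and error wording are illustrative:

use aes_gcm::aead::{Aead, KeyInit};
use aes_gcm::{Aes256Gcm, Key, Nonce};

fn decrypt_field(master_key: &[u8; 32], encrypted: &[u8]) -> anyhow::Result<serde_json::Value> {
    anyhow::ensure!(encrypted.len() > 12, "blob too short for a 12-byte nonce");
    let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(master_key));
    let (nonce, ciphertext) = encrypted.split_at(12);
    let plaintext = cipher
        .decrypt(Nonce::from_slice(nonce), ciphertext) // verifies the GCM tag too
        .map_err(|_| anyhow::anyhow!("decryption failed (wrong key or corrupted data)"))?;
    Ok(serde_json::from_slice(&plaintext)?)
}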

// ── Internal query row types (shared across commands) ─────────────────────────

/// Minimal entry row fetched for write operations (add / update / delete / rollback).
#[derive(Debug, sqlx::FromRow)]
pub struct EntryRow {
    pub id: Uuid,
    pub version: i64,
    pub tags: Vec<String>,
    pub metadata: Value,
}

/// Minimal secret field row fetched before snapshots or cascade deletes.
#[derive(Debug, sqlx::FromRow)]
pub struct SecretFieldRow {
    pub id: Uuid,
    pub field_name: String,
    pub field_type: String,
    pub value_len: i32,
    pub encrypted: Vec<u8>,
}

// ── Export / Import types ──────────────────────────────────────────────────────

/// Supported file formats for export/import.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ExportFormat {
    Json,
    Toml,
    Yaml,
}

impl ExportFormat {
    /// Infer format from file extension (.json / .toml / .yaml / .yml).
    pub fn from_extension(path: &str) -> anyhow::Result<Self> {
        let ext = path.rsplit('.').next().unwrap_or("").to_lowercase();
        Self::from_str(&ext).map_err(|_| {
            anyhow::anyhow!(
                "Cannot infer format from extension '.{}'. Use --format json|toml|yaml",
                ext
            )
        })
    }

    /// Parse from --format CLI value.
    pub fn from_str(s: &str) -> anyhow::Result<Self> {
        match s.to_lowercase().as_str() {
            "json" => Ok(Self::Json),
            "toml" => Ok(Self::Toml),
            "yaml" | "yml" => Ok(Self::Yaml),
            other => anyhow::bail!("Unknown format '{}'. Expected: json, toml, or yaml", other),
        }
    }

    /// Serialize ExportData to a string in this format.
    pub fn serialize(&self, data: &ExportData) -> anyhow::Result<String> {
        match self {
            Self::Json => Ok(serde_json::to_string_pretty(data)?),
            Self::Toml => {
                let toml_val = json_to_toml_value(&serde_json::to_value(data)?)?;
                toml::to_string_pretty(&toml_val)
                    .map_err(|e| anyhow::anyhow!("TOML serialization failed: {}", e))
            }
            Self::Yaml => serde_yaml::to_string(data)
                .map_err(|e| anyhow::anyhow!("YAML serialization failed: {}", e)),
        }
    }

    /// Deserialize ExportData from a string in this format.
    pub fn deserialize(&self, content: &str) -> anyhow::Result<ExportData> {
        match self {
            Self::Json => Ok(serde_json::from_str(content)?),
            Self::Toml => {
                let toml_val: toml::Value = toml::from_str(content)
                    .map_err(|e| anyhow::anyhow!("TOML parse error: {}", e))?;
                let json_val = toml_to_json_value(&toml_val);
                Ok(serde_json::from_value(json_val)?)
            }
            Self::Yaml => serde_yaml::from_str(content)
                .map_err(|e| anyhow::anyhow!("YAML parse error: {}", e)),
        }
    }
}
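A short usage sketch for the impl above: infer the format from the target path once, then serialize and re-parse with the same ExportFormat value ("backup.yaml" is an arbitrary example path):

fn roundtrip(data: &ExportData) -> anyhow::Result<ExportData> {
    let fmt = ExportFormat::from_extension("backup.yaml")?; // -> ExportFormat::Yaml
    let text = fmt.serialize(data)?;
    fmt.deserialize(&text)
}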

/// Top-level structure for export/import files.
#[derive(Debug, Serialize, Deserialize)]
pub struct ExportData {
    pub version: u32,
    pub exported_at: String,
    pub entries: Vec<ExportEntry>,
}

/// A single entry with decrypted secrets for export/import.
#[derive(Debug, Serialize, Deserialize)]
pub struct ExportEntry {
    pub namespace: String,
    pub kind: String,
    pub name: String,
    #[serde(default)]
    pub tags: Vec<String>,
    #[serde(default)]
    pub metadata: Value,
    /// Decrypted secret fields. None means no secrets in this export (--no-secrets).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub secrets: Option<BTreeMap<String, Value>>,
}

// ── TOML ↔ JSON value conversion ──────────────────────────────────────────────

/// Convert a serde_json Value to a toml Value.
/// `null` values are filtered out (TOML does not support null).
/// Mixed-type arrays are serialised as JSON strings.
pub fn json_to_toml_value(v: &Value) -> anyhow::Result<toml::Value> {
    match v {
        Value::Null => anyhow::bail!("TOML does not support null values"),
        Value::Bool(b) => Ok(toml::Value::Boolean(*b)),
        Value::Number(n) => {
            if let Some(i) = n.as_i64() {
                Ok(toml::Value::Integer(i))
            } else if let Some(f) = n.as_f64() {
                Ok(toml::Value::Float(f))
            } else {
                anyhow::bail!("unsupported number: {}", n)
            }
        }
        Value::String(s) => Ok(toml::Value::String(s.clone())),
        Value::Array(arr) => {
            let items: anyhow::Result<Vec<toml::Value>> =
                arr.iter().map(json_to_toml_value).collect();
            match items {
                Ok(vals) => Ok(toml::Value::Array(vals)),
                Err(e) => {
                    tracing::debug!(error = %e, "mixed-type array; falling back to JSON string");
                    Ok(toml::Value::String(serde_json::to_string(v)?))
                }
            }
        }
        Value::Object(map) => {
            let mut toml_map = toml::map::Map::new();
            for (k, val) in map {
                if val.is_null() {
                    // Skip null entries
                    continue;
                }
                match json_to_toml_value(val) {
                    Ok(tv) => {
                        toml_map.insert(k.clone(), tv);
                    }
                    Err(e) => {
                        tracing::debug!(key = %k, error = %e, "field not representable in TOML; falling back to JSON string");
                        toml_map
                            .insert(k.clone(), toml::Value::String(serde_json::to_string(val)?));
                    }
                }
            }
            Ok(toml::Value::Table(toml_map))
        }
    }
}

/// Convert a toml Value back to a serde_json Value.
pub fn toml_to_json_value(v: &toml::Value) -> Value {
    match v {
        toml::Value::Boolean(b) => Value::Bool(*b),
        toml::Value::Integer(i) => Value::Number((*i).into()),
        toml::Value::Float(f) => serde_json::Number::from_f64(*f)
            .map(Value::Number)
            .unwrap_or(Value::Null),
        toml::Value::String(s) => Value::String(s.clone()),
        toml::Value::Datetime(dt) => Value::String(dt.to_string()),
        toml::Value::Array(arr) => Value::Array(arr.iter().map(toml_to_json_value).collect()),
        toml::Value::Table(map) => {
            let obj: serde_json::Map<String, Value> = map
                .iter()
                .map(|(k, v)| (k.clone(), toml_to_json_value(v)))
                .collect();
            Value::Object(obj)
        }
    }
}
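A consequence of the pair of functions above: the TOML round trip is lossy in a predictable way, because object nulls are dropped and an array holding a non-representable element collapses to a JSON string. A small illustrative check (not from the diff):

fn toml_roundtrip_demo() -> anyhow::Result<()> {
    let original = serde_json::json!({
        "port": 22,
        "note": null,       // object nulls are skipped on the way to TOML
        "mixed": [1, null], // the null element makes the array fall back to a JSON string
    });
    let back = toml_to_json_value(&json_to_toml_value(&original)?);
    assert_eq!(back["port"], 22);
    assert!(back.get("note").is_none()); // dropped
    assert!(back["mixed"].is_string());  // now the string "[1,null]"
    Ok(())
}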

@@ -1,3 +1,4 @@
use chrono::{DateTime, Local, Utc};
use std::io::IsTerminal;
use std::str::FromStr;

@@ -11,8 +12,6 @@ pub enum OutputMode {
    Json,
    /// Single-line JSON (default when stdout is NOT a TTY, e.g. piped to jq)
    JsonCompact,
    /// KEY=VALUE pairs suitable for `source` or `.env` files
    Env,
}

impl FromStr for OutputMode {
@@ -23,9 +22,8 @@ impl FromStr for OutputMode {
            "text" => Ok(Self::Text),
            "json" => Ok(Self::Json),
            "json-compact" => Ok(Self::JsonCompact),
            "env" => Ok(Self::Env),
            other => Err(anyhow::anyhow!(
                "Unknown output format '{}'. Valid: text, json, json-compact, env",
                "Unknown output format '{}'. Valid: text, json, json-compact",
                other
            )),
        }
@@ -45,3 +43,23 @@ pub fn resolve_output_mode(explicit: Option<&str>) -> anyhow::Result<OutputMode>
        Ok(OutputMode::JsonCompact)
    }
}
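The std::io::IsTerminal import added at the top of this file drives the default visible at the tail of this hunk; a sketch of the surrounding resolve_output_mode logic, most of which lies outside the hunk (the .parse() call assumes the FromStr impl's error type is anyhow::Error):

use std::io::IsTerminal;

fn resolve_sketch(explicit: Option<&str>) -> anyhow::Result<OutputMode> {
    if let Some(s) = explicit {
        return s.parse(); // explicit --output always wins
    }
    if std::io::stdout().is_terminal() {
        Ok(OutputMode::Text) // humans on a terminal get text
    } else {
        Ok(OutputMode::JsonCompact) // pipes get single-line JSON for jq
    }
}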

/// Format a UTC timestamp for local human-readable output.
pub fn format_local_time(dt: DateTime<Utc>) -> String {
    dt.with_timezone(&Local)
        .format("%Y-%m-%d %H:%M:%S %:z")
        .to_string()
}

/// Print a JSON value to stdout in the requested output mode.
/// - `Json` → pretty-printed
/// - `JsonCompact` → single line
/// - `Text` → no-op (caller is responsible for the text branch)
pub fn print_json(value: &serde_json::Value, mode: &OutputMode) -> anyhow::Result<()> {
    match mode {
        OutputMode::Json => println!("{}", serde_json::to_string_pretty(value)?),
        OutputMode::JsonCompact => println!("{}", serde_json::to_string(value)?),
        OutputMode::Text => {}
    }
    Ok(())
}

3 test-fixtures/example-key.pem
Normal file
@@ -0,0 +1,3 @@
-----BEGIN EXAMPLE KEY PLACEHOLDER-----
This file is for local dev/testing. Replace with a real key when needed.
-----END EXAMPLE KEY PLACEHOLDER-----