Compare commits
61 Commits
secrets-0.
...
secrets-mc
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
53d53ff96a | ||
|
|
cab234cfcb | ||
|
|
e0fee639c1 | ||
|
|
7c53bfb782 | ||
|
|
63cb3a8216 | ||
|
|
2b994141b8 | ||
|
|
9d6ac5c13a | ||
|
|
1860cce86c | ||
| dd24f7cc44 | |||
|
|
aefad33870 | ||
|
|
0ffb81e57f | ||
|
|
4a1654c820 | ||
|
|
a15e2eaf4a | ||
|
|
1518388374 | ||
| b99d821644 | |||
|
|
32f275f88a | ||
|
|
c6fb457734 | ||
| df701f21b9 | |||
| c3c536200e | |||
| 7909f7102d | |||
| 87a29af82d | |||
| 1b11f7e976 | |||
| 08e81363c9 | |||
|
|
beade4503d | ||
|
|
409fd78a35 | ||
|
|
f7afd7f819 | ||
|
|
719bdd7e08 | ||
|
|
1e597559a2 | ||
|
|
e3ca43ca3f | ||
|
|
0b57605103 | ||
|
|
8b191937cd | ||
|
|
11c936a5b8 | ||
|
|
b6349dd1c8 | ||
|
|
f720983328 | ||
|
|
7bd0603dc6 | ||
|
|
17a95bea5b | ||
|
|
a42db62702 | ||
|
|
2edb970cba | ||
|
|
17f8ac0dbc | ||
|
|
259fbe10a6 | ||
|
|
c815fb4cc8 | ||
|
|
90cd1eca15 | ||
|
|
da007348ea | ||
|
|
f2344b7543 | ||
|
|
ee028d45c3 | ||
|
|
a44c8ebf08 | ||
|
|
a595081c4c | ||
|
|
0a8b14211a | ||
|
|
9cebbd7587 | ||
|
|
4d136a5a20 | ||
|
|
7ce4aaf835 | ||
|
|
bce01a0f2b | ||
|
|
8cd4dbf592 | ||
|
|
ad3c8d1672 | ||
|
|
8d6b9f0368 | ||
|
|
ce9e089348 | ||
|
|
786675ce42 | ||
|
|
5df4141935 | ||
|
|
49fb7430a8 | ||
|
|
ff9767ff95 | ||
|
|
955acfe9ec |
@@ -1,12 +1,14 @@
|
||||
name: Secrets CLI - Build & Release
|
||||
# MCP 分支:仅构建/发布 secrets-mcp(CLI 在 main 分支维护)
|
||||
name: Secrets MCP — Build & Release
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'src/**'
|
||||
- 'crates/**'
|
||||
- 'Cargo.toml'
|
||||
- 'Cargo.lock'
|
||||
- 'deploy/**'
|
||||
- '.gitea/workflows/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -16,468 +18,239 @@ permissions:
|
||||
contents: write
|
||||
|
||||
env:
|
||||
BINARY_NAME: secrets
|
||||
SECRETS_UPGRADE_URL: ${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/latest
|
||||
MCP_BINARY: secrets-mcp
|
||||
RUST_TOOLCHAIN: 1.94.0
|
||||
CARGO_INCREMENTAL: 0
|
||||
CARGO_NET_RETRY: 10
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: short
|
||||
MUSL_TARGET: x86_64-unknown-linux-musl
|
||||
|
||||
jobs:
|
||||
version:
|
||||
name: 版本 & Release
|
||||
ci:
|
||||
name: 检查 / 构建 / 发版
|
||||
runs-on: debian
|
||||
timeout-minutes: 40
|
||||
outputs:
|
||||
version: ${{ steps.ver.outputs.version }}
|
||||
tag: ${{ steps.ver.outputs.tag }}
|
||||
tag_exists: ${{ steps.ver.outputs.tag_exists }}
|
||||
release_id: ${{ steps.release.outputs.release_id }}
|
||||
version: ${{ steps.ver.outputs.version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
# ── 版本解析 ────────────────────────────────────────────────────────
|
||||
- name: 解析版本
|
||||
id: ver
|
||||
run: |
|
||||
version=$(grep -m1 '^version' Cargo.toml | sed 's/.*"\(.*\)".*/\1/')
|
||||
tag="secrets-${version}"
|
||||
previous_tag=$(git tag --list 'secrets-*' --sort=-v:refname | awk -v tag="$tag" '$0 != tag { print; exit }')
|
||||
|
||||
version=$(grep -m1 '^version' crates/secrets-mcp/Cargo.toml | sed 's/.*"\(.*\)".*/\1/')
|
||||
tag="secrets-mcp-${version}"
|
||||
echo "version=${version}" >> "$GITHUB_OUTPUT"
|
||||
echo "tag=${tag}" >> "$GITHUB_OUTPUT"
|
||||
echo "previous_tag=${previous_tag}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
if git rev-parse "refs/tags/${tag}" >/dev/null 2>&1; then
|
||||
echo "⚠ 版本 ${tag} 已存在,将覆盖重新发版。"
|
||||
echo "tag_exists=true" >> "$GITHUB_OUTPUT"
|
||||
echo "版本 ${tag} 已存在"
|
||||
else
|
||||
echo "tag_exists=false" >> "$GITHUB_OUTPUT"
|
||||
echo "将创建新版本 ${tag}"
|
||||
echo "tag_exists=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: 严格拦截重复版本
|
||||
if: steps.ver.outputs.tag_exists == 'true'
|
||||
# ── Rust 工具链 ──────────────────────────────────────────────────────
|
||||
- name: 安装 Rust 与 musl 工具链
|
||||
run: |
|
||||
echo "错误: 版本 ${{ steps.ver.outputs.tag }} 已存在,禁止重复发版。"
|
||||
echo "请先 bump Cargo.toml 中的 version,并执行 cargo build 同步 Cargo.lock。"
|
||||
exit 1
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -y -qq pkg-config musl-tools binutils jq
|
||||
if ! command -v rustup >/dev/null 2>&1; then
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain "${RUST_TOOLCHAIN}"
|
||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
||||
fi
|
||||
source "$HOME/.cargo/env" 2>/dev/null || true
|
||||
rustup toolchain install "${RUST_TOOLCHAIN}" --profile minimal \
|
||||
--component rustfmt --component clippy
|
||||
rustup default "${RUST_TOOLCHAIN}"
|
||||
rustup target add "${MUSL_TARGET}" --toolchain "${RUST_TOOLCHAIN}"
|
||||
rustc -V && cargo -V
|
||||
|
||||
- name: 缓存 Cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/index
|
||||
~/.cargo/registry/cache
|
||||
~/.cargo/git/db
|
||||
target
|
||||
key: cargo-${{ env.MUSL_TARGET }}-${{ env.RUST_TOOLCHAIN }}-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: |
|
||||
cargo-${{ env.MUSL_TARGET }}-${{ env.RUST_TOOLCHAIN }}-
|
||||
cargo-${{ env.MUSL_TARGET }}-
|
||||
|
||||
# ── 质量检查(先于构建,失败即止)──────────────────────────────────
|
||||
- name: fmt
|
||||
run: cargo fmt -- --check
|
||||
|
||||
- name: clippy
|
||||
run: cargo clippy --locked -- -D warnings
|
||||
|
||||
- name: test
|
||||
run: cargo test --locked
|
||||
|
||||
# ── 构建(质量检查通过后才执行)────────────────────────────────────
|
||||
- name: 构建 secrets-mcp (musl)
|
||||
run: |
|
||||
cargo build --release --locked --target "${MUSL_TARGET}" -p secrets-mcp
|
||||
strip "target/${MUSL_TARGET}/release/${MCP_BINARY}"
|
||||
|
||||
- name: 上传构建产物
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.MCP_BINARY }}-linux-musl
|
||||
path: target/${{ env.MUSL_TARGET }}/release/${{ env.MCP_BINARY }}
|
||||
retention-days: 3
|
||||
|
||||
# ── 创建 / 覆盖 Tag(构建成功后才打)───────────────────────────────
|
||||
- name: 创建 Tag
|
||||
if: steps.ver.outputs.tag_exists == 'false'
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git tag -a "${{ steps.ver.outputs.tag }}" -m "Release ${{ steps.ver.outputs.tag }}"
|
||||
git push origin "${{ steps.ver.outputs.tag }}"
|
||||
tag="${{ steps.ver.outputs.tag }}"
|
||||
if [ "${{ steps.ver.outputs.tag_exists }}" = "true" ]; then
|
||||
git tag -d "$tag" 2>/dev/null || true
|
||||
git push origin ":refs/tags/$tag" 2>/dev/null || true
|
||||
fi
|
||||
git tag -a "$tag" -m "Release $tag"
|
||||
git push origin "$tag"
|
||||
|
||||
- name: 解析或创建 Release
|
||||
id: release
|
||||
# ── Release(可选,需配置 RELEASE_TOKEN)───────────────────────────
|
||||
- name: Upsert Release
|
||||
if: env.RELEASE_TOKEN != ''
|
||||
env:
|
||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
||||
run: |
|
||||
if [ -z "$RELEASE_TOKEN" ]; then
|
||||
echo "release_id=" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
command -v jq >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq jq)
|
||||
|
||||
tag="${{ steps.ver.outputs.tag }}"
|
||||
version="${{ steps.ver.outputs.version }}"
|
||||
release_api="${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases"
|
||||
api="${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases"
|
||||
auth="Authorization: token $RELEASE_TOKEN"
|
||||
|
||||
http_code=$(curl -sS -o /tmp/release.json -w '%{http_code}' \
|
||||
-H "Authorization: token $RELEASE_TOKEN" \
|
||||
"${release_api}/tags/${tag}")
|
||||
|
||||
if [ "$http_code" = "200" ]; then
|
||||
release_id=$(jq -r '.id // empty' /tmp/release.json)
|
||||
if [ -n "$release_id" ]; then
|
||||
echo "已找到现有 Release: ${release_id}"
|
||||
echo "release_id=${release_id}" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
previous_tag="${{ steps.ver.outputs.previous_tag }}"
|
||||
previous_tag=$(git tag --list 'secrets-mcp-*' --sort=-v:refname | awk -v t="$tag" '$0 != t { print; exit }')
|
||||
if [ -n "$previous_tag" ]; then
|
||||
changes=$(git log --pretty=format:'- %s (%h)' "${previous_tag}..HEAD")
|
||||
else
|
||||
changes=$(git log --pretty=format:'- %s (%h)')
|
||||
fi
|
||||
[ -z "$changes" ] && changes="- 首次发布"
|
||||
|
||||
body=$(printf '## 变更日志\n\n%s' "$changes")
|
||||
|
||||
payload=$(jq -n \
|
||||
--arg tag "$tag" \
|
||||
--arg name "${{ env.BINARY_NAME }} ${version}" \
|
||||
--arg body "$body" \
|
||||
'{tag_name: $tag, name: $name, body: $body, draft: true}')
|
||||
|
||||
http_code=$(curl -sS -o /tmp/create-release.json -w '%{http_code}' \
|
||||
-H "Authorization: token $RELEASE_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-X POST "$release_api" \
|
||||
-d "$payload")
|
||||
|
||||
if [ "$http_code" = "201" ] || [ "$http_code" = "200" ]; then
|
||||
release_id=$(jq -r '.id // empty' /tmp/create-release.json)
|
||||
fi
|
||||
|
||||
# Upsert: 存在 → PATCH + 清旧 assets;不存在 → POST
|
||||
release_id=$(curl -sS -H "$auth" "${api}/tags/${tag}" 2>/dev/null | jq -r '.id // empty')
|
||||
if [ -n "$release_id" ]; then
|
||||
echo "已创建草稿 Release: ${release_id}"
|
||||
echo "release_id=${release_id}" >> "$GITHUB_OUTPUT"
|
||||
curl -sS -o /dev/null -H "$auth" -H "Content-Type: application/json" \
|
||||
-X PATCH "${api}/${release_id}" \
|
||||
-d "$(jq -n --arg n "secrets-mcp ${version}" --arg b "$body" '{name:$n,body:$b,draft:false}')"
|
||||
curl -sS -H "$auth" "${api}/${release_id}/assets" | \
|
||||
jq -r '.[].id' | xargs -I{} curl -sS -o /dev/null -H "$auth" -X DELETE "${api}/${release_id}/assets/{}"
|
||||
echo "已更新 Release ${release_id}"
|
||||
else
|
||||
echo "⚠ 创建 Release 失败 (HTTP ${http_code}),跳过产物上传"
|
||||
cat /tmp/create-release.json 2>/dev/null || true
|
||||
echo "release_id=" >> "$GITHUB_OUTPUT"
|
||||
release_id=$(curl -fsS -H "$auth" -H "Content-Type: application/json" \
|
||||
-X POST "$api" \
|
||||
-d "$(jq -n --arg t "$tag" --arg n "secrets-mcp ${version}" --arg b "$body" \
|
||||
'{tag_name:$t,name:$n,body:$b,draft:false}')" | jq -r '.id')
|
||||
echo "已创建 Release ${release_id}"
|
||||
fi
|
||||
|
||||
check:
|
||||
name: 质量检查 (fmt / clippy / test)
|
||||
bin="target/${MUSL_TARGET}/release/${MCP_BINARY}"
|
||||
archive="${MCP_BINARY}-${tag}-x86_64-linux-musl.tar.gz"
|
||||
tar -czf "$archive" -C "$(dirname "$bin")" "$(basename "$bin")"
|
||||
sha256sum "$archive" > "${archive}.sha256"
|
||||
curl -fsS -H "$auth" -F "attachment=@${archive}" "${api}/${release_id}/assets"
|
||||
curl -fsS -H "$auth" -F "attachment=@${archive}.sha256" "${api}/${release_id}/assets"
|
||||
echo "Release ${tag} 已发布"
|
||||
|
||||
# ── 飞书汇总通知 ─────────────────────────────────────────────────────
|
||||
- name: 飞书通知
|
||||
if: always()
|
||||
env:
|
||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
||||
run: |
|
||||
[ -z "$WEBHOOK_URL" ] && exit 0
|
||||
tag="${{ steps.ver.outputs.tag }}"
|
||||
commit="${{ github.event.head_commit.message }}"
|
||||
[ -z "$commit" ] && commit="${{ github.sha }}"
|
||||
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
||||
result="${{ job.status }}"
|
||||
if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi
|
||||
msg="secrets-mcp 构建&发版 ${icon}
|
||||
版本:${tag}
|
||||
提交:${commit}
|
||||
作者:${{ github.actor }}
|
||||
详情:${url}"
|
||||
payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}')
|
||||
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
||||
|
||||
deploy:
|
||||
name: 部署 secrets-mcp
|
||||
needs: [ci]
|
||||
if: |
|
||||
github.ref == 'refs/heads/main' ||
|
||||
github.ref == 'refs/heads/feat/mcp' ||
|
||||
github.ref == 'refs/heads/mcp'
|
||||
runs-on: debian
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: 安装 Rust
|
||||
run: |
|
||||
if ! command -v cargo >/dev/null 2>&1; then
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
||||
fi
|
||||
source "$HOME/.cargo/env" 2>/dev/null || true
|
||||
rustup component add rustfmt clippy
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: 缓存 Cargo
|
||||
uses: actions/cache@v4
|
||||
- name: 下载构建产物
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/index
|
||||
~/.cargo/registry/cache
|
||||
~/.cargo/git/db
|
||||
target
|
||||
key: cargo-check-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: |
|
||||
cargo-check-
|
||||
name: ${{ env.MCP_BINARY }}-linux-musl
|
||||
path: /tmp/artifact
|
||||
|
||||
- run: cargo fmt -- --check
|
||||
- run: cargo clippy --locked -- -D warnings
|
||||
- run: cargo test --locked
|
||||
|
||||
build-linux:
|
||||
name: Build (x86_64-unknown-linux-musl)
|
||||
needs: [version, check]
|
||||
runs-on: debian
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: 安装依赖
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y pkg-config musl-tools binutils curl
|
||||
if ! command -v cargo >/dev/null 2>&1; then
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
||||
fi
|
||||
source "$HOME/.cargo/env" 2>/dev/null || true
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: 缓存 Cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/index
|
||||
~/.cargo/registry/cache
|
||||
~/.cargo/git/db
|
||||
target
|
||||
key: cargo-x86_64-unknown-linux-musl-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: |
|
||||
cargo-x86_64-unknown-linux-musl-
|
||||
|
||||
- run: cargo build --release --locked --target x86_64-unknown-linux-musl
|
||||
- run: strip target/x86_64-unknown-linux-musl/release/${{ env.BINARY_NAME }}
|
||||
|
||||
- name: 上传 Release 产物
|
||||
if: needs.version.outputs.release_id != ''
|
||||
- name: 部署到阿里云 ECS
|
||||
env:
|
||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
||||
DEPLOY_HOST: ${{ vars.DEPLOY_HOST }}
|
||||
DEPLOY_USER: ${{ vars.DEPLOY_USER }}
|
||||
DEPLOY_SSH_KEY: ${{ secrets.DEPLOY_SSH_KEY }}
|
||||
DEPLOY_KNOWN_HOSTS: ${{ vars.DEPLOY_KNOWN_HOSTS }}
|
||||
run: |
|
||||
[ -z "$RELEASE_TOKEN" ] && exit 0
|
||||
tag="${{ needs.version.outputs.tag }}"
|
||||
bin="target/x86_64-unknown-linux-musl/release/${{ env.BINARY_NAME }}"
|
||||
archive="${{ env.BINARY_NAME }}-${tag}-x86_64-linux-musl.tar.gz"
|
||||
tar -czf "$archive" -C "$(dirname "$bin")" "$(basename "$bin")"
|
||||
sha256sum "$archive" > "${archive}.sha256"
|
||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
||||
-F "attachment=@${archive}" \
|
||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets"
|
||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
||||
-F "attachment=@${archive}.sha256" \
|
||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets"
|
||||
|
||||
- name: 飞书通知
|
||||
if: always()
|
||||
env:
|
||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
||||
run: |
|
||||
[ -z "$WEBHOOK_URL" ] && exit 0
|
||||
command -v jq >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq jq)
|
||||
tag="${{ needs.version.outputs.tag }}"
|
||||
commit=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "N/A")
|
||||
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
||||
result="${{ job.status }}"
|
||||
if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi
|
||||
msg="secrets linux 构建${icon}
|
||||
版本:${tag}
|
||||
提交:${commit}
|
||||
作者:${{ github.actor }}
|
||||
详情:${url}"
|
||||
payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}')
|
||||
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
||||
|
||||
build-macos:
|
||||
name: Build (macOS aarch64 + x86_64)
|
||||
needs: [version, check]
|
||||
runs-on: darwin-arm64
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: 安装依赖
|
||||
run: |
|
||||
if ! command -v cargo >/dev/null 2>&1; then
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
||||
fi
|
||||
source "$HOME/.cargo/env" 2>/dev/null || true
|
||||
rustup target add aarch64-apple-darwin
|
||||
rustup target add x86_64-apple-darwin
|
||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: 缓存 Cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/index
|
||||
~/.cargo/registry/cache
|
||||
~/.cargo/git/db
|
||||
target
|
||||
key: cargo-macos-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: |
|
||||
cargo-macos-
|
||||
|
||||
- run: cargo build --release --locked --target aarch64-apple-darwin
|
||||
- run: cargo build --release --locked --target x86_64-apple-darwin
|
||||
- run: strip -x target/aarch64-apple-darwin/release/${{ env.BINARY_NAME }}
|
||||
- run: strip -x target/x86_64-apple-darwin/release/${{ env.BINARY_NAME }}
|
||||
|
||||
- name: 上传 Release 产物
|
||||
if: needs.version.outputs.release_id != ''
|
||||
env:
|
||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
||||
run: |
|
||||
[ -z "$RELEASE_TOKEN" ] && exit 0
|
||||
tag="${{ needs.version.outputs.tag }}"
|
||||
release_id="${{ needs.version.outputs.release_id }}"
|
||||
|
||||
arm_bin="target/aarch64-apple-darwin/release/${{ env.BINARY_NAME }}"
|
||||
arm_archive="${{ env.BINARY_NAME }}-${tag}-aarch64-macos.tar.gz"
|
||||
tar -czf "$arm_archive" -C "$(dirname "$arm_bin")" "$(basename "$arm_bin")"
|
||||
shasum -a 256 "$arm_archive" > "${arm_archive}.sha256"
|
||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
||||
-F "attachment=@${arm_archive}" \
|
||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
||||
-F "attachment=@${arm_archive}.sha256" \
|
||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
||||
|
||||
intel_bin="target/x86_64-apple-darwin/release/${{ env.BINARY_NAME }}"
|
||||
intel_archive="${{ env.BINARY_NAME }}-${tag}-x86_64-macos.tar.gz"
|
||||
tar -czf "$intel_archive" -C "$(dirname "$intel_bin")" "$(basename "$intel_bin")"
|
||||
shasum -a 256 "$intel_archive" > "${intel_archive}.sha256"
|
||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
||||
-F "attachment=@${intel_archive}" \
|
||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
||||
-F "attachment=@${intel_archive}.sha256" \
|
||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
||||
|
||||
- name: 飞书通知
|
||||
if: always()
|
||||
env:
|
||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
||||
run: |
|
||||
[ -z "$WEBHOOK_URL" ] && exit 0
|
||||
tag="${{ needs.version.outputs.tag }}"
|
||||
commit=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "N/A")
|
||||
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
||||
result="${{ job.status }}"
|
||||
if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi
|
||||
msg="secrets macOS 双架构构建${icon}
|
||||
版本:${tag}
|
||||
目标:aarch64-apple-darwin, x86_64-apple-darwin
|
||||
提交:${commit}
|
||||
作者:${{ github.actor }}
|
||||
详情:${url}"
|
||||
payload=$(python3 -c "import json,sys; print(json.dumps({'msg_type':'text','content':{'text':sys.argv[1]}}))" "$msg")
|
||||
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
||||
|
||||
build-windows:
|
||||
name: Build (x86_64-pc-windows-msvc)
|
||||
needs: [version, check]
|
||||
runs-on: windows
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: 安装依赖
|
||||
shell: pwsh
|
||||
run: |
|
||||
$cargoBin = Join-Path $env:USERPROFILE ".cargo\bin"
|
||||
if (-not (Get-Command cargo -ErrorAction SilentlyContinue)) {
|
||||
Invoke-WebRequest -Uri "https://win.rustup.rs/x86_64" -OutFile rustup-init.exe
|
||||
.\rustup-init.exe -y --default-toolchain stable
|
||||
Remove-Item rustup-init.exe
|
||||
}
|
||||
$env:Path = "$cargoBin;$env:Path"
|
||||
Add-Content -Path $env:GITHUB_PATH -Value $cargoBin
|
||||
rustup target add x86_64-pc-windows-msvc
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: 缓存 Cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/index
|
||||
~/.cargo/registry/cache
|
||||
~/.cargo/git/db
|
||||
target
|
||||
key: cargo-x86_64-pc-windows-msvc-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: |
|
||||
cargo-x86_64-pc-windows-msvc-
|
||||
|
||||
- name: 构建
|
||||
shell: pwsh
|
||||
run: cargo build --release --locked --target x86_64-pc-windows-msvc
|
||||
|
||||
- name: 上传 Release 产物
|
||||
if: needs.version.outputs.release_id != ''
|
||||
shell: pwsh
|
||||
env:
|
||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
||||
run: |
|
||||
if (-not $env:RELEASE_TOKEN) { exit 0 }
|
||||
$tag = "${{ needs.version.outputs.tag }}"
|
||||
$bin = "target\x86_64-pc-windows-msvc\release\${{ env.BINARY_NAME }}.exe"
|
||||
$archive = "${{ env.BINARY_NAME }}-${tag}-x86_64-windows.zip"
|
||||
Compress-Archive -Path $bin -DestinationPath $archive -Force
|
||||
$hash = (Get-FileHash -Algorithm SHA256 $archive).Hash.ToLower()
|
||||
Set-Content -Path "${archive}.sha256" -Value "$hash $archive" -NoNewline
|
||||
$url = "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets"
|
||||
Invoke-RestMethod -Uri $url -Method Post `
|
||||
-Headers @{ "Authorization" = "token $env:RELEASE_TOKEN" } `
|
||||
-Form @{ attachment = Get-Item $archive }
|
||||
Invoke-RestMethod -Uri $url -Method Post `
|
||||
-Headers @{ "Authorization" = "token $env:RELEASE_TOKEN" } `
|
||||
-Form @{ attachment = Get-Item "${archive}.sha256" }
|
||||
|
||||
- name: 飞书通知
|
||||
if: always()
|
||||
shell: pwsh
|
||||
env:
|
||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
||||
run: |
|
||||
if (-not $env:WEBHOOK_URL) { exit 0 }
|
||||
$tag = "${{ needs.version.outputs.tag }}"
|
||||
$commit = (git log -1 --pretty=format:"%s" 2>$null) ?? "N/A"
|
||||
$url = "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
||||
$result = "${{ job.status }}"
|
||||
$icon = if ($result -eq "success") { "✅" } else { "❌" }
|
||||
$msg = "secrets windows 构建${icon}`n版本:${tag}`n提交:${commit}`n作者:${{ github.actor }}`n详情:${url}"
|
||||
$payload = @{ msg_type = "text"; content = @{ text = $msg } } | ConvertTo-Json
|
||||
Invoke-RestMethod -Uri $env:WEBHOOK_URL -Method Post `
|
||||
-ContentType "application/json" -Body $payload
|
||||
|
||||
publish-release:
|
||||
name: 发布草稿 Release
|
||||
needs: [version, build-linux, build-macos, build-windows]
|
||||
if: always() && needs.version.outputs.release_id != ''
|
||||
runs-on: debian
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: 发布草稿
|
||||
env:
|
||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
||||
run: |
|
||||
[ -z "$RELEASE_TOKEN" ] && exit 0
|
||||
|
||||
linux_r="${{ needs.build-linux.result }}"
|
||||
macos_r="${{ needs.build-macos.result }}"
|
||||
windows_r="${{ needs.build-windows.result }}"
|
||||
if [ "$linux_r" != "success" ] || [ "$macos_r" != "success" ] || [ "$windows_r" != "success" ]; then
|
||||
echo "存在未成功的构建任务,保留草稿 Release"
|
||||
echo "linux=${linux_r} macos=${macos_r} windows=${windows_r}"
|
||||
if [ -z "$DEPLOY_HOST" ] || [ -z "$DEPLOY_USER" ] || [ -z "$DEPLOY_SSH_KEY" ]; then
|
||||
echo "部署跳过:请配置 vars.DEPLOY_HOST、vars.DEPLOY_USER 与 secrets.DEPLOY_SSH_KEY"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
release_api="${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}"
|
||||
http_code=$(curl -sS -o /tmp/publish-release.json -w '%{http_code}' \
|
||||
-H "Authorization: token $RELEASE_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-X PATCH "$release_api" \
|
||||
-d '{"draft":false}')
|
||||
echo "$DEPLOY_SSH_KEY" > /tmp/deploy_key
|
||||
chmod 600 /tmp/deploy_key
|
||||
trap 'rm -f /tmp/deploy_key' EXIT
|
||||
|
||||
if [ "$http_code" != "200" ]; then
|
||||
echo "发布草稿 Release 失败 (HTTP ${http_code})"
|
||||
cat /tmp/publish-release.json 2>/dev/null || true
|
||||
exit 1
|
||||
if [ -n "$DEPLOY_KNOWN_HOSTS" ]; then
|
||||
echo "$DEPLOY_KNOWN_HOSTS" > /tmp/deploy_known_hosts
|
||||
ssh_opts="-o UserKnownHostsFile=/tmp/deploy_known_hosts -o StrictHostKeyChecking=yes"
|
||||
else
|
||||
ssh_opts="-o StrictHostKeyChecking=accept-new"
|
||||
fi
|
||||
echo "Release 已发布"
|
||||
|
||||
- name: 飞书汇总通知
|
||||
scp -i /tmp/deploy_key $ssh_opts \
|
||||
"/tmp/artifact/${MCP_BINARY}" \
|
||||
"${DEPLOY_USER}@${DEPLOY_HOST}:/tmp/secrets-mcp.new"
|
||||
|
||||
ssh -i /tmp/deploy_key $ssh_opts "${DEPLOY_USER}@${DEPLOY_HOST}" "
|
||||
sudo mv /tmp/secrets-mcp.new /opt/secrets-mcp/secrets-mcp
|
||||
sudo chmod +x /opt/secrets-mcp/secrets-mcp
|
||||
sudo systemctl restart secrets-mcp
|
||||
sleep 2
|
||||
sudo systemctl is-active secrets-mcp && echo '服务启动成功' || (sudo journalctl -u secrets-mcp -n 20 && exit 1)
|
||||
"
|
||||
|
||||
- name: 飞书通知
|
||||
if: always()
|
||||
env:
|
||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
||||
run: |
|
||||
[ -z "$WEBHOOK_URL" ] && exit 0
|
||||
command -v jq >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq jq)
|
||||
|
||||
tag="${{ needs.version.outputs.tag }}"
|
||||
tag_exists="${{ needs.version.outputs.tag_exists }}"
|
||||
commit=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "N/A")
|
||||
tag="${{ needs.ci.outputs.tag }}"
|
||||
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
||||
|
||||
linux_r="${{ needs.build-linux.result }}"
|
||||
macos_r="${{ needs.build-macos.result }}"
|
||||
windows_r="${{ needs.build-windows.result }}"
|
||||
publish_r="${{ job.status }}"
|
||||
|
||||
icon() { case "$1" in success) echo "✅";; skipped) echo "⏭";; *) echo "❌";; esac; }
|
||||
|
||||
if [ "$linux_r" = "success" ] && [ "$macos_r" = "success" ] && [ "$windows_r" = "success" ] && [ "$publish_r" = "success" ]; then
|
||||
status="发布成功 ✅"
|
||||
elif [ "$linux_r" != "success" ] || [ "$macos_r" != "success" ] || [ "$windows_r" != "success" ]; then
|
||||
status="构建失败 ❌"
|
||||
else
|
||||
status="发布失败 ❌"
|
||||
fi
|
||||
|
||||
if [ "$tag_exists" = "false" ]; then
|
||||
version_line="🆕 新版本 ${tag}"
|
||||
else
|
||||
version_line="🔄 重复构建 ${tag}"
|
||||
fi
|
||||
|
||||
msg="secrets ${status}
|
||||
${version_line}
|
||||
linux $(icon "$linux_r") | macOS $(icon "$macos_r") | windows $(icon "$windows_r") | Release $(icon "$publish_r")
|
||||
提交:${commit}
|
||||
result="${{ job.status }}"
|
||||
if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi
|
||||
msg="secrets-mcp 部署 ${icon}
|
||||
版本:${tag}
|
||||
作者:${{ github.actor }}
|
||||
详情:${url}"
|
||||
|
||||
payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}')
|
||||
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -1,4 +1,8 @@
|
||||
/target
|
||||
.env
|
||||
.DS_Store
|
||||
.cursor/
|
||||
.cursor/
|
||||
*.pem
|
||||
tmp/
|
||||
client_secret_*.apps.googleusercontent.com.json
|
||||
node_modules/
|
||||
149
.vscode/tasks.json
vendored
149
.vscode/tasks.json
vendored
@@ -2,148 +2,45 @@
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "build",
|
||||
"label": "mcp: build",
|
||||
"type": "shell",
|
||||
"command": "cargo build",
|
||||
"group": { "kind": "build", "isDefault": true }
|
||||
"command": "cargo build --locked -p secrets-mcp",
|
||||
"group": "build",
|
||||
"options": {
|
||||
"envFile": "${workspaceFolder}/.env"
|
||||
}
|
||||
},
|
||||
{
|
||||
"label": "cli: version",
|
||||
"label": "mcp: run",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets -V",
|
||||
"dependsOn": "build"
|
||||
"command": "cargo run --locked -p secrets-mcp",
|
||||
"options": {
|
||||
"envFile": "${workspaceFolder}/.env"
|
||||
}
|
||||
},
|
||||
{
|
||||
"label": "cli: help",
|
||||
"label": "test: workspace",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets --help",
|
||||
"dependsOn": "build"
|
||||
"command": "cargo test --workspace --locked",
|
||||
"group": { "kind": "test", "isDefault": true }
|
||||
},
|
||||
{
|
||||
"label": "cli: help add",
|
||||
"label": "fmt: check",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets help add",
|
||||
"dependsOn": "build"
|
||||
"command": "cargo fmt -- --check",
|
||||
"problemMatcher": []
|
||||
},
|
||||
{
|
||||
"label": "cli: help config",
|
||||
"label": "clippy: workspace",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets help config",
|
||||
"dependsOn": "build"
|
||||
"command": "cargo clippy --workspace --locked -- -D warnings",
|
||||
"problemMatcher": []
|
||||
},
|
||||
{
|
||||
"label": "cli: config path",
|
||||
"label": "ci: release-check",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets config path",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "cli: config show",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets config show",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search all",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search all (verbose)",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets --verbose search",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search by namespace (refining)",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search -n refining",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search by namespace (ricnsmart)",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search -n ricnsmart",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search servers",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search --kind server",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search services",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search --kind service",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search keys",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search --kind key",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search by tag (aliyun)",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search --tag aliyun",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search by tag (hongkong)",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search --tag hongkong",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: search keyword (gitea)",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search -q gitea",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: inject service secrets",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets inject -n refining --kind service --name gitea",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: combined search (ricnsmart + server + shanghai)",
|
||||
"type": "shell",
|
||||
"command": "./target/debug/secrets search -n ricnsmart --kind server --tag shanghai",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: add + delete roundtrip",
|
||||
"type": "shell",
|
||||
"command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name roundtrip-test --tag test -m foo=bar -s password=secret123 && echo '--- search metadata ---' && ./target/debug/secrets search -n test && echo '--- inject secrets ---' && ./target/debug/secrets inject -n test --kind demo --name roundtrip-test && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name roundtrip-test && echo '--- verify deleted ---' && ./target/debug/secrets search -n test",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: add + delete roundtrip (verbose)",
|
||||
"type": "shell",
|
||||
"command": "echo '--- add (verbose) ---' && ./target/debug/secrets --verbose add -n test --kind demo --name roundtrip-verbose --tag test -m foo=bar -s password=secret123 && echo '--- delete (verbose) ---' && ./target/debug/secrets --verbose delete -n test --kind demo --name roundtrip-verbose",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: update roundtrip",
|
||||
"type": "shell",
|
||||
"command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name update-test --tag v1 -m env=staging && echo '--- update ---' && ./target/debug/secrets update -n test --kind demo --name update-test --add-tag v2 --remove-tag v1 -m env=production && echo '--- verify ---' && ./target/debug/secrets search -n test --kind demo && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind demo --name update-test",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: audit log",
|
||||
"type": "shell",
|
||||
"command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name audit-test -m foo=bar -s key=val && echo '--- update ---' && ./target/debug/secrets update -n test --kind demo --name audit-test -m foo=baz && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name audit-test && echo '--- audit log (last 5) ---' && psql $DATABASE_URL -c \"SELECT action, namespace, kind, name, actor, detail, created_at FROM audit_log ORDER BY created_at DESC LIMIT 5;\"",
|
||||
"dependsOn": "build"
|
||||
},
|
||||
{
|
||||
"label": "test: add with file secret",
|
||||
"type": "shell",
|
||||
"command": "echo '--- add key from file ---' && ./target/debug/secrets add -n test --kind key --name test-key --tag test -s content=@./test-fixtures/example-key.pem && echo '--- verify metadata ---' && ./target/debug/secrets search -n test --kind key && echo '--- verify inject ---' && ./target/debug/secrets inject -n test --kind key --name test-key && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind key --name test-key",
|
||||
"dependsOn": "build"
|
||||
"command": "./scripts/release-check.sh",
|
||||
"problemMatcher": []
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
765
AGENTS.md
765
AGENTS.md
@@ -1,680 +1,233 @@
|
||||
# Secrets CLI — AGENTS.md
|
||||
# Secrets MCP — AGENTS.md
|
||||
|
||||
## 提交 / 发版硬规则(优先于下文其他说明)
|
||||
本仓库为 **MCP SaaS**:`secrets-core`(业务与持久化)+ `secrets-mcp`(Streamable HTTP MCP、Web、OAuth、API Key)。对外入口见 `crates/secrets-mcp`。
|
||||
|
||||
1. 涉及 `src/**`、`Cargo.toml`、`Cargo.lock`、CLI 行为变更的提交,默认视为**需要发版**,除非用户明确说明“本次不发版”。
|
||||
2. 发版前必须先检查 `Cargo.toml` 中的 `version`,再检查是否已存在对应 tag:`git tag -l 'secrets-*'`。
|
||||
3. 若当前版本对应 tag 已存在,必须先 bump `Cargo.toml` 的 `version`,再执行 `cargo build` 同步 `Cargo.lock`,然后才能提交。
|
||||
4. 提交前优先运行 `./scripts/release-check.sh`;该脚本会检查重复版本并执行 `cargo fmt -- --check && cargo clippy --locked -- -D warnings && cargo test --locked`。
|
||||
## 版本控制
|
||||
|
||||
跨设备密钥与配置管理 CLI 工具,将服务器信息、服务凭据等存储到 PostgreSQL 18,供 AI 工具读取上下文。每个加密字段单独行存储(`secrets` 子表),字段名、类型、长度以明文保存,主密钥由 Argon2id 从主密码派生并存入平台安全存储(macOS Keychain / Windows Credential Manager / Linux keyutils)。
|
||||
本仓库使用 **[Jujutsu (jj)](https://jj-vcs.dev/)** 作为版本控制系统(纯 jj 模式,无 `.git` 目录)。
|
||||
|
||||
### 常用 jj 命令对照
|
||||
|
||||
| 操作 | jj 命令 |
|
||||
|------|---------|
|
||||
| 查看历史 | `jj log` / `jj log 'all()'` |
|
||||
| 查看状态 | `jj status` |
|
||||
| 新建提交 | `jj commit` |
|
||||
| 创建新变更 | `jj new` |
|
||||
| 变基 | `jj rebase` |
|
||||
| 合并提交 | `jj squash` |
|
||||
| 撤销操作 | `jj undo` |
|
||||
| 查看标签 | `jj tag list` |
|
||||
| 查看分支 | `jj bookmark list` |
|
||||
| 推送远端 | `jj git push` |
|
||||
| 拉取远端 | `jj git fetch` |
|
||||
|
||||
### 注意事项
|
||||
- 本仓库为**纯 jj 模式**,无 `.git` 目录;本地不要使用 `git` 命令
|
||||
- CI/CD(Gitea Actions)仍通过 Git 协议拉取代码,Runner 侧自动使用 `git`,无需修改
|
||||
- 检查标签是否存在时使用 `jj log --no-graph --revisions "tag(${tag})"` 而非 `git rev-parse`
|
||||
|
||||
## 提交 / 推送硬规则(优先于下文)
|
||||
|
||||
**每次提交和推送前必须执行以下检查,无论是否明确「发版」:**
|
||||
|
||||
1. 涉及 `crates/**`、根目录 `Cargo.toml`/`Cargo.lock`、`secrets-mcp` 行为变更的提交,默认视为**需要发版**,除非明确说明「本次不发版」。
|
||||
2. 提交前检查 `crates/secrets-mcp/Cargo.toml` 的 `version`,再查 tag:`jj tag list`。若当前版本对应 tag 已存在且有代码变更,**必须 bump 版本号**并 `cargo build` 同步 `Cargo.lock`。
|
||||
3. 提交前运行 `./scripts/release-check.sh`(版本/tag + `fmt` + `clippy --locked` + `test --locked`)。若脚本不存在或不可用,至少运行 `cargo fmt -- --check && cargo clippy --locked -- -D warnings && cargo test --locked`。
|
||||
|
||||
## 项目结构
|
||||
|
||||
```
|
||||
secrets/
|
||||
src/
|
||||
main.rs # CLI 入口,clap 命令定义,auto-migrate,--verbose 全局参数
|
||||
output.rs # OutputMode 枚举 + TTY 检测(TTY→text,非 TTY→json-compact)
|
||||
config.rs # 配置读写:~/.config/secrets/config.toml(database_url)
|
||||
db.rs # PgPool 创建 + 建表/索引(DROP+CREATE,含所有表)
|
||||
crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串
|
||||
models.rs # Entry + SecretField 结构体(sqlx::FromRow + serde)
|
||||
audit.rs # 审计写入:log_tx(事务内)
|
||||
commands/
|
||||
init.rs # init 命令:主密钥初始化(每台设备一次)
|
||||
add.rs # add 命令:upsert entries + 逐字段写入 secrets,含历史快照
|
||||
config.rs # config 命令:set-db / show / path(持久化 database_url)
|
||||
search.rs # search 命令:多条件查询,展示 secrets 字段 schema(无需 master_key)
|
||||
delete.rs # delete 命令:事务化,CASCADE 删除 secrets,含历史快照
|
||||
update.rs # update 命令:增量更新,secrets 行级 UPSERT/DELETE,CAS 并发保护
|
||||
rollback.rs # rollback 命令:按 entry_version 恢复 entry + secrets
|
||||
history.rs # history 命令:查看 entry 变更历史列表
|
||||
run.rs # inject / run 命令:仅 secrets 逐字段解密 + key_ref 引用解析(不含 metadata)
|
||||
upgrade.rs # upgrade 命令:检查、校验摘要并下载最新版本,自动替换二进制
|
||||
export_cmd.rs # export 命令:批量导出记录,支持 JSON/TOML/YAML,含解密明文
|
||||
import_cmd.rs # import 命令:批量导入记录,冲突检测,dry-run,重新加密写入
|
||||
Cargo.toml
|
||||
crates/
|
||||
secrets-core/ # db / crypto / models / audit / service
|
||||
secrets-mcp/ # rmcp tools、axum、OAuth、Dashboard
|
||||
scripts/
|
||||
release-check.sh # 发版前检查版本号/tag 是否重复,并执行 fmt/clippy/test
|
||||
setup-gitea-actions.sh # 配置 Gitea Actions 变量与 Secrets
|
||||
.gitea/workflows/
|
||||
secrets.yml # CI:fmt + clippy + musl 构建 + Release 上传 + 飞书通知
|
||||
.vscode/tasks.json # 本地测试任务(build / config / search / add+delete / update / audit 等)
|
||||
release-check.sh
|
||||
setup-gitea-actions.sh
|
||||
.gitea/workflows/secrets.yml
|
||||
.vscode/tasks.json
|
||||
```
|
||||
|
||||
## 数据库
|
||||
|
||||
- **Host**: `<host>:<port>`
|
||||
- **Database**: `secrets`
|
||||
- **连接串**: `postgres://postgres:<password>@<host>:<port>/secrets`
|
||||
- **表**: `entries`(主表)+ `secrets`(加密字段子表)+ `entries_history` + `secrets_history` + `audit_log` + `kv_config`,首次连接自动建表(auto-migrate)
|
||||
- **建议库名**:`secrets-mcp`(专用实例,与历史库名区分)。
|
||||
- **连接**:环境变量 **`SECRETS_DATABASE_URL`**(本分支无本地配置文件路径)。
|
||||
- **表**:`entries`(含 `user_id`)、`secrets`、`entries_history`、`secrets_history`、`audit_log`、`users`、`oauth_accounts`,首次连接 **auto-migrate**(`secrets-core` 的 `migrate`)。
|
||||
- **Web 会话**:与上项 **同一数据库 URL**;`secrets-mcp` 启动时对 tower-sessions 的 PostgreSQL 存储 **auto-migrate**(会话表与业务表共存于该实例,无需第二套连接串)。
|
||||
|
||||
### 表结构
|
||||
### 表结构(摘录)
|
||||
|
||||
```sql
|
||||
entries (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(), -- PG18 时间有序 UUID
|
||||
namespace VARCHAR(64) NOT NULL, -- 一级隔离: "refining" | "ricnsmart"
|
||||
kind VARCHAR(64) NOT NULL, -- 类型: "server" | "service" | "key"(可扩展)
|
||||
name VARCHAR(256) NOT NULL, -- 人类可读标识
|
||||
tags TEXT[] NOT NULL DEFAULT '{}', -- 灵活标签: ["aliyun","hongkong"]
|
||||
metadata JSONB NOT NULL DEFAULT '{}', -- 明文描述: ip, desc, domains, location...
|
||||
version BIGINT NOT NULL DEFAULT 1, -- 乐观锁版本号,每次写操作自增
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
user_id UUID, -- 多租户:NULL=遗留行;非空=归属用户
|
||||
folder VARCHAR(128) NOT NULL DEFAULT '',
|
||||
type VARCHAR(64) NOT NULL DEFAULT '',
|
||||
name VARCHAR(256) NOT NULL,
|
||||
notes TEXT NOT NULL DEFAULT '',
|
||||
tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
version BIGINT NOT NULL DEFAULT 1,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(namespace, kind, name)
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
)
|
||||
-- 唯一:UNIQUE(user_id, folder, name) WHERE user_id IS NOT NULL;
|
||||
-- UNIQUE(folder, name) WHERE user_id IS NULL(单租户遗留)
|
||||
```
|
||||
|
||||
```sql
|
||||
secrets (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
|
||||
field_name VARCHAR(256) NOT NULL, -- 明文字段名: "username", "token", "ssh_key"
|
||||
encrypted BYTEA NOT NULL DEFAULT '\x', -- 仅加密值本身:nonce(12B)||ciphertext+tag
|
||||
user_id UUID,
|
||||
name VARCHAR(256) NOT NULL,
|
||||
type VARCHAR(64) NOT NULL DEFAULT 'text',
|
||||
encrypted BYTEA NOT NULL DEFAULT '\x',
|
||||
version BIGINT NOT NULL DEFAULT 1,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(entry_id, field_name)
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
)
|
||||
-- 唯一:UNIQUE(user_id, name) WHERE user_id IS NOT NULL
|
||||
```
|
||||
|
||||
```sql
|
||||
kv_config (
|
||||
key TEXT PRIMARY KEY, -- 如 'argon2_salt'
|
||||
value BYTEA NOT NULL -- Argon2id salt,首台设备 init 时生成
|
||||
entry_secrets (
|
||||
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
|
||||
secret_id UUID NOT NULL REFERENCES secrets(id) ON DELETE CASCADE,
|
||||
sort_order INT NOT NULL DEFAULT 0,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
PRIMARY KEY(entry_id, secret_id)
|
||||
)
|
||||
```
|
||||
|
||||
### audit_log 表结构
|
||||
### users / oauth_accounts
|
||||
|
||||
```sql
|
||||
audit_log (
|
||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
action VARCHAR(32) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback'
|
||||
namespace VARCHAR(64) NOT NULL,
|
||||
kind VARCHAR(64) NOT NULL,
|
||||
name VARCHAR(256) NOT NULL,
|
||||
detail JSONB NOT NULL DEFAULT '{}', -- 变更摘要(tags/meta keys/secret keys,不含 value)
|
||||
actor VARCHAR(128) NOT NULL DEFAULT '', -- 操作者($USER 环境变量)
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
users (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
email VARCHAR(256),
|
||||
name VARCHAR(256) NOT NULL DEFAULT '',
|
||||
avatar_url TEXT,
|
||||
key_salt BYTEA, -- PBKDF2 salt(32B),首次设置密码短语时写入
|
||||
key_check BYTEA, -- 派生密钥加密已知常量,用于验证密码短语
|
||||
key_params JSONB, -- 算法参数,如 {"alg":"pbkdf2-sha256","iterations":600000}
|
||||
api_key TEXT UNIQUE, -- MCP Bearer token(当前实现为明文存储)
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
)
|
||||
```
|
||||
|
||||
### entries_history 表结构
|
||||
|
||||
```sql
|
||||
entries_history (
|
||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
entry_id UUID NOT NULL,
|
||||
namespace VARCHAR(64) NOT NULL,
|
||||
kind VARCHAR(64) NOT NULL,
|
||||
name VARCHAR(256) NOT NULL,
|
||||
version BIGINT NOT NULL, -- 被快照时的版本号
|
||||
action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback'
|
||||
tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
actor VARCHAR(128) NOT NULL DEFAULT '',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
oauth_accounts (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
provider VARCHAR(32) NOT NULL,
|
||||
provider_id VARCHAR(256) NOT NULL,
|
||||
email VARCHAR(256),
|
||||
name VARCHAR(256),
|
||||
avatar_url TEXT,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(provider, provider_id)
|
||||
)
|
||||
-- 另有唯一索引 UNIQUE(user_id, provider)(迁移中 idx_oauth_accounts_user_provider):同一用户每种 provider 至多一条关联。
|
||||
```
|
||||
|
||||
### secrets_history 表结构
|
||||
### audit_log / history
|
||||
|
||||
```sql
|
||||
secrets_history (
|
||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
entry_id UUID NOT NULL,
|
||||
secret_id UUID NOT NULL, -- 对应 secrets.id
|
||||
entry_version BIGINT NOT NULL, -- 关联 entries_history 的版本号
|
||||
field_name VARCHAR(256) NOT NULL,
|
||||
encrypted BYTEA NOT NULL DEFAULT '\x',
|
||||
action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback'
|
||||
actor VARCHAR(128) NOT NULL DEFAULT '',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
)
|
||||
```
|
||||
与迁移脚本一致:`audit_log`、`entries_history`、`secrets_history` 用于审计与时间旅行恢复;字段定义见 `crates/secrets-core/src/db.rs` 内 `migrate` SQL。`audit_log` 含可选 **`user_id`**(多租户下标识操作者;可空以兼容遗留数据)。`audit_log` 中普通业务事件使用 **`folder` / `type` / `name`** 对应 entry 坐标;登录类事件固定使用 **`folder='auth'`**,此时 `type`/`name` 表示认证目标而非 entry 身份。
|
||||
|
||||
### 字段职责划分
|
||||
### MCP 消歧(AI 调用)
|
||||
|
||||
| 字段 | 存什么 | 示例 |
|
||||
|------|--------|------|
|
||||
| `namespace` | 项目/团队隔离 | `refining`, `ricnsmart` |
|
||||
| `kind` | 记录类型 | `server`, `service`, `key` |
|
||||
| `name` | 唯一标识名 | `i-example0abcd1234efgh`, `gitea` |
|
||||
| `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` |
|
||||
| `metadata` | 明文非敏感信息 | `{"ip":"192.0.2.1","desc":"Grafana","key_ref":"my-shared-key"}` |
|
||||
| `secrets.field_name` | 加密字段名(明文) | `"username"`, `"token"`, `"ssh_key"` |
|
||||
| `secrets.encrypted` | 仅加密值本身 | AES-256-GCM 密文 |
|
||||
按 `name` 定位条目的工具(`secrets_update` / `secrets_history` / `secrets_rollback` / `secrets_delete` 单条模式):若该用户下仅一条匹配则直接执行;若多条(同 `name`、不同 `folder`)则返回错误并提示补全 `folder`。也可直接传 `id`(UUID)跳过消歧。
|
||||
|
||||
### PEM 共享机制(key_ref)
|
||||
注意:`secrets_get` 只接受 UUID `id`(来自 `secrets_find` 结果),不支持按 `name` 定位。
|
||||
|
||||
同一 PEM 被多台服务器共享时,将 PEM 存为独立的 `kind=key` 记录,服务器通过 `metadata.key_ref` 引用:
|
||||
### 字段职责
|
||||
|
||||
```bash
|
||||
# 1. 存共享 PEM
|
||||
secrets add -n refining --kind key --name my-shared-key \
|
||||
--tag aliyun --tag hongkong \
|
||||
-s content=@./keys/my-shared-key.pem
|
||||
| 字段 | 含义 | 示例 |
|
||||
|------|------|------|
|
||||
| `folder` | 隔离空间(参与唯一键) | `refining` |
|
||||
| `type` | 软分类(不参与唯一键,用户自定义) | `server`, `service`, `account`, `person`, `document` |
|
||||
| `name` | 标识名 | `gitea`, `aliyun` |
|
||||
| `notes` | 非敏感说明 | 自由文本 |
|
||||
| `tags` | 标签 | `["aliyun","prod"]` |
|
||||
| `metadata` | 明文描述 | `ip`、`url`、`subtype` |
|
||||
| `secrets.name` | 密钥名称(调用方提供) | `token`, `ssh_key`, `password` |
|
||||
| `secrets.type` | 密钥类型(调用方提供,默认 `text`) | `text`, `password`, `key` |
|
||||
| `secrets.encrypted` | 密文 | AES-GCM |
|
||||
|
||||
# 2. 服务器通过 metadata.key_ref 引用(inject/run 时自动合并 key 的 secrets)
|
||||
secrets add -n refining --kind server --name i-example0xyz789 \
|
||||
-m ip=192.0.2.1 -m key_ref=my-shared-key \
|
||||
-s username=ecs-user
|
||||
### 共享密钥(N:N 关联)
|
||||
|
||||
# 3. 轮换只需更新 key 记录,所有引用服务器自动生效
|
||||
secrets update -n refining --kind key --name my-shared-key \
|
||||
-s content=@./keys/new-key.pem
|
||||
```
|
||||
|
||||
## 数据库配置
|
||||
|
||||
首次使用需显式配置数据库连接,设置一次后在该设备上持久生效:
|
||||
|
||||
```bash
|
||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
||||
secrets config show # 查看当前配置(密码脱敏)
|
||||
secrets config path # 打印配置文件路径
|
||||
```
|
||||
|
||||
`set-db` 会先验证连接可用,成功后才写入配置文件;连接失败时提示 "Database connection failed" 且不修改配置。
|
||||
|
||||
配置文件:`~/.config/secrets/config.toml`,权限 0600。`--db-url` 参数可一次性覆盖。
|
||||
|
||||
## 主密钥与加密
|
||||
|
||||
首次使用(每台设备各执行一次):
|
||||
|
||||
```bash
|
||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
||||
secrets init # 提示输入主密码,Argon2id 派生主密钥后存入 OS 钥匙串
|
||||
```
|
||||
|
||||
主密码不存储;salt 存于 `kv_config`,首台设备生成后共享,确保同一主密码在所有设备派生出相同主密钥。
|
||||
|
||||
主密钥存储后端:macOS Keychain、Windows Credential Manager、Linux keyutils(会话级,重启后需再次 `secrets init`)。
|
||||
|
||||
**从旧版(明文 JSONB)升级**:升级后执行 `secrets init` 即可(明文记录需手动重新 add 或通过 update 更新)。
|
||||
|
||||
## CLI 命令
|
||||
|
||||
### AI 使用主路径
|
||||
|
||||
**读取一律用 `search`,写入用 `add` / `update`,避免反复查帮助。**
|
||||
|
||||
输出格式规则:
|
||||
- TTY(终端直接运行)→ 默认 `text`
|
||||
- 非 TTY(管道/重定向/AI 调用)→ 自动 `json-compact`
|
||||
- 显式 `-o json` → 美化 JSON
|
||||
|
||||
---
|
||||
|
||||
### init — 主密钥初始化(每台设备一次)
|
||||
|
||||
```bash
|
||||
# 首次设备:生成 Argon2id salt 并存库,派生主密钥后存 OS 钥匙串
|
||||
secrets init
|
||||
|
||||
# 后续设备:复用已有 salt,派生主密钥后存钥匙串(主密码需与首台相同)
|
||||
secrets init
|
||||
```
|
||||
|
||||
### search — 发现与读取
|
||||
|
||||
```bash
|
||||
# 参数说明(带典型值)
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name gitea | i-example0abcd1234efgh | mqtt
|
||||
# --tag aliyun | hongkong | production
|
||||
# -q / --query mqtt | grafana | gitea (模糊匹配 name/namespace/kind/tags/metadata)
|
||||
# secrets schema search 默认展示 secrets 字段名、类型与长度(无需 master_key)
|
||||
# -f / --field metadata.ip | metadata.url | metadata.default_org
|
||||
# --summary 不带值的 flag,仅返回摘要(name/tags/desc/updated_at)
|
||||
# --limit 20 | 50(默认 50)
|
||||
# --offset 0 | 10 | 20(分页偏移)
|
||||
# --sort name(默认)| updated | created
|
||||
# -o / --output text | json | json-compact
|
||||
|
||||
# 发现概览(起步推荐)
|
||||
secrets search --summary --limit 20
|
||||
secrets search -n refining --summary --limit 20
|
||||
secrets search --sort updated --limit 10 --summary
|
||||
|
||||
# 精确定位单条记录
|
||||
secrets search -n refining --kind service --name gitea
|
||||
secrets search -n refining --kind server --name i-example0abcd1234efgh
|
||||
|
||||
# 精确定位并获取完整内容(secrets 保持加密占位)
|
||||
secrets search -n refining --kind service --name gitea -o json
|
||||
|
||||
# 直接提取 metadata 字段值(最短路径)
|
||||
secrets search -n refining --kind service --name gitea -f metadata.url
|
||||
secrets search -n refining --kind service --name gitea \
|
||||
-f metadata.url -f metadata.default_org
|
||||
|
||||
# 需要 secrets 时,改用 inject / run
|
||||
secrets inject -n refining --kind service --name gitea
|
||||
secrets run -n refining --kind service --name gitea -- printenv
|
||||
|
||||
# 模糊关键词搜索
|
||||
secrets search -q mqtt
|
||||
secrets search -q grafana
|
||||
secrets search -q 192.0.2
|
||||
|
||||
# 按条件过滤
|
||||
secrets search -n refining --kind service
|
||||
secrets search -n ricnsmart --kind server
|
||||
secrets search --tag hongkong
|
||||
secrets search --tag aliyun --summary
|
||||
|
||||
# 分页
|
||||
secrets search -n refining --summary --limit 10 --offset 0
|
||||
secrets search -n refining --summary --limit 10 --offset 10
|
||||
|
||||
# 管道 / AI 调用(非 TTY 自动 json-compact)
|
||||
secrets search -n refining --kind service | jq '.[].name'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### add — 新增或全量覆盖(upsert)
|
||||
|
||||
```bash
|
||||
# 参数说明(带典型值)
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name gitea | i-example0abcd1234efgh
|
||||
# --tag aliyun | hongkong(可重复)
|
||||
# -m / --meta ip=10.0.0.1 | desc="ECS" | url=https://... | tls:cert@./cert.pem(可重复)
|
||||
# -s / --secret token=<value> | ssh_key=@./key.pem | password=secret123 | credentials:content@./key.pem(可重复)
|
||||
|
||||
# 添加服务器
|
||||
secrets add -n refining --kind server --name i-example0abcd1234efgh \
|
||||
--tag aliyun --tag shanghai \
|
||||
-m ip=10.0.0.1 -m desc="Aliyun Shanghai ECS" \
|
||||
-s username=root -s ssh_key=@./keys/deploy-key.pem
|
||||
|
||||
# 添加服务凭据
|
||||
secrets add -n refining --kind service --name gitea \
|
||||
--tag gitea \
|
||||
-m url=https://code.example.com -m default_org=refining -m username=voson \
|
||||
-s token=<token> -s runner_token=<runner_token>
|
||||
|
||||
# 从文件读取 token
|
||||
secrets add -n ricnsmart --kind service --name mqtt \
|
||||
-m host=mqtt.example.com -m port=1883 \
|
||||
-s password=@./mqtt_password.txt
|
||||
|
||||
# 多行文件直接写入嵌套 secret 字段
|
||||
secrets add -n refining --kind server --name i-example0abcd1234efgh \
|
||||
-s credentials:content@./keys/deploy-key.pem
|
||||
|
||||
# 使用类型化值(key:=<json>)存储非字符串类型
|
||||
secrets add -n refining --kind service --name prometheus \
|
||||
-m scrape_interval:=15 \
|
||||
-m enabled:=true \
|
||||
-m labels:='["prod","metrics"]' \
|
||||
-s api_key=abc123
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### update — 增量更新(记录必须已存在)
|
||||
|
||||
只有传入的字段才会变动,其余全部保留。
|
||||
|
||||
```bash
|
||||
# 参数说明(带典型值)
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name gitea | i-example0abcd1234efgh
|
||||
# --add-tag production | backup(不影响已有 tag,可重复)
|
||||
# --remove-tag staging | deprecated(可重复)
|
||||
# -m / --meta ip=10.0.0.1 | desc="新描述" | credentials:username=root(新增或覆盖,可重复)
|
||||
# --remove-meta old_port | legacy_key | credentials:content(删除 metadata 字段,可重复)
|
||||
# -s / --secret token=<new> | ssh_key=@./new.pem | credentials:content@./new.pem(新增或覆盖,可重复)
|
||||
# --remove-secret old_password | deprecated_key | credentials:content(删除 secret 字段,可重复)
|
||||
|
||||
# 更新单个 metadata 字段
|
||||
secrets update -n refining --kind server --name i-example0abcd1234efgh \
|
||||
-m ip=10.0.0.1
|
||||
|
||||
# 轮换 token
|
||||
secrets update -n refining --kind service --name gitea \
|
||||
-s token=<new-token>
|
||||
|
||||
# 新增 tag 并轮换 token
|
||||
secrets update -n refining --kind service --name gitea \
|
||||
--add-tag production \
|
||||
-s token=<new-token>
|
||||
|
||||
# 移除废弃字段
|
||||
secrets update -n refining --kind service --name mqtt \
|
||||
--remove-meta old_port --remove-secret old_password
|
||||
|
||||
# 从文件更新嵌套 secret 字段
|
||||
secrets update -n refining --kind server --name i-example0abcd1234efgh \
|
||||
-s credentials:content@./keys/deploy-key.pem
|
||||
|
||||
# 删除嵌套字段
|
||||
secrets update -n refining --kind server --name i-example0abcd1234efgh \
|
||||
--remove-secret credentials:content
|
||||
|
||||
# 移除 tag
|
||||
secrets update -n refining --kind service --name gitea --remove-tag staging
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### delete — 删除记录(支持单条精确删除与批量删除)
|
||||
|
||||
删除时会自动将 entry 与所有关联 secret 字段快照到历史表,并写入审计日志,可通过 `rollback` 命令恢复。
|
||||
|
||||
```bash
|
||||
# 参数说明(带典型值)
|
||||
# -n / --namespace refining | ricnsmart(必填)
|
||||
# --kind server | service(指定 --name 时必填;批量时可选)
|
||||
# --name gitea | i-example0abcd1234efgh(精确匹配;省略则批量删除)
|
||||
# --dry-run 预览将删除的记录,不实际写入(仅批量模式有效)
|
||||
# -o / --output text | json | json-compact
|
||||
|
||||
# 精确删除单条记录(--kind 必填)
|
||||
secrets delete -n refining --kind service --name legacy-mqtt
|
||||
secrets delete -n ricnsmart --kind server --name i-old-server-id
|
||||
|
||||
# 预览批量删除(不写入数据库)
|
||||
secrets delete -n refining --dry-run
|
||||
secrets delete -n ricnsmart --kind server --dry-run
|
||||
|
||||
# 批量删除整个 namespace 的所有记录
|
||||
secrets delete -n ricnsmart
|
||||
|
||||
# 批量删除 namespace 下指定 kind 的所有记录
|
||||
secrets delete -n ricnsmart --kind server
|
||||
|
||||
# JSON 输出
|
||||
secrets delete -n refining --kind service -o json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### history — 查看变更历史
|
||||
|
||||
```bash
|
||||
# 参数说明
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name 记录名
|
||||
# --limit 返回条数(默认 20)
|
||||
|
||||
# 查看某条记录的历史版本列表
|
||||
secrets history -n refining --kind service --name gitea
|
||||
|
||||
# 查最近 5 条
|
||||
secrets history -n refining --kind service --name gitea --limit 5
|
||||
|
||||
# JSON 输出
|
||||
secrets history -n refining --kind service --name gitea -o json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### rollback — 回滚到指定版本
|
||||
|
||||
```bash
|
||||
# 参数说明
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name 记录名
|
||||
# --to-version <N> 目标版本号(省略则恢复最近一次快照)
|
||||
|
||||
# 撤销上次修改(回滚到最近一次快照)
|
||||
secrets rollback -n refining --kind service --name gitea
|
||||
|
||||
# 回滚到版本 3
|
||||
secrets rollback -n refining --kind service --name gitea --to-version 3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### inject — 输出临时环境变量
|
||||
|
||||
仅注入 secrets 表中的加密字段(解密后),不含 metadata。敏感值仅打印到 stdout,不持久化、不写入当前 shell。
|
||||
|
||||
```bash
|
||||
# 参数说明
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name 记录名
|
||||
# --tag 按 tag 过滤(可重复)
|
||||
# --prefix 变量名前缀(留空则以记录 name 作前缀)
|
||||
# -o / --output text(默认 KEY=VALUE)| json | json-compact
|
||||
|
||||
# 打印单条记录的 secrets 变量(KEY=VALUE 格式)
|
||||
secrets inject -n refining --kind service --name gitea
|
||||
|
||||
# 自定义前缀
|
||||
secrets inject -n refining --kind service --name gitea --prefix GITEA
|
||||
|
||||
# JSON 格式(适合管道或脚本解析)
|
||||
secrets inject -n refining --kind service --name gitea -o json
|
||||
|
||||
# eval 注入当前 shell(谨慎使用)
|
||||
eval $(secrets inject -n refining --kind service --name gitea)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### run — 向子进程注入 secrets 并执行命令
|
||||
|
||||
仅注入 secrets 表中的加密字段(解密后),不含 metadata。secrets 仅作用于子进程环境,不修改当前 shell,进程退出码透传。
|
||||
|
||||
```bash
|
||||
# 参数说明
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name 记录名
|
||||
# --tag 按 tag 过滤(可重复)
|
||||
# --prefix 变量名前缀
|
||||
# -- <command> 执行的命令及参数
|
||||
|
||||
# 向脚本注入单条记录的 secrets
|
||||
secrets run -n refining --kind service --name gitea -- ./deploy.sh
|
||||
|
||||
# 按 tag 批量注入(多条记录合并)
|
||||
secrets run --tag production -- env | grep -i token
|
||||
|
||||
# 验证注入了哪些变量
|
||||
secrets run -n refining --kind service --name gitea -- printenv
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### upgrade — 自动更新 CLI 二进制
|
||||
|
||||
从 Release 服务器下载最新版本,校验对应 `.sha256` 摘要后替换当前二进制,无需数据库连接或主密钥。
|
||||
|
||||
**配置方式**:`SECRETS_UPGRADE_URL` 必填。优先在**构建时**注入:`SECRETS_UPGRADE_URL=https://... cargo build`(CI 已自动注入);也可在**运行时**提供:写入 `.env`,或先 `export` 再执行。
|
||||
|
||||
```bash
|
||||
# 检查是否有新版本(不下载)
|
||||
secrets upgrade --check
|
||||
|
||||
# 下载、校验 SHA-256 并安装最新版本
|
||||
secrets upgrade
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### export — 批量导出记录
|
||||
|
||||
将匹配的记录(含解密后的明文 secrets)导出到文件或 stdout。支持 JSON、TOML、YAML 三种格式,文件格式由扩展名自动推断。使用 `--no-secrets` 时无需主密钥。
|
||||
|
||||
```bash
|
||||
# 参数说明
|
||||
# -n / --namespace refining | ricnsmart
|
||||
# --kind server | service
|
||||
# --name gitea | i-example0abcd1234efgh
|
||||
# --tag aliyun | production(可重复)
|
||||
# -q / --query 模糊关键词
|
||||
# --file <path> 输出文件路径,格式由扩展名推断(.json / .toml / .yaml / .yml)
|
||||
# --format json | toml | yaml 显式指定格式(输出到 stdout 时必须指定)
|
||||
# --no-secrets 不导出 secrets,无需主密钥
|
||||
|
||||
# 全量导出到 JSON 文件
|
||||
secrets export --file backup.json
|
||||
|
||||
# 按 namespace 导出为 TOML
|
||||
secrets export -n refining --file refining.toml
|
||||
|
||||
# 按 kind 导出为 YAML
|
||||
secrets export -n refining --kind service --file services.yaml
|
||||
|
||||
# 按 tag 过滤导出
|
||||
secrets export --tag production --file prod.json
|
||||
|
||||
# 模糊关键词导出
|
||||
secrets export -q mqtt --file mqtt.json
|
||||
|
||||
# 仅导出 schema(不含 secrets,无需主密钥)
|
||||
secrets export --no-secrets --file schema.json
|
||||
|
||||
# 输出到 stdout(必须指定 --format)
|
||||
secrets export -n refining --format yaml
|
||||
secrets export --format json | jq '.'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### import — 批量导入记录
|
||||
|
||||
从导出文件读取记录并写入数据库,自动重新加密 secrets。支持 JSON、TOML、YAML 三种格式,文件格式由扩展名自动推断。
|
||||
|
||||
```bash
|
||||
# 参数说明
|
||||
# <file> 必选,输入文件路径(格式由扩展名推断)
|
||||
# --force 冲突时覆盖已有记录(默认:报错并停止)
|
||||
# --dry-run 预览将执行的操作,不写入数据库
|
||||
# -o / --output text | json | json-compact
|
||||
|
||||
# 导入 JSON 文件(遇到已存在记录报错)
|
||||
secrets import backup.json
|
||||
|
||||
# 导入 TOML 文件,冲突时覆盖
|
||||
secrets import --force refining.toml
|
||||
|
||||
# 导入 YAML 文件,冲突时覆盖
|
||||
secrets import --force services.yaml
|
||||
|
||||
# 预览将执行的操作(不写入)
|
||||
secrets import --dry-run backup.json
|
||||
|
||||
# JSON 格式输出导入摘要
|
||||
secrets import backup.json -o json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### config — 配置管理(无需主密钥)
|
||||
|
||||
```bash
|
||||
# 设置数据库连接(每台设备执行一次,之后永久生效;先验证连接可用再写入)
|
||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
||||
|
||||
# 查看当前配置(密码脱敏)
|
||||
secrets config show
|
||||
|
||||
# 打印配置文件路径
|
||||
secrets config path
|
||||
# 输出: /Users/<user>/.config/secrets/config.toml
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 全局参数
|
||||
|
||||
```bash
|
||||
# debug 日志(位于子命令之前)
|
||||
secrets --verbose search -q mqtt
|
||||
secrets -v add -n refining --kind service --name gitea -m url=xxx -s token=yyy
|
||||
|
||||
# 或通过环境变量精细控制
|
||||
RUST_LOG=secrets=trace secrets search
|
||||
|
||||
# 一次性覆盖数据库连接
|
||||
secrets --db-url "postgres://..." search -n refining
|
||||
```
|
||||
多个 entry 可共享同一 secret 字段,通过 `entry_secrets` 中间表关联。
|
||||
添加条目时通过 `link_secret_names` 参数指定要关联的已有 secret name(按 `(user_id, name)` 精确匹配)。
|
||||
删除 entry 时仅解除关联,secret 本身若仍被引用则保留;不再被任何 entry 引用时自动清理。
|
||||
|
||||
## 代码规范
|
||||
|
||||
- 错误处理:统一使用 `anyhow::Result`,不用 `unwrap()`
|
||||
- 异步:全程 `tokio`,数据库操作 `sqlx` async
|
||||
- SQL:使用 `sqlx::query` / `sqlx::query_as` 绑定参数,禁止字符串拼接(搜索的动态 WHERE 子句除外,需使用参数绑定 `$1/$2`)
|
||||
- 新增 `kind` 类型时:只需在 `add` 调用时传入,无需改代码
|
||||
- 字段命名:CLI 短标志 `-n`=namespace,`-m`=meta,`-s`=secret,`-q`=query,`-v`=verbose,`-f`=field,`-o`=output
|
||||
- 日志:用户可见输出用 `println!`;调试/运维信息用 `tracing::debug!`/`info!`/`warn!`/`error!`
|
||||
- 审计:`add`/`update`/`delete` 成功后调用 `audit::log_tx`,写入 `audit_log` 表;失败只 warn 不中断
|
||||
- 加密:`encrypted` 列存储 AES-256-GCM 密文;`add`/`update`/`search`/`delete` 需主密钥(`secrets init` 后从 OS 钥匙串加载)
|
||||
- 输出:读命令通过 `OutputMode` 支持 text/json/json-compact/env;写命令 `add` 同样支持 `-o json`
|
||||
- 错误:业务层 `anyhow::Result`,避免生产路径 `unwrap()`。
|
||||
- 异步:`tokio` + `sqlx` async。
|
||||
- SQL:`sqlx::query` / `query_as` 参数绑定;动态 WHERE 仍须用占位符绑定。
|
||||
- 日志:运维用 `tracing`;面向用户的 Web 响应走 axum handler。tracing 字段风格:变量名即字段名时用简写(`%var`、`?var`、`var`),否则用显式形式(`field = %expr`)。
|
||||
- 审计:写操作成功后尽量 `audit::log_tx`;失败可 `warn`,不掩盖主错误。
|
||||
- 加密:密钥由用户密码短语通过 **PBKDF2-SHA256(600k 次)** 在客户端派生,服务端只存 `key_salt`/`key_check`/`key_params`,不持有原始密钥。Web 客户端在浏览器本地完成加解密;MCP 客户端通过 `X-Encryption-Key` 请求头传递密钥,服务端临时解密后返回明文(中间件示意见本节末尾)。
|
||||
- MCP:tools 参数与 JSON Schema(`schemars`)保持同步,鉴权以请求扩展中的用户上下文为准。
|
||||
|
||||
## 提交前检查(必须全部通过)
|
||||
## 生产 CORS
|
||||
|
||||
每次提交代码前,请在本地依次执行以下检查,**全部通过后再 push**:
|
||||
生产环境 CORS 使用显式请求头白名单(`build_cors_layer`),而非 `allow_headers(Any)`,
|
||||
因为 `tower-http` 禁止 `allow_credentials(true)` 与 `allow_headers(Any)` 同时使用。
|
||||
|
||||
优先使用:
|
||||
**维护约束**:若 MCP 协议或客户端新增自定义请求头,必须同步更新 `production_allowed_headers()`。
|
||||
当前允许的请求头:`Authorization`、`Content-Type`、`X-Encryption-Key`、`mcp-session-id`、`x-mcp-session`。
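一个满足上述约束的最小示意(基于 `tower-http`;`build_cors_layer` / `production_allowed_headers()` 的真实签名与允许的来源以仓库实现为准,此处仅为草图):

```rust
use axum::http::{header, HeaderName, HeaderValue, Method};
use tower_http::cors::{AllowHeaders, CorsLayer};

/// 假设性草图:显式列出允许的请求头,从而可以与 allow_credentials(true) 共存。
fn production_allowed_headers() -> Vec<HeaderName> {
    vec![
        header::AUTHORIZATION,
        header::CONTENT_TYPE,
        HeaderName::from_static("x-encryption-key"),
        HeaderName::from_static("mcp-session-id"),
        HeaderName::from_static("x-mcp-session"),
    ]
}

fn build_cors_layer(allowed_origin: HeaderValue) -> CorsLayer {
    CorsLayer::new()
        .allow_origin(allowed_origin)
        .allow_methods([Method::GET, Method::POST, Method::DELETE])
        .allow_headers(AllowHeaders::list(production_allowed_headers()))
        .allow_credentials(true)
}
```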
|
||||
|
||||
## 提交前检查
|
||||
|
||||
```bash
|
||||
./scripts/release-check.sh
|
||||
```
|
||||
|
||||
该脚本等价于先检查版本号与 tag,再依次执行下面的格式、Lint 与测试。
|
||||
|
||||
### 1. 版本号(按需)
|
||||
|
||||
若本次改动需要发版,请先确认 `Cargo.toml` 中的 `version` 已提升,避免 CI 打出的 Tag 与已有版本重复。**升级版本后需同时更新 `Cargo.lock`**(运行 `cargo build` 即可自动同步),否则 CI 中 `cargo clippy --locked` 会因 lock 与 manifest 不一致而失败。可通过 git tag 判断:
|
||||
或手动:
|
||||
|
||||
```bash
|
||||
# 查看当前 Cargo.toml 版本
|
||||
grep '^version' Cargo.toml
|
||||
|
||||
# 查看是否已存在该版本对应的 tag(CI 使用格式 secrets-<version>)
|
||||
git tag -l 'secrets-*'
|
||||
cargo fmt -- --check
|
||||
cargo clippy --locked -- -D warnings
|
||||
cargo test --locked
|
||||
```
|
||||
|
||||
若当前版本已被 tag(例如已有 `secrets-0.3.0` 且 `Cargo.toml` 仍为 `0.3.0`),则应在 `Cargo.toml` 中 bump 版本号,再执行 `cargo build` 同步 `Cargo.lock`,最后一并提交,以便 CI 自动打新 Tag 并发布 Release。
|
||||
|
||||
### 2. 格式、Lint、测试
|
||||
发版前确认未重复 tag:
|
||||
|
||||
```bash
|
||||
cargo fmt -- --check # 格式检查(不通过则运行 cargo fmt 修复)
|
||||
cargo clippy -- -D warnings # Lint 检查(消除所有 warning)
|
||||
cargo test # 单元/集成测试
|
||||
```
|
||||
|
||||
或一次性执行:
|
||||
|
||||
```bash
|
||||
cargo fmt -- --check && cargo clippy -- -D warnings && cargo test
|
||||
grep '^version' crates/secrets-mcp/Cargo.toml
|
||||
jj tag list
|
||||
```
|
||||
|
||||
## CI/CD
|
||||
|
||||
- Gitea Actions(runners: debian / darwin-arm64 / windows)
|
||||
- 触发:`src/**`、`Cargo.toml`、`Cargo.lock` 变更推送到 main
|
||||
- 构建目标:`x86_64-unknown-linux-musl`、`aarch64-apple-darwin`、`x86_64-apple-darwin`(由 ARM mac runner 交叉编译)、`x86_64-pc-windows-msvc`
|
||||
- 新版本自动打 Tag(格式 `secrets-<version>`)并上传二进制与对应 `.sha256` 摘要到 Gitea Release
|
||||
- Release 仅在 Linux/macOS/Windows 构建全部成功后才会从 draft 发布
|
||||
- 通知:飞书 Webhook(`vars.WEBHOOK_URL`)
|
||||
- 所需 secrets/vars:`RELEASE_TOKEN`(Release 上传,Gitea PAT)、`vars.WEBHOOK_URL`(通知,可选)
|
||||
- **注意**:Gitea Actions 的 Secret/Variable 创建时,`data`/`value` 字段需传入**原始值**,不要使用 base64 编码
|
||||
- **触发**:任意分支 `push`,且路径含 `crates/**`、`deploy/**`、根目录 `Cargo.toml`、`Cargo.lock`、`.gitea/workflows/**`(见 `.gitea/workflows/secrets.yml`)。
|
||||
- **版本与 tag**:从 `crates/secrets-mcp/Cargo.toml` 读取版本;构建成功后打 `secrets-mcp-<version>` tag。若远端已存在同名 tag,CI 会先删除,再在**当前提交**上重建并推送(覆盖式发版)。
|
||||
- **质量与构建**:`fmt` / `clippy --locked` / `test --locked` → `x86_64-unknown-linux-musl` 发布构建 `secrets-mcp`。
|
||||
- **Release(可选)**:`secrets.RELEASE_TOKEN`(Gitea PAT)用于通过 API **创建或更新**该 tag 的 Release(非 draft)、上传 `tar.gz` + `.sha256`;未配置则跳过 API Release,仅 tag + 构建。
|
||||
- **部署(可选)**:仅 `main`、`feat/mcp`、`mcp` 分支在构建成功时跑 `deploy-mcp`;需 `vars.DEPLOY_HOST`、`vars.DEPLOY_USER`、`secrets.DEPLOY_SSH_KEY`。勿把 OAuth/DB 等写进 workflow,用 `deploy/.env.example` 在目标机配置。
|
||||
- **Secrets 写法**:Actions **secrets 须为原始值**(PEM、PAT 明文),**勿** base64;否则 SSH/Release 会失败。**勿**在 CI 中保存 `GOOGLE_CLIENT_SECRET`、DB 密码。
|
||||
- **通知**:`vars.WEBHOOK_URL`(可选,飞书)。
|
||||
|
||||
## 环境变量
|
||||
## 环境变量(secrets-mcp)
|
||||
|
||||
| 变量 | 说明 |
|
||||
|------|------|
|
||||
| `RUST_LOG` | 日志级别,如 `secrets=debug`、`secrets=trace`(默认 warn) |
|
||||
| `USER` | 审计日志 actor 字段来源,Shell 自动设置,通常无需手动配置 |
|
||||
| `SECRETS_UPGRADE_URL` | upgrade 的 Release API 地址。构建时(cargo build)或运行时(.env/export) |
|
||||
| `SECRETS_DATABASE_URL` | **必填**。PostgreSQL URL。 |
|
||||
| `SECRETS_DATABASE_SSL_MODE` | 可选但强烈建议生产必填。推荐 `verify-full`(至少 `verify-ca`)。 |
|
||||
| `SECRETS_DATABASE_SSL_ROOT_CERT` | 可选。私有 CA 或自签链路时指定 CA 根证书路径。 |
|
||||
| `SECRETS_DATABASE_POOL_SIZE` | 可选。连接池最大连接数,默认 `10`。 |
|
||||
| `SECRETS_DATABASE_ACQUIRE_TIMEOUT` | 可选。获取连接超时秒数,默认 `5`。 |
|
||||
| `SECRETS_ENV` | 可选。设为 `prod` / `production` 时会拒绝弱 PostgreSQL TLS 模式。 |
|
||||
| `BASE_URL` | 对外基址;OAuth 回调 `${BASE_URL}/auth/google/callback`。 |
|
||||
| `SECRETS_MCP_BIND` | 监听地址,默认 `127.0.0.1:9315`(容器/远程直接暴露时需改为 `0.0.0.0:9315`)。 |
|
||||
| `GOOGLE_CLIENT_ID` / `GOOGLE_CLIENT_SECRET` | 可选;仅运行时配置。 |
|
||||
| `RUST_LOG` | 如 `secrets_mcp=debug`。 |
|
||||
| `RATE_LIMIT_GLOBAL_PER_SECOND` | 可选。全局限流速率,默认 `100` req/s。 |
|
||||
| `RATE_LIMIT_GLOBAL_BURST` | 可选。全局限流突发量,默认 `200`。 |
|
||||
| `RATE_LIMIT_IP_PER_SECOND` | 可选。单 IP 限流速率,默认 `20` req/s。 |
|
||||
| `RATE_LIMIT_IP_BURST` | 可选。单 IP 限流突发量,默认 `40`。 |
|
||||
| `TRUST_PROXY` | 可选。设为 `1`/`true`/`yes` 时从 `X-Forwarded-For` / `X-Real-IP` 提取客户端 IP。 |
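`TRUST_PROXY` 语义的一个假设性草图(函数名与取值归一化均为示意,实际逻辑以 `secrets-mcp` 实现为准):

```rust
use axum::http::HeaderMap;
use std::net::{IpAddr, SocketAddr};

/// 假设性草图:是否信任反向代理传来的客户端 IP 头。
fn trust_proxy_enabled() -> bool {
    matches!(
        std::env::var("TRUST_PROXY").ok().as_deref(),
        Some("1") | Some("true") | Some("yes")
    )
}

/// 假设性草图:限流使用的客户端 IP。未开启 TRUST_PROXY 时始终使用 TCP 对端地址,
/// 防止客户端伪造 X-Forwarded-For / X-Real-IP 绕过单 IP 限流。
fn client_ip(headers: &HeaderMap, peer: SocketAddr) -> IpAddr {
    if trust_proxy_enabled() {
        let forwarded = headers
            .get("x-forwarded-for")
            .and_then(|v| v.to_str().ok())
            .and_then(|v| v.split(',').next())
            .or_else(|| headers.get("x-real-ip").and_then(|v| v.to_str().ok()))
            .and_then(|v| v.trim().parse::<IpAddr>().ok());
        if let Some(ip) = forwarded {
            return ip;
        }
    }
    peer.ip()
}
```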
|
||||
|
||||
数据库连接通过 `secrets config set-db` 持久化到 `~/.config/secrets/config.toml`,不支持环境变量。
|
||||
> `SERVER_MASTER_KEY` 已不再需要。新架构下密钥由用户密码短语在客户端派生,服务端不持有。
|
||||
|
||||
55 CONTRIBUTING.md Normal file
@@ -0,0 +1,55 @@
|
||||
# Contributing
|
||||
|
||||
## 版本控制
|
||||
|
||||
本仓库使用 **[Jujutsu (jj)](https://jj-vcs.dev/)**。请勿使用 `git` 命令。
|
||||
|
||||
```bash
|
||||
jj log # 查看历史
|
||||
jj status # 查看状态
|
||||
jj new # 创建新变更
|
||||
jj commit # 提交
|
||||
jj rebase # 变基
|
||||
jj squash # 合并提交
|
||||
jj git push # 推送到远端
|
||||
```
|
||||
|
||||
详见 [AGENTS.md](AGENTS.md) 的「版本控制」章节。
|
||||
|
||||
## 本地开发
|
||||
|
||||
```bash
|
||||
# 复制环境变量
|
||||
cp deploy/.env.example .env
|
||||
|
||||
# 填写数据库连接等配置后
|
||||
cargo build
|
||||
cargo test --locked
|
||||
```
|
||||
|
||||
## 提交前检查
|
||||
|
||||
每次提交前必须通过:
|
||||
|
||||
```bash
|
||||
cargo fmt -- --check
|
||||
cargo clippy --locked -- -D warnings
|
||||
cargo test --locked
|
||||
```
|
||||
|
||||
或使用脚本:
|
||||
|
||||
```bash
|
||||
./scripts/release-check.sh
|
||||
```
|
||||
|
||||
## 发版规则
|
||||
|
||||
涉及 `crates/**`、根目录 `Cargo.toml`/`Cargo.lock`、`secrets-mcp` 行为变更的提交,默认需要发版。
|
||||
|
||||
1. 检查 `crates/secrets-mcp/Cargo.toml` 的 `version`
|
||||
2. 运行 `jj tag list` 确认对应 tag 是否已存在
|
||||
3. 若 tag 已存在且有代码变更,**必须 bump 版本**并 `cargo build` 同步 `Cargo.lock`
|
||||
4. 通过 release-check 后再提交
|
||||
|
||||
详见 [AGENTS.md](AGENTS.md) 的「提交 / 推送硬规则」章节。
|
||||
1128 Cargo.lock generated
File diff suppressed because it is too large
56 Cargo.toml
@@ -1,33 +1,39 @@
|
||||
[package]
|
||||
name = "secrets"
|
||||
version = "0.9.5"
|
||||
[workspace]
|
||||
members = [
|
||||
"crates/secrets-core",
|
||||
"crates/secrets-mcp",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
aes-gcm = "^0.10.3"
|
||||
anyhow = "^1.0.102"
|
||||
argon2 = { version = "^0.5.3", features = ["std"] }
|
||||
chrono = { version = "^0.4.44", features = ["serde"] }
|
||||
clap = { version = "^4.6.0", features = ["derive"] }
|
||||
dirs = "^6.0.0"
|
||||
dotenvy = "^0.15"
|
||||
flate2 = "^1.1.9"
|
||||
keyring = { version = "^3.6.3", features = ["apple-native", "windows-native", "linux-native"] }
|
||||
rand = "^0.10.0"
|
||||
reqwest = { version = "^0.12", default-features = false, features = ["rustls-tls", "json"] }
|
||||
rpassword = "^7.4.0"
|
||||
self-replace = "^1.5.0"
|
||||
semver = "^1.0.27"
|
||||
[workspace.dependencies]
|
||||
# Async runtime
|
||||
tokio = { version = "^1.50.0", features = ["rt-multi-thread", "macros", "fs", "io-util", "process", "signal"] }
|
||||
|
||||
# Database
|
||||
sqlx = { version = "^0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] }
|
||||
|
||||
# Serialization
|
||||
serde = { version = "^1.0.228", features = ["derive"] }
|
||||
serde_json = "^1.0.149"
|
||||
serde_yaml = "^0.9"
|
||||
sha2 = "^0.10.9"
|
||||
sqlx = { version = "^0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] }
|
||||
tar = "^0.4.44"
|
||||
tempfile = "^3.19"
|
||||
tokio = { version = "^1.50.0", features = ["rt-multi-thread", "macros", "fs", "io-util", "process", "signal"] }
|
||||
toml = "^1.0.7"
|
||||
|
||||
# Crypto
|
||||
aes-gcm = "^0.10.3"
|
||||
sha2 = "^0.10.9"
|
||||
rand = "^0.10.0"
|
||||
|
||||
# Utils
|
||||
anyhow = "^1.0.102"
|
||||
thiserror = "^2"
|
||||
chrono = { version = "^0.4.44", features = ["serde"] }
|
||||
uuid = { version = "^1.22.0", features = ["serde"] }
|
||||
tracing = "^0.1"
|
||||
tracing-subscriber = { version = "^0.3", features = ["env-filter"] }
|
||||
uuid = { version = "^1.22.0", features = ["serde"] }
|
||||
zip = { version = "^8.2.0", default-features = false, features = ["deflate"] }
|
||||
dotenvy = "^0.15"
|
||||
|
||||
# HTTP
|
||||
reqwest = { version = "^0.12", default-features = false, features = ["rustls-tls", "json"] }
|
||||
|
||||
468 README.md
@@ -1,295 +1,218 @@
|
||||
# secrets
|
||||
# secrets-mcp
|
||||
|
||||
跨设备密钥与配置管理 CLI,基于 Rust + PostgreSQL 18。
|
||||
|
||||
将服务器信息、服务凭据统一存入数据库,供本地工具和 AI 读取上下文。每个敏感字段单独行存储(`secrets` 子表),字段名、类型、长度以明文保存便于 AI 理解,仅值本身使用 AES-256-GCM 加密;主密钥由 Argon2id 从主密码派生并存入系统钥匙串。
|
||||
Workspace:**`secrets-core`** + **`secrets-mcp`**(HTTP Streamable MCP + Web)。多租户密钥与元数据存 PostgreSQL;用户通过 **Google OAuth** 登录,**API Key** 鉴权 MCP 请求;秘密数据用**用户密码短语派生的密钥**在客户端加密,服务端不持有原始密钥。
|
||||
|
||||
## 安装
|
||||
|
||||
```bash
|
||||
cargo build --release
|
||||
# 或从 Release 页面下载预编译二进制
|
||||
cargo build --release -p secrets-mcp
|
||||
# 产物: target/release/secrets-mcp
|
||||
```
|
||||
|
||||
已有旧版本时,可执行 `secrets upgrade` 自动下载最新版并替换。该命令会校验 Release 附带的 `.sha256` 摘要后再安装。
|
||||
发版产物见 Gitea Release(tag:`secrets-mcp-<version>`,Linux musl 预编译);其它平台本地 `cargo build`。
|
||||
|
||||
## 首次使用(每台设备各执行一次)
|
||||
## 环境变量与本地运行
|
||||
|
||||
复制 `deploy/.env.example` 为项目根目录 `.env`(已在 `.gitignore`),或导出同名变量:
|
||||
|
||||
| 变量 | 说明 |
|
||||
|------|------|
|
||||
| `SECRETS_DATABASE_URL` | **必填**。PostgreSQL 连接串(推荐使用域名,例如 `db.refining.ltd`,避免直连 IP)。 |
|
||||
| `SECRETS_DATABASE_SSL_MODE` | 可选但强烈建议生产必填。推荐 `verify-full`(至少 `verify-ca`),避免回退到弱 TLS 模式。 |
|
||||
| `SECRETS_DATABASE_SSL_ROOT_CERT` | 可选。私有 CA 或自签链路时指定 CA 根证书路径(如 `/etc/secrets/pg-ca.crt`)。 |
|
||||
| `SECRETS_ENV` | 可选。设为 `prod` / `production` 时会拒绝弱 PostgreSQL TLS 模式(`prefer`、`disable`、`allow`、`require`)。 |
|
||||
| `BASE_URL` | 对外访问基址;OAuth 回调为 `{BASE_URL}/auth/google/callback`。默认 `http://localhost:9315`。 |
|
||||
| `SECRETS_MCP_BIND` | 监听地址,默认 `127.0.0.1:9315`。容器内或直接对外暴露端口时请改为 `0.0.0.0:9315`;反代时常为 `127.0.0.1:9315`。 |
|
||||
| `GOOGLE_CLIENT_ID` / `GOOGLE_CLIENT_SECRET` | 可选;不配置则无 Google 登录入口。运行时从环境读取,勿写入 CI、勿打入二进制。 |
|
||||
| `RUST_LOG` | 可选;日志级别,如 `secrets_mcp=debug`。 |
|
||||
| `SECRETS_DATABASE_POOL_SIZE` | 可选。连接池最大连接数,默认 `10`。 |
|
||||
| `SECRETS_DATABASE_ACQUIRE_TIMEOUT` | 可选。获取连接超时秒数,默认 `5`。 |
|
||||
| `RATE_LIMIT_GLOBAL_PER_SECOND` | 可选。全局限流速率,默认 `100` req/s。 |
|
||||
| `RATE_LIMIT_GLOBAL_BURST` | 可选。全局限流突发量,默认 `200`。 |
|
||||
| `RATE_LIMIT_IP_PER_SECOND` | 可选。单 IP 限流速率,默认 `20` req/s。 |
|
||||
| `RATE_LIMIT_IP_BURST` | 可选。单 IP 限流突发量,默认 `40`。 |
|
||||
| `TRUST_PROXY` | 可选。设为 `1`/`true`/`yes` 时从 `X-Forwarded-For` / `X-Real-IP` 提取客户端 IP;仅在反代环境下启用。 |
|
||||
|
||||
```bash
|
||||
# 1. 配置数据库连接(会先验证连接可用再写入)
|
||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
||||
|
||||
# 2. 初始化主密钥(提示输入至少 8 位的主密码,派生后存入 OS 钥匙串)
|
||||
secrets init
|
||||
cargo run -p secrets-mcp
|
||||
```
|
||||
|
||||
主密码不会存储,仅用于派生主密钥,且至少需 8 位。同一主密码在所有设备上会得到相同主密钥(salt 存于数据库,首台设备生成后共享)。
|
||||
|
||||
**主密钥存储**:macOS → Keychain;Windows → Credential Manager;Linux → keyutils(会话级,重启后需再次 `secrets init`)。
|
||||
|
||||
**从旧版(明文存储)升级**:升级后首次运行需执行 `secrets init` 即可(明文记录需手动重新 add 或通过 update 更新)。
|
||||
|
||||
## AI Agent 快速指南
|
||||
|
||||
这个 CLI 以 AI 使用优先设计。核心路径只有一条:**读取用 `search`,写入用 `add` / `update`**。
|
||||
|
||||
### 第一步:发现有哪些数据
|
||||
生产推荐示例(PostgreSQL TLS):
|
||||
|
||||
```bash
|
||||
# 列出所有记录摘要(默认最多 50 条,安全起步)
|
||||
secrets search --summary --limit 20
|
||||
|
||||
# 按 namespace 过滤
|
||||
secrets search -n refining --summary --limit 20
|
||||
|
||||
# 按最近更新排序
|
||||
secrets search --sort updated --limit 10 --summary
|
||||
SECRETS_DATABASE_URL=postgres://postgres:***@db.refining.ltd:5432/secrets-mcp
|
||||
SECRETS_DATABASE_SSL_MODE=verify-full
|
||||
SECRETS_DATABASE_SSL_ROOT_CERT=/etc/secrets/pg-ca.crt
|
||||
SECRETS_ENV=production
|
||||
```
|
||||
|
||||
`--summary` 只返回轻量字段(namespace、kind、name、tags、desc、updated_at),不含完整 metadata 和 secrets。
|
||||
- **Web**:`BASE_URL`(登录、Dashboard、设置密码短语、创建 API Key)。
|
||||
- **MCP**:Streamable HTTP 基址 `{BASE_URL}/mcp`,需 `Authorization: Bearer <api_key>` + `X-Encryption-Key: <hex>` 请求头(读密文工具须带密钥)。
|
||||
|
||||
### 第二步:精确读取单条记录
|
||||
## PostgreSQL TLS 加固
|
||||
|
||||
```bash
|
||||
# 精确定位(namespace + kind + name 三元组)
|
||||
secrets search -n refining --kind service --name gitea
|
||||
- 推荐将数据库域名单独设置为 `db.refining.ltd`,服务域名保持 `secrets.refining.app`。
|
||||
- 数据库证书建议使用可校验链路(如 Let's Encrypt 或私有 CA),并保证证书 `SAN` 包含 `db.refining.ltd`。
|
||||
- PostgreSQL 侧建议使用 `hostssl` 规则限制应用来源(如 `47.238.146.244/32`),逐步移除公网明文 `host` 访问。
|
||||
- 应用端推荐 `SECRETS_DATABASE_SSL_MODE=verify-full`;仅在过渡阶段可临时用 `verify-ca`。
|
||||
- 可执行运维步骤见 [`deploy/postgres-tls-hardening.md`](deploy/postgres-tls-hardening.md)。
|
||||
|
||||
# 获取完整记录(含 secrets 字段名,无需 master_key)
|
||||
secrets search -n refining --kind service --name gitea -o json
|
||||
## MCP 与 AI 工作流(v0.3+)
|
||||
|
||||
# 直接提取单个 metadata 字段值(最短路径)
|
||||
secrets search -n refining --kind service --name gitea -f metadata.url
|
||||
条目在逻辑上以 **`(folder, name)`** 在用户内唯一(数据库唯一索引:`user_id + folder + name`)。同名可在不同 folder 下各存一条(例如 `refining/aliyun` 与 `ricnsmart/aliyun`)。
|
||||
|
||||
# 同时提取多个 metadata 字段
|
||||
secrets search -n refining --kind service --name gitea \
|
||||
-f metadata.url -f metadata.default_org
|
||||
### 工具列表
|
||||
|
||||
# 需要 secrets 时,改用 inject / run
|
||||
secrets inject -n refining --kind service --name gitea
|
||||
secrets run -n refining --kind service --name gitea -- printenv
|
||||
| 工具 | 需要加密密钥 | 说明 |
|
||||
|------|-------------|------|
|
||||
| `secrets_find` | 否 | 发现条目(返回含 secret_fields schema),支持 `name_query` 模糊匹配 |
|
||||
| `secrets_search` | 否 | 搜索条目,支持 `query`/`folder`/`type`/`name` 过滤、`sort`/`offset` 分页、`summary` 摘要模式 |
|
||||
| `secrets_get` | 是 | 按 UUID `id` 获取单条条目及解密后的 secrets |
|
||||
| `secrets_add` | 是 | 添加新条目,支持 `meta_obj`/`secrets_obj` JSON 对象参数、`secret_types` 指定密钥类型、`link_secret_names` 关联已有 secret |
|
||||
| `secrets_update` | 是 | 更新条目,支持 `id` 或 `name`+`folder` 定位 |
|
||||
| `secrets_delete` | 否 | 删除条目,支持 `id` 或 `name`+`folder` 定位;`dry_run=true` 预览删除 |
|
||||
| `secrets_history` | 否 | 查看条目历史,支持 `id` 或 `name`+`folder` 定位 |
|
||||
| `secrets_rollback` | 是 | 回滚条目到指定历史版本,支持 `id` 或 `name`+`folder` 定位 |
|
||||
| `secrets_export` | 是 | 导出条目(含解密明文),支持 JSON/TOML/YAML 格式 |
|
||||
| `secrets_env_map` | 是 | 将 secrets 转换为环境变量映射(`UPPER(entry)_UPPER(field)` 格式),支持 `prefix`;命名规则示意见表后 |
|
||||
| `secrets_overview` | 否 | 返回各 folder 和 type 的 entry 计数概览 |
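`secrets_env_map` 变量名构造的一个最小示意(归一化细节为假设,实际行为以 `secrets-core` 的 `env_map` 实现为准):

```rust
/// 假设性草图:UPPER(entry)_UPPER(field) 命名;传入 prefix 时用 prefix 替代 entry 名。
/// 非字母数字字符统一转为下划线。
fn env_var_name(prefix: Option<&str>, entry: &str, field: &str) -> String {
    fn normalize(s: &str) -> String {
        s.chars()
            .map(|c| {
                if c.is_ascii_alphanumeric() {
                    c.to_ascii_uppercase()
                } else {
                    '_'
                }
            })
            .collect()
    }
    let head = prefix.filter(|p| !p.is_empty()).unwrap_or(entry);
    format!("{}_{}", normalize(head), normalize(field))
}

// 例如 entry = "gitea"、field = "token" → GITEA_TOKEN;prefix = "CI" 时 → CI_TOKEN。
```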
|
||||
|
||||
### 消歧规则
|
||||
|
||||
- **按 `name` 定位的工具**(`secrets_update` / `secrets_delete` / `secrets_history` / `secrets_rollback`):若该用户下仅一条匹配则直接执行;若多条(同 `name`、不同 `folder`)则返回错误并提示补全 `folder`。也可直接传 `id`(UUID)跳过消歧。
|
||||
- **`secrets_get`** 仅支持通过 `id`(UUID)获取。
|
||||
- **`secrets_delete`** 的 `dry_run=true` 与真实删除使用相同消歧规则——唯一则预览一条,多条则报错并要求 `folder`。
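上述消歧规则的一个假设性草图(SQL 与错误文案均为示意,实际实现以 `secrets-core` 的 service 层为准):

```rust
use anyhow::{Result, bail};
use sqlx::PgPool;
use uuid::Uuid;

/// 假设性草图:按 name(可选 folder)定位唯一条目;多条同名则要求补全 folder。
async fn resolve_entry_id(
    pool: &PgPool,
    user_id: Uuid,
    name: &str,
    folder: Option<&str>,
) -> Result<Uuid> {
    let ids: Vec<(Uuid,)> = sqlx::query_as(
        "SELECT id FROM entries \
         WHERE user_id = $1 AND name = $2 AND ($3::text IS NULL OR folder = $3)",
    )
    .bind(user_id)
    .bind(name)
    .bind(folder)
    .fetch_all(pool)
    .await?;

    match ids.as_slice() {
        [] => bail!("no entry named '{name}'"),
        [(id,)] => Ok(*id),
        _ => bail!("multiple entries named '{name}' in different folders; specify `folder`"),
    }
}
```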
|
||||
|
||||
### 共享密钥
|
||||
|
||||
N:N 关联下,删除 entry 仅解除关联,被共享的 secret 若仍被其他 entry 引用则保留;无引用时自动清理。
|
||||
|
||||
## 加密架构(混合 E2EE)
|
||||
|
||||
### 密钥派生
|
||||
|
||||
用户在 Web Dashboard 设置**密码短语**,浏览器使用 **Web Crypto API(PBKDF2-SHA256,600k 次迭代)**在本地派生 256-bit AES 密钥。
|
||||
|
||||
- **Salt(32B)**:首次设置时在浏览器生成,存入服务端 `users.key_salt`
|
||||
- **key_check**:派生密钥加密已知常量 `"secrets-mcp-key-check"`,存入 `users.key_check`,用于登录时验证密码短语
|
||||
- **服务端不存储原始密钥**,只存 salt + key_check
|
||||
|
||||
跨设备同步:新设备登录 → 输入相同密码短语 → 从服务端取 salt → 同样的 PBKDF2 → 得到相同密钥。
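如需在脚本或服务侧按相同参数复现派生(例如为 MCP 客户端生成 `X-Encryption-Key` 的 64 位 hex),可参考下面的假设性草图(依赖 `pbkdf2`、`sha2`、`hex` crate;迭代次数等实际参数以 `users.key_params` 为准):

```rust
use pbkdf2::pbkdf2_hmac;
use sha2::Sha256;

/// 假设性草图:与浏览器端一致的 PBKDF2-SHA256(600_000 次迭代)派生 256-bit 密钥。
/// salt 为服务端 users.key_salt 中保存的 32 字节随机值。
fn derive_encryption_key_hex(passphrase: &str, salt: &[u8]) -> String {
    let mut key = [0u8; 32];
    pbkdf2_hmac::<Sha256>(passphrase.as_bytes(), salt, 600_000, &mut key);
    hex::encode(key) // 即 X-Encryption-Key 请求头的取值
}
```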
|
||||
|
||||
### 写入与读取流程
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph Web["Web 浏览器(E2E)"]
|
||||
P["密码短语"] --> K["PBKDF2 → 256-bit key"]
|
||||
K --> Enc["AES-256-GCM 加密"]
|
||||
K --> Dec["AES-256-GCM 解密"]
|
||||
end
|
||||
|
||||
subgraph AI["AI 客户端(MCP)"]
|
||||
HdrKey["X-Encryption-Key: hex"]
|
||||
end
|
||||
|
||||
subgraph Server["secrets-mcp 服务端"]
|
||||
Middleware["请求中临时持有 key\n请求结束即丢弃"]
|
||||
DB[(PostgreSQL\nsecrets.encrypted = 密文\nentries.metadata = 明文)]
|
||||
end
|
||||
|
||||
Enc -->|密文| Server
|
||||
HdrKey -->|key + 请求| Middleware
|
||||
Middleware <-->|加解密| DB
|
||||
DB -->|密文| Dec
|
||||
```
|
||||
|
||||
`search` 展示 metadata 与 secrets 的字段名,不展示 secret 值本身;需要 secret 值时用 `inject` / `run`(仅注入加密字段,不含 metadata)。
|
||||
### 两种客户端对比
|
||||
|
||||
### 输出格式
|
||||
| | Web 浏览器 | AI 客户端(MCP) |
|
||||
|---|---|---|
|
||||
| 密钥位置 | 仅在浏览器内存 / sessionStorage | MCP 配置 headers 中 |
|
||||
| 加解密位置 | 客户端(真正 E2E) | 服务端临时(请求级生命周期) |
|
||||
| 安全边界 | 服务端零知识 | 依赖 TLS + 服务端内存隔离 |
|
||||
|
||||
| 场景 | 推荐命令 |
|
||||
|------|----------|
|
||||
| AI 解析 / 管道处理 | `-o json` 或 `-o json-compact` |
|
||||
| 注入 secrets 到环境变量 | `inject` / `run` |
|
||||
| 人类查看 | 默认 `text`(TTY 下自动启用) |
|
||||
| 非 TTY(管道/重定向) | 自动 `json-compact` |
|
||||
### 敏感数据传输
|
||||
|
||||
说明:`text` 输出中的时间会按当前机器本地时区显示;`json/json-compact` 继续使用 UTC(RFC3339 风格)以便脚本和 AI 稳定解析。
|
||||
- **OAuth `client_secret`** 只存服务端环境变量,不发给浏览器
|
||||
- **API Key** 当前存放在 `users.api_key`,Dashboard 会明文展示并可重置
|
||||
- **X-Encryption-Key** 随 MCP 请求经 TLS 传输,服务端仅在请求处理期间持有(不持久化)
|
||||
- **生产环境必须走 HTTPS/TLS**
|
||||
|
||||
```bash
|
||||
# 管道直接 jq 解析(非 TTY 自动 json-compact)
|
||||
secrets search -n refining --kind service | jq '.[].name'
|
||||
## AI 客户端配置
|
||||
|
||||
# 需要 secrets 时,使用 inject / run
|
||||
secrets inject -n refining --kind service --name gitea > ~/.config/gitea/secrets.env
|
||||
secrets run -n refining --kind service --name gitea -- ./deploy.sh
|
||||
在 Web Dashboard 设置密码短语后,解锁页面会按客户端格式生成配置。常见客户端示例如下:
|
||||
|
||||
`Cursor / Claude Desktop` 风格:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"secrets": {
|
||||
"url": "https://secrets.example.com/mcp",
|
||||
"headers": {
|
||||
"Authorization": "Bearer sk_abc123...",
|
||||
"X-Encryption-Key": "a1b2c3...(64位hex)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 完整命令参考
|
||||
`OpenCode` 风格:
|
||||
|
||||
```bash
|
||||
# 查看帮助(包含各子命令 EXAMPLES)
|
||||
secrets --help
|
||||
secrets init --help # 主密钥初始化
|
||||
secrets search --help
|
||||
secrets add --help
|
||||
secrets update --help
|
||||
secrets delete --help
|
||||
secrets config --help
|
||||
secrets upgrade --help # 检查并更新 CLI 版本
|
||||
secrets export --help # 批量导出(JSON/TOML/YAML)
|
||||
secrets import --help # 批量导入(JSON/TOML/YAML)
|
||||
|
||||
# ── search ──────────────────────────────────────────────────────────────────
|
||||
secrets search --summary --limit 20 # 发现概览
|
||||
secrets search -n refining --kind service # 按 namespace + kind
|
||||
secrets search -n refining --kind service --name gitea # 精确查找
|
||||
secrets search -q mqtt # 关键词模糊搜索
|
||||
secrets search --tag hongkong # 按 tag 过滤
|
||||
secrets search -n refining --kind service --name gitea -f metadata.url # 提取 metadata 字段
|
||||
secrets search -n refining --kind service --name gitea -o json # 完整记录(含 secrets schema)
|
||||
secrets search --sort updated --limit 10 --summary # 最近改动
|
||||
secrets search -n refining --summary --limit 10 --offset 10 # 翻页
|
||||
|
||||
# ── add ──────────────────────────────────────────────────────────────────────
|
||||
secrets add -n refining --kind server --name my-server \
|
||||
--tag aliyun --tag shanghai \
|
||||
-m ip=10.0.0.1 -m desc="Example ECS" \
|
||||
-s username=root -s ssh_key=@./keys/server.pem
|
||||
|
||||
# 多行文件直接写入嵌套 secret 字段
|
||||
secrets add -n refining --kind server --name my-server \
|
||||
-s credentials:content@./keys/server.pem
|
||||
|
||||
# 使用 typed JSON 写入 secret(布尔、数字、数组、对象)
|
||||
secrets add -n refining --kind service --name deploy-bot \
|
||||
-s enabled:=true \
|
||||
-s retry_count:=3 \
|
||||
-s scopes:='["repo","workflow"]' \
|
||||
-s extra:='{"region":"ap-east-1","verify_tls":true}'
|
||||
|
||||
secrets add -n refining --kind service --name gitea \
|
||||
--tag gitea \
|
||||
-m url=https://code.example.com -m default_org=myorg \
|
||||
-s token=<token>
|
||||
|
||||
# ── update ───────────────────────────────────────────────────────────────────
|
||||
secrets update -n refining --kind server --name my-server -m ip=10.0.0.1
|
||||
secrets update -n refining --kind service --name gitea --add-tag production -s token=<new>
|
||||
secrets update -n refining --kind service --name mqtt --remove-meta old_port --remove-secret old_key
|
||||
secrets update -n refining --kind server --name my-server --remove-secret credentials:content
|
||||
|
||||
# ── delete ───────────────────────────────────────────────────────────────────
|
||||
secrets delete -n refining --kind service --name legacy-mqtt # 精确删除单条(--kind 必填)
|
||||
secrets delete -n refining --dry-run # 预览批量删除(不写入)
|
||||
secrets delete -n ricnsmart # 批量删除整个 namespace
|
||||
secrets delete -n ricnsmart --kind server # 批量删除指定 kind
|
||||
|
||||
# ── init ─────────────────────────────────────────────────────────────────────
|
||||
secrets init # 主密钥初始化(每台设备一次,主密码至少 8 位,派生后存钥匙串)
|
||||
|
||||
# ── config ───────────────────────────────────────────────────────────────────
|
||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets" # 先验证再写入
|
||||
secrets config show # 密码脱敏展示
|
||||
secrets config path # 打印配置文件路径
|
||||
|
||||
# ── upgrade ──────────────────────────────────────────────────────────────────
|
||||
secrets upgrade --check # 仅检查是否有新版本
|
||||
secrets upgrade # 下载、校验 SHA-256 并安装最新版(可通过 SECRETS_UPGRADE_URL 自托管)
|
||||
|
||||
# ── export ────────────────────────────────────────────────────────────────────
|
||||
secrets export --file backup.json # 全量导出到 JSON
|
||||
secrets export -n refining --file refining.toml # 按 namespace 导出为 TOML
|
||||
secrets export -n refining --kind service --file svc.yaml # 按 kind 导出为 YAML
|
||||
secrets export --tag production --file prod.json # 按 tag 过滤
|
||||
secrets export -q mqtt --file mqtt.json # 模糊搜索导出
|
||||
secrets export --no-secrets --file schema.json # 仅导出 schema(无需主密钥)
|
||||
secrets export -n refining --format yaml # 输出到 stdout,指定格式
|
||||
|
||||
# ── import ────────────────────────────────────────────────────────────────────
|
||||
secrets import backup.json # 导入(冲突时报错)
|
||||
secrets import --force refining.toml # 冲突时覆盖已有记录
|
||||
secrets import --dry-run backup.yaml # 预览将要执行的操作(不写入)
|
||||
|
||||
# ── 调试 ──────────────────────────────────────────────────────────────────────
|
||||
secrets --verbose search -q mqtt
|
||||
RUST_LOG=secrets=trace secrets search
|
||||
```json
|
||||
{
|
||||
"mcp": {
|
||||
"secrets": {
|
||||
"type": "remote",
|
||||
"enabled": true,
|
||||
"url": "https://secrets.example.com/mcp",
|
||||
"headers": {
|
||||
"Authorization": "Bearer sk_abc123...",
|
||||
"X-Encryption-Key": "a1b2c3...(64位hex)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 数据模型
|
||||
|
||||
主表 `entries`(namespace、kind、name、tags、metadata)+ 子表 `secrets`(每个加密字段一行,含 field_name、encrypted)。首次连接自动建表;同时创建 `audit_log`、`entries_history`、`secrets_history` 等表。
|
||||
主表 **`entries`**(`folder`、`type`、`name`、`notes`、`tags`、`metadata`,多租户时带 `user_id`)+ 子表 **`secrets`**(每行一个加密字段:`name`、`type`、`encrypted`,通过 `entry_secrets` 中间表与 entry 建立 N:N 关联)。**唯一性**:`UNIQUE(user_id, folder, name)`(`user_id` 为空时为遗留行唯一 `(folder, name)`)。另有 `entries_history`、`secrets_history`、`audit_log`,以及 **`users`**(含 `key_salt`、`key_check`、`key_params`、`api_key`)、**`oauth_accounts`**。首次连库自动迁移建表(`secrets-core` 的 `migrate`);已有库在进程启动时亦由同一 `migrate()` 增量补齐表、索引与 N:N 结构。若需从更早版本对照一次性 SQL,可在 git 历史中检索已移除的 `scripts/migrate-v0.3.0.sql`。**Web 登录会话**(tower-sessions)使用同一 `SECRETS_DATABASE_URL`,进程启动时对会话存储执行迁移(见 `secrets-mcp` 中 `PostgresStore::migrate`),无需额外环境变量。
|
||||
|
||||
| 位置 | 字段 | 说明 |
|
||||
|------|------|------|
|
||||
| entries | namespace | 一级隔离,如 `refining`、`ricnsmart` |
|
||||
| entries | kind | 记录类型,如 `server`、`service`、`key`(可自由扩展) |
|
||||
| entries | name | 人类可读唯一标识 |
|
||||
| entries | tags | 多维标签,如 `["aliyun","hongkong"]` |
|
||||
| entries | metadata | 明文描述(ip、desc、domains、key_ref 等) |
|
||||
| secrets | field_name | 明文,search 可见,AI 可推断 inject 会生成什么变量 |
|
||||
| secrets | encrypted | 仅加密值本身,AES-256-GCM |
|
||||
| entries | folder | 组织/隔离空间,如 `refining`、`ricnsmart`;参与唯一键 |
|
||||
| entries | type | 软分类,用户自定义,如 `server`、`service`、`account`、`person`、`document`(不参与唯一键) |
|
||||
| entries | name | 人类可读标识;与 `folder` 一起在用户内唯一 |
|
||||
| entries | notes | 非敏感说明文本 |
|
||||
| entries | metadata | 明文 JSON(ip、url、subtype 等) |
|
||||
| secrets | name | 密钥名称(调用方提供) |
|
||||
| secrets | type | 密钥类型(调用方提供,默认 `text`) |
|
||||
| secrets | encrypted | AES-GCM 密文(含 nonce) |
|
||||
| users | key_salt | PBKDF2 salt(32B),首次设置密码短语时写入 |
|
||||
| users | key_check | 派生密钥加密已知常量,用于验证密码短语 |
|
||||
| users | key_params | 派生算法参数,如 `{"alg":"pbkdf2-sha256","iterations":600000}` |
|
||||
|
||||
`-m` / `--meta` 写入 `metadata`,`-s` / `--secret` 写入 `secrets` 表的独立行。支持 `key=value`、`key=@file`、`key:=<json>`,也支持 `credentials:content@./key.pem` 这类嵌套字段文件写入;删除时支持 `--remove-secret credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。
|
||||
### 共享密钥(N:N 关联)
|
||||
|
||||
**PEM 共享**:同一 PEM 被多台服务器共享时,可存为 `kind=key` 记录,服务器通过 `metadata.key_ref` 引用;轮换只需 update 一条 key 记录,所有引用自动生效。详见 [AGENTS.md](AGENTS.md)。
|
||||
多个条目可共享同一密文字段,通过 `entry_secrets` 中间表实现 N:N 关联:
|
||||
- 添加条目时可通过 `link_secret_names` 参数关联已有的 secret(按 `(user_id, name)` 精确匹配查找)
|
||||
- 同一 secret 可被多个 entry 引用,删除某 entry 不会级联删除被共享的 secret
|
||||
- 当 secret 不再被任何 entry 引用时,自动清理(`NOT EXISTS` 子查询)
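清理步骤的一个假设性草图(假定与删除 entry 在同一事务中执行;是否按 `user_id` 进一步收窄以实际实现为准):

```rust
use sqlx::{Postgres, Transaction};

/// 假设性草图:删除 entry 并解除 entry_secrets 关联之后,
/// 清理不再被任何 entry 引用的 secret 行,返回清理数量。
async fn cleanup_orphan_secrets(tx: &mut Transaction<'_, Postgres>) -> sqlx::Result<u64> {
    let result = sqlx::query(
        "DELETE FROM secrets s \
         WHERE NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
    )
    .execute(&mut **tx)
    .await?;
    Ok(result.rows_affected())
}
```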
|
||||
|
||||
### `-m` / `--meta` JSON 语法速查
|
||||
### 类型(Type)
|
||||
|
||||
`-m` 和 `-s` 走的是同一套解析规则,只是写入位置不同:`-m` 写到明文 `metadata`,适合端口、开关、标签、描述性配置等非敏感信息。
|
||||
|
||||
| 目标值 | 写法示例 | 实际存入 |
|
||||
|------|------|------|
|
||||
| 普通字符串 | `-m url=https://code.example.com` | `"https://code.example.com"` |
|
||||
| 文件内容字符串 | `-m notes=@./service-notes.txt` | `"..."` |
|
||||
| 布尔值 | `-m enabled:=true` | `true` |
|
||||
| 数字 | `-m port:=3000` | `3000` |
|
||||
| `null` | `-m deprecated_at:=null` | `null` |
|
||||
| 数组 | `-m domains:='["code.example.com","git.example.com"]'` | `["code.example.com","git.example.com"]` |
|
||||
| 对象 | `-m tls:='{"enabled":true,"redirect_http":true}'` | `{"enabled":true,"redirect_http":true}` |
|
||||
| 嵌套路径 + JSON | `-m deploy:strategy:='{"type":"rolling","batch":2}'` | `{"deploy":{"strategy":{"type":"rolling","batch":2}}}` |
|
||||
|
||||
常见规则:
|
||||
|
||||
- `=` 表示按字符串存储。
|
||||
- `:=` 表示按 JSON 解析。
|
||||
- shell 中数组和对象建议整体用单引号包住。
|
||||
- 嵌套字段继续用冒号分隔:`-m runtime:max_open_conns:=20`。
|
||||
|
||||
示例:新增一条带 typed metadata 的记录
|
||||
|
||||
```bash
|
||||
secrets add -n refining --kind service --name gitea \
|
||||
-m url=https://code.example.com \
|
||||
-m port:=3000 \
|
||||
-m enabled:=true \
|
||||
-m domains:='["code.example.com","git.example.com"]' \
|
||||
-m tls:='{"enabled":true,"redirect_http":true}'
|
||||
```
|
||||
|
||||
示例:更新已有记录中的嵌套 metadata
|
||||
|
||||
```bash
|
||||
secrets update -n refining --kind service --name gitea \
|
||||
-m deploy:strategy:='{"type":"rolling","batch":2}' \
|
||||
-m runtime:max_open_conns:=20
|
||||
```
|
||||
|
||||
### `-s` / `--secret` JSON 语法速查
|
||||
|
||||
当你希望写入的不是普通字符串,而是 `true`、`123`、`null`、数组或对象时,用 `:=`,右侧按 JSON 解析。
|
||||
|
||||
| 目标值 | 写法示例 | 实际存入 |
|
||||
|------|------|------|
|
||||
| 普通字符串 | `-s token=abc123` | `"abc123"` |
|
||||
| 文件内容字符串 | `-s ssh_key=@./id_ed25519` | `"-----BEGIN ..."` |
|
||||
| 布尔值 | `-s enabled:=true` | `true` |
|
||||
| 数字 | `-s retry_count:=3` | `3` |
|
||||
| `null` | `-s deprecated_at:=null` | `null` |
|
||||
| 数组 | `-s scopes:='["repo","workflow"]'` | `["repo","workflow"]` |
|
||||
| 对象 | `-s extra:='{"region":"ap-east-1","verify_tls":true}'` | `{"region":"ap-east-1","verify_tls":true}` |
|
||||
| 嵌套路径 + JSON | `-s auth:policy:='{"mfa":true,"ttl":3600}'` | `{"auth":{"policy":{"mfa":true,"ttl":3600}}}` |
|
||||
|
||||
常见规则:
|
||||
|
||||
- `=` 表示按字符串存储,不做 JSON 解析。
|
||||
- `:=` 表示按 JSON 解析,适合布尔、数字、数组、对象、`null`。
|
||||
- shell 里对象和数组通常要整体加引号,推荐单引号:`-s flags:='["a","b"]'`。
|
||||
- 嵌套字段继续用冒号分隔:`-s credentials:enabled:=true`。
|
||||
- 如果你就是想存一个“JSON 字符串字面量”,可以写成 `-s note:='"hello"'`,但大多数字符串场景直接用 `=` 更直观。
|
||||
|
||||
示例:新增一条同时包含字符串、文件、布尔、数组、对象的记录
|
||||
|
||||
```bash
|
||||
secrets add -n refining --kind service --name deploy-bot \
|
||||
-s token=abc123 \
|
||||
-s ssh_key=@./keys/deploy-bot.pem \
|
||||
-s enabled:=true \
|
||||
-s scopes:='["repo","workflow"]' \
|
||||
-s policy:='{"ttl":3600,"mfa":true}'
|
||||
```
|
||||
|
||||
示例:更新已有记录中的嵌套 JSON 字段
|
||||
|
||||
```bash
|
||||
secrets update -n refining --kind service --name deploy-bot \
|
||||
-s auth:config:='{"issuer":"gitea","rotate":true}' \
|
||||
-s auth:retry:=5
|
||||
```
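`-m` / `-s` 共用取值解析规则的一个假设性草图(只覆盖 `=`、`=@file`、`:=` 三种形式;`key:sub` 嵌套路径与 `key:sub@file` 的展开此处从略,实际行为以 CLI 实现为准):

```rust
use anyhow::{Context, Result};
use serde_json::Value;

/// 假设性草图:把单个 -m/-s 参数解析为 (key, JSON value)。
/// `=` 按字符串、`=@` 读文件内容作为字符串、`:=` 右侧按 JSON 解析。
fn parse_kv(arg: &str) -> Result<(String, Value)> {
    if let Some((key, raw_json)) = arg.split_once(":=") {
        let value: Value = serde_json::from_str(raw_json)
            .with_context(|| format!("invalid JSON for '{key}'"))?;
        return Ok((key.to_string(), value));
    }
    let (key, raw) = arg
        .split_once('=')
        .context("expected key=value, key=@file or key:=json")?;
    let value = if let Some(path) = raw.strip_prefix('@') {
        Value::String(std::fs::read_to_string(path)?)
    } else {
        Value::String(raw.to_string())
    };
    Ok((key.to_string(), value))
}
```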
|
||||
`type` 字段用于软分类,由用户自由填写,不做任何自动转换或归一化。常见示例:`server`、`service`、`account`、`person`、`document`,但任何值均可接受。
|
||||
|
||||
## 审计日志
|
||||
|
||||
`add`、`update`、`delete` 操作成功后自动向 `audit_log` 表写入一条记录,包含操作类型、操作对象和变更摘要(不含 secret 值)。操作者取自 `$USER` 环境变量。
|
||||
`add`、`update`、`delete` 等写操作写入 **`audit_log`**(操作类型、对象、摘要,不含 secret 明文)。多租户场景下可写 **`user_id`**(可空,兼容遗留行)。
|
||||
业务条目事件使用 **`folder` / `type` / `name`**;登录类事件使用 **`folder='auth'`**,此时 `type`/`name` 表示认证目标(例如 `oauth` / `google`),不表示某条 secrets entry。
|
||||
|
||||
```sql
|
||||
-- 查看最近 20 条审计记录
|
||||
SELECT action, namespace, kind, name, actor, detail, created_at
|
||||
SELECT action, folder, type, name, detail, user_id, created_at
|
||||
FROM audit_log
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 20;
|
||||
@@ -298,49 +221,34 @@ LIMIT 20;
|
||||
## 项目结构
|
||||
|
||||
```
|
||||
src/
|
||||
main.rs # CLI 入口(clap),含各子命令 after_help 示例
|
||||
output.rs # OutputMode 枚举 + TTY 检测
|
||||
config.rs # 配置读写(~/.config/secrets/config.toml)
|
||||
db.rs # 连接池 + auto-migrate(entries + secrets + entries_history + secrets_history + audit_log + kv_config)
|
||||
crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串
|
||||
models.rs # Entry + SecretField 结构体
|
||||
audit.rs # 审计日志写入(audit_log 表)
|
||||
commands/
|
||||
init.rs # 主密钥初始化(首次/新设备)
|
||||
add.rs # upsert entries + secrets 行,支持 -o json
|
||||
config.rs # config set-db/show/path
|
||||
search.rs # 多条件查询,展示 secrets schema(-f/-o/--summary/--limit/--offset/--sort)
|
||||
delete.rs # 删除(CASCADE 删除 secrets)
|
||||
update.rs # 增量更新(tags/metadata + secrets 行级 UPSERT/DELETE)
|
||||
rollback.rs # rollback / history:按 entry_version 恢复
|
||||
run.rs # inject / run,仅 secrets 逐字段解密 + key_ref 引用解析(不含 metadata)
|
||||
upgrade.rs # 从 Gitea Release 自更新
|
||||
export_cmd.rs # export:批量导出,支持 JSON/TOML/YAML,含解密明文
|
||||
import_cmd.rs # import:批量导入,冲突检测,dry-run,重新加密写入
|
||||
Cargo.toml
|
||||
crates/secrets-core/ # db / crypto / models / audit / service
|
||||
src/
|
||||
taxonomy.rs # SECRET_TYPE_OPTIONS(secret 字段类型下拉选项)
|
||||
service/ # 业务逻辑(add, search, update, delete, export, env_map 等)
|
||||
crates/secrets-mcp/ # MCP HTTP、Web、OAuth、API Key
|
||||
scripts/
|
||||
setup-gitea-actions.sh # 配置 Gitea Actions 变量与 Secrets
|
||||
release-check.sh # 发版前 fmt / clippy / test
|
||||
setup-gitea-actions.sh
|
||||
sync-test-to-prod.sh # 测试库同步到生产(按需)
|
||||
deploy/
|
||||
.env.example # 环境变量模板
|
||||
secrets-mcp.service # systemd 服务文件(生产部署用)
|
||||
postgres-tls-hardening.md # PostgreSQL TLS 加固运维手册
|
||||
```
|
||||
|
||||
## CI/CD(Gitea Actions)
|
||||
|
||||
推送 `main` 分支时自动:fmt/clippy/test 检查 → Linux/macOS/Windows 构建 → 上传二进制与 `.sha256` 摘要 → 所有平台成功后发布 Release。
|
||||
见 [`.gitea/workflows/secrets.yml`](.gitea/workflows/secrets.yml)。
|
||||
|
||||
**首次使用需配置 Actions 变量和 Secrets:**
|
||||
- **触发**:任意分支 `push`,且变更路径包含 `crates/**`、`deploy/**`、根目录 `Cargo.toml` / `Cargo.lock`、`.gitea/workflows/**`。
|
||||
- **流水线**:解析 `crates/secrets-mcp/Cargo.toml` 版本 → `cargo fmt` / `clippy --locked` / `test --locked` → 交叉编译 `x86_64-unknown-linux-musl` 的 `secrets-mcp` → 构建成功后打 tag `secrets-mcp-<version>`(若远端已存在同名 tag,会先删除再于**当前提交**重建并推送,覆盖式发版)。
|
||||
- **Release(可选)**:配置仓库 Secret `RELEASE_TOKEN`(Gitea PAT,填原始明文、勿 base64)时,会通过 API **创建或更新**指向该 tag 的 Release(非 draft),并上传 `tar.gz` 与 `.sha256`;未配置则跳过 API Release,仅保留 tag 与构建产物。
|
||||
- **部署(可选)**:仅在 `main`、`feat/mcp` 或 `mcp` 分支且构建成功时,若已配置 `vars.DEPLOY_HOST`、`vars.DEPLOY_USER` 与 `secrets.DEPLOY_SSH_KEY`,则 `deploy-mcp` 通过 SCP/SSH 更新目标机二进制并 `systemctl restart secrets-mcp`。
|
||||
- **通知(可选)**:`vars.WEBHOOK_URL` 为飞书 Webhook 时,构建/部署/发布节点会推送简要状态。
|
||||
|
||||
```bash
|
||||
# 需有 ~/.config/gitea/config.env(GITEA_URL、GITEA_TOKEN、GITEA_WEBHOOK_URL)
|
||||
./scripts/setup-gitea-actions.sh
|
||||
./scripts/setup-gitea-actions.sh # 通过 Gitea API 写入 RELEASE_TOKEN、WEBHOOK_URL、部署相关变量等
|
||||
```
|
||||
|
||||
- `RELEASE_TOKEN`(Secret):Gitea PAT,用于创建 Release 上传二进制
|
||||
- `WEBHOOK_URL`(Variable):飞书通知,可选
|
||||
- **注意**:Secret/Variable 的 `data`/`value` 字段需传入原始值,不要 base64 编码
|
||||
|
||||
当前 Release 预编译产物覆盖:
|
||||
- Linux `x86_64-unknown-linux-musl`
|
||||
- macOS Apple Silicon `aarch64-apple-darwin`
|
||||
- macOS Intel `x86_64-apple-darwin`(由 ARM mac runner 交叉编译)
|
||||
- Windows `x86_64-pc-windows-msvc`
|
||||
|
||||
详见 [AGENTS.md](AGENTS.md)。
|
||||
详见 [AGENTS.md](AGENTS.md)(发版规则、代码规范)。
|
||||
|
||||
28 crates/secrets-core/Cargo.toml Normal file
@@ -0,0 +1,28 @@
|
||||
[package]
|
||||
name = "secrets-core"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "secrets_core"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
aes-gcm.workspace = true
|
||||
anyhow.workspace = true
|
||||
thiserror.workspace = true
|
||||
chrono.workspace = true
|
||||
hex = "0.4"
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
sha2.workspace = true
|
||||
sqlx.workspace = true
|
||||
toml.workspace = true
|
||||
tokio.workspace = true
|
||||
tracing.workspace = true
|
||||
uuid.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
88 crates/secrets-core/src/audit.rs Normal file
@@ -0,0 +1,88 @@
|
||||
use serde_json::{Value, json};
|
||||
use sqlx::{PgPool, Postgres, Transaction};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub const ACTION_LOGIN: &str = "login";
|
||||
pub const FOLDER_AUTH: &str = "auth";
|
||||
|
||||
fn login_detail(provider: &str, client_ip: Option<&str>, user_agent: Option<&str>) -> Value {
|
||||
json!({
|
||||
"provider": provider,
|
||||
"client_ip": client_ip,
|
||||
"user_agent": user_agent,
|
||||
})
|
||||
}
|
||||
|
||||
/// Write a login audit entry without requiring an explicit transaction.
|
||||
pub async fn log_login(
|
||||
pool: &PgPool,
|
||||
entry_type: &str,
|
||||
provider: &str,
|
||||
user_id: Uuid,
|
||||
client_ip: Option<&str>,
|
||||
user_agent: Option<&str>,
|
||||
) {
|
||||
let detail = login_detail(provider, client_ip, user_agent);
|
||||
let result: Result<_, sqlx::Error> = sqlx::query(
|
||||
"INSERT INTO audit_log (user_id, action, folder, type, name, detail) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6)",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(ACTION_LOGIN)
|
||||
.bind(FOLDER_AUTH)
|
||||
.bind(entry_type)
|
||||
.bind(provider)
|
||||
.bind(&detail)
|
||||
.execute(pool)
|
||||
.await;
|
||||
|
||||
if let Err(e) = result {
|
||||
tracing::warn!(error = %e, entry_type, provider, "failed to write login audit log");
|
||||
} else {
|
||||
tracing::debug!(entry_type, provider, ?user_id, "login audit logged");
|
||||
}
|
||||
}
|
||||
|
||||
/// Write an audit entry within an existing transaction.
|
||||
pub async fn log_tx(
|
||||
tx: &mut Transaction<'_, Postgres>,
|
||||
user_id: Option<Uuid>,
|
||||
action: &str,
|
||||
folder: &str,
|
||||
entry_type: &str,
|
||||
name: &str,
|
||||
detail: Value,
|
||||
) {
|
||||
let result: Result<_, sqlx::Error> = sqlx::query(
|
||||
"INSERT INTO audit_log (user_id, action, folder, type, name, detail) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6)",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(action)
|
||||
.bind(folder)
|
||||
.bind(entry_type)
|
||||
.bind(name)
|
||||
.bind(&detail)
|
||||
.execute(&mut **tx)
|
||||
.await;
|
||||
|
||||
if let Err(e) = result {
|
||||
tracing::warn!(error = %e, "failed to write audit log");
|
||||
} else {
|
||||
tracing::debug!(action, folder, entry_type, name, "audit logged");
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn login_detail_includes_expected_fields() {
|
||||
let detail = login_detail("google", Some("127.0.0.1"), Some("Mozilla/5.0"));
|
||||
|
||||
assert_eq!(detail["provider"], "google");
|
||||
assert_eq!(detail["client_ip"], "127.0.0.1");
|
||||
assert_eq!(detail["user_agent"], "Mozilla/5.0");
|
||||
}
|
||||
}
|
||||
82 crates/secrets-core/src/config.rs Normal file
@@ -0,0 +1,82 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use sqlx::postgres::PgSslMode;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DatabaseConfig {
|
||||
pub url: String,
|
||||
pub ssl_mode: Option<PgSslMode>,
|
||||
pub ssl_root_cert: Option<PathBuf>,
|
||||
pub enforce_strict_tls: bool,
|
||||
}
|
||||
|
||||
/// Resolve the database URL from an explicit override or the environment.
/// Priority: explicit `override_url` → `SECRETS_DATABASE_URL` env var → error.
|
||||
pub fn resolve_db_url(override_url: &str) -> Result<String> {
|
||||
if !override_url.is_empty() {
|
||||
return Ok(override_url.to_string());
|
||||
}
|
||||
|
||||
if let Ok(url) = std::env::var("SECRETS_DATABASE_URL")
|
||||
&& !url.is_empty()
|
||||
{
|
||||
return Ok(url);
|
||||
}
|
||||
|
||||
anyhow::bail!(
|
||||
"Database not configured. Set the SECRETS_DATABASE_URL environment variable.\n\
|
||||
Example: SECRETS_DATABASE_URL=postgres://user:pass@host:port/dbname"
|
||||
)
|
||||
}
|
||||
|
||||
fn env_var_non_empty(name: &str) -> Option<String> {
|
||||
std::env::var(name)
|
||||
.ok()
|
||||
.filter(|value| !value.trim().is_empty())
|
||||
}
|
||||
|
||||
fn parse_ssl_mode_from_env() -> Result<Option<PgSslMode>> {
|
||||
let Some(mode) = env_var_non_empty("SECRETS_DATABASE_SSL_MODE") else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let parsed = mode.parse::<PgSslMode>().with_context(|| {
|
||||
format!(
|
||||
"Invalid SECRETS_DATABASE_SSL_MODE='{mode}'. Use one of: disable, allow, prefer, require, verify-ca, verify-full."
|
||||
)
|
||||
})?;
|
||||
Ok(Some(parsed))
|
||||
}
|
||||
|
||||
fn resolve_ssl_root_cert_from_env() -> Result<Option<PathBuf>> {
|
||||
let Some(path) = env_var_non_empty("SECRETS_DATABASE_SSL_ROOT_CERT") else {
|
||||
return Ok(None);
|
||||
};
|
||||
let path = PathBuf::from(path);
|
||||
if !path.exists() {
|
||||
anyhow::bail!(
|
||||
"SECRETS_DATABASE_SSL_ROOT_CERT points to a missing file: {}",
|
||||
path.display()
|
||||
);
|
||||
}
|
||||
Ok(Some(path))
|
||||
}
|
||||
|
||||
fn is_production_env() -> bool {
|
||||
matches!(
|
||||
env_var_non_empty("SECRETS_ENV")
|
||||
.as_deref()
|
||||
.map(|value| value.to_ascii_lowercase()),
|
||||
Some(value) if value == "prod" || value == "production"
|
||||
)
|
||||
}
|
||||
|
||||
pub fn resolve_db_config(override_url: &str) -> Result<DatabaseConfig> {
|
||||
Ok(DatabaseConfig {
|
||||
url: resolve_db_url(override_url)?,
|
||||
ssl_mode: parse_ssl_mode_from_env()?,
|
||||
ssl_root_cert: resolve_ssl_root_cert_from_env()?,
|
||||
enforce_strict_tls: is_production_env(),
|
||||
})
|
||||
}
|
||||
@@ -3,39 +3,12 @@ use aes_gcm::{
|
||||
aead::{Aead, AeadCore, KeyInit, OsRng},
|
||||
};
|
||||
use anyhow::{Context, Result, bail};
|
||||
use argon2::{Argon2, Params, Version};
|
||||
use serde_json::Value;
|
||||
|
||||
const KEYRING_SERVICE: &str = "secrets-cli";
|
||||
const KEYRING_USER: &str = "master-key";
|
||||
use crate::error::AppError;
|
||||
|
||||
const NONCE_LEN: usize = 12;
|
||||
|
||||
// Argon2id parameters — OWASP recommended (m=64 MiB, t=3 iterations, p=4 threads, key=32 B)
|
||||
const ARGON2_M_COST: u32 = 65_536;
|
||||
const ARGON2_T_COST: u32 = 3;
|
||||
const ARGON2_P_COST: u32 = 4;
|
||||
const ARGON2_KEY_LEN: usize = 32;
|
||||
|
||||
// ─── Argon2id key derivation ─────────────────────────────────────────────────
|
||||
|
||||
/// Derive a 32-byte Master Key from a password and salt using Argon2id.
|
||||
/// Parameters: m=65536 KiB (64 MB), t=3, p=4 — OWASP recommended.
|
||||
pub fn derive_master_key(password: &str, salt: &[u8]) -> Result<[u8; 32]> {
|
||||
let params = Params::new(
|
||||
ARGON2_M_COST,
|
||||
ARGON2_T_COST,
|
||||
ARGON2_P_COST,
|
||||
Some(ARGON2_KEY_LEN),
|
||||
)
|
||||
.context("invalid Argon2id params")?;
|
||||
let argon2 = Argon2::new(argon2::Algorithm::Argon2id, Version::V0x13, params);
|
||||
let mut key = [0u8; 32];
|
||||
argon2
|
||||
.hash_password_into(password.as_bytes(), salt, &mut key)
|
||||
.map_err(|e| anyhow::anyhow!("Argon2id derivation failed: {}", e))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
// ─── AES-256-GCM encrypt / decrypt ───────────────────────────────────────────
|
||||
|
||||
/// Encrypt plaintext bytes with AES-256-GCM.
|
||||
@@ -67,7 +40,7 @@ pub fn decrypt(master_key: &[u8; 32], data: &[u8]) -> Result<Vec<u8>> {
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| anyhow::anyhow!("decryption failed — wrong master key or corrupted data"))
|
||||
.map_err(|_| AppError::DecryptionFailed.into())
|
||||
}
|
||||
|
||||
// ─── JSON helpers ─────────────────────────────────────────────────────────────
|
||||
@@ -84,20 +57,14 @@ pub fn decrypt_json(master_key: &[u8; 32], data: &[u8]) -> Result<Value> {
|
||||
serde_json::from_slice(&bytes).context("deserialize decrypted JSON")
|
||||
}
|
||||
|
||||
// ─── OS Keychain ──────────────────────────────────────────────────────────────
|
||||
// ─── Client-supplied key extraction ──────────────────────────────────────────
|
||||
|
||||
/// Load the Master Key from the OS Keychain.
|
||||
/// Returns an error with a helpful message if it hasn't been initialized.
|
||||
pub fn load_master_key() -> Result<[u8; 32]> {
|
||||
let entry =
|
||||
keyring::Entry::new(KEYRING_SERVICE, KEYRING_USER).context("create keychain entry")?;
|
||||
let hex = entry.get_password().map_err(|_| {
|
||||
anyhow::anyhow!("Master key not found in keychain. Run `secrets init` first.")
|
||||
})?;
|
||||
let bytes = hex::decode_hex(&hex)?;
|
||||
/// Parse a 64-char hex string (from X-Encryption-Key header) into a 32-byte key.
|
||||
pub fn extract_key_from_hex(hex_str: &str) -> Result<[u8; 32]> {
|
||||
let bytes = ::hex::decode(hex_str.trim())?;
|
||||
if bytes.len() != 32 {
|
||||
bail!(
|
||||
"stored master key has unexpected length {}; re-run `secrets init`",
|
||||
"X-Encryption-Key must be 64 hex chars (32 bytes), got {} bytes",
|
||||
bytes.len()
|
||||
);
|
||||
}
|
||||
@@ -106,34 +73,17 @@ pub fn load_master_key() -> Result<[u8; 32]> {
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Store the Master Key in the OS Keychain (overwrites any existing value).
|
||||
pub fn store_master_key(key: &[u8; 32]) -> Result<()> {
|
||||
let entry =
|
||||
keyring::Entry::new(KEYRING_SERVICE, KEYRING_USER).context("create keychain entry")?;
|
||||
let hex = hex::encode_hex(key);
|
||||
entry
|
||||
.set_password(&hex)
|
||||
.map_err(|e| anyhow::anyhow!("keychain write failed: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
// ─── Public hex helpers ───────────────────────────────────────────────────────
|
||||
|
||||
// ─── Minimal hex helpers (avoid extra dep) ────────────────────────────────────
|
||||
|
||||
mod hex {
|
||||
use anyhow::{Result, bail};
|
||||
pub mod hex {
|
||||
use anyhow::Result;
|
||||
|
||||
pub fn encode_hex(bytes: &[u8]) -> String {
|
||||
bytes.iter().map(|b| format!("{:02x}", b)).collect()
|
||||
}
|
||||
|
||||
pub fn decode_hex(s: &str) -> Result<Vec<u8>> {
|
||||
if !s.len().is_multiple_of(2) {
|
||||
bail!("hex string has odd length");
|
||||
}
|
||||
(0..s.len())
|
||||
.step_by(2)
|
||||
.map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(|e| anyhow::anyhow!("{}", e)))
|
||||
.collect()
|
||||
Ok(::hex::decode(s.trim())?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,7 +106,6 @@ mod tests {
|
||||
let plaintext = b"hello world";
|
||||
let enc1 = encrypt(&key, plaintext).unwrap();
|
||||
let enc2 = encrypt(&key, plaintext).unwrap();
|
||||
// Different nonces → different ciphertexts
|
||||
assert_ne!(enc1, enc2);
|
||||
}
|
||||
|
||||
@@ -176,20 +125,4 @@ mod tests {
|
||||
let dec = decrypt_json(&key, &enc).unwrap();
|
||||
assert_eq!(dec, value);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn derive_master_key_deterministic() {
|
||||
let salt = b"fixed_test_salt_";
|
||||
let k1 = derive_master_key("password", salt).unwrap();
|
||||
let k2 = derive_master_key("password", salt).unwrap();
|
||||
assert_eq!(k1, k2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn derive_master_key_different_passwords() {
|
||||
let salt = b"fixed_test_salt_";
|
||||
let k1 = derive_master_key("password1", salt).unwrap();
|
||||
let k2 = derive_master_key("password2", salt).unwrap();
|
||||
assert_ne!(k1, k2);
|
||||
}
|
||||
}
|
||||
636 crates/secrets-core/src/db.rs Normal file
@@ -0,0 +1,636 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use serde_json::{Map, Value};
|
||||
use sqlx::PgPool;
|
||||
use sqlx::postgres::{PgConnectOptions, PgPoolOptions, PgSslMode};
|
||||
|
||||
use crate::config::DatabaseConfig;
|
||||
|
||||
fn build_connect_options(config: &DatabaseConfig) -> Result<PgConnectOptions> {
|
||||
let mut options = PgConnectOptions::from_str(&config.url)
|
||||
.with_context(|| "failed to parse SECRETS_DATABASE_URL".to_string())?;
|
||||
|
||||
if let Some(mode) = config.ssl_mode {
|
||||
options = options.ssl_mode(mode);
|
||||
}
|
||||
if let Some(path) = &config.ssl_root_cert {
|
||||
options = options.ssl_root_cert(path);
|
||||
}
|
||||
|
||||
if config.enforce_strict_tls
|
||||
&& !matches!(
|
||||
options.get_ssl_mode(),
|
||||
PgSslMode::VerifyCa | PgSslMode::VerifyFull
|
||||
)
|
||||
{
|
||||
anyhow::bail!(
|
||||
"Refusing to start in production with weak PostgreSQL TLS mode. \
|
||||
Set SECRETS_DATABASE_SSL_MODE=verify-ca or verify-full."
|
||||
);
|
||||
}
|
||||
|
||||
Ok(options)
|
||||
}
|
||||
|
||||
pub async fn create_pool(config: &DatabaseConfig) -> Result<PgPool> {
|
||||
tracing::debug!("connecting to database");
|
||||
let connect_options = build_connect_options(config)?;
|
||||
|
||||
// Connection pool configuration from environment
|
||||
let max_connections = std::env::var("SECRETS_DATABASE_POOL_SIZE")
|
||||
.ok()
|
||||
.and_then(|v| v.parse::<u32>().ok())
|
||||
.unwrap_or(10);
|
||||
|
||||
let acquire_timeout_secs = std::env::var("SECRETS_DATABASE_ACQUIRE_TIMEOUT")
|
||||
.ok()
|
||||
.and_then(|v| v.parse::<u64>().ok())
|
||||
.unwrap_or(5);
|
||||
|
||||
let pool = PgPoolOptions::new()
|
||||
.max_connections(max_connections)
|
||||
.acquire_timeout(std::time::Duration::from_secs(acquire_timeout_secs))
|
||||
.max_lifetime(std::time::Duration::from_secs(1800)) // 30 minutes
|
||||
.idle_timeout(std::time::Duration::from_secs(600)) // 10 minutes
|
||||
.connect_with(connect_options)
|
||||
.await?;
|
||||
|
||||
tracing::debug!(
|
||||
max_connections,
|
||||
acquire_timeout_secs,
|
||||
"database connection established"
|
||||
);
|
||||
Ok(pool)
|
||||
}
|
||||
|
||||
pub async fn migrate(pool: &PgPool) -> Result<()> {
|
||||
tracing::debug!("running migrations");
|
||||
sqlx::raw_sql(
|
||||
r#"
|
||||
-- ── entries: top-level entities ─────────────────────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS entries (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
user_id UUID,
|
||||
folder VARCHAR(128) NOT NULL DEFAULT '',
|
||||
type VARCHAR(64) NOT NULL DEFAULT '',
|
||||
name VARCHAR(256) NOT NULL,
|
||||
notes TEXT NOT NULL DEFAULT '',
|
||||
tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
version BIGINT NOT NULL DEFAULT 1,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- Legacy unique constraint without user_id (single-user mode)
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_legacy
|
||||
ON entries(folder, name)
|
||||
WHERE user_id IS NULL;
|
||||
|
||||
-- Multi-user unique constraint
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_user
|
||||
ON entries(user_id, folder, name)
|
||||
WHERE user_id IS NOT NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_folder ON entries(folder) WHERE folder <> '';
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_type ON entries(type) WHERE type <> '';
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_user_id ON entries(user_id) WHERE user_id IS NOT NULL;
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_tags ON entries USING GIN(tags);
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_metadata ON entries USING GIN(metadata jsonb_path_ops);
|
||||
|
||||
-- ── secrets: one row per encrypted field ─────────────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS secrets (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
user_id UUID,
|
||||
name VARCHAR(256) NOT NULL,
|
||||
type VARCHAR(64) NOT NULL DEFAULT 'text',
|
||||
encrypted BYTEA NOT NULL DEFAULT '\x',
|
||||
version BIGINT NOT NULL DEFAULT 1,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_secrets_user_id ON secrets(user_id) WHERE user_id IS NOT NULL;
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_secrets_unique_user_name
|
||||
ON secrets(user_id, name) WHERE user_id IS NOT NULL;
|
||||
CREATE INDEX IF NOT EXISTS idx_secrets_name ON secrets(name);
|
||||
CREATE INDEX IF NOT EXISTS idx_secrets_type ON secrets(type);
|
||||
|
||||
-- ── entry_secrets: N:N relation ────────────────────────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS entry_secrets (
|
||||
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
|
||||
secret_id UUID NOT NULL REFERENCES secrets(id) ON DELETE CASCADE,
|
||||
sort_order INT NOT NULL DEFAULT 0,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
PRIMARY KEY(entry_id, secret_id)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_entry_secrets_secret_id ON entry_secrets(secret_id);
|
||||
|
||||
-- ── audit_log: append-only operation log ─────────────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS audit_log (
|
||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
user_id UUID,
|
||||
action VARCHAR(32) NOT NULL,
|
||||
folder VARCHAR(128) NOT NULL DEFAULT '',
|
||||
type VARCHAR(64) NOT NULL DEFAULT '',
|
||||
name VARCHAR(256) NOT NULL,
|
||||
detail JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_log_folder_type ON audit_log(folder, type);
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_log_user_id ON audit_log(user_id) WHERE user_id IS NOT NULL;
|
||||
|
||||
-- ── entries_history ───────────────────────────────────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS entries_history (
|
||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
entry_id UUID NOT NULL,
|
||||
folder VARCHAR(128) NOT NULL DEFAULT '',
|
||||
type VARCHAR(64) NOT NULL DEFAULT '',
|
||||
name VARCHAR(256) NOT NULL,
|
||||
version BIGINT NOT NULL,
|
||||
action VARCHAR(16) NOT NULL,
|
||||
tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_history_entry_id
|
||||
ON entries_history(entry_id, version DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_history_folder_type_name
|
||||
ON entries_history(folder, type, name, version DESC);
|
||||
|
||||
-- Backfill: add user_id to entries_history for multi-tenant isolation
|
||||
ALTER TABLE entries_history ADD COLUMN IF NOT EXISTS user_id UUID;
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_history_user_id
|
||||
ON entries_history(user_id) WHERE user_id IS NOT NULL;
|
||||
ALTER TABLE entries_history DROP COLUMN IF EXISTS actor;
|
||||
|
||||
-- Backfill: add notes to entries if not present (fresh installs already have it)
|
||||
ALTER TABLE entries ADD COLUMN IF NOT EXISTS notes TEXT NOT NULL DEFAULT '';
|
||||
|
||||
-- ── secrets_history: field-level snapshot ────────────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS secrets_history (
|
||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
secret_id UUID NOT NULL,
|
||||
name VARCHAR(256) NOT NULL,
|
||||
encrypted BYTEA NOT NULL DEFAULT '\x',
|
||||
action VARCHAR(16) NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id
|
||||
ON secrets_history(secret_id);
|
||||
|
||||
-- Drop redundant actor column (derivable via entries_history JOIN)
|
||||
ALTER TABLE secrets_history DROP COLUMN IF EXISTS actor;
|
||||
|
||||
-- ── users ─────────────────────────────────────────────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
email VARCHAR(256),
|
||||
name VARCHAR(256) NOT NULL DEFAULT '',
|
||||
avatar_url TEXT,
|
||||
key_salt BYTEA,
|
||||
key_check BYTEA,
|
||||
key_params JSONB,
|
||||
api_key TEXT UNIQUE,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- ── oauth_accounts: per-provider identity links ───────────────────────────
|
||||
CREATE TABLE IF NOT EXISTS oauth_accounts (
|
||||
id UUID PRIMARY KEY DEFAULT uuidv7(),
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
provider VARCHAR(32) NOT NULL,
|
||||
provider_id VARCHAR(256) NOT NULL,
|
||||
email VARCHAR(256),
|
||||
name VARCHAR(256),
|
||||
avatar_url TEXT,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(provider, provider_id)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_oauth_accounts_user ON oauth_accounts(user_id);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_oauth_accounts_user_provider
|
||||
ON oauth_accounts(user_id, provider);
|
||||
|
||||
-- FK: user_id columns -> users(id) (nullable = legacy rows; ON DELETE SET NULL)
|
||||
DO $$ BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM pg_constraint WHERE conname = 'fk_entries_user_id'
|
||||
) THEN
|
||||
ALTER TABLE entries
|
||||
ADD CONSTRAINT fk_entries_user_id
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM pg_constraint WHERE conname = 'fk_entries_history_user_id'
|
||||
) THEN
|
||||
ALTER TABLE entries_history
|
||||
ADD CONSTRAINT fk_entries_history_user_id
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM pg_constraint WHERE conname = 'fk_secrets_user_id'
|
||||
) THEN
|
||||
ALTER TABLE secrets
|
||||
ADD CONSTRAINT fk_secrets_user_id
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM pg_constraint WHERE conname = 'fk_audit_log_user_id'
|
||||
) THEN
|
||||
ALTER TABLE audit_log
|
||||
ADD CONSTRAINT fk_audit_log_user_id
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;
|
||||
END IF;
|
||||
END $$;
|
||||
"#,
|
||||
)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
migrate_schema(pool).await?;
|
||||
restore_plaintext_api_keys(pool).await?;
|
||||
|
||||
tracing::debug!("migrations complete");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Idempotent schema migration: rename namespace→folder, kind→type in existing databases.
|
||||
async fn migrate_schema(pool: &PgPool) -> Result<()> {
|
||||
sqlx::raw_sql(
|
||||
r#"
|
||||
-- ── entries: rename namespace→folder, kind→type ──────────────────────────
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries' AND column_name = 'namespace'
|
||||
) THEN
|
||||
ALTER TABLE entries RENAME COLUMN namespace TO folder;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries' AND column_name = 'kind'
|
||||
) THEN
|
||||
ALTER TABLE entries RENAME COLUMN kind TO type;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ── audit_log: rename namespace→folder, kind→type ────────────────────────
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'audit_log' AND column_name = 'namespace'
|
||||
) THEN
|
||||
ALTER TABLE audit_log RENAME COLUMN namespace TO folder;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'audit_log' AND column_name = 'kind'
|
||||
) THEN
|
||||
ALTER TABLE audit_log RENAME COLUMN kind TO type;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ── entries_history: rename namespace→folder, kind→type ──────────────────
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries_history' AND column_name = 'namespace'
|
||||
) THEN
|
||||
ALTER TABLE entries_history RENAME COLUMN namespace TO folder;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries_history' AND column_name = 'kind'
|
||||
) THEN
|
||||
ALTER TABLE entries_history RENAME COLUMN kind TO type;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ── Set empty defaults for new folder/type columns ────────────────────────
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries' AND column_name = 'folder'
|
||||
) THEN
|
||||
UPDATE entries SET folder = '' WHERE folder IS NULL;
|
||||
ALTER TABLE entries ALTER COLUMN folder SET NOT NULL;
|
||||
ALTER TABLE entries ALTER COLUMN folder SET DEFAULT '';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries' AND column_name = 'type'
|
||||
) THEN
|
||||
UPDATE entries SET type = '' WHERE type IS NULL;
|
||||
ALTER TABLE entries ALTER COLUMN type SET NOT NULL;
|
||||
ALTER TABLE entries ALTER COLUMN type SET DEFAULT '';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'audit_log' AND column_name = 'folder'
|
||||
) THEN
|
||||
UPDATE audit_log SET folder = '' WHERE folder IS NULL;
|
||||
ALTER TABLE audit_log ALTER COLUMN folder SET NOT NULL;
|
||||
ALTER TABLE audit_log ALTER COLUMN folder SET DEFAULT '';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'audit_log' AND column_name = 'type'
|
||||
) THEN
|
||||
UPDATE audit_log SET type = '' WHERE type IS NULL;
|
||||
ALTER TABLE audit_log ALTER COLUMN type SET NOT NULL;
|
||||
ALTER TABLE audit_log ALTER COLUMN type SET DEFAULT '';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries_history' AND column_name = 'folder'
|
||||
) THEN
|
||||
UPDATE entries_history SET folder = '' WHERE folder IS NULL;
|
||||
ALTER TABLE entries_history ALTER COLUMN folder SET NOT NULL;
|
||||
ALTER TABLE entries_history ALTER COLUMN folder SET DEFAULT '';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
DO $$ BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'entries_history' AND column_name = 'type'
|
||||
) THEN
|
||||
UPDATE entries_history SET type = '' WHERE type IS NULL;
|
||||
ALTER TABLE entries_history ALTER COLUMN type SET NOT NULL;
|
||||
ALTER TABLE entries_history ALTER COLUMN type SET DEFAULT '';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- ── Rebuild unique indexes on entries: folder is now part of the key ────────
|
||||
-- (user_id, folder, name) allows same name in different folders.
|
||||
DROP INDEX IF EXISTS idx_entries_unique_legacy;
|
||||
DROP INDEX IF EXISTS idx_entries_unique_user;
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_legacy
|
||||
ON entries(folder, name)
|
||||
WHERE user_id IS NULL;
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_user
|
||||
ON entries(user_id, folder, name)
|
||||
WHERE user_id IS NOT NULL;
|
||||
|
||||
-- ── Replace old namespace/kind indexes ────────────────────────────────────
|
||||
DROP INDEX IF EXISTS idx_entries_namespace;
|
||||
DROP INDEX IF EXISTS idx_entries_kind;
|
||||
DROP INDEX IF EXISTS idx_audit_log_ns_kind;
|
||||
DROP INDEX IF EXISTS idx_entries_history_ns_kind_name;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_folder
|
||||
ON entries(folder) WHERE folder <> '';
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_type
|
||||
ON entries(type) WHERE type <> '';
|
||||
CREATE INDEX IF NOT EXISTS idx_audit_log_folder_type
|
||||
ON audit_log(folder, type);
|
||||
CREATE INDEX IF NOT EXISTS idx_entries_history_folder_type_name
|
||||
ON entries_history(folder, type, name, version DESC);
|
||||
|
||||
-- ── Drop legacy actor columns ─────────────────────────────────────────────
|
||||
ALTER TABLE secrets_history DROP COLUMN IF EXISTS actor;
|
||||
ALTER TABLE audit_log DROP COLUMN IF EXISTS actor;
|
||||
"#,
|
||||
)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn restore_plaintext_api_keys(pool: &PgPool) -> Result<()> {
|
||||
let has_users_api_key: bool = sqlx::query_scalar(
|
||||
"SELECT EXISTS (
|
||||
SELECT 1
|
||||
FROM information_schema.columns
|
||||
WHERE table_schema = 'public'
|
||||
AND table_name = 'users'
|
||||
AND column_name = 'api_key'
|
||||
)",
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
if !has_users_api_key {
|
||||
sqlx::query("ALTER TABLE users ADD COLUMN api_key TEXT")
|
||||
.execute(pool)
|
||||
.await?;
|
||||
sqlx::query("CREATE UNIQUE INDEX IF NOT EXISTS idx_users_api_key ON users(api_key) WHERE api_key IS NOT NULL")
|
||||
.execute(pool)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let has_api_keys_table: bool = sqlx::query_scalar(
|
||||
"SELECT EXISTS (
|
||||
SELECT 1
|
||||
FROM information_schema.tables
|
||||
WHERE table_schema = 'public'
|
||||
AND table_name = 'api_keys'
|
||||
)",
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
if !has_api_keys_table {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct UserWithoutKey {
|
||||
id: uuid::Uuid,
|
||||
}
|
||||
|
||||
let users_without_key: Vec<UserWithoutKey> =
|
||||
sqlx::query_as("SELECT DISTINCT user_id AS id FROM api_keys WHERE user_id NOT IN (SELECT id FROM users WHERE api_key IS NOT NULL)")
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
for user in users_without_key {
|
||||
let new_key = crate::service::api_key::generate_api_key();
|
||||
sqlx::query("UPDATE users SET api_key = $1 WHERE id = $2")
|
||||
.bind(&new_key)
|
||||
.bind(user.id)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
}
|
||||
|
||||
sqlx::query("DROP TABLE IF EXISTS api_keys")
|
||||
.execute(pool)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Entry-level history snapshot ─────────────────────────────────────────────
|
||||
|
||||
pub struct EntrySnapshotParams<'a> {
|
||||
pub entry_id: uuid::Uuid,
|
||||
pub user_id: Option<uuid::Uuid>,
|
||||
pub folder: &'a str,
|
||||
pub entry_type: &'a str,
|
||||
pub name: &'a str,
|
||||
pub version: i64,
|
||||
pub action: &'a str,
|
||||
pub tags: &'a [String],
|
||||
pub metadata: &'a Value,
|
||||
}
|
||||
|
||||
pub async fn snapshot_entry_history(
|
||||
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
||||
p: EntrySnapshotParams<'_>,
|
||||
) -> Result<()> {
|
||||
sqlx::query(
|
||||
"INSERT INTO entries_history \
|
||||
(entry_id, folder, type, name, version, action, tags, metadata, user_id) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
|
||||
)
|
||||
.bind(p.entry_id)
|
||||
.bind(p.folder)
|
||||
.bind(p.entry_type)
|
||||
.bind(p.name)
|
||||
.bind(p.version)
|
||||
.bind(p.action)
|
||||
.bind(p.tags)
|
||||
.bind(p.metadata)
|
||||
.bind(p.user_id)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Secret field-level history snapshot ──────────────────────────────────────
|
||||
|
||||
pub struct SecretSnapshotParams<'a> {
|
||||
pub secret_id: uuid::Uuid,
|
||||
pub name: &'a str,
|
||||
pub encrypted: &'a [u8],
|
||||
pub action: &'a str,
|
||||
}
|
||||
|
||||
pub async fn snapshot_secret_history(
|
||||
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
||||
p: SecretSnapshotParams<'_>,
|
||||
) -> Result<()> {
|
||||
sqlx::query(
|
||||
"INSERT INTO secrets_history \
|
||||
(secret_id, name, encrypted, action) \
|
||||
VALUES ($1, $2, $3, $4)",
|
||||
)
|
||||
.bind(p.secret_id)
|
||||
.bind(p.name)
|
||||
.bind(p.encrypted)
|
||||
.bind(p.action)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub const ENTRY_HISTORY_SECRETS_KEY: &str = "__secrets_snapshot_v1";
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
pub struct EntrySecretSnapshot {
|
||||
pub name: String,
|
||||
#[serde(rename = "type")]
|
||||
pub secret_type: String,
|
||||
pub encrypted_hex: String,
|
||||
}
|
||||
|
||||
pub async fn metadata_with_secret_snapshot(
|
||||
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
||||
entry_id: uuid::Uuid,
|
||||
metadata: &Value,
|
||||
) -> Result<Value> {
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct Row {
|
||||
name: String,
|
||||
#[sqlx(rename = "type")]
|
||||
secret_type: String,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
|
||||
let rows: Vec<Row> = sqlx::query_as(
|
||||
"SELECT s.name, s.type, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1 \
|
||||
ORDER BY s.name ASC",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.fetch_all(&mut **tx)
|
||||
.await?;
|
||||
|
||||
let snapshots: Vec<EntrySecretSnapshot> = rows
|
||||
.into_iter()
|
||||
.map(|r| EntrySecretSnapshot {
|
||||
name: r.name,
|
||||
secret_type: r.secret_type,
|
||||
encrypted_hex: ::hex::encode(r.encrypted),
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut merged = match metadata.clone() {
|
||||
Value::Object(obj) => obj,
|
||||
_ => Map::new(),
|
||||
};
|
||||
merged.insert(
|
||||
ENTRY_HISTORY_SECRETS_KEY.to_string(),
|
||||
serde_json::to_value(snapshots)?,
|
||||
);
|
||||
Ok(Value::Object(merged))
|
||||
}
|
||||
|
||||
pub fn strip_secret_snapshot_from_metadata(metadata: &Value) -> Value {
|
||||
let mut m = match metadata.clone() {
|
||||
Value::Object(obj) => obj,
|
||||
_ => return metadata.clone(),
|
||||
};
|
||||
m.remove(ENTRY_HISTORY_SECRETS_KEY);
|
||||
Value::Object(m)
|
||||
}
|
||||
|
||||
pub fn entry_secret_snapshot_from_metadata(metadata: &Value) -> Option<Vec<EntrySecretSnapshot>> {
|
||||
let Value::Object(map) = metadata else {
|
||||
return None;
|
||||
};
|
||||
let raw = map.get(ENTRY_HISTORY_SECRETS_KEY)?;
|
||||
serde_json::from_value(raw.clone()).ok()
|
||||
}
|
||||
|
||||
// ── DB helpers ────────────────────────────────────────────────────────────────
|
||||
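// Illustrative only (not part of the diff): a minimal bootstrap that connects and
// applies the idempotent migrations above. Assumes this module's `migrate` entry
// point and the SECRETS_DATABASE_URL variable that the tests elsewhere in this diff use.
async fn init_db() -> anyhow::Result<sqlx::PgPool> {
    let url = std::env::var("SECRETS_DATABASE_URL")?;
    let pool = sqlx::PgPool::connect(&url).await?;
    migrate(&pool).await?; // schema creation + rename/backfill migrations shown above
    Ok(pool)
}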
172
crates/secrets-core/src/error.rs
Normal file
@@ -0,0 +1,172 @@
|
||||
use sqlx::error::DatabaseError;
|
||||
|
||||
/// Structured business errors for the secrets service.
|
||||
///
|
||||
/// These replace ad-hoc `anyhow` strings for expected failure modes,
|
||||
/// allowing MCP and Web layers to map to appropriate protocol-level errors.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AppError {
|
||||
#[error("A secret with the name '{secret_name}' already exists for this user")]
|
||||
ConflictSecretName { secret_name: String },
|
||||
|
||||
#[error("An entry with folder='{folder}' and name='{name}' already exists")]
|
||||
ConflictEntryName { folder: String, name: String },
|
||||
|
||||
#[error("Entry not found")]
|
||||
NotFoundEntry,
|
||||
|
||||
#[error("User not found")]
|
||||
NotFoundUser,
|
||||
|
||||
#[error("Secret not found")]
|
||||
NotFoundSecret,
|
||||
|
||||
#[error("Authentication failed")]
|
||||
AuthenticationFailed,
|
||||
|
||||
#[error("Unauthorized: insufficient permissions")]
|
||||
Unauthorized,
|
||||
|
||||
#[error("Validation failed: {message}")]
|
||||
Validation { message: String },
|
||||
|
||||
#[error("Concurrent modification detected")]
|
||||
ConcurrentModification,
|
||||
|
||||
#[error("Decryption failed — the encryption key may be incorrect")]
|
||||
DecryptionFailed,
|
||||
|
||||
#[error("Encryption key not set — user must set passphrase first")]
|
||||
EncryptionKeyNotSet,
|
||||
|
||||
#[error(transparent)]
|
||||
Internal(#[from] anyhow::Error),
|
||||
}
|
||||
|
||||
impl AppError {
|
||||
/// Try to convert a sqlx database error into a structured `AppError`.
|
||||
///
|
||||
/// The caller should provide the context (which table was being written,
|
||||
/// what values were being inserted) so we can produce a meaningful error.
|
||||
pub fn from_db_error(err: sqlx::Error, ctx: DbErrorContext<'_>) -> Self {
|
||||
if let sqlx::Error::Database(ref db_err) = err
|
||||
&& db_err.code().as_deref() == Some("23505")
|
||||
{
|
||||
return Self::from_unique_violation(db_err.as_ref(), ctx);
|
||||
}
|
||||
AppError::Internal(err.into())
|
||||
}
|
||||
|
||||
fn from_unique_violation(db_err: &dyn DatabaseError, ctx: DbErrorContext<'_>) -> Self {
|
||||
let constraint = db_err.constraint();
|
||||
|
||||
match constraint {
|
||||
Some("idx_secrets_unique_user_name") => AppError::ConflictSecretName {
|
||||
secret_name: ctx.secret_name.unwrap_or("unknown").to_string(),
|
||||
},
|
||||
Some("idx_entries_unique_user") | Some("idx_entries_unique_legacy") => {
|
||||
AppError::ConflictEntryName {
|
||||
folder: ctx.folder.unwrap_or("").to_string(),
|
||||
name: ctx.name.unwrap_or("unknown").to_string(),
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// Fall back to message-based detection for unnamed constraints
|
||||
let msg = db_err.message();
|
||||
if msg.contains("secrets") {
|
||||
AppError::ConflictSecretName {
|
||||
secret_name: ctx.secret_name.unwrap_or("unknown").to_string(),
|
||||
}
|
||||
} else {
|
||||
AppError::ConflictEntryName {
|
||||
folder: ctx.folder.unwrap_or("").to_string(),
|
||||
name: ctx.name.unwrap_or("unknown").to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Context hints used when converting a database error to `AppError`.
|
||||
#[derive(Debug, Default, Clone, Copy)]
|
||||
pub struct DbErrorContext<'a> {
|
||||
pub secret_name: Option<&'a str>,
|
||||
pub folder: Option<&'a str>,
|
||||
pub name: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl<'a> DbErrorContext<'a> {
|
||||
pub fn secret_name(name: &'a str) -> Self {
|
||||
Self {
|
||||
secret_name: Some(name),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn entry(folder: &'a str, name: &'a str) -> Self {
|
||||
Self {
|
||||
folder: Some(folder),
|
||||
name: Some(name),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
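// Illustrative only (not part of this file): one way a web layer might map the
// variants above to HTTP status codes. The numeric codes are assumptions, not
// something this diff defines.
fn status_for(err: &AppError) -> u16 {
    match err {
        AppError::ConflictSecretName { .. }
        | AppError::ConflictEntryName { .. }
        | AppError::ConcurrentModification => 409,
        AppError::NotFoundEntry | AppError::NotFoundUser | AppError::NotFoundSecret => 404,
        AppError::AuthenticationFailed => 401,
        AppError::Unauthorized => 403,
        AppError::Validation { .. } => 422,
        AppError::DecryptionFailed | AppError::EncryptionKeyNotSet => 400,
        AppError::Internal(_) => 500,
    }
}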
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn app_error_display_messages() {
|
||||
let err = AppError::ConflictSecretName {
|
||||
secret_name: "token".to_string(),
|
||||
};
|
||||
assert!(err.to_string().contains("token"));
|
||||
|
||||
let err = AppError::ConflictEntryName {
|
||||
folder: "refining".to_string(),
|
||||
name: "gitea".to_string(),
|
||||
};
|
||||
assert!(err.to_string().contains("refining"));
|
||||
assert!(err.to_string().contains("gitea"));
|
||||
|
||||
let err = AppError::NotFoundEntry;
|
||||
assert_eq!(err.to_string(), "Entry not found");
|
||||
|
||||
let err = AppError::NotFoundUser;
|
||||
assert_eq!(err.to_string(), "User not found");
|
||||
|
||||
let err = AppError::NotFoundSecret;
|
||||
assert_eq!(err.to_string(), "Secret not found");
|
||||
|
||||
let err = AppError::AuthenticationFailed;
|
||||
assert_eq!(err.to_string(), "Authentication failed");
|
||||
|
||||
let err = AppError::Unauthorized;
|
||||
assert!(err.to_string().contains("Unauthorized"));
|
||||
|
||||
let err = AppError::Validation {
|
||||
message: "too long".to_string(),
|
||||
};
|
||||
assert!(err.to_string().contains("too long"));
|
||||
|
||||
let err = AppError::ConcurrentModification;
|
||||
assert!(err.to_string().contains("Concurrent modification"));
|
||||
|
||||
let err = AppError::EncryptionKeyNotSet;
|
||||
assert!(err.to_string().contains("Encryption key not set"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn db_error_context_helpers() {
|
||||
let ctx = DbErrorContext::secret_name("my_key");
|
||||
assert_eq!(ctx.secret_name, Some("my_key"));
|
||||
assert!(ctx.folder.is_none());
|
||||
|
||||
let ctx = DbErrorContext::entry("prod", "db-creds");
|
||||
assert_eq!(ctx.folder, Some("prod"));
|
||||
assert_eq!(ctx.name, Some("db-creds"));
|
||||
assert!(ctx.secret_name.is_none());
|
||||
}
|
||||
}
|
||||
8
crates/secrets-core/src/lib.rs
Normal file
@@ -0,0 +1,8 @@
pub mod audit;
pub mod config;
pub mod crypto;
pub mod db;
pub mod error;
pub mod models;
pub mod service;
pub mod taxonomy;
@@ -4,14 +4,18 @@ use serde_json::Value;
|
||||
use std::collections::BTreeMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// A top-level entry (server, service, key, …).
|
||||
/// A top-level entry (server, service, account, person, …).
|
||||
/// Sensitive fields are stored separately in `secrets`.
|
||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
||||
pub struct Entry {
|
||||
pub id: Uuid,
|
||||
pub namespace: String,
|
||||
pub kind: String,
|
||||
pub user_id: Option<Uuid>,
|
||||
pub folder: String,
|
||||
#[serde(rename = "type")]
|
||||
#[sqlx(rename = "type")]
|
||||
pub entry_type: String,
|
||||
pub name: String,
|
||||
pub notes: String,
|
||||
pub tags: Vec<String>,
|
||||
pub metadata: Value,
|
||||
pub version: i64,
|
||||
@@ -23,8 +27,11 @@ pub struct Entry {
|
||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
||||
pub struct SecretField {
|
||||
pub id: Uuid,
|
||||
pub entry_id: Uuid,
|
||||
pub field_name: String,
|
||||
pub user_id: Option<Uuid>,
|
||||
pub name: String,
|
||||
#[serde(rename = "type")]
|
||||
#[sqlx(rename = "type")]
|
||||
pub secret_type: String,
|
||||
/// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag
|
||||
pub encrypted: Vec<u8>,
|
||||
pub version: i64,
|
||||
@@ -39,15 +46,47 @@ pub struct SecretField {
|
||||
pub struct EntryRow {
|
||||
pub id: Uuid,
|
||||
pub version: i64,
|
||||
pub folder: String,
|
||||
#[sqlx(rename = "type")]
|
||||
pub entry_type: String,
|
||||
pub tags: Vec<String>,
|
||||
pub metadata: Value,
|
||||
pub notes: String,
|
||||
}
|
||||
|
||||
/// Entry row including `name` (used for id-scoped web / service updates).
|
||||
#[derive(Debug, sqlx::FromRow)]
|
||||
pub struct EntryWriteRow {
|
||||
pub id: Uuid,
|
||||
pub version: i64,
|
||||
pub folder: String,
|
||||
#[sqlx(rename = "type")]
|
||||
pub entry_type: String,
|
||||
pub name: String,
|
||||
pub tags: Vec<String>,
|
||||
pub metadata: Value,
|
||||
pub notes: String,
|
||||
}
|
||||
|
||||
impl From<&EntryWriteRow> for EntryRow {
|
||||
fn from(r: &EntryWriteRow) -> Self {
|
||||
EntryRow {
|
||||
id: r.id,
|
||||
version: r.version,
|
||||
folder: r.folder.clone(),
|
||||
entry_type: r.entry_type.clone(),
|
||||
tags: r.tags.clone(),
|
||||
metadata: r.metadata.clone(),
|
||||
notes: r.notes.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Minimal secret field row fetched before snapshots or cascade deletes.
|
||||
#[derive(Debug, sqlx::FromRow)]
|
||||
pub struct SecretFieldRow {
|
||||
pub id: Uuid,
|
||||
pub field_name: String,
|
||||
pub name: String,
|
||||
pub encrypted: Vec<u8>,
|
||||
}
|
||||
|
||||
@@ -61,20 +100,10 @@ pub enum ExportFormat {
|
||||
Yaml,
|
||||
}
|
||||
|
||||
impl ExportFormat {
|
||||
/// Infer format from file extension (.json / .toml / .yaml / .yml).
|
||||
pub fn from_extension(path: &str) -> anyhow::Result<Self> {
|
||||
let ext = path.rsplit('.').next().unwrap_or("").to_lowercase();
|
||||
Self::from_str(&ext).map_err(|_| {
|
||||
anyhow::anyhow!(
|
||||
"Cannot infer format from extension '.{}'. Use --format json|toml|yaml",
|
||||
ext
|
||||
)
|
||||
})
|
||||
}
|
||||
impl std::str::FromStr for ExportFormat {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
/// Parse from --format CLI value.
|
||||
pub fn from_str(s: &str) -> anyhow::Result<Self> {
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"json" => Ok(Self::Json),
|
||||
"toml" => Ok(Self::Toml),
|
||||
@@ -82,6 +111,19 @@ impl ExportFormat {
|
||||
other => anyhow::bail!("Unknown format '{}'. Expected: json, toml, or yaml", other),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
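// Illustrative only: picking a format from an explicit --format value or, failing
// that, from the file extension. `cli_format` and `path` are hypothetical names;
// the FromStr impl and from_extension come from this file.
fn pick_format(cli_format: Option<&str>, path: &str) -> anyhow::Result<ExportFormat> {
    match cli_format {
        Some(s) => s.parse(),                        // FromStr impl above: "json" | "toml" | "yaml"
        None => ExportFormat::from_extension(path),  // infer from .json / .toml / .yaml / .yml
    }
}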
impl ExportFormat {
|
||||
/// Infer format from file extension (.json / .toml / .yaml / .yml).
|
||||
pub fn from_extension(path: &str) -> anyhow::Result<Self> {
|
||||
let ext = path.rsplit('.').next().unwrap_or("").to_lowercase();
|
||||
ext.parse().map_err(|_| {
|
||||
anyhow::anyhow!(
|
||||
"Cannot infer format from extension '.{}'. Use --format json|toml|yaml",
|
||||
ext
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Serialize ExportData to a string in this format.
|
||||
pub fn serialize(&self, data: &ExportData) -> anyhow::Result<String> {
|
||||
@@ -124,10 +166,14 @@ pub struct ExportData {
|
||||
/// A single entry with decrypted secrets for export/import.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ExportEntry {
|
||||
pub namespace: String,
|
||||
pub kind: String,
|
||||
pub name: String,
|
||||
#[serde(default)]
|
||||
pub folder: String,
|
||||
#[serde(default, rename = "type")]
|
||||
pub entry_type: String,
|
||||
#[serde(default)]
|
||||
pub notes: String,
|
||||
#[serde(default)]
|
||||
pub tags: Vec<String>,
|
||||
#[serde(default)]
|
||||
pub metadata: Value,
|
||||
@@ -136,6 +182,56 @@ pub struct ExportEntry {
|
||||
pub secrets: Option<BTreeMap<String, Value>>,
|
||||
}
|
||||
|
||||
// ── Multi-user models ──────────────────────────────────────────────────────────
|
||||
|
||||
/// A registered user (created on first OAuth login).
|
||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
||||
pub struct User {
|
||||
pub id: Uuid,
|
||||
pub email: Option<String>,
|
||||
pub name: String,
|
||||
pub avatar_url: Option<String>,
|
||||
/// PBKDF2 salt (32 B). NULL until user sets up passphrase.
|
||||
pub key_salt: Option<Vec<u8>>,
|
||||
/// AES-256-GCM encryption of the known constant "secrets-mcp-key-check".
|
||||
/// Used to verify the passphrase without storing the key itself.
|
||||
pub key_check: Option<Vec<u8>>,
|
||||
/// Key derivation parameters, e.g. {"alg":"pbkdf2-sha256","iterations":600000}.
|
||||
pub key_params: Option<serde_json::Value>,
|
||||
/// Plaintext API key for MCP Bearer authentication. Auto-created on first login.
|
||||
pub api_key: Option<String>,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
/// An OAuth account linked to a user.
|
||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
||||
pub struct OauthAccount {
|
||||
pub id: Uuid,
|
||||
pub user_id: Uuid,
|
||||
pub provider: String,
|
||||
pub provider_id: String,
|
||||
pub email: Option<String>,
|
||||
pub name: Option<String>,
|
||||
pub avatar_url: Option<String>,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
/// A single audit log row, optionally scoped to a business user.
|
||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
||||
pub struct AuditLogEntry {
|
||||
pub id: i64,
|
||||
pub user_id: Option<Uuid>,
|
||||
pub action: String,
|
||||
pub folder: String,
|
||||
#[serde(rename = "type")]
|
||||
#[sqlx(rename = "type")]
|
||||
pub entry_type: String,
|
||||
pub name: String,
|
||||
pub detail: Value,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
// ── TOML ↔ JSON value conversion ──────────────────────────────────────────────
|
||||
|
||||
/// Convert a serde_json Value to a toml Value.
|
||||
802
crates/secrets-core/src/service/add.rs
Normal file
@@ -0,0 +1,802 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::{Map, Value};
|
||||
use sqlx::PgPool;
|
||||
use std::collections::{BTreeSet, HashSet};
|
||||
use std::fs;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::db;
|
||||
use crate::error::{AppError, DbErrorContext};
|
||||
use crate::models::EntryRow;
|
||||
|
||||
// ── Key/value parsing helpers ─────────────────────────────────────────────────
|
||||
|
||||
pub fn parse_kv(entry: &str) -> Result<(Vec<String>, Value)> {
|
||||
if let Some((key, json_str)) = entry.split_once(":=") {
|
||||
let val: Value = serde_json::from_str(json_str).map_err(|e| {
|
||||
anyhow::anyhow!(
|
||||
"Invalid JSON value for key '{}': {} (use key=value for plain strings)",
|
||||
key,
|
||||
e
|
||||
)
|
||||
})?;
|
||||
return Ok((parse_key_path(key)?, val));
|
||||
}
|
||||
|
||||
if let Some((key, raw_val)) = entry.split_once('=') {
|
||||
let value = if let Some(path) = raw_val.strip_prefix('@') {
|
||||
fs::read_to_string(path)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?
|
||||
} else {
|
||||
raw_val.to_string()
|
||||
};
|
||||
return Ok((parse_key_path(key)?, Value::String(value)));
|
||||
}
|
||||
|
||||
if let Some((key, path)) = entry.split_once('@') {
|
||||
let value = fs::read_to_string(path)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?;
|
||||
return Ok((parse_key_path(key)?, Value::String(value)));
|
||||
}
|
||||
|
||||
anyhow::bail!(
|
||||
"Invalid format '{}'. Expected: key=value, key=@file, nested:key@file, or key:=<json>",
|
||||
entry
|
||||
)
|
||||
}
|
||||
|
||||
pub fn build_json(entries: &[String]) -> Result<Value> {
|
||||
let mut map = Map::new();
|
||||
for entry in entries {
|
||||
let (path, value) = parse_kv(entry)?;
|
||||
insert_path(&mut map, &path, value)?;
|
||||
}
|
||||
Ok(Value::Object(map))
|
||||
}
|
||||
|
||||
pub fn key_path_to_string(path: &[String]) -> String {
|
||||
path.join(":")
|
||||
}
|
||||
|
||||
pub fn collect_key_paths(entries: &[String]) -> Result<Vec<String>> {
|
||||
entries
|
||||
.iter()
|
||||
.map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn collect_field_paths(entries: &[String]) -> Result<Vec<String>> {
|
||||
entries
|
||||
.iter()
|
||||
.map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn parse_key_path(key: &str) -> Result<Vec<String>> {
|
||||
let path: Vec<String> = key
|
||||
.split(':')
|
||||
.map(str::trim)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect();
|
||||
|
||||
if path.is_empty() || path.iter().any(|part| part.is_empty()) {
|
||||
anyhow::bail!(
|
||||
"Invalid key path '{}'. Use non-empty segments like 'credentials:content'.",
|
||||
key
|
||||
);
|
||||
}
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
pub fn insert_path(map: &mut Map<String, Value>, path: &[String], value: Value) -> Result<()> {
|
||||
if path.is_empty() {
|
||||
anyhow::bail!("Key path cannot be empty");
|
||||
}
|
||||
if path.len() == 1 {
|
||||
map.insert(path[0].clone(), value);
|
||||
return Ok(());
|
||||
}
|
||||
let head = path[0].clone();
|
||||
let tail = &path[1..];
|
||||
match map.entry(head.clone()) {
|
||||
serde_json::map::Entry::Vacant(entry) => {
|
||||
let mut child = Map::new();
|
||||
insert_path(&mut child, tail, value)?;
|
||||
entry.insert(Value::Object(child));
|
||||
}
|
||||
serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() {
|
||||
Value::Object(child) => insert_path(child, tail, value)?,
|
||||
_ => {
|
||||
anyhow::bail!(
|
||||
"Cannot set nested key '{}' because '{}' is already a non-object value",
|
||||
key_path_to_string(path),
|
||||
head
|
||||
);
|
||||
}
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Result<bool> {
|
||||
if path.is_empty() {
|
||||
anyhow::bail!("Key path cannot be empty");
|
||||
}
|
||||
if path.len() == 1 {
|
||||
return Ok(map.remove(&path[0]).is_some());
|
||||
}
|
||||
let Some(value) = map.get_mut(&path[0]) else {
|
||||
return Ok(false);
|
||||
};
|
||||
let Value::Object(child) = value else {
|
||||
return Ok(false);
|
||||
};
|
||||
let removed = remove_path(child, &path[1..])?;
|
||||
if child.is_empty() {
|
||||
map.remove(&path[0]);
|
||||
}
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
pub fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> {
|
||||
match value {
|
||||
Value::Object(map) => {
|
||||
let mut out = Vec::new();
|
||||
for (k, v) in map {
|
||||
let full_key = if prefix.is_empty() {
|
||||
k.clone()
|
||||
} else {
|
||||
format!("{}.{}", prefix, k)
|
||||
};
|
||||
out.extend(flatten_json_fields(&full_key, v));
|
||||
}
|
||||
out
|
||||
}
|
||||
other => vec![(prefix.to_string(), other.clone())],
|
||||
}
|
||||
}
|
||||
|
||||
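// Illustrative expectations for the parsing helpers above; the concrete values are
// made up, only parse_kv / build_json / key_path_to_string come from this file.
fn kv_examples() -> anyhow::Result<()> {
    let (path, val) = parse_kv("username=root")?;
    assert_eq!(key_path_to_string(&path), "username");
    assert_eq!(val, serde_json::Value::String("root".into()));

    let (path, val) = parse_kv("ports:=[22,443]")?; // `:=` takes raw JSON
    assert_eq!(key_path_to_string(&path), "ports");
    assert_eq!(val, serde_json::json!([22, 443]));

    let merged = build_json(&["db:host=pg".into(), "db:port:=5432".into()])?;
    assert_eq!(merged, serde_json::json!({"db": {"host": "pg", "port": 5432}}));
    Ok(())
}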
// ── AddResult ─────────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Debug, serde::Serialize)]
|
||||
pub struct AddResult {
|
||||
pub name: String,
|
||||
pub folder: String,
|
||||
#[serde(rename = "type")]
|
||||
pub entry_type: String,
|
||||
pub tags: Vec<String>,
|
||||
pub meta_keys: Vec<String>,
|
||||
pub secret_keys: Vec<String>,
|
||||
}
|
||||
|
||||
pub struct AddParams<'a> {
|
||||
pub name: &'a str,
|
||||
pub folder: &'a str,
|
||||
pub entry_type: &'a str,
|
||||
pub notes: &'a str,
|
||||
pub tags: &'a [String],
|
||||
pub meta_entries: &'a [String],
|
||||
pub secret_entries: &'a [String],
|
||||
pub secret_types: &'a std::collections::HashMap<String, String>,
|
||||
pub link_secret_names: &'a [String],
|
||||
/// Optional user_id for multi-user isolation (None = single-user CLI mode)
|
||||
pub user_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) -> Result<AddResult> {
|
||||
let Value::Object(metadata_map) = build_json(params.meta_entries)? else {
|
||||
unreachable!("build_json always returns a JSON object");
|
||||
};
|
||||
let entry_type = params.entry_type.trim();
|
||||
let metadata = Value::Object(metadata_map);
|
||||
let secret_json = build_json(params.secret_entries)?;
|
||||
let meta_keys = collect_key_paths(params.meta_entries)?;
|
||||
let secret_keys = collect_key_paths(params.secret_entries)?;
|
||||
let flat_fields = flatten_json_fields("", &secret_json);
|
||||
let new_secret_names: BTreeSet<String> =
|
||||
flat_fields.iter().map(|(name, _)| name.clone()).collect();
|
||||
let link_secret_names =
|
||||
validate_link_secret_names(params.link_secret_names, &new_secret_names)?;
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
// Fetch existing entry by (user_id, folder, name) — the natural unique key
|
||||
let existing: Option<EntryRow> = if let Some(uid) = params.user_id {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id = $1 AND folder = $2 AND name = $3",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(params.folder)
|
||||
.bind(params.name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id IS NULL AND folder = $1 AND name = $2",
|
||||
)
|
||||
.bind(params.folder)
|
||||
.bind(params.name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
if let Some(ref ex) = existing {
|
||||
let history_metadata =
|
||||
match db::metadata_with_secret_snapshot(&mut tx, ex.id, &ex.metadata).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to build secret snapshot for entry history");
|
||||
ex.metadata.clone()
|
||||
}
|
||||
};
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: ex.id,
|
||||
user_id: params.user_id,
|
||||
folder: params.folder,
|
||||
entry_type,
|
||||
name: params.name,
|
||||
version: ex.version,
|
||||
action: "add",
|
||||
tags: &ex.tags,
|
||||
metadata: &history_metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history before upsert");
|
||||
}
|
||||
}
|
||||
|
||||
// Upsert the entry row. On conflict (existing entry with same user_id+folder+name),
|
||||
// the entry columns are replaced wholesale. The old secret associations are torn down
|
||||
// below within the same transaction, so the whole operation is atomic: if any step
|
||||
// after this point fails, the transaction rolls back and the entry reverts to its
|
||||
// pre-upsert state (including the version bump that happened in the DO UPDATE clause).
|
||||
let entry_id: Uuid = if let Some(uid) = params.user_id {
|
||||
sqlx::query_scalar(
|
||||
r#"INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata, version, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, 1, NOW())
|
||||
ON CONFLICT (user_id, folder, name) WHERE user_id IS NOT NULL
|
||||
DO UPDATE SET
|
||||
folder = EXCLUDED.folder,
|
||||
type = EXCLUDED.type,
|
||||
notes = EXCLUDED.notes,
|
||||
tags = EXCLUDED.tags,
|
||||
metadata = EXCLUDED.metadata,
|
||||
version = entries.version + 1,
|
||||
updated_at = NOW()
|
||||
RETURNING id"#,
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(params.folder)
|
||||
.bind(entry_type)
|
||||
.bind(params.name)
|
||||
.bind(params.notes)
|
||||
.bind(params.tags)
|
||||
.bind(&metadata)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_scalar(
|
||||
r#"INSERT INTO entries (folder, type, name, notes, tags, metadata, version, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, 1, NOW())
|
||||
ON CONFLICT (folder, name) WHERE user_id IS NULL
|
||||
DO UPDATE SET
|
||||
folder = EXCLUDED.folder,
|
||||
type = EXCLUDED.type,
|
||||
notes = EXCLUDED.notes,
|
||||
tags = EXCLUDED.tags,
|
||||
metadata = EXCLUDED.metadata,
|
||||
version = entries.version + 1,
|
||||
updated_at = NOW()
|
||||
RETURNING id"#,
|
||||
)
|
||||
.bind(params.folder)
|
||||
.bind(entry_type)
|
||||
.bind(params.name)
|
||||
.bind(params.notes)
|
||||
.bind(params.tags)
|
||||
.bind(&metadata)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
let current_entry_version: i64 =
|
||||
sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
|
||||
.bind(entry_id)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if existing.is_some() {
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct ExistingField {
|
||||
id: Uuid,
|
||||
name: String,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let existing_fields: Vec<ExistingField> = sqlx::query_as(
|
||||
"SELECT s.id, s.name, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?;
|
||||
|
||||
for f in &existing_fields {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: f.id,
|
||||
name: &f.name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "add",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field history");
|
||||
}
|
||||
}
|
||||
|
||||
let orphan_candidates: Vec<Uuid> = existing_fields.iter().map(|f| f.id).collect();
|
||||
|
||||
sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1")
|
||||
.bind(entry_id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if !orphan_candidates.is_empty() {
|
||||
sqlx::query(
|
||||
"DELETE FROM secrets s \
|
||||
WHERE s.id = ANY($1) \
|
||||
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
|
||||
)
|
||||
.bind(&orphan_candidates)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
for (field_name, field_value) in &flat_fields {
|
||||
let encrypted = crypto::encrypt_json(master_key, field_value)?;
|
||||
let secret_type = params
|
||||
.secret_types
|
||||
.get(field_name)
|
||||
.map(|s| s.as_str())
|
||||
.unwrap_or("text");
|
||||
let secret_id: Uuid = sqlx::query_scalar(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
|
||||
)
|
||||
.bind(params.user_id)
|
||||
.bind(field_name)
|
||||
.bind(secret_type)
|
||||
.bind(&encrypted)
|
||||
.fetch_one(&mut *tx)
|
||||
.await
|
||||
.map_err(|e| AppError::from_db_error(e, DbErrorContext::secret_name(field_name)))?;
|
||||
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
|
||||
.bind(entry_id)
|
||||
.bind(secret_id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
for link_name in &link_secret_names {
|
||||
let secret_ids: Vec<Uuid> = if let Some(uid) = params.user_id {
|
||||
sqlx::query_scalar("SELECT id FROM secrets WHERE user_id = $1 AND name = $2")
|
||||
.bind(uid)
|
||||
.bind(link_name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_scalar("SELECT id FROM secrets WHERE user_id IS NULL AND name = $1")
|
||||
.bind(link_name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
match secret_ids.len() {
|
||||
0 => anyhow::bail!("Not found: secret named '{}'", link_name),
|
||||
1 => {
|
||||
sqlx::query(
|
||||
"INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.bind(secret_ids[0])
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
n => anyhow::bail!(
|
||||
"Ambiguous: {} secrets named '{}' found. Please deduplicate names first.",
|
||||
n,
|
||||
link_name
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
if existing.is_none() {
|
||||
let history_metadata =
|
||||
match db::metadata_with_secret_snapshot(&mut tx, entry_id, &metadata).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to build secret snapshot for entry history");
|
||||
metadata.clone()
|
||||
}
|
||||
};
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id,
|
||||
user_id: params.user_id,
|
||||
folder: params.folder,
|
||||
entry_type,
|
||||
name: params.name,
|
||||
version: current_entry_version,
|
||||
action: "create",
|
||||
tags: params.tags,
|
||||
metadata: &history_metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history on create");
|
||||
}
|
||||
}
|
||||
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
params.user_id,
|
||||
"add",
|
||||
params.folder,
|
||||
entry_type,
|
||||
params.name,
|
||||
serde_json::json!({
|
||||
"tags": params.tags,
|
||||
"meta_keys": meta_keys,
|
||||
"secret_keys": secret_keys,
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
Ok(AddResult {
|
||||
name: params.name.to_string(),
|
||||
folder: params.folder.to_string(),
|
||||
entry_type: entry_type.to_string(),
|
||||
tags: params.tags.to_vec(),
|
||||
meta_keys,
|
||||
secret_keys,
|
||||
})
|
||||
}
|
||||
|
||||
fn validate_link_secret_names(
|
||||
link_secret_names: &[String],
|
||||
new_secret_names: &BTreeSet<String>,
|
||||
) -> Result<Vec<String>> {
|
||||
let mut deduped = Vec::new();
|
||||
let mut seen = HashSet::new();
|
||||
|
||||
for raw in link_secret_names {
|
||||
let trimmed = raw.trim();
|
||||
if trimmed.is_empty() {
|
||||
anyhow::bail!("link_secret_names contains an empty name");
|
||||
}
|
||||
if new_secret_names.contains(trimmed) {
|
||||
anyhow::bail!(
|
||||
"Conflict: secret '{}' is provided both in secrets/secrets_obj and link_secret_names",
|
||||
trimmed
|
||||
);
|
||||
}
|
||||
if seen.insert(trimmed.to_string()) {
|
||||
deduped.push(trimmed.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(deduped)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
#[test]
|
||||
fn parse_nested_file_shorthand() {
|
||||
use std::io::Write;
|
||||
let mut f = tempfile::NamedTempFile::new().unwrap();
|
||||
writeln!(f, "line1\nline2").unwrap();
|
||||
let path = f.path().to_str().unwrap().to_string();
|
||||
let entry = format!("credentials:content@{}", path);
|
||||
let (path_parts, value) = parse_kv(&entry).unwrap();
|
||||
assert_eq!(key_path_to_string(&path_parts), "credentials:content");
|
||||
assert!(matches!(value, Value::String(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flatten_json_fields_nested() {
|
||||
let v = serde_json::json!({
|
||||
"username": "root",
|
||||
"credentials": {
|
||||
"type": "ssh",
|
||||
"content": "pem"
|
||||
}
|
||||
});
|
||||
let mut fields = flatten_json_fields("", &v);
|
||||
fields.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
assert_eq!(fields[0].0, "credentials.content");
|
||||
assert_eq!(fields[1].0, "credentials.type");
|
||||
assert_eq!(fields[2].0, "username");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_link_secret_names_conflict_with_new_secret() {
|
||||
let mut new_names = BTreeSet::new();
|
||||
new_names.insert("password".to_string());
|
||||
let err = validate_link_secret_names(&[String::from("password")], &new_names)
|
||||
.expect_err("must fail on overlap");
|
||||
assert!(
|
||||
err.to_string()
|
||||
.contains("provided both in secrets/secrets_obj and link_secret_names")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_link_secret_names_dedup_and_trim() {
|
||||
let names = vec![
|
||||
" shared_key ".to_string(),
|
||||
"shared_key".to_string(),
|
||||
"runner_token".to_string(),
|
||||
];
|
||||
let deduped = validate_link_secret_names(&names, &BTreeSet::new()).unwrap();
|
||||
assert_eq!(deduped, vec!["shared_key", "runner_token"]);
|
||||
}
|
||||
|
||||
async fn maybe_test_pool() -> Option<PgPool> {
|
||||
let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
|
||||
eprintln!("skip add linkage tests: SECRETS_DATABASE_URL is not set");
|
||||
return None;
|
||||
};
|
||||
let Ok(pool) = PgPool::connect(&url).await else {
|
||||
eprintln!("skip add linkage tests: cannot connect to database");
|
||||
return None;
|
||||
};
|
||||
if let Err(e) = crate::db::migrate(&pool).await {
|
||||
eprintln!("skip add linkage tests: migrate failed: {e}");
|
||||
return None;
|
||||
}
|
||||
Some(pool)
|
||||
}
|
||||
|
||||
async fn cleanup_test_rows(pool: &PgPool, marker: &str) -> Result<()> {
|
||||
sqlx::query(
|
||||
"DELETE FROM entries WHERE user_id IS NULL AND (name LIKE $1 OR folder LIKE $1)",
|
||||
)
|
||||
.bind(format!("%{marker}%"))
|
||||
.execute(pool)
|
||||
.await?;
|
||||
sqlx::query(
|
||||
"DELETE FROM secrets WHERE user_id IS NULL AND name LIKE $1 \
|
||||
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = secrets.id)",
|
||||
)
|
||||
.bind(format!("%{marker}%"))
|
||||
.execute(pool)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn add_links_existing_secret_by_unique_name() -> Result<()> {
|
||||
let Some(pool) = maybe_test_pool().await else {
|
||||
return Ok(());
|
||||
};
|
||||
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||
let marker = format!("link_unique_{}", &suffix[..8]);
|
||||
let secret_name = format!("{}_secret", marker);
|
||||
let entry_name = format!("{}_entry", marker);
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
|
||||
let secret_id: Uuid = sqlx::query_scalar(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2) RETURNING id",
|
||||
)
|
||||
.bind(&secret_name)
|
||||
.bind(vec![1_u8, 2, 3])
|
||||
.fetch_one(&pool)
|
||||
.await?;
|
||||
|
||||
run(
|
||||
&pool,
|
||||
AddParams {
|
||||
name: &entry_name,
|
||||
folder: &marker,
|
||||
entry_type: "service",
|
||||
notes: "",
|
||||
tags: &[],
|
||||
meta_entries: &[],
|
||||
secret_entries: &[],
|
||||
secret_types: &Default::default(),
|
||||
link_secret_names: std::slice::from_ref(&secret_name),
|
||||
user_id: None,
|
||||
},
|
||||
&[0_u8; 32],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let linked: bool = sqlx::query_scalar(
|
||||
"SELECT EXISTS( \
|
||||
SELECT 1 FROM entry_secrets es \
|
||||
JOIN entries e ON e.id = es.entry_id \
|
||||
WHERE e.user_id IS NULL AND e.name = $1 AND es.secret_id = $2 \
|
||||
)",
|
||||
)
|
||||
.bind(&entry_name)
|
||||
.bind(secret_id)
|
||||
.fetch_one(&pool)
|
||||
.await?;
|
||||
assert!(linked);
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn add_link_secret_name_not_found_fails() -> Result<()> {
|
||||
let Some(pool) = maybe_test_pool().await else {
|
||||
return Ok(());
|
||||
};
|
||||
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||
let marker = format!("link_missing_{}", &suffix[..8]);
|
||||
let secret_name = format!("{}_secret", marker);
|
||||
let entry_name = format!("{}_entry", marker);
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
|
||||
let err = run(
|
||||
&pool,
|
||||
AddParams {
|
||||
name: &entry_name,
|
||||
folder: &marker,
|
||||
entry_type: "service",
|
||||
notes: "",
|
||||
tags: &[],
|
||||
meta_entries: &[],
|
||||
secret_entries: &[],
|
||||
secret_types: &Default::default(),
|
||||
link_secret_names: std::slice::from_ref(&secret_name),
|
||||
user_id: None,
|
||||
},
|
||||
&[0_u8; 32],
|
||||
)
|
||||
.await
|
||||
.expect_err("must fail when linked secret is not found");
|
||||
assert!(err.to_string().contains("Not found: secret named"));
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn add_link_secret_name_ambiguous_fails() -> Result<()> {
|
||||
let Some(pool) = maybe_test_pool().await else {
|
||||
return Ok(());
|
||||
};
|
||||
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||
let marker = format!("link_amb_{}", &suffix[..8]);
|
||||
let secret_name = format!("{}_dup_secret", marker);
|
||||
let entry_name = format!("{}_entry", marker);
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
|
||||
)
|
||||
.bind(&secret_name)
|
||||
.bind(vec![1_u8])
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
sqlx::query(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, 'text', $2)",
|
||||
)
|
||||
.bind(&secret_name)
|
||||
.bind(vec![2_u8])
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
|
||||
let err = run(
|
||||
&pool,
|
||||
AddParams {
|
||||
name: &entry_name,
|
||||
folder: &marker,
|
||||
entry_type: "service",
|
||||
notes: "",
|
||||
tags: &[],
|
||||
meta_entries: &[],
|
||||
secret_entries: &[],
|
||||
secret_types: &Default::default(),
|
||||
link_secret_names: std::slice::from_ref(&secret_name),
|
||||
user_id: None,
|
||||
},
|
||||
&[0_u8; 32],
|
||||
)
|
||||
.await
|
||||
.expect_err("must fail on ambiguous linked secret name");
|
||||
assert!(err.to_string().contains("Ambiguous:"));
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn add_duplicate_secret_name_returns_conflict_error() -> Result<()> {
|
||||
let Some(pool) = maybe_test_pool().await else {
|
||||
return Ok(());
|
||||
};
|
||||
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||
let marker = format!("dup_secret_{}", &suffix[..8]);
|
||||
let entry_name = format!("{}_entry", marker);
|
||||
let secret_name = "shared_token";
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
|
||||
// First add succeeds
|
||||
run(
|
||||
&pool,
|
||||
AddParams {
|
||||
name: &entry_name,
|
||||
folder: &marker,
|
||||
entry_type: "service",
|
||||
notes: "",
|
||||
tags: &[],
|
||||
meta_entries: &[],
|
||||
secret_entries: &[format!("{}=value1", secret_name)],
|
||||
secret_types: &Default::default(),
|
||||
link_secret_names: &[],
|
||||
user_id: None,
|
||||
},
|
||||
&[0_u8; 32],
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Second add with same secret name under same user_id should fail with ConflictSecretName
|
||||
let entry_name2 = format!("{}_entry2", marker);
|
||||
let err = run(
|
||||
&pool,
|
||||
AddParams {
|
||||
name: &entry_name2,
|
||||
folder: &marker,
|
||||
entry_type: "service",
|
||||
notes: "",
|
||||
tags: &[],
|
||||
meta_entries: &[],
|
||||
secret_entries: &[format!("{}=value2", secret_name)],
|
||||
secret_types: &Default::default(),
|
||||
link_secret_names: &[],
|
||||
user_id: None,
|
||||
},
|
||||
&[0_u8; 32],
|
||||
)
|
||||
.await
|
||||
.expect_err("must fail on duplicate secret name");
|
||||
|
||||
let app_err = err
|
||||
.downcast_ref::<crate::error::AppError>()
|
||||
.expect("error should be AppError");
|
||||
assert!(
|
||||
matches!(app_err, crate::error::AppError::ConflictSecretName { .. }),
|
||||
"expected ConflictSecretName, got: {}",
|
||||
app_err
|
||||
);
|
||||
|
||||
cleanup_test_rows(&pool, &marker).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
66
crates/secrets-core/src/service/api_key.rs
Normal file
66
crates/secrets-core/src/service/api_key.rs
Normal file
@@ -0,0 +1,66 @@
use anyhow::Result;
use sqlx::PgPool;
use uuid::Uuid;

use crate::error::AppError;

const KEY_PREFIX: &str = "sk_";

/// Generate a new API key: `sk_<64 hex chars>` = 67 characters total.
pub fn generate_api_key() -> String {
    use rand::RngExt;
    let mut bytes = [0u8; 32];
    rand::rng().fill(&mut bytes);
    let hex: String = bytes.iter().map(|b| format!("{:02x}", b)).collect();
    format!("{}{}", KEY_PREFIX, hex)
}

/// Return the user's existing API key, or generate and store a new one if NULL.
/// Uses a transaction with atomic update to prevent TOCTOU race conditions.
pub async fn ensure_api_key(pool: &PgPool, user_id: Uuid) -> Result<String> {
    let mut tx = pool.begin().await?;

    // Lock the row and check existing key
    let existing: (Option<String>,) =
        sqlx::query_as("SELECT api_key FROM users WHERE id = $1 FOR UPDATE")
            .bind(user_id)
            .fetch_optional(&mut *tx)
            .await?
            .ok_or(AppError::NotFoundUser)?;

    if let Some(key) = existing.0 {
        tx.commit().await?;
        return Ok(key);
    }

    // Generate and store new key atomically
    let new_key = generate_api_key();
    sqlx::query("UPDATE users SET api_key = $1 WHERE id = $2")
        .bind(&new_key)
        .bind(user_id)
        .execute(&mut *tx)
        .await?;

    tx.commit().await?;
    Ok(new_key)
}

/// Generate a fresh API key for the user, replacing the old one.
pub async fn regenerate_api_key(pool: &PgPool, user_id: Uuid) -> Result<String> {
    let new_key = generate_api_key();
    sqlx::query("UPDATE users SET api_key = $1 WHERE id = $2")
        .bind(&new_key)
        .bind(user_id)
        .execute(pool)
        .await?;
    Ok(new_key)
}

/// Validate a Bearer token. Returns the `user_id` if the key matches.
pub async fn validate_api_key(pool: &PgPool, raw_key: &str) -> Result<Option<Uuid>> {
    let row: Option<(Uuid,)> = sqlx::query_as("SELECT id FROM users WHERE api_key = $1")
        .bind(raw_key)
        .fetch_optional(pool)
        .await?;
    Ok(row.map(|(id,)| id))
}
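Usage sketch for the API-key helpers above (not part of this diff; the pool handle and the Bearer-header parsing are placeholder assumptions):

// Hypothetical caller: resolve a user from an "Authorization: Bearer sk_..." header.
async fn user_from_bearer(pool: &sqlx::PgPool, header: &str) -> anyhow::Result<Option<uuid::Uuid>> {
    let Some(raw_key) = header.strip_prefix("Bearer ") else {
        return Ok(None); // not a Bearer credential
    };
    // Some(user_id) only when raw_key matches a users.api_key row.
    validate_api_key(pool, raw_key).await
}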
39 crates/secrets-core/src/service/audit_log.rs (new file)
@@ -0,0 +1,39 @@
use anyhow::Result;
use sqlx::PgPool;
use uuid::Uuid;

use crate::models::AuditLogEntry;

pub async fn list_for_user(
    pool: &PgPool,
    user_id: Uuid,
    limit: i64,
    offset: i64,
) -> Result<Vec<AuditLogEntry>> {
    let limit = limit.clamp(1, 200);
    let offset = offset.max(0);

    let rows = sqlx::query_as(
        "SELECT id, user_id, action, folder, type, name, detail, created_at \
         FROM audit_log \
         WHERE user_id = $1 \
         ORDER BY created_at DESC, id DESC \
         LIMIT $2 OFFSET $3",
    )
    .bind(user_id)
    .bind(limit)
    .bind(offset)
    .fetch_all(pool)
    .await?;

    Ok(rows)
}

pub async fn count_for_user(pool: &PgPool, user_id: Uuid) -> Result<i64> {
    let count: i64 =
        sqlx::query_scalar("SELECT COUNT(*)::bigint FROM audit_log WHERE user_id = $1")
            .bind(user_id)
            .fetch_one(pool)
            .await?;
    Ok(count)
}
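Paging sketch for the audit-log helpers above (placeholder values; not part of this diff):

// Hypothetical caller: fetch the second page of 50 audit rows plus the total count.
async fn audit_page(
    pool: &sqlx::PgPool,
    user_id: uuid::Uuid,
) -> anyhow::Result<(Vec<crate::models::AuditLogEntry>, i64)> {
    let rows = list_for_user(pool, user_id, 50, 50).await?; // limit 50, offset 50
    let total = count_for_user(pool, user_id).await?;
    Ok((rows, total))
}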
670 crates/secrets-core/src/service/delete.rs (new file)
@@ -0,0 +1,670 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::json;
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::db;
|
||||
use crate::models::{EntryRow, EntryWriteRow, SecretFieldRow};
|
||||
|
||||
#[derive(Debug, serde::Serialize)]
|
||||
pub struct DeletedEntry {
|
||||
pub name: String,
|
||||
pub folder: String,
|
||||
#[serde(rename = "type")]
|
||||
pub entry_type: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Serialize)]
|
||||
pub struct DeleteResult {
|
||||
pub deleted: Vec<DeletedEntry>,
|
||||
pub dry_run: bool,
|
||||
}
|
||||
|
||||
pub struct DeleteParams<'a> {
|
||||
/// If set, delete a single entry by name.
|
||||
pub name: Option<&'a str>,
|
||||
/// Folder filter for bulk delete.
|
||||
pub folder: Option<&'a str>,
|
||||
/// Type filter for bulk delete.
|
||||
pub entry_type: Option<&'a str>,
|
||||
pub dry_run: bool,
|
||||
pub user_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
/// Maximum number of entries that can be deleted in a single bulk operation.
|
||||
/// Prevents accidental mass deletion when filters are too broad.
|
||||
pub const MAX_BULK_DELETE: usize = 1000;
|
||||
|
||||
/// Delete a single entry by id (multi-tenant: `user_id` must match).
|
||||
pub async fn delete_by_id(pool: &PgPool, entry_id: Uuid, user_id: Uuid) -> Result<DeleteResult> {
|
||||
let mut tx = pool.begin().await?;
|
||||
let row: Option<EntryWriteRow> = sqlx::query_as(
|
||||
"SELECT id, version, folder, type, name, tags, metadata, notes FROM entries \
|
||||
WHERE id = $1 AND user_id = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.bind(user_id)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
let row = match row {
|
||||
Some(r) => r,
|
||||
None => {
|
||||
tx.rollback().await?;
|
||||
anyhow::bail!("Entry not found");
|
||||
}
|
||||
};
|
||||
|
||||
let folder = row.folder.clone();
|
||||
let entry_type = row.entry_type.clone();
|
||||
let name = row.name.clone();
|
||||
let entry_row: EntryRow = (&row).into();
|
||||
|
||||
snapshot_and_delete(
|
||||
&mut tx,
|
||||
&folder,
|
||||
&entry_type,
|
||||
&name,
|
||||
&entry_row,
|
||||
Some(user_id),
|
||||
)
|
||||
.await?;
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
Some(user_id),
|
||||
"delete",
|
||||
&folder,
|
||||
&entry_type,
|
||||
&name,
|
||||
json!({ "source": "web", "entry_id": entry_id }),
|
||||
)
|
||||
.await;
|
||||
tx.commit().await?;
|
||||
|
||||
Ok(DeleteResult {
|
||||
deleted: vec![DeletedEntry {
|
||||
name,
|
||||
folder,
|
||||
entry_type,
|
||||
}],
|
||||
dry_run: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, params: DeleteParams<'_>) -> Result<DeleteResult> {
|
||||
match params.name {
|
||||
Some(name) => delete_one(pool, name, params.folder, params.dry_run, params.user_id).await,
|
||||
None => {
|
||||
if params.folder.is_none() && params.entry_type.is_none() {
|
||||
anyhow::bail!(
|
||||
"Bulk delete requires at least one of: name, folder, or type filter."
|
||||
);
|
||||
}
|
||||
delete_bulk(
|
||||
pool,
|
||||
params.folder,
|
||||
params.entry_type,
|
||||
params.dry_run,
|
||||
params.user_id,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_one(
|
||||
pool: &PgPool,
|
||||
name: &str,
|
||||
folder: Option<&str>,
|
||||
dry_run: bool,
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<DeleteResult> {
|
||||
if dry_run {
|
||||
// Dry-run uses the same disambiguation logic as actual delete:
|
||||
// - 0 matches → nothing to delete
|
||||
// - 1 match → show what would be deleted (with correct folder/type)
|
||||
// - 2+ matches → disambiguation error (same as non-dry-run)
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct DryRunRow {
|
||||
#[allow(dead_code)]
|
||||
id: Uuid,
|
||||
folder: String,
|
||||
#[sqlx(rename = "type")]
|
||||
entry_type: String,
|
||||
}
|
||||
|
||||
let rows: Vec<DryRunRow> = if let Some(uid) = user_id {
|
||||
if let Some(f) = folder {
|
||||
sqlx::query_as(
|
||||
"SELECT id, folder, type FROM entries WHERE user_id = $1 AND folder = $2 AND name = $3",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(f)
|
||||
.bind(name)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, folder, type FROM entries WHERE user_id = $1 AND name = $2",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(name)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
}
|
||||
} else if let Some(f) = folder {
|
||||
sqlx::query_as(
|
||||
"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2",
|
||||
)
|
||||
.bind(f)
|
||||
.bind(name)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, folder, type FROM entries WHERE user_id IS NULL AND name = $1",
|
||||
)
|
||||
.bind(name)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
};
|
||||
|
||||
return match rows.len() {
|
||||
0 => Ok(DeleteResult {
|
||||
deleted: vec![],
|
||||
dry_run: true,
|
||||
}),
|
||||
1 => {
|
||||
let row = rows.into_iter().next().unwrap();
|
||||
Ok(DeleteResult {
|
||||
deleted: vec![DeletedEntry {
|
||||
name: name.to_string(),
|
||||
folder: row.folder,
|
||||
entry_type: row.entry_type,
|
||||
}],
|
||||
dry_run: true,
|
||||
})
|
||||
}
|
||||
_ => {
|
||||
let folders: Vec<&str> = rows.iter().map(|r| r.folder.as_str()).collect();
|
||||
anyhow::bail!(
|
||||
"Ambiguous: {} entries named '{}' found in folders: [{}]. \
|
||||
Specify 'folder' to disambiguate.",
|
||||
rows.len(),
|
||||
name,
|
||||
folders.join(", ")
|
||||
)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
// Fetch matching rows with FOR UPDATE; use folder when provided to resolve ambiguity.
|
||||
let rows: Vec<EntryRow> = if let Some(uid) = user_id {
|
||||
if let Some(f) = folder {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id = $1 AND folder = $2 AND name = $3 FOR UPDATE",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(f)
|
||||
.bind(name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id = $1 AND name = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
}
|
||||
} else if let Some(f) = folder {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id IS NULL AND folder = $1 AND name = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(f)
|
||||
.bind(name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id IS NULL AND name = $1 FOR UPDATE",
|
||||
)
|
||||
.bind(name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
let row = match rows.len() {
|
||||
0 => {
|
||||
tx.rollback().await?;
|
||||
return Ok(DeleteResult {
|
||||
deleted: vec![],
|
||||
dry_run: false,
|
||||
});
|
||||
}
|
||||
1 => rows.into_iter().next().unwrap(),
|
||||
_ => {
|
||||
tx.rollback().await?;
|
||||
let folders: Vec<&str> = rows.iter().map(|r| r.folder.as_str()).collect();
|
||||
anyhow::bail!(
|
||||
"Ambiguous: {} entries named '{}' found in folders: [{}]. \
|
||||
Specify 'folder' to disambiguate.",
|
||||
rows.len(),
|
||||
name,
|
||||
folders.join(", ")
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
let folder = row.folder.clone();
|
||||
let entry_type = row.entry_type.clone();
|
||||
snapshot_and_delete(&mut tx, &folder, &entry_type, name, &row, user_id).await?;
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
user_id,
|
||||
"delete",
|
||||
&folder,
|
||||
&entry_type,
|
||||
name,
|
||||
json!({}),
|
||||
)
|
||||
.await;
|
||||
tx.commit().await?;
|
||||
|
||||
Ok(DeleteResult {
|
||||
deleted: vec![DeletedEntry {
|
||||
name: name.to_string(),
|
||||
folder,
|
||||
entry_type,
|
||||
}],
|
||||
dry_run: false,
|
||||
})
|
||||
}
|
||||
|
||||
async fn delete_bulk(
|
||||
pool: &PgPool,
|
||||
folder: Option<&str>,
|
||||
entry_type: Option<&str>,
|
||||
dry_run: bool,
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<DeleteResult> {
|
||||
#[derive(Debug, sqlx::FromRow)]
|
||||
struct FullEntryRow {
|
||||
id: Uuid,
|
||||
version: i64,
|
||||
folder: String,
|
||||
#[sqlx(rename = "type")]
|
||||
entry_type: String,
|
||||
name: String,
|
||||
metadata: serde_json::Value,
|
||||
tags: Vec<String>,
|
||||
notes: String,
|
||||
}
|
||||
|
||||
let mut conditions: Vec<String> = Vec::new();
|
||||
let mut idx: i32 = 1;
|
||||
|
||||
if user_id.is_some() {
|
||||
conditions.push(format!("user_id = ${}", idx));
|
||||
idx += 1;
|
||||
} else {
|
||||
conditions.push("user_id IS NULL".to_string());
|
||||
}
|
||||
if folder.is_some() {
|
||||
conditions.push(format!("folder = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if entry_type.is_some() {
|
||||
conditions.push(format!("type = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
|
||||
let where_clause = format!("WHERE {}", conditions.join(" AND "));
|
||||
let _ = idx; // used only for placeholder numbering in conditions
|
||||
|
||||
if dry_run {
|
||||
let sql = format!(
|
||||
"SELECT id, version, folder, type, name, metadata, tags, notes \
|
||||
FROM entries {where_clause} ORDER BY type, name"
|
||||
);
|
||||
let mut q = sqlx::query_as::<_, FullEntryRow>(&sql);
|
||||
if let Some(uid) = user_id {
|
||||
q = q.bind(uid);
|
||||
}
|
||||
if let Some(f) = folder {
|
||||
q = q.bind(f);
|
||||
}
|
||||
if let Some(t) = entry_type {
|
||||
q = q.bind(t);
|
||||
}
|
||||
let rows = q.fetch_all(pool).await?;
|
||||
|
||||
let deleted = rows
|
||||
.iter()
|
||||
.map(|r| DeletedEntry {
|
||||
name: r.name.clone(),
|
||||
folder: r.folder.clone(),
|
||||
entry_type: r.entry_type.clone(),
|
||||
})
|
||||
.collect();
|
||||
return Ok(DeleteResult {
|
||||
deleted,
|
||||
dry_run: true,
|
||||
});
|
||||
}
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
let sql = format!(
|
||||
"SELECT id, version, folder, type, name, metadata, tags, notes \
|
||||
FROM entries {where_clause} ORDER BY type, name FOR UPDATE"
|
||||
);
|
||||
let mut q = sqlx::query_as::<_, FullEntryRow>(&sql);
|
||||
if let Some(uid) = user_id {
|
||||
q = q.bind(uid);
|
||||
}
|
||||
if let Some(f) = folder {
|
||||
q = q.bind(f);
|
||||
}
|
||||
if let Some(t) = entry_type {
|
||||
q = q.bind(t);
|
||||
}
|
||||
let rows = q.fetch_all(&mut *tx).await?;
|
||||
|
||||
if rows.len() > MAX_BULK_DELETE {
|
||||
tx.rollback().await?;
|
||||
anyhow::bail!(
|
||||
"Bulk delete would affect {} entries (limit: {}). \
|
||||
Narrow your filters or delete entries individually.",
|
||||
rows.len(),
|
||||
MAX_BULK_DELETE,
|
||||
);
|
||||
}
|
||||
|
||||
let mut deleted = Vec::with_capacity(rows.len());
|
||||
for row in &rows {
|
||||
let entry_row: EntryRow = EntryRow {
|
||||
id: row.id,
|
||||
version: row.version,
|
||||
folder: row.folder.clone(),
|
||||
entry_type: row.entry_type.clone(),
|
||||
tags: row.tags.clone(),
|
||||
metadata: row.metadata.clone(),
|
||||
notes: row.notes.clone(),
|
||||
};
|
||||
snapshot_and_delete(
|
||||
&mut tx,
|
||||
&row.folder,
|
||||
&row.entry_type,
|
||||
&row.name,
|
||||
&entry_row,
|
||||
user_id,
|
||||
)
|
||||
.await?;
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
user_id,
|
||||
"delete",
|
||||
&row.folder,
|
||||
&row.entry_type,
|
||||
&row.name,
|
||||
json!({"bulk": true}),
|
||||
)
|
||||
.await;
|
||||
deleted.push(DeletedEntry {
|
||||
name: row.name.clone(),
|
||||
folder: row.folder.clone(),
|
||||
entry_type: row.entry_type.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
Ok(DeleteResult {
|
||||
deleted,
|
||||
dry_run: false,
|
||||
})
|
||||
}
|
||||
|
||||
async fn snapshot_and_delete(
|
||||
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
||||
folder: &str,
|
||||
entry_type: &str,
|
||||
name: &str,
|
||||
row: &EntryRow,
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<()> {
|
||||
let history_metadata = match db::metadata_with_secret_snapshot(tx, row.id, &row.metadata).await
|
||||
{
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to build secret snapshot for entry history");
|
||||
row.metadata.clone()
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: row.id,
|
||||
user_id,
|
||||
folder,
|
||||
entry_type,
|
||||
name,
|
||||
version: row.version,
|
||||
action: "delete",
|
||||
tags: &row.tags,
|
||||
metadata: &history_metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history before delete");
|
||||
}
|
||||
|
||||
let fields: Vec<SecretFieldRow> = sqlx::query_as(
|
||||
"SELECT s.id, s.name, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1",
|
||||
)
|
||||
.bind(row.id)
|
||||
.fetch_all(&mut **tx)
|
||||
.await?;
|
||||
|
||||
for f in &fields {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: f.id,
|
||||
name: &f.name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "delete",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret history before delete");
|
||||
}
|
||||
}
|
||||
|
||||
sqlx::query("DELETE FROM entries WHERE id = $1")
|
||||
.bind(row.id)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
|
||||
let secret_ids: Vec<Uuid> = fields.iter().map(|f| f.id).collect();
|
||||
if !secret_ids.is_empty() {
|
||||
sqlx::query(
|
||||
"DELETE FROM secrets s \
|
||||
WHERE s.id = ANY($1) \
|
||||
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
|
||||
)
|
||||
.bind(&secret_ids)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use sqlx::PgPool;
|
||||
|
||||
async fn maybe_test_pool() -> Option<PgPool> {
|
||||
let Ok(url) = std::env::var("SECRETS_DATABASE_URL") else {
|
||||
eprintln!("skip delete tests: SECRETS_DATABASE_URL is not set");
|
||||
return None;
|
||||
};
|
||||
let Ok(pool) = PgPool::connect(&url).await else {
|
||||
eprintln!("skip delete tests: cannot connect to database");
|
||||
return None;
|
||||
};
|
||||
if let Err(e) = crate::db::migrate(&pool).await {
|
||||
eprintln!("skip delete tests: migrate failed: {e}");
|
||||
return None;
|
||||
}
|
||||
Some(pool)
|
||||
}
|
||||
|
||||
async fn cleanup_single_user_rows(pool: &PgPool, marker: &str) -> Result<()> {
|
||||
sqlx::query(
|
||||
"DELETE FROM entries WHERE user_id IS NULL AND (name LIKE $1 OR folder LIKE $1)",
|
||||
)
|
||||
.bind(format!("%{marker}%"))
|
||||
.execute(pool)
|
||||
.await?;
|
||||
sqlx::query(
|
||||
"DELETE FROM secrets WHERE user_id IS NULL AND name LIKE $1 \
|
||||
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = secrets.id)",
|
||||
)
|
||||
.bind(format!("%{marker}%"))
|
||||
.execute(pool)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn delete_dry_run_reports_matching_entry_without_writes() -> Result<()> {
|
||||
let Some(pool) = maybe_test_pool().await else {
|
||||
return Ok(());
|
||||
};
|
||||
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||
let marker = format!("delete_dry_{}", &suffix[..8]);
|
||||
let entry_name = format!("{}_entry", marker);
|
||||
|
||||
cleanup_single_user_rows(&pool, &marker).await?;
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata) \
|
||||
VALUES (NULL, $1, 'service', $2, '', '{}', '{}')",
|
||||
)
|
||||
.bind(&marker)
|
||||
.bind(&entry_name)
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
|
||||
let result = run(
|
||||
&pool,
|
||||
DeleteParams {
|
||||
name: Some(&entry_name),
|
||||
folder: Some(&marker),
|
||||
entry_type: None,
|
||||
dry_run: true,
|
||||
user_id: None,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
assert!(result.dry_run);
|
||||
assert_eq!(result.deleted.len(), 1);
|
||||
assert_eq!(result.deleted[0].name, entry_name);
|
||||
|
||||
let still_exists: bool = sqlx::query_scalar(
|
||||
"SELECT EXISTS(SELECT 1 FROM entries WHERE user_id IS NULL AND folder = $1 AND name = $2)",
|
||||
)
|
||||
.bind(&marker)
|
||||
.bind(&entry_name)
|
||||
.fetch_one(&pool)
|
||||
.await?;
|
||||
assert!(still_exists);
|
||||
|
||||
cleanup_single_user_rows(&pool, &marker).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn delete_by_id_removes_entry_and_orphan_secret() -> Result<()> {
|
||||
let Some(pool) = maybe_test_pool().await else {
|
||||
return Ok(());
|
||||
};
|
||||
let suffix = Uuid::from_u128(rand::random()).to_string();
|
||||
let marker = format!("delete_id_{}", &suffix[..8]);
|
||||
let user_id = Uuid::from_u128(rand::random());
|
||||
let entry_name = format!("{}_entry", marker);
|
||||
let secret_name = format!("{}_secret", marker);
|
||||
|
||||
sqlx::query("DELETE FROM entries WHERE user_id = $1 AND folder = $2")
|
||||
.bind(user_id)
|
||||
.bind(&marker)
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
sqlx::query("DELETE FROM secrets WHERE user_id = $1 AND name = $2")
|
||||
.bind(user_id)
|
||||
.bind(&secret_name)
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
|
||||
let entry_id: Uuid = sqlx::query_scalar(
|
||||
"INSERT INTO entries (user_id, folder, type, name, notes, tags, metadata) \
|
||||
VALUES ($1, $2, 'service', $3, '', '{}', '{}') RETURNING id",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(&marker)
|
||||
.bind(&entry_name)
|
||||
.fetch_one(&pool)
|
||||
.await?;
|
||||
let secret_id: Uuid = sqlx::query_scalar(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, 'text', $3) RETURNING id",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(&secret_name)
|
||||
.bind(vec![1_u8, 2, 3])
|
||||
.fetch_one(&pool)
|
||||
.await?;
|
||||
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
|
||||
.bind(entry_id)
|
||||
.bind(secret_id)
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
|
||||
let result = delete_by_id(&pool, entry_id, user_id).await?;
|
||||
assert!(!result.dry_run);
|
||||
assert_eq!(result.deleted.len(), 1);
|
||||
assert_eq!(result.deleted[0].name, entry_name);
|
||||
|
||||
let entry_exists: bool =
|
||||
sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM entries WHERE id = $1)")
|
||||
.bind(entry_id)
|
||||
.fetch_one(&pool)
|
||||
.await?;
|
||||
let secret_exists: bool =
|
||||
sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM secrets WHERE id = $1)")
|
||||
.bind(secret_id)
|
||||
.fetch_one(&pool)
|
||||
.await?;
|
||||
assert!(!entry_exists);
|
||||
assert!(!secret_exists);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
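Call sketch for `run` above, showing the dry-run / bulk-filter contract (folder and type values are placeholders; not part of this diff):

// Hypothetical caller: preview a bulk delete before committing to it.
async fn preview_bulk_delete(pool: &sqlx::PgPool) -> anyhow::Result<()> {
    let preview = run(
        pool,
        DeleteParams {
            name: None,
            folder: Some("staging"),      // at least one filter is required for bulk delete
            entry_type: Some("service"),
            dry_run: true,                // report matches without writing anything
            user_id: None,
        },
    )
    .await?;
    println!("would delete {} entries", preview.deleted.len());
    Ok(())
}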
91 crates/secrets-core/src/service/env_map.rs (new file)
@@ -0,0 +1,91 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::HashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::models::Entry;
|
||||
use crate::service::search::{fetch_entries, fetch_secrets_for_entries};
|
||||
|
||||
/// Build an env variable map from entry secrets (for dry-run preview or injection).
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn build_env_map(
|
||||
pool: &PgPool,
|
||||
folder: Option<&str>,
|
||||
entry_type: Option<&str>,
|
||||
name: Option<&str>,
|
||||
tags: &[String],
|
||||
only_fields: &[String],
|
||||
prefix: &str,
|
||||
master_key: &[u8; 32],
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<HashMap<String, String>> {
|
||||
let entries = fetch_entries(pool, folder, entry_type, name, tags, None, user_id).await?;
|
||||
|
||||
let mut combined: HashMap<String, String> = HashMap::new();
|
||||
|
||||
for entry in &entries {
|
||||
let entry_map =
|
||||
build_entry_env_map(pool, entry, only_fields, prefix, master_key, user_id).await?;
|
||||
combined.extend(entry_map);
|
||||
}
|
||||
|
||||
Ok(combined)
|
||||
}
|
||||
|
||||
async fn build_entry_env_map(
|
||||
pool: &PgPool,
|
||||
entry: &Entry,
|
||||
only_fields: &[String],
|
||||
prefix: &str,
|
||||
master_key: &[u8; 32],
|
||||
_user_id: Option<Uuid>,
|
||||
) -> Result<HashMap<String, String>> {
|
||||
let entry_ids = vec![entry.id];
|
||||
let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?;
|
||||
let all_fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]);
|
||||
|
||||
let fields: Vec<_> = if only_fields.is_empty() {
|
||||
all_fields.iter().collect()
|
||||
} else {
|
||||
all_fields
|
||||
.iter()
|
||||
.filter(|f| only_fields.contains(&f.name))
|
||||
.collect()
|
||||
};
|
||||
|
||||
let effective_prefix = env_prefix(entry, prefix);
|
||||
let mut map = HashMap::new();
|
||||
|
||||
for f in fields {
|
||||
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
|
||||
let key = format!(
|
||||
"{}_{}",
|
||||
effective_prefix,
|
||||
f.name.to_uppercase().replace(['-', '.'], "_")
|
||||
);
|
||||
map.insert(key, json_to_env_string(&decrypted));
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
fn env_prefix(entry: &Entry, prefix: &str) -> String {
|
||||
let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_");
|
||||
if prefix.is_empty() {
|
||||
name_part
|
||||
} else {
|
||||
let normalized = prefix.to_uppercase().replace(['-', '.', ' '], "_");
|
||||
let normalized = normalized.trim_end_matches('_');
|
||||
format!("{}_{}", normalized, name_part)
|
||||
}
|
||||
}
|
||||
|
||||
fn json_to_env_string(v: &Value) -> String {
|
||||
match v {
|
||||
Value::String(s) => s.clone(),
|
||||
Value::Null => String::new(),
|
||||
other => other.to_string(),
|
||||
}
|
||||
}
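Worked example of the variable-naming scheme implemented by `env_prefix` and the field-name normalization above (illustrative values only):

// For an entry named "my-api.prod" with prefix "app" and a secret field "db-password":
//   env_prefix(entry, "app")  -> "APP_MY_API_PROD"
//   resulting map key         -> "APP_MY_API_PROD_DB_PASSWORD"
// With an empty prefix the key would be "MY_API_PROD_DB_PASSWORD".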
140 crates/secrets-core/src/service/export.rs (new file)
@@ -0,0 +1,140 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::models::{ExportData, ExportEntry, ExportFormat};
|
||||
use crate::service::search::{fetch_entries, fetch_secrets_for_entries};
|
||||
|
||||
pub struct ExportParams<'a> {
|
||||
pub folder: Option<&'a str>,
|
||||
pub entry_type: Option<&'a str>,
|
||||
pub name: Option<&'a str>,
|
||||
pub tags: &'a [String],
|
||||
pub query: Option<&'a str>,
|
||||
pub no_secrets: bool,
|
||||
pub user_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
pub async fn export(
|
||||
pool: &PgPool,
|
||||
params: ExportParams<'_>,
|
||||
master_key: Option<&[u8; 32]>,
|
||||
) -> Result<ExportData> {
|
||||
let entries = fetch_entries(
|
||||
pool,
|
||||
params.folder,
|
||||
params.entry_type,
|
||||
params.name,
|
||||
params.tags,
|
||||
params.query,
|
||||
params.user_id,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let entry_ids: Vec<Uuid> = entries.iter().map(|e| e.id).collect();
|
||||
let secrets_map: HashMap<Uuid, Vec<_>> = if !params.no_secrets && !entry_ids.is_empty() {
|
||||
fetch_secrets_for_entries(pool, &entry_ids).await?
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
let mut export_entries: Vec<ExportEntry> = Vec::with_capacity(entries.len());
|
||||
for entry in &entries {
|
||||
let secrets = if params.no_secrets {
|
||||
None
|
||||
} else {
|
||||
let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]);
|
||||
if fields.is_empty() {
|
||||
Some(BTreeMap::new())
|
||||
} else {
|
||||
let mk = master_key
|
||||
.ok_or_else(|| anyhow::anyhow!("master key required to decrypt secrets"))?;
|
||||
let mut map = BTreeMap::new();
|
||||
for f in fields {
|
||||
let decrypted = crypto::decrypt_json(mk, &f.encrypted)?;
|
||||
map.insert(f.name.clone(), decrypted);
|
||||
}
|
||||
Some(map)
|
||||
}
|
||||
};
|
||||
|
||||
export_entries.push(ExportEntry {
|
||||
name: entry.name.clone(),
|
||||
folder: entry.folder.clone(),
|
||||
entry_type: entry.entry_type.clone(),
|
||||
notes: entry.notes.clone(),
|
||||
tags: entry.tags.clone(),
|
||||
metadata: entry.metadata.clone(),
|
||||
secrets,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(ExportData {
|
||||
version: 1,
|
||||
exported_at: chrono::Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
entries: export_entries,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn export_to_file(
|
||||
pool: &PgPool,
|
||||
params: ExportParams<'_>,
|
||||
master_key: Option<&[u8; 32]>,
|
||||
file_path: &str,
|
||||
format_override: Option<&str>,
|
||||
) -> Result<usize> {
|
||||
let format = if let Some(f) = format_override {
|
||||
f.parse::<ExportFormat>()?
|
||||
} else {
|
||||
ExportFormat::from_extension(file_path).unwrap_or(ExportFormat::Json)
|
||||
};
|
||||
|
||||
let data = export(pool, params, master_key).await?;
|
||||
let count = data.entries.len();
|
||||
let serialized = format.serialize(&data)?;
|
||||
std::fs::write(file_path, &serialized)?;
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
pub async fn export_to_string(
|
||||
pool: &PgPool,
|
||||
params: ExportParams<'_>,
|
||||
master_key: Option<&[u8; 32]>,
|
||||
format: &str,
|
||||
) -> Result<String> {
|
||||
let fmt = format.parse::<ExportFormat>()?;
|
||||
let data = export(pool, params, master_key).await?;
|
||||
fmt.serialize(&data)
|
||||
}
|
||||
|
||||
// ── Build helpers for re-encoding values as CLI-style entries ─────────────────
|
||||
|
||||
pub fn build_meta_entries(metadata: &Value) -> Vec<String> {
|
||||
let mut entries = Vec::new();
|
||||
if let Some(obj) = metadata.as_object() {
|
||||
for (k, v) in obj {
|
||||
entries.push(value_to_kv_entry(k, v));
|
||||
}
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
pub fn build_secret_entries(secrets: Option<&BTreeMap<String, Value>>) -> Vec<String> {
|
||||
let mut entries = Vec::new();
|
||||
if let Some(map) = secrets {
|
||||
for (k, v) in map {
|
||||
entries.push(value_to_kv_entry(k, v));
|
||||
}
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
pub fn value_to_kv_entry(key: &str, value: &Value) -> String {
|
||||
match value {
|
||||
Value::String(s) => format!("{}={}", key, s),
|
||||
other => format!("{}:={}", key, other),
|
||||
}
|
||||
}
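Worked example of the `key=value` / `key:=json` encoding produced by `value_to_kv_entry` above (illustrative values only):

// value_to_kv_entry("host", &serde_json::json!("db.local")) -> "host=db.local"    (string: '=')
// value_to_kv_entry("port", &serde_json::json!(5432))       -> "port:=5432"       (non-string: ':=' carries raw JSON)
// value_to_kv_entry("tags", &serde_json::json!(["a", "b"])) -> "tags:=[\"a\",\"b\"]"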
104 crates/secrets-core/src/service/get_secret.rs (new file)
@@ -0,0 +1,104 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::HashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::service::search::{fetch_secrets_for_entries, resolve_entry, resolve_entry_by_id};
|
||||
|
||||
/// Decrypt a single named field from an entry.
|
||||
/// `folder` is optional; if omitted and multiple entries share the name, an error is returned.
|
||||
pub async fn get_secret_field(
|
||||
pool: &PgPool,
|
||||
name: &str,
|
||||
folder: Option<&str>,
|
||||
field_name: &str,
|
||||
master_key: &[u8; 32],
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<Value> {
|
||||
let entry = resolve_entry(pool, name, folder, user_id).await?;
|
||||
|
||||
let entry_ids = vec![entry.id];
|
||||
let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?;
|
||||
let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]);
|
||||
|
||||
let field = fields
|
||||
.iter()
|
||||
.find(|f| f.name == field_name)
|
||||
.ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?;
|
||||
|
||||
crypto::decrypt_json(master_key, &field.encrypted)
|
||||
}
|
||||
|
||||
/// Decrypt all secret fields from an entry. Returns a map field_name → decrypted Value.
|
||||
/// `folder` is optional; if omitted and multiple entries share the name, an error is returned.
|
||||
pub async fn get_all_secrets(
|
||||
pool: &PgPool,
|
||||
name: &str,
|
||||
folder: Option<&str>,
|
||||
master_key: &[u8; 32],
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<HashMap<String, Value>> {
|
||||
let entry = resolve_entry(pool, name, folder, user_id).await?;
|
||||
|
||||
let entry_ids = vec![entry.id];
|
||||
let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?;
|
||||
let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]);
|
||||
|
||||
let mut map = HashMap::new();
|
||||
for f in fields {
|
||||
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
|
||||
map.insert(f.name.clone(), decrypted);
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
/// Decrypt a single named field from an entry, located by its UUID.
|
||||
pub async fn get_secret_field_by_id(
|
||||
pool: &PgPool,
|
||||
entry_id: Uuid,
|
||||
field_name: &str,
|
||||
master_key: &[u8; 32],
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<Value> {
|
||||
resolve_entry_by_id(pool, entry_id, user_id)
|
||||
.await
|
||||
.map_err(|_| anyhow::anyhow!("Entry with id '{}' not found", entry_id))?;
|
||||
|
||||
let entry_ids = vec![entry_id];
|
||||
let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?;
|
||||
let fields = secrets_map.get(&entry_id).map(Vec::as_slice).unwrap_or(&[]);
|
||||
|
||||
let field = fields
|
||||
.iter()
|
||||
.find(|f| f.name == field_name)
|
||||
.ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?;
|
||||
|
||||
crypto::decrypt_json(master_key, &field.encrypted)
|
||||
}
|
||||
|
||||
/// Decrypt all secret fields from an entry, located by its UUID.
|
||||
/// Returns a map field_name → decrypted Value.
|
||||
pub async fn get_all_secrets_by_id(
|
||||
pool: &PgPool,
|
||||
entry_id: Uuid,
|
||||
master_key: &[u8; 32],
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<HashMap<String, Value>> {
|
||||
// Validate entry exists (and that it belongs to the requesting user)
|
||||
resolve_entry_by_id(pool, entry_id, user_id)
|
||||
.await
|
||||
.map_err(|_| anyhow::anyhow!("Entry with id '{}' not found", entry_id))?;
|
||||
|
||||
let entry_ids = vec![entry_id];
|
||||
let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?;
|
||||
let fields = secrets_map.get(&entry_id).map(Vec::as_slice).unwrap_or(&[]);
|
||||
|
||||
let mut map = HashMap::new();
|
||||
for f in fields {
|
||||
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
|
||||
map.insert(f.name.clone(), decrypted);
|
||||
}
|
||||
Ok(map)
|
||||
}
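Call sketch for the read helpers above (entry name, folder, field name, key, and pool are placeholders; not part of this diff):

// Hypothetical caller: decrypt one field from an entry resolved by name + folder.
async fn read_token(pool: &sqlx::PgPool, master_key: &[u8; 32]) -> anyhow::Result<serde_json::Value> {
    // Errors if the entry is missing, the name is ambiguous without a folder,
    // or the field does not exist on that entry.
    get_secret_field(pool, "my-service", Some("prod"), "api_token", master_key, None).await
}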
64 crates/secrets-core/src/service/history.rs (new file)
@@ -0,0 +1,64 @@
use anyhow::Result;
use serde_json::Value;
use sqlx::PgPool;
use uuid::Uuid;

use crate::service::search::resolve_entry;

#[derive(Debug, serde::Serialize)]
pub struct HistoryEntry {
    pub version: i64,
    pub action: String,
    pub created_at: String,
}

/// Return version history for the entry identified by `name`.
/// `folder` is optional; if omitted and multiple entries share the name, an error is returned.
pub async fn run(
    pool: &PgPool,
    name: &str,
    folder: Option<&str>,
    limit: u32,
    user_id: Option<Uuid>,
) -> Result<Vec<HistoryEntry>> {
    #[derive(sqlx::FromRow)]
    struct Row {
        version: i64,
        action: String,
        created_at: chrono::DateTime<chrono::Utc>,
    }

    let entry = resolve_entry(pool, name, folder, user_id).await?;

    let rows: Vec<Row> = sqlx::query_as(
        "SELECT DISTINCT ON (version) version, action, created_at \
         FROM entries_history \
         WHERE entry_id = $1 \
         ORDER BY version DESC, id DESC \
         LIMIT $2",
    )
    .bind(entry.id)
    .bind(limit as i64)
    .fetch_all(pool)
    .await?;

    Ok(rows
        .into_iter()
        .map(|r| HistoryEntry {
            version: r.version,
            action: r.action,
            created_at: r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        })
        .collect())
}

pub async fn run_json(
    pool: &PgPool,
    name: &str,
    folder: Option<&str>,
    limit: u32,
    user_id: Option<Uuid>,
) -> Result<Value> {
    let entries = run(pool, name, folder, limit, user_id).await?;
    Ok(serde_json::to_value(entries)?)
}
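Shape of the JSON returned by `run_json` above, assuming two snapshots exist (illustrative values only):

// run_json(&pool, "my-service", Some("prod"), 10, None).await?
// -> [
//      { "version": 3, "action": "update", "created_at": "2025-01-02T03:04:05Z" },
//      { "version": 2, "action": "update", "created_at": "2025-01-01T00:00:00Z" }
//    ]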
127 crates/secrets-core/src/service/import.rs (new file)
@@ -0,0 +1,127 @@
|
||||
use anyhow::Result;
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::models::ExportFormat;
|
||||
use crate::service::add::{AddParams, run as add_run};
|
||||
use crate::service::export::{build_meta_entries, build_secret_entries};
|
||||
|
||||
#[derive(Debug, serde::Serialize)]
|
||||
pub struct ImportSummary {
|
||||
pub total: usize,
|
||||
pub inserted: usize,
|
||||
pub skipped: usize,
|
||||
pub failed: usize,
|
||||
pub dry_run: bool,
|
||||
}
|
||||
|
||||
pub struct ImportParams<'a> {
|
||||
pub file: &'a str,
|
||||
pub force: bool,
|
||||
pub dry_run: bool,
|
||||
pub user_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
pub async fn run(
|
||||
pool: &PgPool,
|
||||
params: ImportParams<'_>,
|
||||
master_key: &[u8; 32],
|
||||
) -> Result<ImportSummary> {
|
||||
let format = ExportFormat::from_extension(params.file)?;
|
||||
let content = std::fs::read_to_string(params.file)
|
||||
.map_err(|e| anyhow::anyhow!("Cannot read file '{}': {}", params.file, e))?;
|
||||
let data = format.deserialize(&content)?;
|
||||
|
||||
if data.version != 1 {
|
||||
anyhow::bail!(
|
||||
"Unsupported export version {}. Only version 1 is supported.",
|
||||
data.version
|
||||
);
|
||||
}
|
||||
|
||||
let total = data.entries.len();
|
||||
let mut inserted = 0usize;
|
||||
let mut skipped = 0usize;
|
||||
let mut failed = 0usize;
|
||||
|
||||
for entry in &data.entries {
|
||||
let exists: bool = sqlx::query_scalar(
|
||||
"SELECT EXISTS(SELECT 1 FROM entries \
|
||||
WHERE folder = $1 AND name = $2 AND user_id IS NOT DISTINCT FROM $3)",
|
||||
)
|
||||
.bind(&entry.folder)
|
||||
.bind(&entry.name)
|
||||
.bind(params.user_id)
|
||||
.fetch_one(pool)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
anyhow::anyhow!(
|
||||
"Failed to check entry existence for '{}': {}",
|
||||
entry.name,
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
if exists && !params.force {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Import aborted: conflict on '{}'",
|
||||
entry.name
|
||||
));
|
||||
}
|
||||
|
||||
if params.dry_run {
|
||||
if exists {
|
||||
skipped += 1;
|
||||
} else {
|
||||
inserted += 1;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
let secret_entries = build_secret_entries(entry.secrets.as_ref());
|
||||
let meta_entries = build_meta_entries(&entry.metadata);
|
||||
|
||||
match add_run(
|
||||
pool,
|
||||
AddParams {
|
||||
name: &entry.name,
|
||||
folder: &entry.folder,
|
||||
entry_type: &entry.entry_type,
|
||||
notes: &entry.notes,
|
||||
tags: &entry.tags,
|
||||
meta_entries: &meta_entries,
|
||||
secret_entries: &secret_entries,
|
||||
secret_types: &Default::default(),
|
||||
link_secret_names: &[],
|
||||
user_id: params.user_id,
|
||||
},
|
||||
master_key,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
inserted += 1;
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
name = entry.name,
|
||||
error = %e,
|
||||
"failed to import entry"
|
||||
);
|
||||
failed += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if failed > 0 {
|
||||
return Err(anyhow::anyhow!("{} record(s) failed to import", failed));
|
||||
}
|
||||
|
||||
Ok(ImportSummary {
|
||||
total,
|
||||
inserted,
|
||||
skipped,
|
||||
failed,
|
||||
dry_run: params.dry_run,
|
||||
})
|
||||
}
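Call sketch for the importer above (file path and key are placeholders; not part of this diff):

// Hypothetical caller: dry-run an import to see what would be inserted or skipped.
async fn preview_import(pool: &sqlx::PgPool, master_key: &[u8; 32]) -> anyhow::Result<()> {
    let summary = run(
        pool,
        ImportParams { file: "backup.json", force: false, dry_run: true, user_id: None },
        master_key,
    )
    .await?;
    println!("{} new, {} already present", summary.inserted, summary.skipped);
    Ok(())
}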
13 crates/secrets-core/src/service/mod.rs (new file)
@@ -0,0 +1,13 @@
pub mod add;
pub mod api_key;
pub mod audit_log;
pub mod delete;
pub mod env_map;
pub mod export;
pub mod get_secret;
pub mod history;
pub mod import;
pub mod rollback;
pub mod search;
pub mod update;
pub mod user;
457 crates/secrets-core/src/service/rollback.rs (new file)
@@ -0,0 +1,457 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::db;
|
||||
|
||||
#[derive(Debug, serde::Serialize)]
|
||||
pub struct RollbackResult {
|
||||
pub name: String,
|
||||
pub folder: String,
|
||||
#[serde(rename = "type")]
|
||||
pub entry_type: String,
|
||||
pub restored_version: i64,
|
||||
}
|
||||
|
||||
/// Roll back entry `name` to `to_version` (or the most recent snapshot if None).
|
||||
/// `folder` is optional; if omitted and multiple entries share the name, an error is returned.
|
||||
pub async fn run(
|
||||
pool: &PgPool,
|
||||
name: &str,
|
||||
folder: Option<&str>,
|
||||
to_version: Option<i64>,
|
||||
master_key: &[u8; 32],
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<RollbackResult> {
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct EntryHistoryRow {
|
||||
folder: String,
|
||||
#[sqlx(rename = "type")]
|
||||
entry_type: String,
|
||||
version: i64,
|
||||
action: String,
|
||||
tags: Vec<String>,
|
||||
metadata: Value,
|
||||
}
|
||||
|
||||
// Disambiguate: find the unique entry_id for (name, folder).
|
||||
// Query entries_history by entry_id once we know it; first resolve via name + optional folder.
|
||||
let entry_id: Option<Uuid> = if let Some(uid) = user_id {
|
||||
if let Some(f) = folder {
|
||||
sqlx::query_scalar(
|
||||
"SELECT DISTINCT entry_id FROM entries_history \
|
||||
WHERE name = $1 AND folder = $2 AND user_id = $3 LIMIT 1",
|
||||
)
|
||||
.bind(name)
|
||||
.bind(f)
|
||||
.bind(uid)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
} else {
|
||||
let ids: Vec<Uuid> = sqlx::query_scalar(
|
||||
"SELECT DISTINCT entry_id FROM entries_history \
|
||||
WHERE name = $1 AND user_id = $2",
|
||||
)
|
||||
.bind(name)
|
||||
.bind(uid)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
match ids.len() {
|
||||
0 => None,
|
||||
1 => Some(ids[0]),
|
||||
_ => {
|
||||
let folders: Vec<String> = sqlx::query_scalar(
|
||||
"SELECT DISTINCT folder FROM entries_history \
|
||||
WHERE name = $1 AND user_id = $2",
|
||||
)
|
||||
.bind(name)
|
||||
.bind(uid)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
anyhow::bail!(
|
||||
"Ambiguous: entries named '{}' exist in folders: [{}]. \
|
||||
Specify 'folder' to disambiguate.",
|
||||
name,
|
||||
folders.join(", ")
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if let Some(f) = folder {
|
||||
sqlx::query_scalar(
|
||||
"SELECT DISTINCT entry_id FROM entries_history \
|
||||
WHERE name = $1 AND folder = $2 AND user_id IS NULL LIMIT 1",
|
||||
)
|
||||
.bind(name)
|
||||
.bind(f)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
} else {
|
||||
let ids: Vec<Uuid> = sqlx::query_scalar(
|
||||
"SELECT DISTINCT entry_id FROM entries_history \
|
||||
WHERE name = $1 AND user_id IS NULL",
|
||||
)
|
||||
.bind(name)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
match ids.len() {
|
||||
0 => None,
|
||||
1 => Some(ids[0]),
|
||||
_ => {
|
||||
let folders: Vec<String> = sqlx::query_scalar(
|
||||
"SELECT DISTINCT folder FROM entries_history \
|
||||
WHERE name = $1 AND user_id IS NULL",
|
||||
)
|
||||
.bind(name)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
anyhow::bail!(
|
||||
"Ambiguous: entries named '{}' exist in folders: [{}]. \
|
||||
Specify 'folder' to disambiguate.",
|
||||
name,
|
||||
folders.join(", ")
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let entry_id = entry_id.ok_or_else(|| anyhow::anyhow!("No history found for '{}'", name))?;
|
||||
|
||||
let snap: Option<EntryHistoryRow> = if let Some(ver) = to_version {
|
||||
sqlx::query_as(
|
||||
"SELECT folder, type, version, action, tags, metadata \
|
||||
FROM entries_history \
|
||||
WHERE entry_id = $1 AND version = $2 ORDER BY id ASC LIMIT 1",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.bind(ver)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT folder, type, version, action, tags, metadata \
|
||||
FROM entries_history \
|
||||
WHERE entry_id = $1 ORDER BY id DESC LIMIT 1",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
};
|
||||
|
||||
let snap = snap.ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"No history found for '{}'{}.",
|
||||
name,
|
||||
to_version
|
||||
.map(|v| format!(" at version {}", v))
|
||||
.unwrap_or_default()
|
||||
)
|
||||
})?;
|
||||
|
||||
let snap_secret_snapshot = db::entry_secret_snapshot_from_metadata(&snap.metadata);
|
||||
let snap_metadata = db::strip_secret_snapshot_from_metadata(&snap.metadata);
|
||||
|
||||
let _ = master_key;
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct LiveEntry {
|
||||
id: Uuid,
|
||||
version: i64,
|
||||
folder: String,
|
||||
#[sqlx(rename = "type")]
|
||||
entry_type: String,
|
||||
tags: Vec<String>,
|
||||
metadata: Value,
|
||||
#[allow(dead_code)]
|
||||
notes: String,
|
||||
}
|
||||
|
||||
// Lock the live entry if it exists (matched by entry_id for precision).
|
||||
let live: Option<LiveEntry> = sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE id = $1 FOR UPDATE",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
let live_entry_id = if let Some(ref lr) = live {
|
||||
let history_metadata =
|
||||
match db::metadata_with_secret_snapshot(&mut tx, lr.id, &lr.metadata).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to build secret snapshot for entry history");
|
||||
lr.metadata.clone()
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: lr.id,
|
||||
user_id,
|
||||
folder: &lr.folder,
|
||||
entry_type: &lr.entry_type,
|
||||
name,
|
||||
version: lr.version,
|
||||
action: "rollback",
|
||||
tags: &lr.tags,
|
||||
metadata: &history_metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry before rollback");
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct LiveField {
|
||||
id: Uuid,
|
||||
name: String,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let live_fields: Vec<LiveField> = sqlx::query_as(
|
||||
"SELECT s.id, s.name, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1",
|
||||
)
|
||||
.bind(lr.id)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?;
|
||||
|
||||
for f in &live_fields {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: f.id,
|
||||
name: &f.name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "rollback",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field before rollback");
|
||||
}
|
||||
}
|
||||
|
||||
sqlx::query(
|
||||
"UPDATE entries SET folder = $1, type = $2, tags = $3, metadata = $4, version = version + 1, \
|
||||
updated_at = NOW() WHERE id = $5",
|
||||
)
|
||||
.bind(&snap.folder)
|
||||
.bind(&snap.entry_type)
|
||||
.bind(&snap.tags)
|
||||
.bind(&snap_metadata)
|
||||
.bind(lr.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
lr.id
|
||||
} else {
|
||||
if let Some(uid) = user_id {
|
||||
sqlx::query_scalar(
|
||||
"INSERT INTO entries \
|
||||
(user_id, folder, type, name, notes, tags, metadata, version, updated_at) \
|
||||
VALUES ($1, $2, $3, $4, '', $5, $6, $7, NOW()) RETURNING id",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(&snap.folder)
|
||||
.bind(&snap.entry_type)
|
||||
.bind(name)
|
||||
.bind(&snap.tags)
|
||||
.bind(&snap_metadata)
|
||||
.bind(snap.version)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_scalar(
|
||||
"INSERT INTO entries \
|
||||
(folder, type, name, notes, tags, metadata, version, updated_at) \
|
||||
VALUES ($1, $2, $3, '', $4, $5, $6, NOW()) RETURNING id",
|
||||
)
|
||||
.bind(&snap.folder)
|
||||
.bind(&snap.entry_type)
|
||||
.bind(name)
|
||||
.bind(&snap.tags)
|
||||
.bind(&snap_metadata)
|
||||
.bind(snap.version)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(secret_snapshot) = snap_secret_snapshot {
|
||||
restore_entry_secrets(&mut tx, live_entry_id, user_id, &secret_snapshot).await?;
|
||||
}
|
||||
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
user_id,
|
||||
"rollback",
|
||||
&snap.folder,
|
||||
&snap.entry_type,
|
||||
name,
|
||||
serde_json::json!({
|
||||
"restored_version": snap.version,
|
||||
"original_action": snap.action,
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
Ok(RollbackResult {
|
||||
name: name.to_string(),
|
||||
folder: snap.folder,
|
||||
entry_type: snap.entry_type,
|
||||
restored_version: snap.version,
|
||||
})
|
||||
}
|
||||
|
||||
async fn restore_entry_secrets(
|
||||
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
||||
entry_id: Uuid,
|
||||
user_id: Option<Uuid>,
|
||||
snapshot: &[db::EntrySecretSnapshot],
|
||||
) -> Result<()> {
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct LinkedSecret {
|
||||
id: Uuid,
|
||||
name: String,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
|
||||
let linked: Vec<LinkedSecret> = sqlx::query_as(
|
||||
"SELECT s.id, s.name, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.fetch_all(&mut **tx)
|
||||
.await?;
|
||||
|
||||
let target_names: HashSet<&str> = snapshot.iter().map(|s| s.name.as_str()).collect();
|
||||
|
||||
for s in &linked {
|
||||
if target_names.contains(s.name.as_str()) {
|
||||
continue;
|
||||
}
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: s.id,
|
||||
name: &s.name,
|
||||
encrypted: &s.encrypted,
|
||||
action: "rollback",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret before rollback unlink");
|
||||
}
|
||||
|
||||
sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
|
||||
.bind(entry_id)
|
||||
.bind(s.id)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
|
||||
sqlx::query(
|
||||
"DELETE FROM secrets s \
|
||||
WHERE s.id = $1 \
|
||||
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
|
||||
)
|
||||
.bind(s.id)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
for snap in snapshot {
|
||||
let encrypted = ::hex::decode(&snap.encrypted_hex).map_err(|e| {
|
||||
anyhow::anyhow!("invalid secret snapshot data for '{}': {}", snap.name, e)
|
||||
})?;
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct ExistingSecret {
|
||||
id: Uuid,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
|
||||
let existing: Option<ExistingSecret> = if let Some(uid) = user_id {
|
||||
sqlx::query_as("SELECT id, encrypted FROM secrets WHERE user_id = $1 AND name = $2")
|
||||
.bind(uid)
|
||||
.bind(&snap.name)
|
||||
.fetch_optional(&mut **tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as("SELECT id, encrypted FROM secrets WHERE user_id IS NULL AND name = $1")
|
||||
.bind(&snap.name)
|
||||
.fetch_optional(&mut **tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
let secret_id = if let Some(ex) = existing {
|
||||
if ex.encrypted != encrypted
|
||||
&& let Err(e) = db::snapshot_secret_history(
|
||||
tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: ex.id,
|
||||
name: &snap.name,
|
||||
encrypted: &ex.encrypted,
|
||||
action: "rollback",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret before rollback restore");
|
||||
}
|
||||
sqlx::query(
|
||||
"UPDATE secrets SET type = $1, encrypted = $2, version = version + 1, updated_at = NOW() \
|
||||
WHERE id = $3",
|
||||
)
|
||||
.bind(&snap.secret_type)
|
||||
.bind(&encrypted)
|
||||
.bind(ex.id)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
ex.id
|
||||
} else if let Some(uid) = user_id {
|
||||
sqlx::query_scalar(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(&snap.name)
|
||||
.bind(&snap.secret_type)
|
||||
.bind(&encrypted)
|
||||
.fetch_one(&mut **tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_scalar(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES (NULL, $1, $2, $3) RETURNING id",
|
||||
)
|
||||
.bind(&snap.name)
|
||||
.bind(&snap.secret_type)
|
||||
.bind(&encrypted)
|
||||
.fetch_one(&mut **tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.bind(secret_id)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
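Call sketch for `run` above (entry name, folder, and version are placeholders; not part of this diff):

// Hypothetical caller: restore "my-service" in folder "prod" to snapshot version 2.
async fn rollback_to_v2(pool: &sqlx::PgPool, master_key: &[u8; 32]) -> anyhow::Result<()> {
    let restored = run(pool, "my-service", Some("prod"), Some(2), master_key, None).await?;
    println!("restored {} to version {}", restored.name, restored.restored_version);
    Ok(())
}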
423 crates/secrets-core/src/service/search.rs (new file)
@@ -0,0 +1,423 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::HashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::models::{Entry, SecretField};
|
||||
|
||||
pub const FETCH_ALL_LIMIT: u32 = 100_000;
|
||||
|
||||
/// Build an ILIKE pattern for fuzzy matching, escaping `%` and `_` literals.
|
||||
pub fn ilike_pattern(value: &str) -> String {
|
||||
format!(
|
||||
"%{}%",
|
||||
value
|
||||
.replace('\\', "\\\\")
|
||||
.replace('%', "\\%")
|
||||
.replace('_', "\\_")
|
||||
)
|
||||
}
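Worked example of the escaping performed by `ilike_pattern` above (illustrative input only):

// ilike_pattern("50%_off") -> "%50\\%\\_off%"
// i.e. the literal pattern  %50\%\_off%  — '%' and '_' from the input match themselves,
// while the surrounding '%' wildcards turn the whole thing into a substring search.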
|
||||
|
||||
pub struct SearchParams<'a> {
|
||||
pub folder: Option<&'a str>,
|
||||
pub entry_type: Option<&'a str>,
|
||||
pub name: Option<&'a str>,
|
||||
/// Fuzzy match on `entries.name` only (ILIKE with escaped `%`/`_`).
|
||||
pub name_query: Option<&'a str>,
|
||||
pub tags: &'a [String],
|
||||
pub query: Option<&'a str>,
|
||||
pub sort: &'a str,
|
||||
pub limit: u32,
|
||||
pub offset: u32,
|
||||
/// Multi-user: filter by this user_id. None = single-user / no filter.
|
||||
pub user_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Serialize)]
|
||||
pub struct SearchResult {
|
||||
pub entries: Vec<Entry>,
|
||||
pub secret_schemas: HashMap<Uuid, Vec<SecretField>>,
|
||||
}
|
||||
|
||||
/// List `entries` rows matching params (paged, ordered per `params.sort`).
|
||||
/// Does not read the `secrets` table.
|
||||
pub async fn list_entries(pool: &PgPool, params: SearchParams<'_>) -> Result<Vec<Entry>> {
|
||||
fetch_entries_paged(pool, ¶ms).await
|
||||
}
|
||||
|
||||
/// Count `entries` rows matching the same filters as [`list_entries`] (ignores `sort` / `limit` / `offset`).
|
||||
/// Does not read the `secrets` table.
|
||||
pub async fn count_entries(pool: &PgPool, a: &SearchParams<'_>) -> Result<i64> {
|
||||
let (where_clause, _) = entry_where_clause_and_next_idx(a);
|
||||
let sql = format!("SELECT COUNT(*)::bigint FROM entries {where_clause}");
|
||||
let mut q = sqlx::query_scalar::<_, i64>(&sql);
|
||||
if let Some(uid) = a.user_id {
|
||||
q = q.bind(uid);
|
||||
}
|
||||
if let Some(v) = a.folder {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.entry_type {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.name {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.name_query {
|
||||
let pattern = ilike_pattern(v);
|
||||
q = q.bind(pattern);
|
||||
}
|
||||
for tag in a.tags {
|
||||
q = q.bind(tag);
|
||||
}
|
||||
if let Some(v) = a.query {
|
||||
let pattern = ilike_pattern(v);
|
||||
q = q.bind(pattern);
|
||||
}
|
||||
let n = q.fetch_one(pool).await?;
|
||||
Ok(n)
|
||||
}
|
||||
|
||||
/// Shared WHERE clause and the next `$n` index (for LIMIT/OFFSET in paged queries).
|
||||
fn entry_where_clause_and_next_idx(a: &SearchParams<'_>) -> (String, i32) {
|
||||
let mut conditions: Vec<String> = Vec::new();
|
||||
let mut idx: i32 = 1;
|
||||
|
||||
if a.user_id.is_some() {
|
||||
conditions.push(format!("user_id = ${}", idx));
|
||||
idx += 1;
|
||||
} else {
|
||||
conditions.push("user_id IS NULL".to_string());
|
||||
}
|
||||
|
||||
if a.folder.is_some() {
|
||||
conditions.push(format!("folder = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if a.entry_type.is_some() {
|
||||
conditions.push(format!("type = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if a.name.is_some() {
|
||||
conditions.push(format!("name = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if a.name_query.is_some() {
|
||||
conditions.push(format!("name ILIKE ${} ESCAPE '\\'", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if !a.tags.is_empty() {
|
||||
let placeholders: Vec<String> = a
|
||||
.tags
|
||||
.iter()
|
||||
.map(|_| {
|
||||
let p = format!("${}", idx);
|
||||
idx += 1;
|
||||
p
|
||||
})
|
||||
.collect();
|
||||
conditions.push(format!(
|
||||
"tags @> ARRAY[{}]::text[]",
|
||||
placeholders.join(", ")
|
||||
));
|
||||
}
|
||||
if a.query.is_some() {
|
||||
conditions.push(format!(
|
||||
"(name ILIKE ${i} ESCAPE '\\' OR folder ILIKE ${i} ESCAPE '\\' \
|
||||
OR type ILIKE ${i} ESCAPE '\\' OR notes ILIKE ${i} ESCAPE '\\' \
|
||||
OR metadata::text ILIKE ${i} ESCAPE '\\' \
|
||||
OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))",
|
||||
i = idx
|
||||
));
|
||||
idx += 1;
|
||||
}
|
||||
|
||||
let where_clause = if conditions.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
format!("WHERE {}", conditions.join(" AND "))
|
||||
};
|
||||
(where_clause, idx)
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, params: SearchParams<'_>) -> Result<SearchResult> {
|
||||
let entries = fetch_entries_paged(pool, ¶ms).await?;
|
||||
let entry_ids: Vec<Uuid> = entries.iter().map(|e| e.id).collect();
|
||||
let secret_schemas = if !entry_ids.is_empty() {
|
||||
fetch_secret_schemas(pool, &entry_ids).await?
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
Ok(SearchResult {
|
||||
entries,
|
||||
secret_schemas,
|
||||
})
|
||||
}
|
||||
|
||||
/// Fetch entries matching the given filters — returns all matching entries up to FETCH_ALL_LIMIT.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn fetch_entries(
|
||||
pool: &PgPool,
|
||||
folder: Option<&str>,
|
||||
entry_type: Option<&str>,
|
||||
name: Option<&str>,
|
||||
tags: &[String],
|
||||
query: Option<&str>,
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<Vec<Entry>> {
|
||||
let params = SearchParams {
|
||||
folder,
|
||||
entry_type,
|
||||
name,
|
||||
name_query: None,
|
||||
tags,
|
||||
query,
|
||||
sort: "name",
|
||||
limit: FETCH_ALL_LIMIT,
|
||||
offset: 0,
|
||||
user_id,
|
||||
};
|
||||
list_entries(pool, params).await
|
||||
}
|
||||
|
||||
async fn fetch_entries_paged(pool: &PgPool, a: &SearchParams<'_>) -> Result<Vec<Entry>> {
|
||||
let (where_clause, idx) = entry_where_clause_and_next_idx(a);
|
||||
|
||||
let order = match a.sort {
|
||||
"updated" => "updated_at DESC",
|
||||
"created" => "created_at DESC",
|
||||
_ => "name ASC",
|
||||
};
|
||||
|
||||
let limit_idx = idx;
|
||||
let offset_idx = idx + 1;
|
||||
|
||||
let sql = format!(
|
||||
"SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \
|
||||
created_at, updated_at \
|
||||
FROM entries {where_clause} ORDER BY {order} LIMIT ${limit_idx} OFFSET ${offset_idx}"
|
||||
);
|
||||
|
||||
let mut q = sqlx::query_as::<_, EntryRaw>(&sql);
|
||||
if let Some(uid) = a.user_id {
|
||||
q = q.bind(uid);
|
||||
}
|
||||
if let Some(v) = a.folder {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.entry_type {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.name {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.name_query {
|
||||
let pattern = ilike_pattern(v);
|
||||
q = q.bind(pattern);
|
||||
}
|
||||
for tag in a.tags {
|
||||
q = q.bind(tag);
|
||||
}
|
||||
if let Some(v) = a.query {
|
||||
let pattern = ilike_pattern(v);
|
||||
q = q.bind(pattern);
|
||||
}
|
||||
q = q.bind(a.limit as i64).bind(a.offset as i64);
|
||||
|
||||
let rows = q.fetch_all(pool).await?;
|
||||
Ok(rows.into_iter().map(Entry::from).collect())
|
||||
}
|
||||
|
||||
/// Fetch secret field names for a set of entry ids (no decryption).
|
||||
pub async fn fetch_secret_schemas(
|
||||
pool: &PgPool,
|
||||
entry_ids: &[Uuid],
|
||||
) -> Result<HashMap<Uuid, Vec<SecretField>>> {
|
||||
if entry_ids.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let fields: Vec<EntrySecretRow> = sqlx::query_as(
|
||||
"SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = ANY($1) \
|
||||
ORDER BY es.entry_id, es.sort_order, s.name",
|
||||
)
|
||||
.bind(entry_ids)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
|
||||
for f in fields {
|
||||
let entry_id = f.entry_id;
|
||||
map.entry(entry_id).or_default().push(f.secret());
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
/// Fetch all secret fields (including encrypted bytes) for a set of entry ids.
|
||||
pub async fn fetch_secrets_for_entries(
|
||||
pool: &PgPool,
|
||||
entry_ids: &[Uuid],
|
||||
) -> Result<HashMap<Uuid, Vec<SecretField>>> {
|
||||
if entry_ids.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let fields: Vec<EntrySecretRow> = sqlx::query_as(
|
||||
"SELECT es.entry_id, s.id, s.user_id, s.name, s.type, s.encrypted, s.version, s.created_at, s.updated_at \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = ANY($1) \
|
||||
ORDER BY es.entry_id, es.sort_order, s.name",
|
||||
)
|
||||
.bind(entry_ids)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let mut map: HashMap<Uuid, Vec<SecretField>> = HashMap::new();
|
||||
for f in fields {
|
||||
let entry_id = f.entry_id;
|
||||
map.entry(entry_id).or_default().push(f.secret());
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
/// Resolve exactly one entry by its UUID primary key.
|
||||
///
|
||||
/// Returns an error if the entry does not exist or does not belong to the given user.
|
||||
pub async fn resolve_entry_by_id(
|
||||
pool: &PgPool,
|
||||
id: Uuid,
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<crate::models::Entry> {
|
||||
let row: Option<EntryRaw> = if let Some(uid) = user_id {
|
||||
sqlx::query_as(
|
||||
"SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \
|
||||
created_at, updated_at FROM entries WHERE id = $1 AND user_id = $2",
|
||||
)
|
||||
.bind(id)
|
||||
.bind(uid)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, user_id, folder, type, name, notes, tags, metadata, version, \
|
||||
created_at, updated_at FROM entries WHERE id = $1 AND user_id IS NULL",
|
||||
)
|
||||
.bind(id)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
};
|
||||
row.map(Entry::from)
|
||||
.ok_or_else(|| anyhow::anyhow!("Entry with id '{}' not found", id))
|
||||
}
|
||||
|
||||
/// Resolve exactly one entry by name, with optional folder for disambiguation.
|
||||
///
|
||||
/// - If `folder` is provided: exact `(folder, name)` match.
|
||||
/// - If `folder` is None and exactly one entry matches: returns it.
|
||||
/// - If `folder` is None and multiple entries match: returns an error listing
|
||||
/// the folders and asking the caller to specify one.
|
||||
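///
/// Illustrative call (names are placeholders):
/// `resolve_entry(&pool, "db-password", Some("prod"), None).await` returns the single
/// global (no-user) entry named `db-password` in folder `prod`, or an error otherwise.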
pub async fn resolve_entry(
|
||||
pool: &PgPool,
|
||||
name: &str,
|
||||
folder: Option<&str>,
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<crate::models::Entry> {
|
||||
let entries = fetch_entries(pool, folder, None, Some(name), &[], None, user_id).await?;
|
||||
match entries.len() {
|
||||
0 => {
|
||||
if let Some(f) = folder {
|
||||
anyhow::bail!("Not found: '{}' in folder '{}'", name, f)
|
||||
} else {
|
||||
anyhow::bail!("Not found: '{}'", name)
|
||||
}
|
||||
}
|
||||
1 => Ok(entries.into_iter().next().unwrap()),
|
||||
_ => {
|
||||
let folders: Vec<&str> = entries.iter().map(|e| e.folder.as_str()).collect();
|
||||
anyhow::bail!(
|
||||
"Ambiguous: {} entries named '{}' found in folders: [{}]. \
|
||||
Specify 'folder' to disambiguate.",
|
||||
entries.len(),
|
||||
name,
|
||||
folders.join(", ")
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Internal raw row (because user_id is nullable in DB) ─────────────────────
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct EntryRaw {
|
||||
id: Uuid,
|
||||
user_id: Option<Uuid>,
|
||||
folder: String,
|
||||
#[sqlx(rename = "type")]
|
||||
entry_type: String,
|
||||
name: String,
|
||||
notes: String,
|
||||
tags: Vec<String>,
|
||||
metadata: Value,
|
||||
version: i64,
|
||||
created_at: chrono::DateTime<chrono::Utc>,
|
||||
updated_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
impl From<EntryRaw> for Entry {
|
||||
fn from(r: EntryRaw) -> Self {
|
||||
Entry {
|
||||
id: r.id,
|
||||
user_id: r.user_id,
|
||||
folder: r.folder,
|
||||
entry_type: r.entry_type,
|
||||
name: r.name,
|
||||
notes: r.notes,
|
||||
tags: r.tags,
|
||||
metadata: r.metadata,
|
||||
version: r.version,
|
||||
created_at: r.created_at,
|
||||
updated_at: r.updated_at,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct EntrySecretRow {
|
||||
entry_id: Uuid,
|
||||
id: Uuid,
|
||||
user_id: Option<Uuid>,
|
||||
name: String,
|
||||
#[sqlx(rename = "type")]
|
||||
secret_type: String,
|
||||
encrypted: Vec<u8>,
|
||||
version: i64,
|
||||
created_at: chrono::DateTime<chrono::Utc>,
|
||||
updated_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
impl EntrySecretRow {
|
||||
fn secret(self) -> SecretField {
|
||||
SecretField {
|
||||
id: self.id,
|
||||
user_id: self.user_id,
|
||||
name: self.name,
|
||||
secret_type: self.secret_type,
|
||||
encrypted: self.encrypted,
|
||||
version: self.version,
|
||||
created_at: self.created_at,
|
||||
updated_at: self.updated_at,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn ilike_pattern_escapes_backslash_percent_and_underscore() {
        assert_eq!(ilike_pattern(r"hello\_100%"), r"%hello\\\_100\%%");
    }
}
572
crates/secrets-core/src/service/update.rs
Normal file
@@ -0,0 +1,572 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::{Map, Value};
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::db;
|
||||
use crate::error::{AppError, DbErrorContext};
|
||||
use crate::models::{EntryRow, EntryWriteRow};
|
||||
use crate::service::add::{
|
||||
collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path,
|
||||
parse_kv, remove_path,
|
||||
};
|
||||
|
||||
#[derive(Debug, serde::Serialize)]
|
||||
pub struct UpdateResult {
|
||||
pub name: String,
|
||||
pub folder: String,
|
||||
#[serde(rename = "type")]
|
||||
pub entry_type: String,
|
||||
pub add_tags: Vec<String>,
|
||||
pub remove_tags: Vec<String>,
|
||||
pub meta_keys: Vec<String>,
|
||||
pub remove_meta: Vec<String>,
|
||||
pub secret_keys: Vec<String>,
|
||||
pub remove_secrets: Vec<String>,
|
||||
pub linked_secrets: Vec<String>,
|
||||
pub unlinked_secrets: Vec<String>,
|
||||
}
|
||||
|
||||
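/// Inputs for [`run`]. List fields may be empty; only the changes actually provided are applied.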
pub struct UpdateParams<'a> {
|
||||
pub name: &'a str,
|
||||
/// Optional folder for disambiguation when multiple entries share the same name.
|
||||
pub folder: Option<&'a str>,
|
||||
pub notes: Option<&'a str>,
|
||||
pub add_tags: &'a [String],
|
||||
pub remove_tags: &'a [String],
|
||||
pub meta_entries: &'a [String],
|
||||
pub remove_meta: &'a [String],
|
||||
pub secret_entries: &'a [String],
|
||||
pub secret_types: &'a std::collections::HashMap<String, String>,
|
||||
pub remove_secrets: &'a [String],
|
||||
pub link_secret_names: &'a [String],
|
||||
pub unlink_secret_names: &'a [String],
|
||||
pub user_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
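/// Apply a partial update to the entry matching `name` (and optional `folder`):
/// snapshots history, merges tags and metadata, upserts or removes encrypted secret
/// fields, links/unlinks shared secrets, and bumps the entry version, all inside one
/// transaction guarded by `FOR UPDATE` and an optimistic version check.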
pub async fn run(
|
||||
pool: &PgPool,
|
||||
params: UpdateParams<'_>,
|
||||
master_key: &[u8; 32],
|
||||
) -> Result<UpdateResult> {
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
// Fetch matching rows with FOR UPDATE; use folder when provided to resolve ambiguity.
|
||||
let rows: Vec<EntryRow> = if let Some(uid) = params.user_id {
|
||||
if let Some(folder) = params.folder {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id = $1 AND folder = $2 AND name = $3 FOR UPDATE",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(folder)
|
||||
.bind(params.name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id = $1 AND name = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(uid)
|
||||
.bind(params.name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
}
|
||||
} else if let Some(folder) = params.folder {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id IS NULL AND folder = $1 AND name = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(folder)
|
||||
.bind(params.name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, folder, type, tags, metadata, notes FROM entries \
|
||||
WHERE user_id IS NULL AND name = $1 FOR UPDATE",
|
||||
)
|
||||
.bind(params.name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
let row = match rows.len() {
|
||||
0 => {
|
||||
tx.rollback().await?;
|
||||
return Err(AppError::NotFoundEntry.into());
|
||||
}
|
||||
1 => rows.into_iter().next().unwrap(),
|
||||
_ => {
|
||||
tx.rollback().await?;
|
||||
let folders: Vec<&str> = rows.iter().map(|r| r.folder.as_str()).collect();
|
||||
anyhow::bail!(
|
||||
"Ambiguous: {} entries named '{}' found in folders: [{}]. \
|
||||
Specify 'folder' to disambiguate.",
|
||||
rows.len(),
|
||||
params.name,
|
||||
folders.join(", ")
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
let history_metadata =
|
||||
match db::metadata_with_secret_snapshot(&mut tx, row.id, &row.metadata).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to build secret snapshot for entry history");
|
||||
row.metadata.clone()
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: row.id,
|
||||
user_id: params.user_id,
|
||||
folder: &row.folder,
|
||||
entry_type: &row.entry_type,
|
||||
name: params.name,
|
||||
version: row.version,
|
||||
action: "update",
|
||||
tags: &row.tags,
|
||||
metadata: &history_metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history before update");
|
||||
}
|
||||
|
||||
let mut tags: Vec<String> = row.tags.clone();
|
||||
for t in params.add_tags {
|
||||
if !tags.contains(t) {
|
||||
tags.push(t.clone());
|
||||
}
|
||||
}
|
||||
tags.retain(|t| !params.remove_tags.contains(t));
|
||||
|
||||
let mut meta_map: Map<String, Value> = match row.metadata.clone() {
|
||||
Value::Object(m) => m,
|
||||
_ => Map::new(),
|
||||
};
|
||||
for entry in params.meta_entries {
|
||||
let (path, value) = parse_kv(entry)?;
|
||||
insert_path(&mut meta_map, &path, value)?;
|
||||
}
|
||||
for key in params.remove_meta {
|
||||
let path = parse_key_path(key)?;
|
||||
remove_path(&mut meta_map, &path)?;
|
||||
}
|
||||
let metadata = Value::Object(meta_map);
|
||||
|
||||
let new_notes = params.notes.unwrap_or(&row.notes);
|
||||
|
||||
let result = sqlx::query(
|
||||
"UPDATE entries SET tags = $1, metadata = $2, notes = $3, \
|
||||
version = version + 1, updated_at = NOW() \
|
||||
WHERE id = $4 AND version = $5",
|
||||
)
|
||||
.bind(&tags)
|
||||
.bind(&metadata)
|
||||
.bind(new_notes)
|
||||
.bind(row.id)
|
||||
.bind(row.version)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if result.rows_affected() == 0 {
|
||||
tx.rollback().await?;
|
||||
return Err(AppError::ConcurrentModification.into());
|
||||
}
|
||||
|
||||
for entry in params.secret_entries {
|
||||
let (path, field_value) = parse_kv(entry)?;
|
||||
let flat = flatten_json_fields("", &{
|
||||
let mut m = Map::new();
|
||||
insert_path(&mut m, &path, field_value)?;
|
||||
Value::Object(m)
|
||||
});
|
||||
|
||||
for (field_name, fv) in &flat {
|
||||
let encrypted = crypto::encrypt_json(master_key, fv)?;
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct ExistingField {
|
||||
id: Uuid,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let ef: Option<ExistingField> = sqlx::query_as(
|
||||
"SELECT s.id, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1 AND s.name = $2",
|
||||
)
|
||||
.bind(row.id)
|
||||
.bind(field_name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some(ef) = &ef
|
||||
&& let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: ef.id,
|
||||
name: field_name,
|
||||
encrypted: &ef.encrypted,
|
||||
action: "update",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field history");
|
||||
}
|
||||
|
||||
if let Some(ef) = ef {
|
||||
sqlx::query(
|
||||
"UPDATE secrets SET encrypted = $1, version = version + 1, updated_at = NOW() WHERE id = $2",
|
||||
)
|
||||
.bind(&encrypted)
|
||||
.bind(ef.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
} else {
|
||||
let secret_type = params
|
||||
.secret_types
|
||||
.get(field_name)
|
||||
.map(|s| s.as_str())
|
||||
.unwrap_or("text");
|
||||
let secret_id: Uuid = sqlx::query_scalar(
|
||||
"INSERT INTO secrets (user_id, name, type, encrypted) VALUES ($1, $2, $3, $4) RETURNING id",
|
||||
)
|
||||
.bind(params.user_id)
|
||||
.bind(field_name.to_string())
|
||||
.bind(secret_type)
|
||||
.bind(&encrypted)
|
||||
.fetch_one(&mut *tx)
|
||||
.await
|
||||
.map_err(|e| AppError::from_db_error(e, DbErrorContext::secret_name(field_name)))?;
|
||||
sqlx::query("INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2)")
|
||||
.bind(row.id)
|
||||
.bind(secret_id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for key in params.remove_secrets {
|
||||
let path = parse_key_path(key)?;
|
||||
let field_name = path.join(".");
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct FieldToDelete {
|
||||
id: Uuid,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let field: Option<FieldToDelete> = sqlx::query_as(
|
||||
"SELECT s.id, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1 AND s.name = $2",
|
||||
)
|
||||
.bind(row.id)
|
||||
.bind(&field_name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some(f) = field {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: f.id,
|
||||
name: &field_name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "delete",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field history before delete");
|
||||
}
|
||||
sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
|
||||
.bind(row.id)
|
||||
.bind(f.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
sqlx::query(
|
||||
"DELETE FROM secrets s \
|
||||
WHERE s.id = $1 \
|
||||
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
|
||||
)
|
||||
.bind(f.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
// Link existing secrets by name
|
||||
let mut linked_secrets = Vec::new();
|
||||
for link_name in params.link_secret_names {
|
||||
let link_name = link_name.trim();
|
||||
if link_name.is_empty() {
|
||||
anyhow::bail!("link_secret_names contains an empty name");
|
||||
}
|
||||
let secret_ids: Vec<Uuid> = if let Some(uid) = params.user_id {
|
||||
sqlx::query_scalar("SELECT id FROM secrets WHERE user_id = $1 AND name = $2")
|
||||
.bind(uid)
|
||||
.bind(link_name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_scalar("SELECT id FROM secrets WHERE user_id IS NULL AND name = $1")
|
||||
.bind(link_name)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?
|
||||
};
|
||||
|
||||
match secret_ids.len() {
|
||||
0 => anyhow::bail!("Not found: secret named '{}'", link_name),
|
||||
1 => {
|
||||
sqlx::query(
|
||||
"INSERT INTO entry_secrets (entry_id, secret_id) VALUES ($1, $2) ON CONFLICT DO NOTHING",
|
||||
)
|
||||
.bind(row.id)
|
||||
.bind(secret_ids[0])
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
linked_secrets.push(link_name.to_string());
|
||||
}
|
||||
n => anyhow::bail!(
|
||||
"Ambiguous: {} secrets named '{}' found. Please deduplicate names first.",
|
||||
n,
|
||||
link_name
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
// Unlink secrets by name
|
||||
let mut unlinked_secrets = Vec::new();
|
||||
for unlink_name in params.unlink_secret_names {
|
||||
let unlink_name = unlink_name.trim();
|
||||
if unlink_name.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct SecretToUnlink {
|
||||
id: Uuid,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let secret: Option<SecretToUnlink> = sqlx::query_as(
|
||||
"SELECT s.id, s.encrypted \
|
||||
FROM entry_secrets es \
|
||||
JOIN secrets s ON s.id = es.secret_id \
|
||||
WHERE es.entry_id = $1 AND s.name = $2",
|
||||
)
|
||||
.bind(row.id)
|
||||
.bind(unlink_name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some(s) = secret {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
secret_id: s.id,
|
||||
name: unlink_name,
|
||||
encrypted: &s.encrypted,
|
||||
action: "delete",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field history before unlink");
|
||||
}
|
||||
sqlx::query("DELETE FROM entry_secrets WHERE entry_id = $1 AND secret_id = $2")
|
||||
.bind(row.id)
|
||||
.bind(s.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
sqlx::query(
|
||||
"DELETE FROM secrets s \
|
||||
WHERE s.id = $1 \
|
||||
AND NOT EXISTS (SELECT 1 FROM entry_secrets es WHERE es.secret_id = s.id)",
|
||||
)
|
||||
.bind(s.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
unlinked_secrets.push(unlink_name.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
let meta_keys = collect_key_paths(params.meta_entries)?;
|
||||
let remove_meta_keys = collect_field_paths(params.remove_meta)?;
|
||||
let secret_keys = collect_key_paths(params.secret_entries)?;
|
||||
let remove_secret_keys = collect_field_paths(params.remove_secrets)?;
|
||||
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
params.user_id,
|
||||
"update",
|
||||
&row.folder,
|
||||
&row.entry_type,
|
||||
params.name,
|
||||
serde_json::json!({
|
||||
"add_tags": params.add_tags,
|
||||
"remove_tags": params.remove_tags,
|
||||
"meta_keys": meta_keys,
|
||||
"remove_meta": remove_meta_keys,
|
||||
"secret_keys": secret_keys,
|
||||
"remove_secrets": remove_secret_keys,
|
||||
"linked_secrets": linked_secrets,
|
||||
"unlinked_secrets": unlinked_secrets,
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
Ok(UpdateResult {
|
||||
name: params.name.to_string(),
|
||||
folder: row.folder.clone(),
|
||||
entry_type: row.entry_type.clone(),
|
||||
add_tags: params.add_tags.to_vec(),
|
||||
remove_tags: params.remove_tags.to_vec(),
|
||||
meta_keys,
|
||||
remove_meta: remove_meta_keys,
|
||||
secret_keys,
|
||||
remove_secrets: remove_secret_keys,
|
||||
linked_secrets,
|
||||
unlinked_secrets,
|
||||
})
|
||||
}
|
||||
|
||||
/// Update non-sensitive entry columns by primary key (multi-tenant: `user_id` must match).
|
||||
/// Does not read or modify `secrets` rows.
|
||||
pub struct UpdateEntryFieldsByIdParams<'a> {
|
||||
pub folder: &'a str,
|
||||
pub entry_type: &'a str,
|
||||
pub name: &'a str,
|
||||
pub notes: &'a str,
|
||||
pub tags: &'a [String],
|
||||
pub metadata: &'a serde_json::Value,
|
||||
}
|
||||
|
||||
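/// Overwrite the listed columns of the entry `entry_id` owned by `user_id`, after
/// length validation and a history snapshot; bumps `version` and maps a unique
/// constraint violation (code 23505) to `ConflictEntryName`.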
pub async fn update_fields_by_id(
|
||||
pool: &PgPool,
|
||||
entry_id: Uuid,
|
||||
user_id: Uuid,
|
||||
params: UpdateEntryFieldsByIdParams<'_>,
|
||||
) -> Result<()> {
|
||||
if params.folder.chars().count() > 128 {
|
||||
anyhow::bail!("folder must be at most 128 characters");
|
||||
}
|
||||
if params.entry_type.chars().count() > 64 {
|
||||
anyhow::bail!("type must be at most 64 characters");
|
||||
}
|
||||
if params.name.chars().count() > 256 {
|
||||
anyhow::bail!("name must be at most 256 characters");
|
||||
}
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
let row: Option<EntryWriteRow> = sqlx::query_as(
|
||||
"SELECT id, version, folder, type, name, tags, metadata, notes FROM entries \
|
||||
WHERE id = $1 AND user_id = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.bind(user_id)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
let row = match row {
|
||||
Some(r) => r,
|
||||
None => {
|
||||
tx.rollback().await?;
|
||||
return Err(AppError::NotFoundEntry.into());
|
||||
}
|
||||
};
|
||||
|
||||
let history_metadata =
|
||||
match db::metadata_with_secret_snapshot(&mut tx, row.id, &row.metadata).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to build secret snapshot for entry history");
|
||||
row.metadata.clone()
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: row.id,
|
||||
user_id: Some(user_id),
|
||||
folder: &row.folder,
|
||||
entry_type: &row.entry_type,
|
||||
name: &row.name,
|
||||
version: row.version,
|
||||
action: "update",
|
||||
tags: &row.tags,
|
||||
metadata: &history_metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history before web update");
|
||||
}
|
||||
|
||||
let entry_type = params.entry_type.trim();
|
||||
|
||||
let res = sqlx::query(
|
||||
"UPDATE entries SET folder = $1, type = $2, name = $3, notes = $4, tags = $5, metadata = $6, \
|
||||
version = version + 1, updated_at = NOW() \
|
||||
WHERE id = $7 AND version = $8",
|
||||
)
|
||||
.bind(params.folder)
|
||||
.bind(entry_type)
|
||||
.bind(params.name)
|
||||
.bind(params.notes)
|
||||
.bind(params.tags)
|
||||
.bind(params.metadata)
|
||||
.bind(row.id)
|
||||
.bind(row.version)
|
||||
.execute(&mut *tx)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
if let sqlx::Error::Database(ref d) = e
|
||||
&& d.code().as_deref() == Some("23505")
|
||||
{
|
||||
return AppError::ConflictEntryName {
|
||||
folder: params.folder.to_string(),
|
||||
name: params.name.to_string(),
|
||||
};
|
||||
}
|
||||
AppError::Internal(e.into())
|
||||
})?;
|
||||
|
||||
if res.rows_affected() == 0 {
|
||||
tx.rollback().await?;
|
||||
return Err(AppError::ConcurrentModification.into());
|
||||
}
|
||||
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
Some(user_id),
|
||||
"update",
|
||||
params.folder,
|
||||
entry_type,
|
||||
params.name,
|
||||
serde_json::json!({
|
||||
"source": "web",
|
||||
"entry_id": entry_id,
|
||||
"fields": ["folder", "type", "name", "notes", "tags", "metadata"],
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
tx.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
348
crates/secrets-core/src/service/user.rs
Normal file
@@ -0,0 +1,348 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::models::{OauthAccount, User};
|
||||
|
||||
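/// Normalized profile data returned by an OAuth provider, used to find or create the
/// local user and to link additional accounts.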
pub struct OAuthProfile {
|
||||
pub provider: String,
|
||||
pub provider_id: String,
|
||||
pub email: Option<String>,
|
||||
pub name: Option<String>,
|
||||
pub avatar_url: Option<String>,
|
||||
}
|
||||
|
||||
/// Find or create a user from an OAuth profile.
|
||||
/// Returns (user, is_new) where is_new indicates first-time registration.
|
||||
pub async fn find_or_create_user(pool: &PgPool, profile: OAuthProfile) -> Result<(User, bool)> {
|
||||
// Use a transaction with FOR UPDATE to prevent TOCTOU race conditions
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
// Check if this OAuth account already exists (with row lock)
|
||||
let existing: Option<OauthAccount> = sqlx::query_as(
|
||||
"SELECT id, user_id, provider, provider_id, email, name, avatar_url, created_at \
|
||||
FROM oauth_accounts WHERE provider = $1 AND provider_id = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(&profile.provider)
|
||||
.bind(&profile.provider_id)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some(oa) = existing {
|
||||
let user: User = sqlx::query_as(
|
||||
"SELECT id, email, name, avatar_url, key_salt, key_check, key_params, api_key, created_at, updated_at \
|
||||
FROM users WHERE id = $1",
|
||||
)
|
||||
.bind(oa.user_id)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?;
|
||||
tx.commit().await?;
|
||||
return Ok((user, false));
|
||||
}
|
||||
|
||||
// New user — create records (no key yet; user sets passphrase on dashboard)
|
||||
let display_name = profile
|
||||
.name
|
||||
.clone()
|
||||
.unwrap_or_else(|| profile.email.clone().unwrap_or_else(|| "User".to_string()));
|
||||
|
||||
let user: User = sqlx::query_as(
|
||||
"INSERT INTO users (email, name, avatar_url) \
|
||||
VALUES ($1, $2, $3) \
|
||||
RETURNING id, email, name, avatar_url, key_salt, key_check, key_params, api_key, created_at, updated_at",
|
||||
)
|
||||
.bind(&profile.email)
|
||||
.bind(&display_name)
|
||||
.bind(&profile.avatar_url)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?;
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO oauth_accounts (user_id, provider, provider_id, email, name, avatar_url) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6)",
|
||||
)
|
||||
.bind(user.id)
|
||||
.bind(&profile.provider)
|
||||
.bind(&profile.provider_id)
|
||||
.bind(&profile.email)
|
||||
.bind(&profile.name)
|
||||
.bind(&profile.avatar_url)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
Ok((user, true))
|
||||
}
|
||||
|
||||
/// Re-encrypt all of a user's secrets from `old_key` to `new_key` and update the key metadata.
|
||||
///
|
||||
/// Runs entirely inside a single database transaction: if any secret fails to re-encrypt
|
||||
/// the whole operation is rolled back, leaving the database unchanged.
|
||||
pub async fn change_user_key(
|
||||
pool: &PgPool,
|
||||
user_id: Uuid,
|
||||
old_key: &[u8; 32],
|
||||
new_key: &[u8; 32],
|
||||
new_salt: &[u8],
|
||||
new_key_check: &[u8],
|
||||
new_key_params: &Value,
|
||||
) -> Result<()> {
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
let secrets: Vec<(uuid::Uuid, Vec<u8>)> =
|
||||
sqlx::query_as("SELECT id, encrypted FROM secrets WHERE user_id = $1 FOR UPDATE")
|
||||
.bind(user_id)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?;
|
||||
|
||||
for (id, encrypted) in &secrets {
|
||||
let plaintext = crate::crypto::decrypt(old_key, encrypted)?;
|
||||
let new_encrypted = crate::crypto::encrypt(new_key, &plaintext)?;
|
||||
sqlx::query("UPDATE secrets SET encrypted = $1, updated_at = NOW() WHERE id = $2")
|
||||
.bind(&new_encrypted)
|
||||
.bind(id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
sqlx::query(
|
||||
"UPDATE users SET key_salt = $1, key_check = $2, key_params = $3, updated_at = NOW() \
|
||||
WHERE id = $4",
|
||||
)
|
||||
.bind(new_salt)
|
||||
.bind(new_key_check)
|
||||
.bind(new_key_params)
|
||||
.bind(user_id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
tx.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Store the PBKDF2 salt, key_check, and params for a user's passphrase setup.
|
||||
pub async fn update_user_key_setup(
|
||||
pool: &PgPool,
|
||||
user_id: Uuid,
|
||||
key_salt: &[u8],
|
||||
key_check: &[u8],
|
||||
key_params: &Value,
|
||||
) -> Result<()> {
|
||||
sqlx::query(
|
||||
"UPDATE users SET key_salt = $1, key_check = $2, key_params = $3, updated_at = NOW() \
|
||||
WHERE id = $4",
|
||||
)
|
||||
.bind(key_salt)
|
||||
.bind(key_check)
|
||||
.bind(key_params)
|
||||
.bind(user_id)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a user by ID.
|
||||
pub async fn get_user_by_id(pool: &PgPool, user_id: Uuid) -> Result<Option<User>> {
|
||||
let user = sqlx::query_as(
|
||||
"SELECT id, email, name, avatar_url, key_salt, key_check, key_params, api_key, created_at, updated_at \
|
||||
FROM users WHERE id = $1",
|
||||
)
|
||||
.bind(user_id)
|
||||
.fetch_optional(pool)
|
||||
.await?;
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
/// List all OAuth accounts linked to a user.
|
||||
pub async fn list_oauth_accounts(pool: &PgPool, user_id: Uuid) -> Result<Vec<OauthAccount>> {
|
||||
let accounts = sqlx::query_as(
|
||||
"SELECT id, user_id, provider, provider_id, email, name, avatar_url, created_at \
|
||||
FROM oauth_accounts WHERE user_id = $1 ORDER BY created_at",
|
||||
)
|
||||
.bind(user_id)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
Ok(accounts)
|
||||
}
|
||||
|
||||
/// Bind an additional OAuth account to an existing user.
|
||||
pub async fn bind_oauth_account(
|
||||
pool: &PgPool,
|
||||
user_id: Uuid,
|
||||
profile: OAuthProfile,
|
||||
) -> Result<OauthAccount> {
|
||||
// Use a transaction with FOR UPDATE to prevent TOCTOU race conditions
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
// Check if this provider_id is already linked to someone else (with row lock)
|
||||
let conflict: Option<(Uuid,)> = sqlx::query_as(
|
||||
"SELECT user_id FROM oauth_accounts WHERE provider = $1 AND provider_id = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(&profile.provider)
|
||||
.bind(&profile.provider_id)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some((existing_user_id,)) = conflict {
|
||||
if existing_user_id != user_id {
|
||||
anyhow::bail!(
|
||||
"This {} account is already linked to a different user",
|
||||
profile.provider
|
||||
);
|
||||
}
|
||||
anyhow::bail!(
|
||||
"This {} account is already linked to your account",
|
||||
profile.provider
|
||||
);
|
||||
}
|
||||
|
||||
let existing_provider_for_user: Option<(String,)> = sqlx::query_as(
|
||||
"SELECT provider_id FROM oauth_accounts WHERE user_id = $1 AND provider = $2 FOR UPDATE",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(&profile.provider)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if existing_provider_for_user.is_some() {
|
||||
anyhow::bail!(
|
||||
"You already linked a {} account. Unlink the other provider instead of binding multiple {} accounts.",
|
||||
profile.provider,
|
||||
profile.provider
|
||||
);
|
||||
}
|
||||
|
||||
let account: OauthAccount = sqlx::query_as(
|
||||
"INSERT INTO oauth_accounts (user_id, provider, provider_id, email, name, avatar_url) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6) \
|
||||
RETURNING id, user_id, provider, provider_id, email, name, avatar_url, created_at",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(&profile.provider)
|
||||
.bind(&profile.provider_id)
|
||||
.bind(&profile.email)
|
||||
.bind(&profile.name)
|
||||
.bind(&profile.avatar_url)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?;
|
||||
|
||||
tx.commit().await?;
|
||||
Ok(account)
|
||||
}
|
||||
|
||||
/// Unbind an OAuth account. Ensures at least one remains and blocks unlinking the current login provider.
|
||||
pub async fn unbind_oauth_account(
|
||||
pool: &PgPool,
|
||||
user_id: Uuid,
|
||||
provider: &str,
|
||||
current_login_provider: Option<&str>,
|
||||
) -> Result<()> {
|
||||
if current_login_provider == Some(provider) {
|
||||
anyhow::bail!(
|
||||
"Cannot unlink the {} account you are currently using to sign in",
|
||||
provider
|
||||
);
|
||||
}
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
let locked_accounts: Vec<(String,)> =
|
||||
sqlx::query_as("SELECT provider FROM oauth_accounts WHERE user_id = $1 FOR UPDATE")
|
||||
.bind(user_id)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?;
|
||||
let count = locked_accounts.len();
|
||||
|
||||
if count <= 1 {
|
||||
anyhow::bail!("Cannot unbind the last OAuth account. Please link another account first.");
|
||||
}
|
||||
|
||||
sqlx::query("DELETE FROM oauth_accounts WHERE user_id = $1 AND provider = $2")
|
||||
.bind(user_id)
|
||||
.bind(provider)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
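/// Connect to the database named by `SECRETS_DATABASE_URL` and run migrations;
/// returns `None` (and the test silently skips) when the variable is unset or the
/// database is unreachable.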
async fn maybe_test_pool() -> Option<PgPool> {
|
||||
let database_url = match std::env::var("SECRETS_DATABASE_URL") {
|
||||
Ok(v) => v,
|
||||
Err(_) => {
|
||||
eprintln!("skip user service tests: SECRETS_DATABASE_URL not set");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let pool = match sqlx::PgPool::connect(&database_url).await {
|
||||
Ok(pool) => pool,
|
||||
Err(e) => {
|
||||
eprintln!("skip user service tests: cannot connect to database: {e}");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
if let Err(e) = crate::db::migrate(&pool).await {
|
||||
eprintln!("skip user service tests: migrate failed: {e}");
|
||||
return None;
|
||||
}
|
||||
Some(pool)
|
||||
}
|
||||
|
||||
async fn cleanup_user_rows(pool: &PgPool, user_id: Uuid) -> Result<()> {
|
||||
sqlx::query("DELETE FROM oauth_accounts WHERE user_id = $1")
|
||||
.bind(user_id)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
sqlx::query("DELETE FROM users WHERE id = $1")
|
||||
.bind(user_id)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn unbind_oauth_account_removes_only_requested_provider() -> Result<()> {
|
||||
let Some(pool) = maybe_test_pool().await else {
|
||||
return Ok(());
|
||||
};
|
||||
let user_id = Uuid::from_u128(rand::random());
|
||||
|
||||
cleanup_user_rows(&pool, user_id).await?;
|
||||
|
||||
sqlx::query("INSERT INTO users (id, name) VALUES ($1, '')")
|
||||
.bind(user_id)
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
sqlx::query(
|
||||
"INSERT INTO oauth_accounts (user_id, provider, provider_id, email, name, avatar_url) \
|
||||
VALUES ($1, 'google', $2, NULL, NULL, NULL), \
|
||||
($1, 'github', $3, NULL, NULL, NULL)",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(format!("google-{user_id}"))
|
||||
.bind(format!("github-{user_id}"))
|
||||
.execute(&pool)
|
||||
.await?;
|
||||
|
||||
unbind_oauth_account(&pool, user_id, "github", Some("google")).await?;
|
||||
|
||||
let remaining: Vec<(String,)> = sqlx::query_as(
|
||||
"SELECT provider FROM oauth_accounts WHERE user_id = $1 ORDER BY provider",
|
||||
)
|
||||
.bind(user_id)
|
||||
.fetch_all(&pool)
|
||||
.await?;
|
||||
assert_eq!(remaining, vec![("google".to_string(),)]);
|
||||
|
||||
cleanup_user_rows(&pool, user_id).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
4
crates/secrets-core/src/taxonomy.rs
Normal file
@@ -0,0 +1,4 @@
/// Canonical secret type options for UI dropdowns.
pub const SECRET_TYPE_OPTIONS: &[&str] = &[
    "text", "password", "token", "api-key", "ssh-key", "url", "phone", "id-card",
];
48
crates/secrets-mcp/Cargo.toml
Normal file
@@ -0,0 +1,48 @@
[package]
name = "secrets-mcp"
version = "0.5.8"
edition.workspace = true

[[bin]]
name = "secrets-mcp"
path = "src/main.rs"

[dependencies]
secrets-core = { path = "../secrets-core" }

# MCP
rmcp = { version = "1", features = ["server", "macros", "transport-streamable-http-server", "schemars"] }

# Web framework
axum = "0.8"
axum-extra = { version = "0.10", features = ["typed-header"] }
tower = "0.5"
tower-http = { version = "0.6", features = ["cors", "trace", "limit"] }
tower-sessions = "0.14"
tower-sessions-sqlx-store-chrono = { version = "0.14", features = ["postgres"] }
governor = { version = "0.10", features = ["std", "jitter"] }
time = "0.3"

# OAuth (manual token exchange via reqwest)
reqwest.workspace = true

# Templating - render templates manually to avoid integration crate issues
askama = "0.13"

# Common
anyhow.workspace = true
chrono.workspace = true
serde.workspace = true
serde_json.workspace = true
sha2.workspace = true
rand.workspace = true
sqlx.workspace = true
tokio.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
uuid.workspace = true
dotenvy.workspace = true
urlencoding = "2"
schemars = "1"
http = "1"
url = "2"
97
crates/secrets-mcp/src/auth.rs
Normal file
@@ -0,0 +1,97 @@
use axum::{
    extract::{Request, State},
    http::StatusCode,
    middleware::Next,
    response::Response,
};
use sqlx::PgPool;
use uuid::Uuid;

use secrets_core::service::api_key::validate_api_key;

use crate::client_ip;

/// Injected into request extensions after Bearer token validation.
#[derive(Clone, Debug)]
pub struct AuthUser {
    pub user_id: Uuid,
}

/// Axum middleware that validates Bearer API keys for the /mcp route.
/// Passes all non-MCP paths through without authentication.
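///
/// A minimal wiring sketch (names assumed for illustration; the actual router setup
/// lives in `main.rs` and may differ):
///
/// ```ignore
/// let app = axum::Router::new()
///     .nest_service("/mcp", mcp_service)
///     .layer(axum::middleware::from_fn_with_state(pool.clone(), bearer_auth_middleware));
/// ```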
pub async fn bearer_auth_middleware(
    State(pool): State<PgPool>,
    req: Request,
    next: Next,
) -> Result<Response, StatusCode> {
    let path = req.uri().path();
    let method = req.method().as_str();
    let client_ip = client_ip::extract_client_ip(&req);

    // Only authenticate /mcp paths
    if !path.starts_with("/mcp") {
        return Ok(next.run(req).await);
    }

    // Allow OPTIONS (CORS preflight) through
    if req.method() == axum::http::Method::OPTIONS {
        return Ok(next.run(req).await);
    }

    let auth_header = req
        .headers()
        .get(axum::http::header::AUTHORIZATION)
        .and_then(|v| v.to_str().ok());

    let raw_key = match auth_header {
        Some(h) if h.starts_with("Bearer ") => h.trim_start_matches("Bearer ").trim(),
        Some(_) => {
            tracing::warn!(
                method,
                path,
                %client_ip,
                "invalid Authorization header format on /mcp (expected Bearer …)"
            );
            return Err(StatusCode::UNAUTHORIZED);
        }
        None => {
            tracing::warn!(
                method,
                path,
                %client_ip,
                "missing Authorization header on /mcp"
            );
            return Err(StatusCode::UNAUTHORIZED);
        }
    };

    match validate_api_key(&pool, raw_key).await {
        Ok(Some(user_id)) => {
            tracing::debug!(?user_id, "api key authenticated");
            let mut req = req;
            req.extensions_mut().insert(AuthUser { user_id });
            Ok(next.run(req).await)
        }
        Ok(None) => {
            tracing::warn!(
                method,
                path,
                %client_ip,
                key_prefix = %&raw_key.chars().take(12).collect::<String>(),
                key_len = raw_key.len(),
                "invalid api key (not found in database — e.g. revoked key or DB was reset; update MCP client Bearer token)"
            );
            Err(StatusCode::UNAUTHORIZED)
        }
        Err(e) => {
            tracing::error!(
                method,
                path,
                %client_ip,
                error = %e,
                "api key validation error"
            );
            Err(StatusCode::INTERNAL_SERVER_ERROR)
        }
    }
}
65
crates/secrets-mcp/src/client_ip.rs
Normal file
@@ -0,0 +1,65 @@
use axum::extract::Request;
use std::net::{IpAddr, SocketAddr};

/// Extract the client IP from a request.
///
/// When the `TRUST_PROXY` environment variable is set to `1`, `true`, or `yes`, the
/// `X-Forwarded-For` and `X-Real-IP` headers are consulted first, which is
/// appropriate when the service runs behind a trusted reverse proxy (e.g.
/// Caddy). Otherwise — or if those headers are absent/empty — the direct TCP
/// connection address from `ConnectInfo` is used.
///
/// **Important**: only enable `TRUST_PROXY` when the application is guaranteed
/// to receive traffic exclusively through a controlled reverse proxy. Enabling
/// it on a directly-exposed port allows clients to spoof their IP address and
/// bypass per-IP rate limiting.
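///
/// For example, with `TRUST_PROXY=1` and `X-Forwarded-For: 203.0.113.7, 10.0.0.1`,
/// the left-most address `203.0.113.7` is returned.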
pub fn extract_client_ip(req: &Request) -> String {
    if trust_proxy_enabled() {
        if let Some(ip) = forwarded_for_ip(req.headers()) {
            return ip;
        }
        if let Some(ip) = real_ip(req.headers()) {
            return ip;
        }
    }

    connect_info_ip(req).unwrap_or_else(|| "unknown".to_string())
}

fn trust_proxy_enabled() -> bool {
    static CACHE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
    *CACHE.get_or_init(|| {
        matches!(
            std::env::var("TRUST_PROXY").as_deref(),
            Ok("1") | Ok("true") | Ok("yes")
        )
    })
}

fn forwarded_for_ip(headers: &axum::http::HeaderMap) -> Option<String> {
    let value = headers.get("x-forwarded-for")?.to_str().ok()?;
    let first = value.split(',').next()?.trim();
    if first.is_empty() {
        None
    } else {
        validate_ip(first)
    }
}

fn real_ip(headers: &axum::http::HeaderMap) -> Option<String> {
    let value = headers.get("x-real-ip")?.to_str().ok()?;
    let ip = value.trim();
    if ip.is_empty() { None } else { validate_ip(ip) }
}

/// Validate that a string is a valid IP address.
/// Returns Some(ip) if valid, None otherwise.
fn validate_ip(s: &str) -> Option<String> {
    s.parse::<IpAddr>().ok().map(|ip| ip.to_string())
}

fn connect_info_ip(req: &Request) -> Option<String> {
    req.extensions()
        .get::<axum::extract::ConnectInfo<SocketAddr>>()
        .map(|c| c.0.ip().to_string())
}
54
crates/secrets-mcp/src/error.rs
Normal file
@@ -0,0 +1,54 @@
use secrets_core::error::AppError;

/// Map a structured `AppError` to an MCP protocol error.
///
/// This replaces the previous pattern of swallowing all errors into `-32603`.
pub fn app_error_to_mcp(err: &AppError) -> rmcp::ErrorData {
    match err {
        AppError::ConflictSecretName { secret_name } => rmcp::ErrorData::invalid_request(
            format!(
                "A secret with the name '{secret_name}' already exists for your account. \
                 Secret names must be unique per user."
            ),
            None,
        ),
        AppError::ConflictEntryName { folder, name } => rmcp::ErrorData::invalid_request(
            format!(
                "An entry with folder='{folder}' and name='{name}' already exists. \
                 The combination of folder and name must be unique."
            ),
            None,
        ),
        AppError::NotFoundEntry => rmcp::ErrorData::invalid_request(
            "Entry not found. Use secrets_find to discover existing entries.",
            None,
        ),
        AppError::NotFoundUser => rmcp::ErrorData::invalid_request("User not found.", None),
        AppError::NotFoundSecret => rmcp::ErrorData::invalid_request("Secret not found.", None),
        AppError::AuthenticationFailed => rmcp::ErrorData::invalid_request(
            "Authentication failed. Please check your API key or login credentials.",
            None,
        ),
        AppError::Unauthorized => rmcp::ErrorData::invalid_request(
            "Unauthorized: you do not have permission to access this resource.",
            None,
        ),
        AppError::Validation { message } => rmcp::ErrorData::invalid_request(message.clone(), None),
        AppError::ConcurrentModification => rmcp::ErrorData::invalid_request(
            "The entry was modified by another request. Please refresh and try again.",
            None,
        ),
        AppError::DecryptionFailed => rmcp::ErrorData::invalid_request(
            "Decryption failed — the encryption key may be incorrect or does not match the data.",
            None,
        ),
        AppError::EncryptionKeyNotSet => rmcp::ErrorData::invalid_request(
            "Encryption key not set. You must set a passphrase before using this feature.",
            None,
        ),
        AppError::Internal(_) => rmcp::ErrorData::internal_error(
            "Request failed due to a server error. Check service logs if you need details.",
            None,
        ),
    }
}
381
crates/secrets-mcp/src/logging.rs
Normal file
@@ -0,0 +1,381 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use axum::{
|
||||
body::{Body, Bytes, to_bytes},
|
||||
extract::Request,
|
||||
http::{
|
||||
HeaderMap, Method, StatusCode,
|
||||
header::{AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, USER_AGENT},
|
||||
},
|
||||
middleware::Next,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
|
||||
use crate::auth::AuthUser;
|
||||
|
||||
/// Axum middleware that logs structured info for every HTTP request.
|
||||
///
|
||||
/// All requests: method, path, status, latency_ms, client_ip, user_agent.
|
||||
/// POST /mcp requests: additionally parses JSON-RPC body for jsonrpc_method,
|
||||
/// tool_name, jsonrpc_id, mcp_session, batch_size, tool_args (non-sensitive
|
||||
/// arguments only), plus masked auth_key / enc_key fingerprints and user_id
|
||||
/// for diagnosing header forwarding issues.
|
||||
///
|
||||
/// Sensitive headers (Authorization, X-Encryption-Key) are never logged in
|
||||
/// full — only short fingerprints are emitted.
|
||||
pub async fn request_logging_middleware(req: Request, next: Next) -> Response {
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().path().to_string();
|
||||
let ip = client_ip(&req);
|
||||
let ua = header_str(req.headers(), USER_AGENT);
|
||||
let content_len = header_str(req.headers(), CONTENT_LENGTH).and_then(|v| v.parse::<u64>().ok());
|
||||
let mcp_session = req
|
||||
.headers()
|
||||
.get("mcp-session-id")
|
||||
.or_else(|| req.headers().get("x-mcp-session"))
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|s| s.to_string());
|
||||
|
||||
// Capture header fingerprints before consuming the request.
|
||||
let auth_key = mask_bearer(req.headers());
|
||||
let enc_key = mask_enc_key(req.headers());
|
||||
|
||||
let is_mcp_post = path.starts_with("/mcp") && method == Method::POST;
|
||||
let is_json = header_str(req.headers(), CONTENT_TYPE)
|
||||
.map(|ct| ct.contains("application/json"))
|
||||
.unwrap_or(false);
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
// For MCP JSON-RPC POST requests, buffer body to extract JSON-RPC metadata.
|
||||
// We cap at 512 KiB to avoid buffering large payloads.
|
||||
if is_mcp_post && is_json {
|
||||
let cap = content_len.unwrap_or(0);
|
||||
if cap <= 512 * 1024 {
|
||||
let (parts, body) = req.into_parts();
|
||||
// user_id is available after auth middleware has run (injected into extensions).
|
||||
let user_id = parts
|
||||
.extensions
|
||||
.get::<AuthUser>()
|
||||
.map(|a| a.user_id.to_string());
|
||||
match to_bytes(body, 512 * 1024).await {
|
||||
Ok(bytes) => {
|
||||
let rpc = parse_jsonrpc_meta(&bytes);
|
||||
let req = Request::from_parts(parts, Body::from(bytes));
|
||||
let resp = next.run(req).await;
|
||||
let status = resp.status().as_u16();
|
||||
let elapsed = start.elapsed().as_millis();
|
||||
log_mcp_request(
|
||||
&method,
|
||||
&path,
|
||||
status,
|
||||
elapsed,
|
||||
ip.as_deref(),
|
||||
ua.as_deref(),
|
||||
content_len,
|
||||
mcp_session.as_deref(),
|
||||
auth_key.as_deref(),
|
||||
&enc_key,
|
||||
user_id.as_deref(),
|
||||
&rpc,
|
||||
);
|
||||
return resp;
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!(path, error = %e, "failed to buffer MCP request body for logging");
|
||||
let elapsed = start.elapsed().as_millis();
|
||||
tracing::info!(
|
||||
method = method.as_str(),
|
||||
path,
|
||||
status = StatusCode::INTERNAL_SERVER_ERROR.as_u16(),
|
||||
elapsed_ms = elapsed,
|
||||
client_ip = ip.as_deref(),
|
||||
ua = ua.as_deref(),
|
||||
content_length = content_len,
|
||||
mcp_session = mcp_session.as_deref(),
|
||||
auth_key = auth_key.as_deref(),
|
||||
enc_key = enc_key.as_str(),
|
||||
user_id = user_id.as_deref(),
|
||||
"mcp request",
|
||||
);
|
||||
return (
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
"failed to read request body",
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let resp = next.run(req).await;
|
||||
let status = resp.status().as_u16();
|
||||
let elapsed = start.elapsed().as_millis();
|
||||
|
||||
// Known client probe patterns that legitimately 404 — downgrade to debug to
|
||||
// avoid noise in production logs. These are:
|
||||
// • GET /.well-known/* — OAuth/OIDC discovery by MCP clients (RFC 8414 / RFC 9728)
|
||||
// • GET /mcp → 404 — old SSE-transport compatibility probe by clients
|
||||
let is_expected_probe_404 = status == 404
|
||||
&& (path.starts_with("/.well-known/")
|
||||
|| (method == Method::GET && path.starts_with("/mcp")));
|
||||
|
||||
if is_expected_probe_404 {
|
||||
tracing::debug!(
|
||||
method = method.as_str(),
|
||||
path,
|
||||
status,
|
||||
elapsed_ms = elapsed,
|
||||
client_ip = ip.as_deref(),
|
||||
ua = ua.as_deref(),
|
||||
"probe request (not found — expected)",
|
||||
);
|
||||
} else {
|
||||
log_http_request(
|
||||
&method,
|
||||
&path,
|
||||
status,
|
||||
elapsed,
|
||||
ip.as_deref(),
|
||||
ua.as_deref(),
|
||||
content_len,
|
||||
);
|
||||
}
|
||||
|
||||
resp
|
||||
}
|
||||
|
||||
// ── Logging helpers ───────────────────────────────────────────────────────────
|
||||
|
||||
fn log_http_request(
|
||||
method: &Method,
|
||||
path: &str,
|
||||
status: u16,
|
||||
elapsed_ms: u128,
|
||||
client_ip: Option<&str>,
|
||||
ua: Option<&str>,
|
||||
content_length: Option<u64>,
|
||||
) {
|
||||
tracing::info!(
|
||||
method = method.as_str(),
|
||||
path,
|
||||
status,
|
||||
elapsed_ms,
|
||||
client_ip,
|
||||
ua,
|
||||
content_length,
|
||||
"http request",
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn log_mcp_request(
|
||||
method: &Method,
|
||||
path: &str,
|
||||
status: u16,
|
||||
elapsed_ms: u128,
|
||||
client_ip: Option<&str>,
|
||||
ua: Option<&str>,
|
||||
content_length: Option<u64>,
|
||||
mcp_session: Option<&str>,
|
||||
auth_key: Option<&str>,
|
||||
enc_key: &str,
|
||||
user_id: Option<&str>,
|
||||
rpc: &JsonRpcMeta,
|
||||
) {
|
||||
tracing::info!(
|
||||
method = method.as_str(),
|
||||
path,
|
||||
status,
|
||||
elapsed_ms,
|
||||
client_ip,
|
||||
ua,
|
||||
content_length,
|
||||
mcp_session,
|
||||
jsonrpc = rpc.rpc_method.as_deref(),
|
||||
tool = rpc.tool_name.as_deref(),
|
||||
jsonrpc_id = rpc.request_id.as_deref(),
|
||||
batch_size = rpc.batch_size,
|
||||
tool_args = rpc.tool_args.as_deref(),
|
||||
auth_key,
|
||||
enc_key,
|
||||
user_id,
|
||||
"mcp request",
|
||||
);
|
||||
}
|
||||
|
||||
// ── Sensitive header masking ──────────────────────────────────────────────────
|
||||
|
||||
/// Mask a Bearer token: emit only the first 12 characters followed by `…`.
|
||||
/// Returns `None` if the Authorization header is absent or not a Bearer token.
|
||||
/// Example: `sk_90c88844e4e5…`
|
||||
fn mask_bearer(headers: &HeaderMap) -> Option<String> {
|
||||
let val = headers.get(AUTHORIZATION)?.to_str().ok()?;
|
||||
let token = val.strip_prefix("Bearer ")?.trim();
|
||||
if token.is_empty() {
|
||||
return None;
|
||||
}
|
||||
if token.len() > 12 {
|
||||
Some(format!("{}…", &token[..12]))
|
||||
} else {
|
||||
Some(token.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Fingerprint the X-Encryption-Key header.
|
||||
///
|
||||
/// Emits first 4 chars, last 4 chars, and raw byte length, e.g. `146b…5516(64)`.
|
||||
/// Returns `"absent"` when the header is missing. Reveals enough to confirm
|
||||
/// which key arrived and whether it was truncated or padded, without revealing
|
||||
/// the full value.
|
||||
fn mask_enc_key(headers: &HeaderMap) -> String {
|
||||
match headers
|
||||
.get("x-encryption-key")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
{
|
||||
Some(val) => {
|
||||
let raw_len = val.len();
|
||||
let t = val.trim();
|
||||
let len = t.len();
|
||||
if len >= 8 {
|
||||
let prefix = &t[..4];
|
||||
let suffix = &t[len - 4..];
|
||||
if raw_len != len {
|
||||
// Trailing/leading whitespace detected — extra diagnostic.
|
||||
format!("{prefix}…{suffix}({len}, raw={raw_len})")
|
||||
} else {
|
||||
format!("{prefix}…{suffix}({len})")
|
||||
}
|
||||
} else {
|
||||
format!("…({len})")
|
||||
}
|
||||
}
|
||||
None => "absent".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
// ── JSON-RPC body parsing ─────────────────────────────────────────────────────
|
||||
|
||||
/// Safe (non-sensitive) argument keys that may be included verbatim in logs.
|
||||
/// Keys NOT in this list (e.g. `secrets`, `secrets_obj`, `meta_obj`,
|
||||
/// `encryption_key`) are silently dropped.
|
||||
const SAFE_ARG_KEYS: &[&str] = &[
|
||||
"id",
|
||||
"name",
|
||||
"name_query",
|
||||
"folder",
|
||||
"type",
|
||||
"entry_type",
|
||||
"field",
|
||||
"query",
|
||||
"tags",
|
||||
"limit",
|
||||
"offset",
|
||||
"format",
|
||||
"dry_run",
|
||||
"prefix",
|
||||
];
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct JsonRpcMeta {
|
||||
request_id: Option<String>,
|
||||
rpc_method: Option<String>,
|
||||
tool_name: Option<String>,
|
||||
batch_size: Option<usize>,
|
||||
/// Non-sensitive tool call arguments for diagnostic logging.
|
||||
tool_args: Option<String>,
|
||||
}
|
||||
|
||||
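/// Parse the JSON-RPC envelope from a buffered request body. Handles both a single
/// request object and a batch array (for batches, only the first element's metadata is
/// reported, alongside the batch size). Returns an empty meta when the body is not valid JSON.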
fn parse_jsonrpc_meta(bytes: &Bytes) -> JsonRpcMeta {
|
||||
let Ok(value) = serde_json::from_slice::<serde_json::Value>(bytes) else {
|
||||
return JsonRpcMeta::default();
|
||||
};
|
||||
|
||||
if let Some(arr) = value.as_array() {
|
||||
// Batch request: summarise method(s) from first element only
|
||||
let first = arr.first().map(parse_single).unwrap_or_default();
|
||||
return JsonRpcMeta {
|
||||
batch_size: Some(arr.len()),
|
||||
..first
|
||||
};
|
||||
}
|
||||
|
||||
parse_single(&value)
|
||||
}
|
||||
|
||||
fn parse_single(value: &serde_json::Value) -> JsonRpcMeta {
|
||||
let request_id = value.get("id").and_then(json_to_string);
|
||||
let rpc_method = value
|
||||
.get("method")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string());
|
||||
let tool_name = value
|
||||
.pointer("/params/name")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string());
|
||||
let tool_args = extract_tool_args(value);
|
||||
|
||||
JsonRpcMeta {
|
||||
request_id,
|
||||
rpc_method,
|
||||
tool_name,
|
||||
batch_size: None,
|
||||
tool_args,
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract a compact summary of non-sensitive tool arguments for logging.
|
||||
/// Only keys listed in `SAFE_ARG_KEYS` are included.
|
||||
fn extract_tool_args(value: &serde_json::Value) -> Option<String> {
|
||||
let args = value.pointer("/params/arguments")?;
|
||||
let obj = args.as_object()?;
|
||||
let pairs: Vec<String> = obj
|
||||
.iter()
|
||||
.filter(|(k, v)| SAFE_ARG_KEYS.contains(&k.as_str()) && !v.is_null())
|
||||
.map(|(k, v)| format!("{}={}", k, summarize_value(v)))
|
||||
.collect();
|
||||
if pairs.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(pairs.join(" "))
|
||||
}
|
||||
}
|
||||
|
||||
/// Produce a short, log-safe representation of a JSON value.
|
||||
fn summarize_value(v: &serde_json::Value) -> String {
|
||||
match v {
|
||||
serde_json::Value::String(s) => {
// Truncate by characters, not bytes, so a multi-byte UTF-8 boundary cannot panic the slice.
if s.chars().count() > 64 {
let head: String = s.chars().take(64).collect();
format!("\"{head}…\"")
} else {
format!("\"{s}\"")
}
}
|
||||
serde_json::Value::Array(arr) => format!("[…{}]", arr.len()),
|
||||
serde_json::Value::Object(_) => "{…}".to_string(),
|
||||
other => other.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn json_to_string(value: &serde_json::Value) -> Option<String> {
|
||||
match value {
|
||||
serde_json::Value::Null => None,
|
||||
serde_json::Value::String(s) => Some(s.clone()),
|
||||
serde_json::Value::Number(n) => Some(n.to_string()),
|
||||
serde_json::Value::Bool(b) => Some(b.to_string()),
|
||||
other => Some(other.to_string()),
|
||||
}
|
||||
}
|
||||
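// A small sketch of what the parser above extracts from a `tools/call` body.
// The tool name and argument values are hypothetical; the point is that only
// `SAFE_ARG_KEYS` entries reach `tool_args`, while anything else is dropped.
#[cfg(test)]
mod jsonrpc_meta_sketch {
    use bytes::Bytes;

    use super::parse_jsonrpc_meta;

    #[test]
    fn keeps_only_safe_arguments() {
        let body = Bytes::from_static(
            br#"{"jsonrpc":"2.0","id":7,"method":"tools/call",
                 "params":{"name":"example_tool",
                           "arguments":{"folder":"prod","limit":10,"encryption_key":"hunter2"}}}"#,
        );
        let meta = parse_jsonrpc_meta(&body);
        assert_eq!(meta.request_id.as_deref(), Some("7"));
        assert_eq!(meta.rpc_method.as_deref(), Some("tools/call"));
        assert_eq!(meta.tool_name.as_deref(), Some("example_tool"));
        assert_eq!(meta.batch_size, None);

        let args = meta.tool_args.expect("safe args should be summarised");
        assert!(args.contains(r#"folder="prod""#));
        assert!(args.contains("limit=10"));
        // The sensitive key never reaches the log line.
        assert!(!args.contains("hunter2"));
    }
}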
|
||||
// ── Header helpers ────────────────────────────────────────────────────────────
|
||||
|
||||
fn header_str(headers: &HeaderMap, name: impl axum::http::header::AsHeaderName) -> Option<String> {
|
||||
headers
|
||||
.get(name)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|s| s.to_string())
|
||||
}
|
||||
|
||||
fn client_ip(req: &Request) -> Option<String> {
|
||||
crate::client_ip::extract_client_ip(req).into()
|
||||
}
|
||||
346 crates/secrets-mcp/src/main.rs Normal file
@@ -0,0 +1,346 @@
|
||||
mod auth;
|
||||
mod client_ip;
|
||||
mod error;
|
||||
mod logging;
|
||||
mod oauth;
|
||||
mod rate_limit;
|
||||
mod tools;
|
||||
mod validation;
|
||||
mod web;
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use axum::Router;
|
||||
use rmcp::transport::streamable_http_server::{
|
||||
StreamableHttpService, session::local::LocalSessionManager,
|
||||
};
|
||||
use sqlx::PgPool;
|
||||
use tower_http::cors::{Any, CorsLayer};
|
||||
use tower_sessions::cookie::SameSite;
|
||||
use tower_sessions::session_store::ExpiredDeletion;
|
||||
use tower_sessions::{Expiry, SessionManagerLayer};
|
||||
use tower_sessions_sqlx_store_chrono::PostgresStore;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use tracing_subscriber::fmt::time::FormatTime;
|
||||
|
||||
use secrets_core::config::resolve_db_config;
|
||||
use secrets_core::db::{create_pool, migrate};
|
||||
|
||||
use crate::oauth::OAuthConfig;
|
||||
use crate::tools::SecretsService;
|
||||
|
||||
/// Shared application state injected into web routes and middleware.
|
||||
#[derive(Clone)]
|
||||
pub struct AppState {
|
||||
pub pool: PgPool,
|
||||
pub google_config: Option<OAuthConfig>,
|
||||
pub base_url: String,
|
||||
pub http_client: reqwest::Client,
|
||||
}
|
||||
|
||||
fn load_env_var(name: &str) -> Option<String> {
|
||||
std::env::var(name).ok().filter(|s| !s.is_empty())
|
||||
}
|
||||
|
||||
/// Pretty-print bind address in logs (`127.0.0.1` → `localhost`); actual socket bind unchanged.
|
||||
fn listen_addr_log_display(bind_addr: &str) -> String {
|
||||
bind_addr
|
||||
.strip_prefix("127.0.0.1:")
|
||||
.map(|port| format!("localhost:{port}"))
|
||||
.unwrap_or_else(|| bind_addr.to_string())
|
||||
}
|
||||
|
||||
fn load_oauth_config(prefix: &str, base_url: &str, path: &str) -> Option<OAuthConfig> {
|
||||
let client_id = load_env_var(&format!("{}_CLIENT_ID", prefix))?;
|
||||
let client_secret = load_env_var(&format!("{}_CLIENT_SECRET", prefix))?;
|
||||
Some(OAuthConfig {
|
||||
client_id,
|
||||
client_secret,
|
||||
redirect_uri: format!("{}{}", base_url, path),
|
||||
})
|
||||
}
|
||||
|
||||
/// Log line timestamps in the process local timezone (honors `TZ` / system zone).
|
||||
#[derive(Clone, Copy, Default)]
|
||||
struct LocalRfc3339Time;
|
||||
|
||||
impl FormatTime for LocalRfc3339Time {
|
||||
fn format_time(&self, w: &mut tracing_subscriber::fmt::format::Writer<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
w,
|
||||
"{}",
|
||||
chrono::Local::now().to_rfc3339_opts(chrono::SecondsFormat::Millis, false)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
// Load .env if present
|
||||
let _ = dotenvy::dotenv();
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_timer(LocalRfc3339Time)
|
||||
.with_env_filter(
|
||||
EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| "secrets_mcp=info,tower_http=info".into()),
|
||||
)
|
||||
.init();
|
||||
|
||||
// ── Database ──────────────────────────────────────────────────────────────
|
||||
let db_config = resolve_db_config("")
|
||||
.context("Database not configured. Set SECRETS_DATABASE_URL environment variable.")?;
|
||||
let pool = create_pool(&db_config)
|
||||
.await
|
||||
.context("failed to connect to database")?;
|
||||
migrate(&pool)
|
||||
.await
|
||||
.context("failed to run database migrations")?;
|
||||
tracing::info!("Database connected and migrated");
|
||||
|
||||
// ── Configuration ─────────────────────────────────────────────────────────
|
||||
let base_url = load_env_var("BASE_URL").unwrap_or_else(|| "http://localhost:9315".to_string());
|
||||
let bind_addr =
|
||||
load_env_var("SECRETS_MCP_BIND").unwrap_or_else(|| "127.0.0.1:9315".to_string());
|
||||
|
||||
// ── OAuth providers ───────────────────────────────────────────────────────
|
||||
let google_config = load_oauth_config("GOOGLE", &base_url, "/auth/google/callback");
|
||||
|
||||
if google_config.is_none() {
|
||||
tracing::warn!(
|
||||
"No OAuth providers configured. Set GOOGLE_CLIENT_ID/GOOGLE_CLIENT_SECRET to enable login."
|
||||
);
|
||||
}
|
||||
|
||||
// ── Session store (PostgreSQL-backed) ─────────────────────────────────────
|
||||
let session_store = PostgresStore::new(pool.clone());
|
||||
session_store
|
||||
.migrate()
|
||||
.await
|
||||
.context("failed to run session table migration")?;
|
||||
// Prune expired rows every hour; task is aborted when the server shuts down.
|
||||
let session_cleanup = tokio::spawn(
|
||||
session_store
|
||||
.clone()
|
||||
.continuously_delete_expired(tokio::time::Duration::from_secs(3600)),
|
||||
);
|
||||
// Strict would drop the session cookie on redirect from Google → our origin (cross-site nav).
|
||||
let session_layer = SessionManagerLayer::new(session_store)
|
||||
.with_secure(base_url.starts_with("https://"))
|
||||
.with_same_site(SameSite::Lax)
|
||||
.with_expiry(Expiry::OnInactivity(time::Duration::days(14)));
|
||||
|
||||
// ── App state ─────────────────────────────────────────────────────────────
|
||||
let app_state = AppState {
|
||||
pool: pool.clone(),
|
||||
google_config,
|
||||
base_url: base_url.clone(),
|
||||
http_client: reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(15))
|
||||
.build()
|
||||
.context("failed to build HTTP client")?,
|
||||
};
|
||||
|
||||
// ── MCP service ───────────────────────────────────────────────────────────
|
||||
let pool_arc = Arc::new(pool.clone());
|
||||
|
||||
let mcp_service = StreamableHttpService::new(
|
||||
move || {
|
||||
let p = pool_arc.clone();
|
||||
Ok(SecretsService::new(p))
|
||||
},
|
||||
LocalSessionManager::default().into(),
|
||||
Default::default(),
|
||||
);
|
||||
|
||||
// ── Router ────────────────────────────────────────────────────────────────
|
||||
// CORS: restrict origins in production, allow all in development
|
||||
let is_production = matches!(
|
||||
load_env_var("SECRETS_ENV")
|
||||
.as_deref()
|
||||
.map(|s| s.to_ascii_lowercase())
|
||||
.as_deref(),
|
||||
Some("prod" | "production")
|
||||
);
|
||||
|
||||
let cors = build_cors_layer(&base_url, is_production);
|
||||
|
||||
// Rate limiting
|
||||
let rate_limit_state = rate_limit::RateLimitState::new();
|
||||
let rate_limit_cleanup = rate_limit::spawn_cleanup_task(rate_limit_state.ip_limiter.clone());
|
||||
|
||||
let router = Router::new()
|
||||
.merge(web::web_router())
|
||||
.nest_service("/mcp", mcp_service)
|
||||
.layer(axum::middleware::from_fn(
|
||||
logging::request_logging_middleware,
|
||||
))
|
||||
.layer(axum::middleware::from_fn_with_state(
|
||||
pool,
|
||||
auth::bearer_auth_middleware,
|
||||
))
|
||||
.layer(axum::middleware::from_fn_with_state(
|
||||
rate_limit_state.clone(),
|
||||
rate_limit::rate_limit_middleware,
|
||||
))
|
||||
.layer(session_layer)
|
||||
.layer(cors)
|
||||
.layer(tower_http::limit::RequestBodyLimitLayer::new(
|
||||
10 * 1024 * 1024,
|
||||
))
|
||||
.with_state(app_state);
|
||||
|
||||
// ── Start server ──────────────────────────────────────────────────────────
|
||||
let listener = tokio::net::TcpListener::bind(&bind_addr)
|
||||
.await
|
||||
.with_context(|| format!("failed to bind to {}", bind_addr))?;
|
||||
|
||||
tracing::info!(
|
||||
"Secrets MCP Server listening on http://{}",
|
||||
listen_addr_log_display(&bind_addr)
|
||||
);
|
||||
tracing::info!("MCP endpoint: {}/mcp", base_url);
|
||||
|
||||
axum::serve(
|
||||
listener,
|
||||
router.into_make_service_with_connect_info::<SocketAddr>(),
|
||||
)
|
||||
.with_graceful_shutdown(shutdown_signal())
|
||||
.await
|
||||
.context("server error")?;
|
||||
|
||||
session_cleanup.abort();
|
||||
rate_limit_cleanup.abort();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn shutdown_signal() {
|
||||
let ctrl_c = tokio::signal::ctrl_c();
|
||||
|
||||
#[cfg(unix)]
|
||||
let terminate = async {
|
||||
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
|
||||
.expect("failed to install SIGTERM handler")
|
||||
.recv()
|
||||
.await;
|
||||
};
|
||||
|
||||
#[cfg(not(unix))]
|
||||
let terminate = std::future::pending::<()>();
|
||||
|
||||
tokio::select! {
|
||||
_ = ctrl_c => {},
|
||||
_ = terminate => {},
|
||||
}
|
||||
|
||||
tracing::info!("Shutting down gracefully...");
|
||||
}
|
||||
|
||||
/// Production CORS allowed headers.
|
||||
///
|
||||
/// When adding a new custom header to the MCP or Web API, this list must be
|
||||
/// updated accordingly — otherwise browsers will block the request during
|
||||
/// the CORS preflight check.
|
||||
fn production_allowed_headers() -> [axum::http::HeaderName; 5] {
|
||||
[
|
||||
axum::http::header::AUTHORIZATION,
|
||||
axum::http::header::CONTENT_TYPE,
|
||||
axum::http::HeaderName::from_static("x-encryption-key"),
|
||||
axum::http::HeaderName::from_static("mcp-session-id"),
|
||||
axum::http::HeaderName::from_static("x-mcp-session"),
|
||||
]
|
||||
}
|
||||
|
||||
/// Production CORS allowed methods.
|
||||
///
|
||||
/// Keep this list explicit because tower-http rejects
|
||||
/// `allow_credentials(true)` together with `allow_methods(Any)`.
|
||||
fn production_allowed_methods() -> [axum::http::Method; 5] {
|
||||
[
|
||||
axum::http::Method::GET,
|
||||
axum::http::Method::POST,
|
||||
axum::http::Method::PATCH,
|
||||
axum::http::Method::DELETE,
|
||||
axum::http::Method::OPTIONS,
|
||||
]
|
||||
}
|
||||
|
||||
/// Build the CORS layer for the application.
|
||||
///
|
||||
/// In production mode the origin is restricted to the BASE_URL origin
|
||||
/// (scheme://host:port, path stripped) and credentials are allowed.
|
||||
/// `allow_headers` and `allow_methods` use explicit whitelists to avoid the
|
||||
/// tower-http restriction on `allow_credentials(true)` + wildcards.
|
||||
///
|
||||
/// In development mode all origins, methods and headers are allowed.
|
||||
fn build_cors_layer(base_url: &str, is_production: bool) -> CorsLayer {
|
||||
if is_production {
|
||||
let allowed_origin = if let Ok(parsed) = base_url.parse::<url::Url>() {
|
||||
let origin = parsed.origin().ascii_serialization();
|
||||
origin
|
||||
.parse::<axum::http::HeaderValue>()
|
||||
.unwrap_or_else(|_| panic!("invalid BASE_URL origin: {}", origin))
|
||||
} else {
|
||||
base_url
|
||||
.parse::<axum::http::HeaderValue>()
|
||||
.unwrap_or_else(|_| panic!("invalid BASE_URL: {}", base_url))
|
||||
};
|
||||
CorsLayer::new()
|
||||
.allow_origin(allowed_origin)
|
||||
.allow_methods(production_allowed_methods())
|
||||
.allow_headers(production_allowed_headers())
|
||||
.allow_credentials(true)
|
||||
} else {
|
||||
CorsLayer::new()
|
||||
.allow_origin(Any)
|
||||
.allow_methods(Any)
|
||||
.allow_headers(Any)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn production_cors_does_not_panic() {
|
||||
let layer = build_cors_layer("https://secrets.example.com/app", true);
|
||||
let _ = layer;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn production_cors_headers_include_all_required() {
|
||||
let headers = production_allowed_headers();
|
||||
let names: Vec<&str> = headers.iter().map(|h| h.as_str()).collect();
|
||||
assert!(names.contains(&"authorization"));
|
||||
assert!(names.contains(&"content-type"));
|
||||
assert!(names.contains(&"x-encryption-key"));
|
||||
assert!(names.contains(&"mcp-session-id"));
|
||||
assert!(names.contains(&"x-mcp-session"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn production_cors_methods_include_all_required() {
|
||||
let methods = production_allowed_methods();
|
||||
assert!(methods.contains(&axum::http::Method::GET));
|
||||
assert!(methods.contains(&axum::http::Method::POST));
|
||||
assert!(methods.contains(&axum::http::Method::PATCH));
|
||||
assert!(methods.contains(&axum::http::Method::DELETE));
|
||||
assert!(methods.contains(&axum::http::Method::OPTIONS));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn production_cors_normalizes_base_url_with_path() {
|
||||
let url = url::Url::parse("https://secrets.example.com/secrets/app").unwrap();
|
||||
let origin = url.origin().ascii_serialization();
|
||||
assert_eq!(origin, "https://secrets.example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn development_cors_allows_everything() {
|
||||
let layer = build_cors_layer("http://localhost:9315", false);
|
||||
let _ = layer;
|
||||
}
|
||||
}
|
||||
66 crates/secrets-mcp/src/oauth/google.rs Normal file
@@ -0,0 +1,66 @@
|
||||
use anyhow::{Context, Result};
|
||||
use serde::Deserialize;
|
||||
|
||||
use super::{OAuthConfig, OAuthUserInfo};
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct TokenResponse {
|
||||
access_token: String,
|
||||
#[allow(dead_code)]
|
||||
token_type: String,
|
||||
#[allow(dead_code)]
|
||||
id_token: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct UserInfo {
|
||||
sub: String,
|
||||
email: Option<String>,
|
||||
name: Option<String>,
|
||||
picture: Option<String>,
|
||||
}
|
||||
|
||||
/// Exchange authorization code for tokens and fetch user profile.
|
||||
pub async fn exchange_code(
|
||||
client: &reqwest::Client,
|
||||
config: &OAuthConfig,
|
||||
code: &str,
|
||||
) -> Result<OAuthUserInfo> {
|
||||
let token_resp: TokenResponse = client
|
||||
.post("https://oauth2.googleapis.com/token")
|
||||
.form(&[
|
||||
("code", code),
|
||||
("client_id", &config.client_id),
|
||||
("client_secret", &config.client_secret),
|
||||
("redirect_uri", &config.redirect_uri),
|
||||
("grant_type", "authorization_code"),
|
||||
])
|
||||
.send()
|
||||
.await
|
||||
.context("failed to exchange Google code")?
|
||||
.error_for_status()
|
||||
.context("Google token endpoint error")?
|
||||
.json()
|
||||
.await
|
||||
.context("failed to parse Google token response")?;
|
||||
|
||||
let user: UserInfo = client
|
||||
.get("https://openidconnect.googleapis.com/v1/userinfo")
|
||||
.bearer_auth(&token_resp.access_token)
|
||||
.send()
|
||||
.await
|
||||
.context("failed to fetch Google userinfo")?
|
||||
.error_for_status()
|
||||
.context("Google userinfo endpoint error")?
|
||||
.json()
|
||||
.await
|
||||
.context("failed to parse Google userinfo")?;
|
||||
|
||||
Ok(OAuthUserInfo {
|
||||
provider: "google".to_string(),
|
||||
provider_id: user.sub,
|
||||
email: user.email,
|
||||
name: user.name,
|
||||
avatar_url: user.picture,
|
||||
})
|
||||
}
|
||||
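// Sketch of how `exchange_code` is meant to be driven from an axum handler.
// The real callback handler lives in web.rs (its diff is suppressed below),
// so the route shape, state verification and response here are assumptions
// for illustration only.
use axum::extract::{Query, State};

#[derive(Deserialize)]
struct CallbackParamsSketch {
    code: String,
    state: String,
}

#[allow(dead_code)]
async fn google_callback_sketch(
    State(app): State<crate::AppState>,
    Query(params): Query<CallbackParamsSketch>,
) -> Result<String, String> {
    // A real handler must compare `params.state` with the value stored in the session.
    let _ = &params.state;
    let config = app
        .google_config
        .as_ref()
        .ok_or_else(|| "Google OAuth is not configured".to_string())?;
    let user = exchange_code(&app.http_client, config, &params.code)
        .await
        .map_err(|e| e.to_string())?;
    Ok(format!("signed in via {} as {}", user.provider, user.provider_id))
}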
45 crates/secrets-mcp/src/oauth/mod.rs Normal file
@@ -0,0 +1,45 @@
|
||||
pub mod google;
|
||||
pub mod wechat; // not yet implemented — placeholder for future WeChat integration
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Normalized OAuth user profile from any provider.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OAuthUserInfo {
|
||||
pub provider: String,
|
||||
pub provider_id: String,
|
||||
pub email: Option<String>,
|
||||
pub name: Option<String>,
|
||||
pub avatar_url: Option<String>,
|
||||
}
|
||||
|
||||
/// OAuth provider configuration.
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
pub struct OAuthConfig {
|
||||
pub client_id: String,
|
||||
pub client_secret: String,
|
||||
pub redirect_uri: String,
|
||||
}
|
||||
|
||||
/// Build the Google authorization URL.
|
||||
pub fn google_auth_url(config: &OAuthConfig, state: &str) -> String {
|
||||
format!(
|
||||
"https://accounts.google.com/o/oauth2/v2/auth\
|
||||
?client_id={}\
|
||||
&redirect_uri={}\
|
||||
&response_type=code\
|
||||
&scope=openid%20email%20profile\
|
||||
&state={}\
|
||||
&access_type=offline",
|
||||
urlencoding::encode(&config.client_id),
|
||||
urlencoding::encode(&config.redirect_uri),
|
||||
urlencoding::encode(state),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn random_state() -> String {
|
||||
use rand::RngExt;
|
||||
let mut bytes = [0u8; 16];
|
||||
rand::rng().fill(&mut bytes);
|
||||
bytes.iter().map(|b| format!("{:02x}", b)).collect()
|
||||
}
|
||||
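// A minimal sketch of the two helpers above. The config values are made up;
// the assertions only restate what the format string and the 16 random bytes imply.
#[cfg(test)]
mod oauth_url_sketch {
    use super::{OAuthConfig, google_auth_url, random_state};

    #[test]
    fn auth_url_is_encoded_and_state_is_hex() {
        let config = OAuthConfig {
            client_id: "client-id-example".to_string(),
            client_secret: "unused-in-this-sketch".to_string(),
            redirect_uri: "https://secrets.example.com/auth/google/callback".to_string(),
        };
        let url = google_auth_url(&config, "abc123");
        assert!(url.starts_with("https://accounts.google.com/o/oauth2/v2/auth?client_id="));
        // The redirect URI is percent-encoded into the query string.
        assert!(url.contains("redirect_uri=https%3A%2F%2Fsecrets.example.com%2Fauth%2Fgoogle%2Fcallback"));
        assert!(url.contains("&state=abc123"));

        // 16 random bytes formatted as lowercase hex gives 32 characters.
        let state = random_state();
        assert_eq!(state.len(), 32);
        assert!(state.chars().all(|c| c.is_ascii_hexdigit()));
    }
}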
18 crates/secrets-mcp/src/oauth/wechat.rs Normal file
@@ -0,0 +1,18 @@
|
||||
//! WeChat OAuth — not yet implemented.
//!
//! This module is a placeholder for future WeChat Open Platform integration.
//! When ready, implement `exchange_code` following the non-standard WeChat OAuth 2.0 flow:
//! - Token exchange uses a GET request (not POST)
//! - Preferred user identifier is `unionid` (cross-app), falling back to `openid`
//! - Docs: https://developers.weixin.qq.com/doc/oplatform/Website_App/WeChat_Login/Wechat_Login.html

use anyhow::{Result, bail};

use super::{OAuthConfig, OAuthUserInfo};
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub async fn exchange_code(
|
||||
_client: &reqwest::Client,
|
||||
_config: &OAuthConfig,
|
||||
_code: &str,
|
||||
) -> Result<OAuthUserInfo> {
|
||||
bail!("WeChat login is not yet implemented")
|
||||
}
|
||||
160 crates/secrets-mcp/src/rate_limit.rs Normal file
@@ -0,0 +1,160 @@
|
||||
use std::num::NonZeroU32;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use axum::{
|
||||
extract::{Request, State},
|
||||
http::{HeaderMap, HeaderValue, StatusCode},
|
||||
middleware::Next,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use governor::{
|
||||
Quota, RateLimiter,
|
||||
clock::{Clock, DefaultClock},
|
||||
state::{InMemoryState, NotKeyed, keyed::DashMapStateStore},
|
||||
};
|
||||
use serde_json::json;
|
||||
|
||||
use crate::client_ip;
|
||||
|
||||
/// Per-IP rate limiter (keyed by client IP string)
|
||||
type IpRateLimiter = RateLimiter<String, DashMapStateStore<String>, DefaultClock>;
|
||||
|
||||
/// Global rate limiter (not keyed)
|
||||
type GlobalRateLimiter = RateLimiter<NotKeyed, InMemoryState, DefaultClock>;
|
||||
|
||||
/// Parse a u32 env value into NonZeroU32, logging a warning and falling back
|
||||
/// to the default if the value is zero.
|
||||
fn nz_or_log(value: u32, default: u32, name: &str) -> NonZeroU32 {
|
||||
NonZeroU32::new(value).unwrap_or_else(|| {
|
||||
tracing::warn!(
|
||||
configured = value,
|
||||
default,
|
||||
"{name} must be non-zero, using default"
|
||||
);
|
||||
NonZeroU32::new(default).unwrap()
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RateLimitState {
|
||||
pub ip_limiter: Arc<IpRateLimiter>,
|
||||
pub global_limiter: Arc<GlobalRateLimiter>,
|
||||
}
|
||||
|
||||
impl RateLimitState {
|
||||
/// Create a new RateLimitState with default limits.
|
||||
///
|
||||
/// Default limits (can be overridden via environment variables):
|
||||
/// - Global: 100 req/s, burst 200
|
||||
/// - Per-IP: 20 req/s, burst 40
|
||||
pub fn new() -> Self {
|
||||
let global_rate = std::env::var("RATE_LIMIT_GLOBAL_PER_SECOND")
|
||||
.ok()
|
||||
.and_then(|v| v.parse::<u32>().ok())
|
||||
.unwrap_or(100);
|
||||
|
||||
let global_burst = std::env::var("RATE_LIMIT_GLOBAL_BURST")
|
||||
.ok()
|
||||
.and_then(|v| v.parse::<u32>().ok())
|
||||
.unwrap_or(200);
|
||||
|
||||
let ip_rate = std::env::var("RATE_LIMIT_IP_PER_SECOND")
|
||||
.ok()
|
||||
.and_then(|v| v.parse::<u32>().ok())
|
||||
.unwrap_or(20);
|
||||
|
||||
let ip_burst = std::env::var("RATE_LIMIT_IP_BURST")
|
||||
.ok()
|
||||
.and_then(|v| v.parse::<u32>().ok())
|
||||
.unwrap_or(40);
|
||||
|
||||
let global_rate_nz = nz_or_log(global_rate, 100, "RATE_LIMIT_GLOBAL_PER_SECOND");
|
||||
let global_burst_nz = nz_or_log(global_burst, 200, "RATE_LIMIT_GLOBAL_BURST");
|
||||
let ip_rate_nz = nz_or_log(ip_rate, 20, "RATE_LIMIT_IP_PER_SECOND");
|
||||
let ip_burst_nz = nz_or_log(ip_burst, 40, "RATE_LIMIT_IP_BURST");
|
||||
|
||||
let global_quota = Quota::per_second(global_rate_nz).allow_burst(global_burst_nz);
|
||||
let ip_quota = Quota::per_second(ip_rate_nz).allow_burst(ip_burst_nz);
|
||||
|
||||
tracing::info!(
|
||||
global_rate = global_rate_nz.get(),
|
||||
global_burst = global_burst_nz.get(),
|
||||
ip_rate = ip_rate_nz.get(),
|
||||
ip_burst = ip_burst_nz.get(),
|
||||
"rate limiter initialized"
|
||||
);
|
||||
|
||||
Self {
|
||||
global_limiter: Arc::new(RateLimiter::direct(global_quota)),
|
||||
ip_limiter: Arc::new(RateLimiter::dashmap(ip_quota)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Rate limiting middleware function.
|
||||
///
|
||||
/// Checks both global and per-IP rate limits before allowing the request through.
|
||||
/// Returns 429 Too Many Requests if either limit is exceeded.
|
||||
pub async fn rate_limit_middleware(
|
||||
State(rl): State<RateLimitState>,
|
||||
req: Request,
|
||||
next: Next,
|
||||
) -> Result<Response, Response> {
|
||||
// Check global rate limit first
|
||||
if let Err(negative) = rl.global_limiter.check() {
|
||||
let retry_after = negative.wait_time_from(DefaultClock::default().now());
|
||||
tracing::warn!(
|
||||
retry_after_secs = retry_after.as_secs(),
|
||||
"global rate limit exceeded"
|
||||
);
|
||||
return Err(too_many_requests_response(Some(retry_after)));
|
||||
}
|
||||
|
||||
// Check per-IP rate limit
|
||||
let key = client_ip::extract_client_ip(&req);
|
||||
if let Err(negative) = rl.ip_limiter.check_key(&key) {
|
||||
let retry_after = negative.wait_time_from(DefaultClock::default().now());
|
||||
tracing::warn!(
|
||||
client_ip = %key,
|
||||
retry_after_secs = retry_after.as_secs(),
|
||||
"per-IP rate limit exceeded"
|
||||
);
|
||||
return Err(too_many_requests_response(Some(retry_after)));
|
||||
}
|
||||
|
||||
Ok(next.run(req).await)
|
||||
}
|
||||
|
||||
/// Start a background task to clean up expired rate limiter entries.
|
||||
///
|
||||
/// This should be called once during application startup.
|
||||
/// The task runs every 60 seconds and will be aborted on shutdown.
|
||||
pub fn spawn_cleanup_task(ip_limiter: Arc<IpRateLimiter>) -> tokio::task::JoinHandle<()> {
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(60));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
ip_limiter.retain_recent();
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a 429 Too Many Requests response.
|
||||
fn too_many_requests_response(retry_after: Option<Duration>) -> Response {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert("Content-Type", HeaderValue::from_static("application/json"));
|
||||
|
||||
if let Some(duration) = retry_after {
|
||||
let secs = duration.as_secs().max(1);
|
||||
if let Ok(value) = HeaderValue::from_str(&secs.to_string()) {
|
||||
headers.insert("Retry-After", value);
|
||||
}
|
||||
}
|
||||
|
||||
let body = json!({
|
||||
"error": "Too many requests, please try again later"
|
||||
});
|
||||
|
||||
(StatusCode::TOO_MANY_REQUESTS, headers, body.to_string()).into_response()
|
||||
}
|
||||
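// A small sketch of the zero-value fallback described in `nz_or_log`'s doc
// comment; the variable name is just an example. `NonZeroU32` cannot hold 0,
// so a zero override falls back to the default (with a warning) instead of
// producing an unusable quota.
#[cfg(test)]
mod rate_limit_sketch {
    use std::num::NonZeroU32;

    use super::nz_or_log;

    #[test]
    fn zero_env_values_fall_back_to_defaults() {
        assert_eq!(
            nz_or_log(0, 20, "RATE_LIMIT_IP_PER_SECOND"),
            NonZeroU32::new(20).unwrap()
        );
        // Valid non-zero values pass through unchanged.
        assert_eq!(
            nz_or_log(35, 20, "RATE_LIMIT_IP_PER_SECOND"),
            NonZeroU32::new(35).unwrap()
        );
    }
}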
1804 crates/secrets-mcp/src/tools.rs Normal file
File diff suppressed because it is too large
149 crates/secrets-mcp/src/validation.rs Normal file
@@ -0,0 +1,149 @@
|
||||
/// Validation constants for input field lengths.
|
||||
pub const MAX_NAME_LENGTH: usize = 256;
|
||||
pub const MAX_FOLDER_LENGTH: usize = 128;
|
||||
pub const MAX_ENTRY_TYPE_LENGTH: usize = 64;
|
||||
pub const MAX_NOTES_LENGTH: usize = 10000;
|
||||
pub const MAX_TAG_LENGTH: usize = 64;
|
||||
pub const MAX_TAG_COUNT: usize = 50;
|
||||
pub const MAX_META_KEY_LENGTH: usize = 128;
|
||||
pub const MAX_META_VALUE_LENGTH: usize = 4096;
|
||||
pub const MAX_META_COUNT: usize = 100;
|
||||
|
||||
/// Validate input field lengths for MCP tools.
|
||||
///
|
||||
/// Returns an error if any field exceeds its maximum length.
|
||||
pub fn validate_input_lengths(
|
||||
name: &str,
|
||||
folder: Option<&str>,
|
||||
entry_type: Option<&str>,
|
||||
notes: Option<&str>,
|
||||
) -> Result<(), rmcp::ErrorData> {
|
||||
if name.chars().count() > MAX_NAME_LENGTH {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!("name must be at most {} characters", MAX_NAME_LENGTH),
|
||||
None,
|
||||
));
|
||||
}
|
||||
if let Some(folder) = folder
|
||||
&& folder.chars().count() > MAX_FOLDER_LENGTH
|
||||
{
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!("folder must be at most {} characters", MAX_FOLDER_LENGTH),
|
||||
None,
|
||||
));
|
||||
}
|
||||
if let Some(entry_type) = entry_type
|
||||
&& entry_type.chars().count() > MAX_ENTRY_TYPE_LENGTH
|
||||
{
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!("type must be at most {} characters", MAX_ENTRY_TYPE_LENGTH),
|
||||
None,
|
||||
));
|
||||
}
|
||||
if let Some(notes) = notes
|
||||
&& notes.chars().count() > MAX_NOTES_LENGTH
|
||||
{
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!("notes must be at most {} characters", MAX_NOTES_LENGTH),
|
||||
None,
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate the tags list.
|
||||
///
|
||||
/// Checks total count and per-tag character length.
|
||||
pub fn validate_tags(tags: &[String]) -> Result<(), rmcp::ErrorData> {
|
||||
if tags.len() > MAX_TAG_COUNT {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!("at most {} tags are allowed", MAX_TAG_COUNT),
|
||||
None,
|
||||
));
|
||||
}
|
||||
for tag in tags {
|
||||
if tag.chars().count() > MAX_TAG_LENGTH {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!(
|
||||
"tag '{}' exceeds the maximum length of {} characters",
|
||||
tag, MAX_TAG_LENGTH
|
||||
),
|
||||
None,
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate metadata KV strings (key=value / key:=json format).
|
||||
///
|
||||
/// Checks total count and per-key/per-value character lengths.
|
||||
/// This is a best-effort check on the raw KV strings before parsing;
|
||||
/// keys containing `:` path separators are checked as a whole.
|
||||
pub fn validate_meta_entries(entries: &[String]) -> Result<(), rmcp::ErrorData> {
|
||||
if entries.len() > MAX_META_COUNT {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!("at most {} metadata entries are allowed", MAX_META_COUNT),
|
||||
None,
|
||||
));
|
||||
}
|
||||
for entry in entries {
|
||||
// key:=json — check both key and JSON value length
|
||||
if let Some((key, value)) = entry.split_once(":=") {
|
||||
if key.chars().count() > MAX_META_KEY_LENGTH {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!(
|
||||
"metadata key '{}' exceeds the maximum length of {} characters",
|
||||
key, MAX_META_KEY_LENGTH
|
||||
),
|
||||
None,
|
||||
));
|
||||
}
|
||||
if value.chars().count() > MAX_META_VALUE_LENGTH {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!(
|
||||
"metadata JSON value for key '{}' exceeds the maximum length of {} characters",
|
||||
key, MAX_META_VALUE_LENGTH
|
||||
),
|
||||
None,
|
||||
));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// key=value or key@path
|
||||
if let Some((key, value)) = entry.split_once('=') {
|
||||
if key.chars().count() > MAX_META_KEY_LENGTH {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!(
|
||||
"metadata key '{}' exceeds the maximum length of {} characters",
|
||||
key, MAX_META_KEY_LENGTH
|
||||
),
|
||||
None,
|
||||
));
|
||||
}
|
||||
if value.chars().count() > MAX_META_VALUE_LENGTH {
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!(
|
||||
"metadata value for key '{}' exceeds the maximum length of {} characters",
|
||||
key, MAX_META_VALUE_LENGTH
|
||||
),
|
||||
None,
|
||||
));
|
||||
}
|
||||
} else {
|
||||
// Fallback: entry without = or := — check total length
|
||||
let max_total = MAX_META_KEY_LENGTH + MAX_META_VALUE_LENGTH;
|
||||
if entry.chars().count() > max_total {
|
||||
let preview = entry.chars().take(50).collect::<String>();
|
||||
return Err(rmcp::ErrorData::invalid_params(
|
||||
format!(
|
||||
"metadata entry '{}' exceeds the maximum length of {} characters",
|
||||
preview, max_total
|
||||
),
|
||||
None,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
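// A compact sketch exercising the three validators above; the entry names,
// folders and metadata keys are invented examples, not real fixtures.
#[cfg(test)]
mod validation_sketch {
    use super::*;

    #[test]
    fn rejects_over_length_input_and_accepts_normal_input() {
        // Limits are counted in characters, not bytes.
        let long_name = "x".repeat(MAX_NAME_LENGTH + 1);
        assert!(validate_input_lengths(&long_name, None, None, None).is_err());
        assert!(validate_input_lengths("db/prod/api-key", Some("prod"), Some("api_key"), None).is_ok());

        // One tag more than MAX_TAG_COUNT.
        let tags: Vec<String> = (0..=MAX_TAG_COUNT).map(|i| format!("tag-{i}")).collect();
        assert!(validate_tags(&tags).is_err());

        // `key:=json` entries check the key and the JSON value separately.
        let huge_json = format!("cfg:={}", "a".repeat(MAX_META_VALUE_LENGTH + 1));
        assert!(validate_meta_entries(&[huge_json]).is_err());
        assert!(validate_meta_entries(&["env=prod".to_string()]).is_ok());
    }
}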
1912 crates/secrets-mcp/src/web.rs Normal file
File diff suppressed because it is too large
3 crates/secrets-mcp/static/favicon.svg Normal file
@@ -0,0 +1,3 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32">
|
||||
<text x="16" y="23" text-anchor="middle" font-family="system-ui,Segoe UI,sans-serif" font-size="22" font-weight="700" fill="#58a6ff">S</text>
|
||||
</svg>
|
||||
28 crates/secrets-mcp/static/llms.txt Normal file
@@ -0,0 +1,28 @@
|
||||
# Secrets MCP
|
||||
|
||||
> 给 AI 与自动化工具的简要说明。本站是 **secrets-mcp**:Streamable HTTP **MCP**(Model Context Protocol)与 **Web 控制台** 的组合,用于在多租户场景下存储条目元数据与加密后的秘密字段;持久化在 PostgreSQL。用户通过 OAuth(如已配置)登录 Web;MCP 调用使用 API Key 与加密相关请求头。
|
||||
|
||||
## 公开页面
|
||||
|
||||
- **`/`**:公开首页,说明安全架构(客户端密钥派生、密文存储、多租户与审计等),无需登录。
|
||||
|
||||
## 不应抓取或索引的内容
|
||||
|
||||
- **`/mcp`**:MCP 流式 HTTP 端点(JSON-RPC 等),**不是** HTML 文档,也不适合作为公开知识库来源。
|
||||
- **`/api/*`**:会话或 API Key 相关的 HTTP API。
|
||||
- **`/login`**:登录入口页(`noindex` / robots 通常 disallow)。
|
||||
- **`/dashboard`、`/audit`、`/auth/*`、`/account/*`**:需浏览器会话,属于用户私有界面与 OAuth 流程。
|
||||
|
||||
## 给 AI 助手的实用提示
|
||||
|
||||
- 向用户说明连接方式时:MCP 基址为 `{BASE_URL}/mcp`(`BASE_URL` 由部署方设置),通常需要 `Authorization: Bearer <api_key>`;读写加密秘密时还需按部署文档传递 `X-Encryption-Key` 等头(与客户端模式有关)。
|
||||
- **不要编造**本实例的数据库 URL、OAuth 密钥、回调地址或任何凭据;一律以用户环境变量与运维文档为准。
|
||||
- Web 端在浏览器内用密码短语派生密钥完成端到端加密;MCP 路径下服务端可能在请求周期内临时使用客户端提供的密钥处理密文(架构细节见项目 README「加密架构」)。
|
||||
|
||||
## 延伸阅读
|
||||
|
||||
- 源码仓库:<https://gitea.refining.dev/refining/secrets>(`README.md`、`AGENTS.md` 含环境变量、表结构与运维约定)。
|
||||
|
||||
## 关于本文件
|
||||
|
||||
- 遵循常见的 **`/llms.txt`** 约定,便于人类与 LLM 快速了解站点性质与抓取边界;同文可在 **`/ai.txt`** 获取。
|
||||
31 crates/secrets-mcp/static/robots.txt Normal file
@@ -0,0 +1,31 @@
|
||||
# Secrets MCP — robots.txt
|
||||
# 本站为需登录的私密控制台与 MCP API;以下路径请勿抓取,以免浪费配额并避免误索引敏感端点。
|
||||
# This host serves an authenticated dashboard and machine APIs; please skip crawling the paths below.
|
||||
|
||||
User-agent: *
|
||||
Disallow: /mcp
|
||||
Disallow: /api/
|
||||
Disallow: /dashboard
|
||||
Disallow: /audit
|
||||
Disallow: /auth/
|
||||
Disallow: /login
|
||||
Disallow: /account/
|
||||
|
||||
# 首页 `/` 为公开安全说明页,允许抓取。
|
||||
|
||||
# 面向 AI / LLM 的机器可读站点说明(Markdown):/llms.txt
|
||||
# Human & AI-readable site summary: /llms.txt (also /ai.txt)
|
||||
|
||||
User-agent: GPTBot
|
||||
User-agent: Google-Extended
|
||||
User-agent: anthropic-ai
|
||||
User-agent: Claude-Web
|
||||
User-agent: PerplexityBot
|
||||
User-agent: Bytespider
|
||||
Disallow: /mcp
|
||||
Disallow: /api/
|
||||
Disallow: /dashboard
|
||||
Disallow: /audit
|
||||
Disallow: /auth/
|
||||
Disallow: /login
|
||||
Disallow: /account/
|
||||
235 crates/secrets-mcp/templates/audit.html Normal file
@@ -0,0 +1,235 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="zh-CN">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<link rel="icon" href="/favicon.svg?v={{ version }}" type="image/svg+xml">
|
||||
<title>Secrets — Audit</title>
|
||||
<style>
|
||||
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;600&family=Inter:wght@400;500;600&display=swap');
|
||||
:root {
|
||||
--bg: #0d1117; --surface: #161b22; --surface2: #21262d;
|
||||
--border: #30363d; --text: #e6edf3; --text-muted: #8b949e;
|
||||
--accent: #58a6ff; --accent-hover: #79b8ff;
|
||||
}
|
||||
body { background: var(--bg); color: var(--text); font-family: 'Inter', sans-serif; min-height: 100vh; }
|
||||
.layout { display: flex; min-height: 100vh; }
|
||||
.sidebar {
|
||||
width: 220px; flex-shrink: 0; background: var(--surface); border-right: 1px solid var(--border);
|
||||
padding: 24px 16px; display: flex; flex-direction: column; gap: 20px;
|
||||
}
|
||||
.sidebar-logo { font-family: 'JetBrains Mono', monospace; font-size: 16px; font-weight: 600;
|
||||
color: var(--text); text-decoration: none; padding: 0 10px; }
|
||||
.sidebar-logo span { color: var(--accent); }
|
||||
.sidebar-menu { display: flex; flex-direction: column; gap: 6px; }
|
||||
.sidebar-link {
|
||||
padding: 10px 12px; border-radius: 8px; color: var(--text-muted); text-decoration: none;
|
||||
border: 1px solid transparent; font-size: 13px; font-weight: 500;
|
||||
}
|
||||
.sidebar-link:hover { background: var(--surface2); color: var(--text); }
|
||||
.sidebar-link.active {
|
||||
background: rgba(88,166,255,0.12); color: var(--text); border-color: rgba(88,166,255,0.35);
|
||||
}
|
||||
.content-shell { flex: 1; min-width: 0; display: flex; flex-direction: column; }
|
||||
.topbar {
|
||||
background: var(--surface); border-bottom: 1px solid var(--border); padding: 0 24px;
|
||||
display: flex; align-items: center; gap: 12px; min-height: 52px;
|
||||
}
|
||||
.topbar-spacer { flex: 1; }
|
||||
.nav-user { font-size: 13px; color: var(--text-muted); }
|
||||
.lang-bar { display: flex; gap: 2px; background: var(--surface2); border-radius: 6px; padding: 2px; }
|
||||
.lang-btn { padding: 3px 9px; border: none; background: none; color: var(--text-muted);
|
||||
font-size: 12px; cursor: pointer; border-radius: 4px; }
|
||||
.lang-btn.active { background: var(--border); color: var(--text); }
|
||||
.btn-sign-out {
|
||||
padding: 5px 12px; border-radius: 6px; border: 1px solid var(--border);
|
||||
background: none; color: var(--text); font-size: 12px; text-decoration: none; cursor: pointer;
|
||||
}
|
||||
.btn-sign-out:hover { background: var(--surface2); }
|
||||
.main { padding: 32px 24px 40px; flex: 1; }
|
||||
.card { background: var(--surface); border: 1px solid var(--border); border-radius: 12px;
|
||||
padding: 24px; width: 100%; max-width: 1180px; margin: 0 auto; }
|
||||
.card-title-row {
|
||||
display: flex; align-items: center; flex-wrap: wrap; gap: 8px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
.card-title { font-size: 20px; font-weight: 600; margin: 0; }
|
||||
.card-title-count {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
min-height: 24px;
|
||||
padding: 0 8px;
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 999px;
|
||||
background: var(--bg);
|
||||
color: var(--text-muted);
|
||||
font-size: 12px;
|
||||
font-weight: 600;
|
||||
line-height: 1;
|
||||
font-family: 'JetBrains Mono', monospace;
|
||||
}
|
||||
.empty { color: var(--text-muted); font-size: 14px; padding: 20px 0; }
|
||||
table { width: 100%; border-collapse: collapse; }
|
||||
th, td { text-align: left; vertical-align: top; padding: 12px 10px; border-top: 1px solid var(--border); }
|
||||
th { color: var(--text-muted); font-size: 12px; font-weight: 600; }
|
||||
td { font-size: 13px; }
|
||||
.mono { font-family: 'JetBrains Mono', monospace; }
|
||||
.detail {
|
||||
background: var(--bg); border: 1px solid var(--border); border-radius: 8px;
|
||||
padding: 10px; white-space: pre-wrap; word-break: break-word; font-size: 12px;
|
||||
max-width: 460px;
|
||||
}
|
||||
@media (max-width: 900px) {
|
||||
.layout { flex-direction: column; }
|
||||
.sidebar {
|
||||
width: 100%; border-right: none; border-bottom: 1px solid var(--border);
|
||||
padding: 16px; gap: 14px;
|
||||
}
|
||||
.sidebar-menu { flex-direction: row; }
|
||||
.sidebar-link { flex: 1; text-align: center; }
|
||||
.main { padding: 20px 12px 28px; }
|
||||
.card { padding: 16px; }
|
||||
.topbar { padding: 12px 16px; flex-wrap: wrap; }
|
||||
table, thead, tbody, th, td, tr { display: block; }
|
||||
thead { display: none; }
|
||||
tr { border-top: 1px solid var(--border); padding: 12px 0; }
|
||||
td { border-top: none; padding: 6px 0; }
|
||||
td::before {
|
||||
display: block; color: var(--text-muted); font-size: 11px;
|
||||
margin-bottom: 4px; text-transform: uppercase;
|
||||
content: attr(data-label);
|
||||
}
|
||||
.detail { max-width: none; }
|
||||
}
|
||||
.pagination {
|
||||
display: flex; align-items: center; gap: 8px; margin-top: 20px;
|
||||
justify-content: center; padding: 12px 0;
|
||||
}
|
||||
.page-btn {
|
||||
padding: 6px 14px; border-radius: 6px; border: 1px solid var(--border);
|
||||
background: var(--surface); color: var(--text); text-decoration: none;
|
||||
font-size: 13px; cursor: pointer;
|
||||
}
|
||||
.page-btn:hover { background: var(--surface2); }
|
||||
.page-btn-disabled {
|
||||
padding: 6px 14px; border-radius: 6px; border: 1px solid var(--border);
|
||||
background: var(--surface); color: var(--text-muted); font-size: 13px;
|
||||
opacity: 0.5; cursor: not-allowed;
|
||||
}
|
||||
.page-info {
|
||||
color: var(--text-muted); font-size: 13px; font-family: 'JetBrains Mono', monospace;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="layout">
|
||||
<aside class="sidebar">
|
||||
<a href="/dashboard" class="sidebar-logo"><span>secrets</span></a>
|
||||
<nav class="sidebar-menu">
|
||||
<a href="/dashboard" class="sidebar-link" data-i18n="navMcp">MCP</a>
|
||||
<a href="/entries" class="sidebar-link" data-i18n="navEntries">条目</a>
|
||||
<a href="/audit" class="sidebar-link active" data-i18n="navAudit">审计</a>
|
||||
</nav>
|
||||
</aside>
|
||||
|
||||
<div class="content-shell">
|
||||
<div class="topbar">
|
||||
<span class="topbar-spacer"></span>
|
||||
<span class="nav-user">{{ user_name }}{% if !user_email.is_empty() %} · {{ user_email }}{% endif %}</span>
|
||||
<div class="lang-bar">
|
||||
<button class="lang-btn" onclick="setLang('zh-CN')">简</button>
|
||||
<button class="lang-btn" onclick="setLang('zh-TW')">繁</button>
|
||||
<button class="lang-btn" onclick="setLang('en')">EN</button>
|
||||
</div>
|
||||
<form action="/auth/logout" method="post" style="display:inline">
|
||||
<button type="submit" class="btn-sign-out" data-i18n="signOut">退出</button>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
<main class="main">
|
||||
<section class="card">
|
||||
<div class="card-title-row">
|
||||
<div class="card-title" data-i18n="auditTitle">我的审计</div>
|
||||
<span class="card-title-count">{{ total_count }}</span>
|
||||
</div>
|
||||
|
||||
{% if entries.is_empty() %}
|
||||
<div class="empty" data-i18n="emptyAudit">暂无审计记录。</div>
|
||||
{% else %}
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th data-i18n="colTime">时间</th>
|
||||
<th data-i18n="colAction">动作</th>
|
||||
<th data-i18n="colTarget">目标</th>
|
||||
<th data-i18n="colDetail">详情</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for entry in entries %}
|
||||
<tr>
|
||||
<td class="col-time mono" data-label="时间"><time class="audit-local-time" datetime="{{ entry.created_at_iso }}">{{ entry.created_at_iso }}</time></td>
|
||||
<td class="col-action mono" data-label="动作">{{ entry.action }}</td>
|
||||
<td class="col-target mono" data-label="目标">{{ entry.target }}</td>
|
||||
<td class="col-detail" data-label="详情"><pre class="detail">{{ entry.detail }}</pre></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
{% if total_count > 0 %}
|
||||
<div class="pagination">
|
||||
{% if current_page > 1 %}
|
||||
<a href="?page={{ current_page - 1 }}" class="page-btn" data-i18n="prevPage">上一页</a>
|
||||
{% else %}
|
||||
<span class="page-btn page-btn-disabled" data-i18n="prevPage">上一页</span>
|
||||
{% endif %}
|
||||
<span class="page-info">{{ current_page }} / {{ total_pages }}</span>
|
||||
{% if current_page < total_pages %}
|
||||
<a href="?page={{ current_page + 1 }}" class="page-btn" data-i18n="nextPage">下一页</a>
|
||||
{% else %}
|
||||
<span class="page-btn page-btn-disabled" data-i18n="nextPage">下一页</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
</section>
|
||||
</main>
|
||||
</div>
|
||||
</div>
|
||||
<script src="/static/i18n.js"></script>
|
||||
<script>
|
||||
(function () {
|
||||
I18N_PAGE = {
|
||||
'zh-CN': { pageTitle: 'Secrets — 审计', auditTitle: '我的审计', emptyAudit: '暂无审计记录。', colTime: '时间', colAction: '动作', colTarget: '目标', colDetail: '详情', prevPage: '上一页', nextPage: '下一页' },
|
||||
'zh-TW': { pageTitle: 'Secrets — 審計', auditTitle: '我的審計', emptyAudit: '暫無審計記錄。', colTime: '時間', colAction: '動作', colTarget: '目標', colDetail: '詳情', prevPage: '上一頁', nextPage: '下一頁' },
|
||||
en: { pageTitle: 'Secrets — Audit', auditTitle: 'My audit', emptyAudit: 'No audit records.', colTime: 'Time', colAction: 'Action', colTarget: 'Target', colDetail: 'Detail', prevPage: 'Previous', nextPage: 'Next' }
|
||||
};
|
||||
|
||||
window.applyPageLang = function () {
|
||||
document.querySelectorAll('tbody tr').forEach(function (tr) {
|
||||
var time = tr.querySelector('.col-time');
|
||||
var action = tr.querySelector('.col-action');
|
||||
var target = tr.querySelector('.col-target');
|
||||
var detail = tr.querySelector('.col-detail');
|
||||
if (time) time.setAttribute('data-label', t('mobileLabelTime'));
|
||||
if (action) action.setAttribute('data-label', t('mobileLabelAction'));
|
||||
if (target) target.setAttribute('data-label', t('mobileLabelTarget'));
|
||||
if (detail) detail.setAttribute('data-label', t('mobileLabelDetail'));
|
||||
});
|
||||
};
|
||||
|
||||
document.querySelectorAll('time.audit-local-time[datetime]').forEach(function (el) {
|
||||
var raw = el.getAttribute('datetime');
|
||||
var d = raw ? new Date(raw) : null;
|
||||
if (d && !isNaN(d.getTime())) {
|
||||
el.textContent = d.toLocaleString(undefined, { dateStyle: 'medium', timeStyle: 'medium' });
|
||||
el.title = raw + ' (UTC)';
|
||||
}
|
||||
});
|
||||
applyLang();
|
||||
})();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
979 crates/secrets-mcp/templates/dashboard.html Normal file
@@ -0,0 +1,979 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="zh-CN">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<link rel="icon" href="/favicon.svg?v={{ version }}" type="image/svg+xml">
|
||||
<title>Secrets</title>
|
||||
<style>
|
||||
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;600&family=Inter:wght@400;500;600&display=swap');
|
||||
:root {
|
||||
--bg: #0d1117; --surface: #161b22; --surface2: #21262d;
|
||||
--border: #30363d; --text: #e6edf3; --text-muted: #8b949e;
|
||||
--accent: #58a6ff; --accent-hover: #79b8ff;
|
||||
--danger: #f85149; --success: #3fb950; --warn: #d29922;
|
||||
}
|
||||
body { background: var(--bg); color: var(--text); font-family: 'Inter', sans-serif; min-height: 100vh; }
|
||||
|
||||
.layout { display: flex; min-height: 100vh; }
|
||||
.sidebar {
|
||||
width: 220px; flex-shrink: 0; background: var(--surface); border-right: 1px solid var(--border);
|
||||
padding: 24px 16px; display: flex; flex-direction: column; gap: 20px;
|
||||
}
|
||||
.sidebar-logo { font-family: 'JetBrains Mono', monospace; font-size: 16px; font-weight: 600;
|
||||
color: var(--text); text-decoration: none; padding: 0 10px; }
|
||||
.sidebar-logo span { color: var(--accent); }
|
||||
.sidebar-menu { display: flex; flex-direction: column; gap: 6px; }
|
||||
.sidebar-link {
|
||||
padding: 10px 12px; border-radius: 8px; color: var(--text-muted); text-decoration: none;
|
||||
border: 1px solid transparent; font-size: 13px; font-weight: 500;
|
||||
}
|
||||
.sidebar-link:hover { background: var(--surface2); color: var(--text); }
|
||||
.sidebar-link.active {
|
||||
background: rgba(88,166,255,0.12); color: var(--text); border-color: rgba(88,166,255,0.35);
|
||||
}
|
||||
.content-shell { flex: 1; min-width: 0; display: flex; flex-direction: column; }
|
||||
.topbar {
|
||||
background: var(--surface); border-bottom: 1px solid var(--border); padding: 0 24px;
|
||||
display: flex; align-items: center; gap: 12px; min-height: 52px;
|
||||
}
|
||||
.topbar-spacer { flex: 1; }
|
||||
.nav-user { font-size: 13px; color: var(--text-muted); }
|
||||
.lang-bar { display: flex; gap: 2px; background: var(--surface2); border-radius: 6px; padding: 2px; }
|
||||
.lang-btn { padding: 3px 9px; border: none; background: none; color: var(--text-muted);
|
||||
font-size: 12px; cursor: pointer; border-radius: 4px; }
|
||||
.lang-btn.active { background: var(--border); color: var(--text); }
|
||||
.btn-sign-out { padding: 5px 12px; border-radius: 6px; border: 1px solid var(--border);
|
||||
background: none; color: var(--text); font-size: 12px; cursor: pointer; }
|
||||
.btn-sign-out:hover { background: var(--surface2); }
|
||||
|
||||
/* Main content column */
|
||||
.main { display: flex; flex-direction: column; align-items: center;
|
||||
padding: 24px 20px 8px; min-height: 0; }
|
||||
.app-footer {
|
||||
margin-top: auto;
|
||||
text-align: center;
|
||||
padding: 4px 20px 12px;
|
||||
font-size: 12px;
|
||||
color: #9da7b3;
|
||||
font-family: 'JetBrains Mono', monospace;
|
||||
}
|
||||
.card { background: var(--surface); border: 1px solid var(--border); border-radius: 12px;
|
||||
padding: 24px; width: 100%; max-width: 980px; }
|
||||
.card-title { font-size: 18px; font-weight: 600; margin-bottom: 24px; }
|
||||
/* Form */
|
||||
.field { margin-bottom: 12px; }
|
||||
.field label { display: block; font-size: 12px; color: var(--text-muted); margin-bottom: 5px; }
|
||||
.field input { width: 100%; background: var(--bg); border: 1px solid var(--border);
|
||||
color: var(--text); padding: 9px 12px; border-radius: 6px;
|
||||
font-size: 13px; outline: none; }
|
||||
.field input:focus { border-color: var(--accent); }
|
||||
.pw-field { position: relative; }
|
||||
.pw-field > input { padding-right: 42px; }
|
||||
.pw-toggle {
|
||||
position: absolute; right: 6px; top: 50%; transform: translateY(-50%);
|
||||
display: flex; align-items: center; justify-content: center;
|
||||
width: 32px; height: 32px; border: none; border-radius: 6px;
|
||||
background: transparent; color: var(--text-muted); cursor: pointer;
|
||||
}
|
||||
.pw-toggle:hover { color: var(--text); background: var(--surface2); }
|
||||
.pw-toggle:focus-visible { outline: 2px solid var(--accent); outline-offset: 2px; }
|
||||
.pw-icon svg { display: block; }
|
||||
.pw-icon.hidden { display: none; }
|
||||
.error-msg { color: var(--danger); font-size: 12px; margin-top: 6px; display: none; }
|
||||
|
||||
/* Buttons */
|
||||
.btn-primary { display: inline-flex; align-items: center; gap: 6px; width: 100%;
|
||||
justify-content: center; padding: 10px 20px; border-radius: 7px;
|
||||
border: none; background: var(--accent); color: #0d1117;
|
||||
font-size: 14px; font-weight: 600; cursor: pointer; transition: background 0.15s; }
|
||||
.btn-primary:hover { background: var(--accent-hover); }
|
||||
.btn-primary:disabled { opacity: 0.5; cursor: not-allowed; }
|
||||
.btn-sm { display: inline-flex; align-items: center; gap: 4px; padding: 5px 12px;
|
||||
border-radius: 5px; border: 1px solid var(--border); background: none;
|
||||
color: var(--text-muted); font-size: 12px; cursor: pointer; }
|
||||
.btn-sm:hover { color: var(--text); border-color: var(--text-muted); }
|
||||
.btn-copy { display: flex; align-items: center; gap: 8px; width: 100%; justify-content: center;
|
||||
padding: 11px 20px; border-radius: 7px; border: 1px solid var(--success);
|
||||
background: rgba(63,185,80,0.1); color: var(--success);
|
||||
font-size: 14px; font-weight: 600; cursor: pointer; transition: all 0.15s; }
|
||||
.btn-copy:hover { background: rgba(63,185,80,0.2); }
|
||||
.btn-copy.copied { background: var(--success); color: #0d1117; border-color: var(--success); }
|
||||
|
||||
/* Config format switcher */
|
||||
.config-tabs { display: grid; grid-template-columns: repeat(2, minmax(0, 1fr)); gap: 10px; margin-bottom: 12px; }
|
||||
.config-tab { padding: 12px 14px; border-radius: 10px; border: 1px solid var(--border);
|
||||
background: var(--surface2); color: var(--text-muted); cursor: pointer;
|
||||
font-family: inherit; text-align: left; transition: border-color 0.15s, background 0.15s, transform 0.15s; }
|
||||
.config-tab:hover { color: var(--text); border-color: var(--accent); transform: translateY(-1px); }
|
||||
.config-tab.active { background: rgba(88,166,255,0.1); color: var(--text); border-color: var(--accent); }
|
||||
.config-tab-title { display: block; font-size: 13px; font-weight: 600; color: inherit; }
|
||||
/* Config box */
|
||||
.config-wrap { position: relative; margin-bottom: 14px; }
|
||||
.config-box { background: var(--bg); border: 1px solid var(--border); border-radius: 8px;
|
||||
padding: 16px; font-family: 'JetBrains Mono', monospace; font-size: 11px;
|
||||
line-height: 1.7; color: var(--text); overflow-x: auto; white-space: pre; }
|
||||
.config-box.locked { color: var(--text-muted); filter: blur(3px); user-select: none;
|
||||
pointer-events: none; }
|
||||
.config-key { color: #79c0ff; }
|
||||
.config-str { color: #a5d6ff; }
|
||||
.config-val { color: var(--accent); }
|
||||
|
||||
/* Divider */
|
||||
.divider { border: none; border-top: 1px solid var(--border); margin: 20px 0; }
|
||||
|
||||
/* Actions row */
|
||||
.actions-row { display: flex; gap: 8px; flex-wrap: wrap; justify-content: center; }
|
||||
|
||||
/* Spinner */
|
||||
.spinner { display: inline-block; width: 14px; height: 14px; border: 2px solid rgba(13,17,23,0.3);
|
||||
border-top-color: #0d1117; border-radius: 50%; animation: spin 0.7s linear infinite; }
|
||||
@keyframes spin { to { transform: rotate(360deg); } }
|
||||
|
||||
/* Modal */
|
||||
.modal-bd { display: none; position: fixed; inset: 0; background: rgba(0,0,0,0.75);
|
||||
z-index: 100; align-items: center; justify-content: center; }
|
||||
.modal-bd.open { display: flex; }
|
||||
.modal { background: var(--surface); border: 1px solid var(--border); border-radius: 12px;
|
||||
padding: 28px; width: 100%; max-width: 420px; }
|
||||
.modal h3 { font-size: 16px; font-weight: 600; margin-bottom: 16px; }
|
||||
.modal-actions { display: flex; gap: 8px; margin-top: 16px; }
|
||||
.btn-modal-ok { flex: 1; padding: 8px; border-radius: 6px; border: none;
|
||||
background: var(--accent); color: #0d1117; font-size: 13px;
|
||||
font-weight: 600; cursor: pointer; }
|
||||
.btn-modal-ok:hover { background: var(--accent-hover); }
|
||||
.btn-modal-cancel { padding: 8px 16px; border-radius: 6px; border: 1px solid var(--border);
|
||||
background: none; color: var(--text); font-size: 13px; cursor: pointer; }
|
||||
.btn-modal-cancel:hover { background: var(--surface2); }
|
||||
|
||||
@media (max-width: 900px) {
|
||||
.layout { flex-direction: column; }
|
||||
.sidebar {
|
||||
width: 100%; border-right: none; border-bottom: 1px solid var(--border);
|
||||
padding: 16px; gap: 14px;
|
||||
}
|
||||
.sidebar-menu { flex-direction: row; }
|
||||
.sidebar-link { flex: 1; text-align: center; }
|
||||
}
|
||||
|
||||
@media (max-width: 720px) {
|
||||
.config-tabs { grid-template-columns: 1fr; }
|
||||
.topbar { padding: 12px 16px; flex-wrap: wrap; }
|
||||
.main { padding: 16px 12px 6px; }
|
||||
.app-footer { padding: 4px 12px 10px; }
|
||||
.card { padding: 18px; }
|
||||
}
|
||||
|
||||
</style>
|
||||
</head>
|
||||
<body data-has-passphrase="{{ has_passphrase }}" data-base-url="{{ base_url }}">
|
||||
|
||||
<div class="layout">
|
||||
<aside class="sidebar">
|
||||
<a href="/dashboard" class="sidebar-logo"><span>secrets</span></a>
|
||||
<nav class="sidebar-menu">
|
||||
<a href="/dashboard" class="sidebar-link active">MCP</a>
|
||||
<a href="/entries" class="sidebar-link">条目</a>
|
||||
<a href="/audit" class="sidebar-link">审计</a>
|
||||
</nav>
|
||||
</aside>
|
||||
|
||||
<div class="content-shell">
|
||||
<div class="topbar">
|
||||
<span class="topbar-spacer"></span>
|
||||
<span class="nav-user">{{ user_name }}{% if !user_email.is_empty() %} · {{ user_email }}{% endif %}</span>
|
||||
<div class="lang-bar">
|
||||
<button class="lang-btn" onclick="setLang('zh-CN')">简</button>
|
||||
<button class="lang-btn" onclick="setLang('zh-TW')">繁</button>
|
||||
<button class="lang-btn" onclick="setLang('en')">EN</button>
|
||||
</div>
|
||||
<form action="/auth/logout" method="post" style="display:inline">
|
||||
<button type="submit" class="btn-sign-out" data-i18n="signOut">退出</button>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
<div class="main">
|
||||
<div class="card">
|
||||
|
||||
<!-- ── Locked state ──────────────────────────────────────────────────── -->
|
||||
<div id="locked-view">
|
||||
<div class="card-title" data-i18n="lockedTitle">获取 MCP 配置</div>
|
||||
|
||||
<!-- placeholder config -->
|
||||
<div class="config-wrap">
|
||||
<div class="config-box locked" id="placeholder-config"></div>
|
||||
</div>
|
||||
|
||||
<!-- Setup form (no passphrase yet) -->
|
||||
<div id="setup-form" style="display:none">
|
||||
<div class="field">
|
||||
<label data-i18n="labelPassphrase">加密密码</label>
|
||||
<div class="pw-field">
|
||||
<input type="password" id="setup-pass1" data-i18n-ph="phPassphrase" autocomplete="new-password">
|
||||
<button type="button" class="pw-toggle" data-target="setup-pass1" aria-pressed="false"
|
||||
onclick="togglePwVisibility(this)" aria-label="">
|
||||
<span class="pw-icon pw-icon-show" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/></svg></span>
|
||||
<span class="pw-icon pw-icon-hide hidden" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17.94 17.94A10.07 10.07 0 0 1 12 20c-7 0-11-8-11-8a18.45 18.45 0 0 1 5.06-5.94M9.9 4.24A9.12 9.12 0 0 1 12 4c7 0 11 8 11 8a18.5 18.5 0 0 1-2.16 3.19m-6.72-1.07a3 3 0 1 1-4.24-4.24"/><line x1="1" y1="1" x2="23" y2="23"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="field">
|
||||
<label data-i18n="labelConfirm">确认密码</label>
|
||||
<div class="pw-field">
|
||||
<input type="password" id="setup-pass2" data-i18n-ph="phConfirm" autocomplete="new-password">
|
||||
<button type="button" class="pw-toggle" data-target="setup-pass2" aria-pressed="false"
|
||||
onclick="togglePwVisibility(this)" aria-label="">
|
||||
<span class="pw-icon pw-icon-show" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/></svg></span>
|
||||
<span class="pw-icon pw-icon-hide hidden" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17.94 17.94A10.07 10.07 0 0 1 12 20c-7 0-11-8-11-8a18.45 18.45 0 0 1 5.06-5.94M9.9 4.24A9.12 9.12 0 0 1 12 4c7 0 11 8 11 8a18.5 18.5 0 0 1-2.16 3.19m-6.72-1.07a3 3 0 1 1-4.24-4.24"/><line x1="1" y1="1" x2="23" y2="23"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="error-msg" id="setup-error"></div>
|
||||
<button class="btn-primary" id="setup-btn" onclick="doSetup()">
|
||||
<span data-i18n="btnSetup">设置并获取配置</span>
|
||||
</button>
|
||||
<p style="font-size:11px;color:var(--text-muted);text-align:center;margin-top:10px" data-i18n="setupNote">
|
||||
密码不会上传服务器。遗忘后数据将无法恢复。
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<!-- Unlock form (passphrase already set) -->
|
||||
<div id="unlock-form" style="display:none">
|
||||
<div class="field">
|
||||
<label data-i18n="labelPassphrase">加密密码</label>
|
||||
<div class="pw-field">
|
||||
<input type="password" id="unlock-pass" data-i18n-ph="phPassphrase" autocomplete="current-password">
|
||||
<button type="button" class="pw-toggle" data-target="unlock-pass" aria-pressed="false"
|
||||
onclick="togglePwVisibility(this)" aria-label="">
|
||||
<span class="pw-icon pw-icon-show" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/></svg></span>
|
||||
<span class="pw-icon pw-icon-hide hidden" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17.94 17.94A10.07 10.07 0 0 1 12 20c-7 0-11-8-11-8a18.45 18.45 0 0 1 5.06-5.94M9.9 4.24A9.12 9.12 0 0 1 12 4c7 0 11 8 11 8a18.5 18.5 0 0 1-2.16 3.19m-6.72-1.07a3 3 0 1 1-4.24-4.24"/><line x1="1" y1="1" x2="23" y2="23"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="error-msg" id="unlock-error"></div>
|
||||
<button class="btn-primary" id="unlock-btn" onclick="doUnlock()">
|
||||
<span data-i18n="btnUnlock">解锁并获取配置</span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- ── Unlocked state ────────────────────────────────────────────────── -->
|
||||
<div id="unlocked-view" style="display:none">
|
||||
<div class="card-title" data-i18n="unlockedTitle" style="margin-bottom:16px">MCP 配置</div>
|
||||
|
||||
<div class="config-tabs" role="tablist" aria-label="Config format">
|
||||
<button type="button" class="config-tab active" role="tab" id="tab-mcp" aria-selected="true"
|
||||
onclick="setConfigFormat('mcp')">
|
||||
<span class="config-tab-title" data-i18n="tabMcp">Cursor、Claude Code、Codex、Gemini CLI</span>
|
||||
</button>
|
||||
<button type="button" class="config-tab" role="tab" id="tab-opencode" aria-selected="false"
|
||||
onclick="setConfigFormat('opencode')">
|
||||
<span class="config-tab-title" data-i18n="tabOpencode">OpenCode</span>
|
||||
</button>
|
||||
</div>
|
||||
<div class="config-wrap">
|
||||
<pre class="config-box" id="real-config"></pre>
|
||||
</div>
|
||||
|
||||
<div style="display:flex;gap:10px;flex-wrap:wrap">
|
||||
<button class="btn-copy" id="copy-full-btn" onclick="copyFullConfig()" style="flex:1">
|
||||
<span id="copy-full-text">复制完整 mcp.json</span>
|
||||
</button>
|
||||
<button class="btn-copy" id="copy-secrets-btn" onclick="copySecretsConfig()" style="flex:1">
|
||||
<span id="copy-secrets-text">仅复制 secrets 节点</span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<hr class="divider">
|
||||
|
||||
<div class="actions-row">
|
||||
<button class="btn-sm" onclick="clearAndLock()" data-i18n="btnClear">清除密钥</button>
|
||||
<button class="btn-sm" onclick="openChangeModal()" data-i18n="btnChangePass">更换密码</button>
|
||||
<button class="btn-sm" onclick="confirmRegenerate()" data-i18n="btnRegen">重置 API Key</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<footer class="app-footer">{{ version }}</footer>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- ── Change passphrase modal ──────────────────────────────────────────────── -->
|
||||
<div class="modal-bd" id="change-modal">
|
||||
<div class="modal">
|
||||
<h3 data-i18n="changeTitle">更换密码</h3>
|
||||
<div class="field">
|
||||
<label data-i18n="labelCurrent">当前密码</label>
|
||||
<div class="pw-field">
|
||||
<input type="password" id="change-pass-old" data-i18n-ph="phCurrent" autocomplete="current-password">
|
||||
<button type="button" class="pw-toggle" data-target="change-pass-old" aria-pressed="false"
|
||||
onclick="togglePwVisibility(this)" aria-label="">
|
||||
<span class="pw-icon pw-icon-show" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/></svg></span>
|
||||
<span class="pw-icon pw-icon-hide hidden" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17.94 17.94A10.07 10.07 0 0 1 12 20c-7 0-11-8-11-8a18.45 18.45 0 0 1 5.06-5.94M9.9 4.24A9.12 9.12 0 0 1 12 4c7 0 11 8 11 8a18.5 18.5 0 0 1-2.16 3.19m-6.72-1.07a3 3 0 1 1-4.24-4.24"/><line x1="1" y1="1" x2="23" y2="23"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="field">
|
||||
<label data-i18n="labelNew">新密码</label>
|
||||
<div class="pw-field">
|
||||
<input type="password" id="change-pass1" data-i18n-ph="phPassphrase" autocomplete="new-password">
|
||||
<button type="button" class="pw-toggle" data-target="change-pass1" aria-pressed="false"
|
||||
onclick="togglePwVisibility(this)" aria-label="">
|
||||
<span class="pw-icon pw-icon-show" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/></svg></span>
|
||||
<span class="pw-icon pw-icon-hide hidden" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17.94 17.94A10.07 10.07 0 0 1 12 20c-7 0-11-8-11-8a18.45 18.45 0 0 1 5.06-5.94M9.9 4.24A9.12 9.12 0 0 1 12 4c7 0 11 8 11 8a18.5 18.5 0 0 1-2.16 3.19m-6.72-1.07a3 3 0 1 1-4.24-4.24"/><line x1="1" y1="1" x2="23" y2="23"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="field">
|
||||
<label data-i18n="labelConfirm">确认</label>
|
||||
<div class="pw-field">
|
||||
<input type="password" id="change-pass2" data-i18n-ph="phConfirm" autocomplete="new-password">
|
||||
<button type="button" class="pw-toggle" data-target="change-pass2" aria-pressed="false"
|
||||
onclick="togglePwVisibility(this)" aria-label="">
|
||||
<span class="pw-icon pw-icon-show" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/></svg></span>
|
||||
<span class="pw-icon pw-icon-hide hidden" aria-hidden="true"><svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M17.94 17.94A10.07 10.07 0 0 1 12 20c-7 0-11-8-11-8a18.45 18.45 0 0 1 5.06-5.94M9.9 4.24A9.12 9.12 0 0 1 12 4c7 0 11 8 11 8a18.5 18.5 0 0 1-2.16 3.19m-6.72-1.07a3 3 0 1 1-4.24-4.24"/><line x1="1" y1="1" x2="23" y2="23"/></svg></span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="error-msg" id="change-error"></div>
|
||||
<div class="modal-actions">
|
||||
<button class="btn-modal-ok" id="change-btn" onclick="doChange()" data-i18n="btnChange">确认更换</button>
|
||||
<button class="btn-modal-cancel" onclick="closeChangeModal()" data-i18n="btnCancel">取消</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// ── i18n ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
const T = {
|
||||
'zh-CN': {
|
||||
signOut: '退出',
|
||||
lockedTitle: '获取 MCP 配置',
|
||||
labelPassphrase: '加密密码',
|
||||
labelConfirm: '确认密码',
|
||||
labelNew: '新密码',
|
||||
labelCurrent: '当前密码',
|
||||
phPassphrase: '输入密码…',
|
||||
phConfirm: '再次输入…',
|
||||
phCurrent: '输入当前密码…',
|
||||
btnSetup: '设置并获取配置',
|
||||
btnUnlock: '解锁并获取配置',
|
||||
setupNote: '密码不会上传服务器。遗忘后数据将无法恢复。',
|
||||
errEmpty: '密码不能为空。',
|
||||
errShort: '密码至少需要 8 个字符。',
|
||||
errMismatch: '两次输入不一致。',
|
||||
errWrong: '密码错误,请重试。',
|
||||
errWrongOld: '当前密码错误,请重试。',
|
||||
unlockedTitle: 'MCP 配置',
|
||||
tabMcp: 'Cursor、Claude Code、Codex、Gemini CLI',
|
||||
tabOpencode: 'OpenCode',
|
||||
btnCopyFull: '复制完整 mcp.json',
|
||||
btnCopySecrets: '仅复制 secrets 节点',
|
||||
btnCopyFullOpencode: '复制完整 mcp.json',
|
||||
btnCopySecretsOpencode: '仅复制 secrets 节点',
|
||||
btnCopied: '已复制!',
|
||||
btnClear: '清除密钥',
|
||||
btnChangePass: '更换密码',
|
||||
btnRegen: '重置 API Key',
|
||||
changeTitle: '更换密码',
|
||||
btnChange: '确认更换',
|
||||
btnCancel: '取消',
|
||||
regenConfirm: '重置 API Key 后,当前 Key 立即失效,需同步更新 AI 客户端配置。确认继续?',
|
||||
regenFailed: '重置失败,请刷新页面重试。',
|
||||
ariaShowPw: '显示密码',
|
||||
ariaHidePw: '隐藏密码',
|
||||
},
|
||||
'zh-TW': {
|
||||
signOut: '登出',
|
||||
lockedTitle: '取得 MCP 設定',
|
||||
labelPassphrase: '加密密碼',
|
||||
labelConfirm: '確認密碼',
|
||||
labelNew: '新密碼',
|
||||
labelCurrent: '目前密碼',
|
||||
phPassphrase: '輸入密碼…',
|
||||
phConfirm: '再次輸入…',
|
||||
phCurrent: '輸入目前密碼…',
|
||||
btnSetup: '設定並取得設定',
|
||||
btnUnlock: '解鎖並取得設定',
|
||||
setupNote: '密碼不會上傳伺服器。遺忘後資料將無法復原。',
|
||||
errEmpty: '密碼不能為空。',
|
||||
errShort: '密碼至少需要 8 個字元。',
|
||||
errMismatch: '兩次輸入不一致。',
|
||||
errWrong: '密碼錯誤,請重試。',
|
||||
errWrongOld: '目前密碼錯誤,請重試。',
|
||||
unlockedTitle: 'MCP 設定',
|
||||
tabMcp: 'Cursor、Claude Code、Codex、Gemini CLI',
|
||||
tabOpencode: 'OpenCode',
|
||||
btnCopyFull: '複製完整 mcp.json',
|
||||
btnCopySecrets: '僅複製 secrets 節點',
|
||||
btnCopyFullOpencode: '複製完整 mcp.json',
|
||||
btnCopySecretsOpencode: '僅複製 secrets 節點',
|
||||
btnCopied: '已複製!',
|
||||
btnClear: '清除密鑰',
|
||||
btnChangePass: '更換密碼',
|
||||
btnRegen: '重置 API Key',
|
||||
changeTitle: '更換密碼',
|
||||
btnChange: '確認更換',
|
||||
btnCancel: '取消',
|
||||
regenConfirm: '重置 API Key 後,目前 Key 立即失效,需同步更新 AI 用戶端設定。確認繼續?',
|
||||
regenFailed: '重置失敗,請重新整理頁面再試。',
|
||||
ariaShowPw: '顯示密碼',
|
||||
ariaHidePw: '隱藏密碼',
|
||||
},
|
||||
'en': {
|
||||
signOut: 'Sign out',
|
||||
lockedTitle: 'Get MCP Config',
|
||||
labelPassphrase: 'Encryption password',
|
||||
labelConfirm: 'Confirm password',
|
||||
labelNew: 'New password',
|
||||
labelCurrent: 'Current password',
|
||||
phPassphrase: 'Enter password…',
|
||||
phConfirm: 'Repeat password…',
|
||||
phCurrent: 'Enter current password…',
|
||||
btnSetup: 'Set up & get config',
|
||||
btnUnlock: 'Unlock & get config',
|
||||
setupNote: 'Your password never leaves this device. If forgotten, encrypted data cannot be recovered.',
|
||||
errEmpty: 'Password cannot be empty.',
|
||||
errShort: 'Password must be at least 8 characters.',
|
||||
errMismatch: 'Passwords do not match.',
|
||||
errWrong: 'Incorrect password, please try again.',
|
||||
errWrongOld: 'Current password is incorrect, please try again.',
|
||||
unlockedTitle: 'MCP Config',
|
||||
tabMcp: 'Cursor, Claude Code, Codex, Gemini CLI',
|
||||
tabOpencode: 'OpenCode',
|
||||
btnCopyFull: 'Copy full mcp.json',
|
||||
btnCopySecrets: 'Copy only secrets node',
|
||||
btnCopyFullOpencode: 'Copy full mcp.json',
|
||||
btnCopySecretsOpencode: 'Copy only secrets node',
|
||||
btnCopied: 'Copied!',
|
||||
btnClear: 'Clear key',
|
||||
btnChangePass: 'Change password',
|
||||
btnRegen: 'Reset API key',
|
||||
changeTitle: 'Change password',
|
||||
btnChange: 'Confirm',
|
||||
btnCancel: 'Cancel',
|
||||
regenConfirm: 'Resetting will immediately invalidate your current API key. You will need to update your AI client config. Continue?',
|
||||
regenFailed: 'Reset failed. Please refresh and try again.',
|
||||
ariaShowPw: 'Show password',
|
||||
ariaHidePw: 'Hide password',
|
||||
}
|
||||
};
|
||||
|
||||
let currentLang = localStorage.getItem('lang') || 'zh-CN';
|
||||
|
||||
function t(key) { return (T[currentLang] && T[currentLang][key]) || T['en'][key] || key; } // tolerate an unknown saved language
|
||||
|
||||
function applyLang() {
|
||||
document.documentElement.lang = currentLang;
|
||||
document.querySelectorAll('[data-i18n]').forEach(el => {
|
||||
const key = el.getAttribute('data-i18n');
|
||||
el.textContent = t(key);
|
||||
});
|
||||
document.querySelectorAll('[data-i18n-ph]').forEach(el => {
|
||||
el.placeholder = t(el.getAttribute('data-i18n-ph'));
|
||||
});
|
||||
document.querySelectorAll('.lang-btn').forEach(btn => {
|
||||
const map = { 'zh-CN': '简', 'zh-TW': '繁', 'en': 'EN' };
|
||||
btn.classList.toggle('active', btn.textContent === map[currentLang]);
|
||||
});
|
||||
// Rebuild placeholder config (its content does not depend on language; re-render keeps it in sync)
|
||||
renderPlaceholderConfig();
|
||||
// Rebuild real config if unlocked
|
||||
if (currentEncKey && currentApiKey) renderRealConfig();
|
||||
syncPwToggleI18n();
|
||||
syncConfigFormatUi();
|
||||
}
|
||||
|
||||
function setLang(lang) {
|
||||
currentLang = lang;
|
||||
localStorage.setItem('lang', lang);
|
||||
applyLang();
|
||||
}
|
||||
|
||||
function syncPwToggleI18n() {
|
||||
document.querySelectorAll('.pw-toggle').forEach(btn => {
|
||||
const input = document.getElementById(btn.getAttribute('data-target'));
|
||||
if (!input) return;
|
||||
const visible = input.type === 'text';
|
||||
btn.setAttribute('aria-pressed', visible ? 'true' : 'false');
|
||||
btn.setAttribute('aria-label', visible ? t('ariaHidePw') : t('ariaShowPw'));
|
||||
const showIc = btn.querySelector('.pw-icon-show');
|
||||
const hideIc = btn.querySelector('.pw-icon-hide');
|
||||
if (showIc) showIc.classList.toggle('hidden', visible);
|
||||
if (hideIc) hideIc.classList.toggle('hidden', !visible);
|
||||
});
|
||||
}
|
||||
|
||||
function togglePwVisibility(btn) {
|
||||
const input = document.getElementById(btn.getAttribute('data-target'));
|
||||
if (!input) return;
|
||||
input.type = input.type === 'password' ? 'text' : 'password';
|
||||
syncPwToggleI18n();
|
||||
}
|
||||
|
||||
// ── Constants ──────────────────────────────────────────────────────────────────
|
||||
|
||||
const HAS_PASSPHRASE = document.body.dataset.hasPassphrase === 'true';
|
||||
const BASE_URL = document.body.dataset.baseUrl;
|
||||
const KEY_CHECK_PLAINTEXT = 'secrets-mcp-key-check';
|
||||
const PBKDF2_ITERATIONS = 600000;
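// 600,000 PBKDF2-SHA256 iterations is roughly in line with current OWASP guidance for PBKDF2;
// raising it strengthens offline resistance at the cost of slower unlocks in the browser.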
|
||||
const ENC = new TextEncoder();
|
||||
let currentEncKey = null;
|
||||
let currentApiKey = null;
|
||||
/** @type {'mcp' | 'opencode'} */
|
||||
let configFormat = 'mcp';
|
||||
|
||||
function redirectLoginExpired() {
|
||||
sessionStorage.removeItem('enc_key');
|
||||
currentEncKey = null;
|
||||
currentApiKey = null;
|
||||
window.location.replace('/');
|
||||
}
|
||||
|
||||
/** fetch wrapper: on 401 it clears local state and redirects to login; the returned promise never settles, so callers do not continue. */
|
||||
async function fetchAuth(input, init) {
|
||||
const resp = await fetch(input, init);
|
||||
if (resp.status === 401) {
|
||||
redirectLoginExpired();
|
||||
await new Promise(() => {});
|
||||
}
|
||||
return resp;
|
||||
}
|
||||
|
||||
// ── Placeholder config ─────────────────────────────────────────────────────────
|
||||
|
||||
function renderPlaceholderConfig() {
|
||||
document.getElementById('placeholder-config').textContent =
|
||||
buildConfigText('sk_' + '•'.repeat(64), '•'.repeat(64));
|
||||
}
|
||||
|
||||
function buildBaseServerConfig(apiKey, encKey) {
|
||||
return {
|
||||
url: BASE_URL + '/mcp',
|
||||
headers: {
|
||||
Authorization: 'Bearer ' + apiKey,
|
||||
'X-Encryption-Key': encKey
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
function buildSecretsEntryObject(apiKey, encKey) {
|
||||
return buildBaseServerConfig(apiKey, encKey);
|
||||
}
|
||||
|
||||
function buildConfigText(apiKey, encKey) {
|
||||
return JSON.stringify({ mcpServers: { secrets: buildSecretsEntryObject(apiKey, encKey) } }, null, 2);
|
||||
}
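// Example of what this produces (illustrative placeholder values; BASE_URL, the API key and
// the derived encryption key are filled in at runtime):
// {
//   "mcpServers": {
//     "secrets": {
//       "url": "https://secrets.example.com/mcp",
//       "headers": {
//         "Authorization": "Bearer sk_…",
//         "X-Encryption-Key": "<64-hex-char derived key>"
//       }
//     }
//   }
// }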
|
||||
|
||||
function buildSecretsConfigText(apiKey, encKey) {
|
||||
const wrapped = JSON.stringify({
|
||||
secrets: buildSecretsEntryObject(apiKey, encKey)
|
||||
}, null, 2);
|
||||
const lines = wrapped.split('\n');
|
||||
return lines.length < 3 ? wrapped : lines.slice(1, -1).join('\n');
|
||||
}
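// Used by copySecretsConfig(): returns only the inner `"secrets": { … }` lines (outer braces
// stripped) so the snippet can be pasted into an existing "mcpServers" object.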
|
||||
|
||||
/** OpenCode: local stdio bridge to Streamable HTTP MCP (mcp-remote --transport http-only). */
|
||||
function buildOpencodeEntry(apiKey, encKey) {
|
||||
return {
|
||||
type: 'local',
|
||||
command: [
|
||||
'npx', '-y', 'mcp-remote',
|
||||
BASE_URL + '/mcp',
|
||||
'--header',
|
||||
'Authorization: Bearer ' + apiKey,
|
||||
'--header',
|
||||
'X-Encryption-Key: ' + encKey,
|
||||
'--transport',
|
||||
'http-only'
|
||||
]
|
||||
};
|
||||
}
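// Example: produces { type: 'local', command: ['npx', '-y', 'mcp-remote', '<BASE_URL>/mcp',
// '--header', 'Authorization: Bearer …', '--header', 'X-Encryption-Key: …',
// '--transport', 'http-only'] }, i.e. a local stdio process that bridges OpenCode to the
// Streamable HTTP endpoint via mcp-remote.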
|
||||
|
||||
/** Full OpenCode config: MCP servers live under top-level `mcp`. */
|
||||
function buildOpencodeConfigText(apiKey, encKey) {
|
||||
return JSON.stringify({ mcp: { secrets: buildOpencodeEntry(apiKey, encKey) } }, null, 2);
|
||||
}
|
||||
|
||||
/** Strip outer `{` `}` so user can paste `secrets` under an existing `mcp` object. */
|
||||
function buildOpencodeMergeSnippet(apiKey, encKey) {
|
||||
const wrapped = JSON.stringify({ secrets: buildOpencodeEntry(apiKey, encKey) }, null, 2);
|
||||
const lines = wrapped.split('\n');
|
||||
return lines.length < 3 ? wrapped : lines.slice(1, -1).join('\n');
|
||||
}
|
||||
|
||||
function getCopyFullKey() {
|
||||
return 'btnCopyFull';
|
||||
}
|
||||
|
||||
function getCopySecretsKey() {
|
||||
return 'btnCopySecrets';
|
||||
}
|
||||
|
||||
const CONFIG_FORMAT_STORAGE = 'dash_config_format';
|
||||
|
||||
function setConfigFormat(fmt) {
|
||||
configFormat = fmt;
|
||||
try { sessionStorage.setItem(CONFIG_FORMAT_STORAGE, fmt); } catch (_) {}
|
||||
syncConfigFormatUi();
|
||||
if (currentEncKey && currentApiKey) renderRealConfig();
|
||||
}
|
||||
|
||||
/** Refresh tabs, format hint, and copy button labels (after language change or tab switch). */
|
||||
function syncConfigFormatUi() {
|
||||
const uv = document.getElementById('unlocked-view');
|
||||
if (!uv || uv.style.display === 'none') return;
|
||||
const tabMcp = document.getElementById('tab-mcp');
|
||||
const tabOc = document.getElementById('tab-opencode');
|
||||
if (tabMcp && tabOc) {
|
||||
tabMcp.classList.toggle('active', configFormat === 'mcp');
|
||||
tabOc.classList.toggle('active', configFormat === 'opencode');
|
||||
tabMcp.setAttribute('aria-selected', configFormat === 'mcp' ? 'true' : 'false');
|
||||
tabOc.setAttribute('aria-selected', configFormat === 'opencode' ? 'true' : 'false');
|
||||
}
|
||||
const cf = document.getElementById('copy-full-text');
|
||||
const cs = document.getElementById('copy-secrets-text');
|
||||
if (cf) cf.textContent = t(getCopyFullKey());
|
||||
if (cs) cs.textContent = t(getCopySecretsKey());
|
||||
}
|
||||
|
||||
// ── Unlock / Setup flow ───────────────────────────────────────────────────────
|
||||
|
||||
function showLockedView() {
|
||||
document.getElementById('locked-view').style.display = '';
|
||||
document.getElementById('unlocked-view').style.display = 'none';
|
||||
if (HAS_PASSPHRASE) {
|
||||
document.getElementById('setup-form').style.display = 'none';
|
||||
document.getElementById('unlock-form').style.display = '';
|
||||
setTimeout(() => document.getElementById('unlock-pass').focus(), 50);
|
||||
} else {
|
||||
document.getElementById('setup-form').style.display = '';
|
||||
document.getElementById('unlock-form').style.display = 'none';
|
||||
setTimeout(() => document.getElementById('setup-pass1').focus(), 50);
|
||||
}
|
||||
}
|
||||
|
||||
async function showUnlockedView(encKeyHex, apiKey) {
|
||||
currentEncKey = encKeyHex;
|
||||
currentApiKey = apiKey;
|
||||
sessionStorage.setItem('enc_key', encKeyHex);
|
||||
renderRealConfig();
|
||||
document.getElementById('locked-view').style.display = 'none';
|
||||
document.getElementById('unlocked-view').style.display = '';
|
||||
syncConfigFormatUi();
|
||||
}
|
||||
|
||||
function renderRealConfig() {
|
||||
if (!currentApiKey || !currentEncKey) return;
|
||||
const text = configFormat === 'mcp'
|
||||
? buildConfigText(currentApiKey, currentEncKey)
|
||||
: buildOpencodeConfigText(currentApiKey, currentEncKey);
|
||||
document.getElementById('real-config').textContent = text;
|
||||
}
|
||||
|
||||
function clearAndLock() {
|
||||
sessionStorage.removeItem('enc_key');
|
||||
currentEncKey = null;
|
||||
currentApiKey = null;
|
||||
showLockedView();
|
||||
}
|
||||
|
||||
// ── Web Crypto helpers ─────────────────────────────────────────────────────────
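// How the passphrase flow below fits together (all of it runs client-side in Web Crypto):
//  - deriveKey(): PBKDF2-SHA256 over passphrase + server-provided salt yields an AES-256-GCM key.
//  - encryptKeyCheck(): encrypts a fixed marker string under that key and returns nonce+ciphertext
//    as hex, which is uploaded as a verifier instead of the passphrase itself.
//  - verifyKeyCheck(): decrypts the stored verifier to confirm the passphrase on later unlocks.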
|
||||
|
||||
async function deriveKey(passphrase, saltBytes, extractable = false) {
|
||||
const km = await crypto.subtle.importKey('raw', ENC.encode(passphrase), 'PBKDF2', false, ['deriveKey']);
|
||||
return crypto.subtle.deriveKey(
|
||||
{ name: 'PBKDF2', salt: saltBytes, iterations: PBKDF2_ITERATIONS, hash: 'SHA-256' },
|
||||
km, { name: 'AES-GCM', length: 256 }, extractable, ['encrypt', 'decrypt']
|
||||
);
|
||||
}
|
||||
|
||||
async function exportKeyHex(cryptoKey) {
|
||||
const raw = await crypto.subtle.exportKey('raw', cryptoKey);
|
||||
return Array.from(new Uint8Array(raw)).map(b => b.toString(16).padStart(2, '0')).join('');
|
||||
}
|
||||
|
||||
function hexToBytes(hex) {
|
||||
const b = new Uint8Array(hex.length / 2);
|
||||
for (let i = 0; i < hex.length; i += 2) b[i / 2] = parseInt(hex.slice(i, i + 2), 16);
|
||||
return b;
|
||||
}
|
||||
|
||||
function bytesToHex(bytes) {
|
||||
return Array.from(bytes).map(b => b.toString(16).padStart(2, '0')).join('');
|
||||
}
|
||||
|
||||
async function encryptKeyCheck(cryptoKey) {
|
||||
const nonce = crypto.getRandomValues(new Uint8Array(12));
|
||||
const ct = await crypto.subtle.encrypt({ name: 'AES-GCM', iv: nonce }, cryptoKey, ENC.encode(KEY_CHECK_PLAINTEXT));
|
||||
const out = new Uint8Array(12 + ct.byteLength);
|
||||
out.set(nonce); out.set(new Uint8Array(ct), 12);
|
||||
return bytesToHex(out);
|
||||
}
|
||||
|
||||
async function verifyKeyCheck(cryptoKey, keyCheckHex) {
|
||||
try {
|
||||
const b = hexToBytes(keyCheckHex);
|
||||
const plain = await crypto.subtle.decrypt({ name: 'AES-GCM', iv: b.slice(0, 12) }, cryptoKey, b.slice(12));
|
||||
return new TextDecoder().decode(plain) === KEY_CHECK_PLAINTEXT;
|
||||
} catch { return false; }
|
||||
}
|
||||
|
||||
// ── Passphrase setup (first time) ─────────────────────────────────────────────
|
||||
|
||||
function setBtnLoading(id, loading, labelKey) {
|
||||
const btn = document.getElementById(id);
|
||||
btn.disabled = loading;
|
||||
btn.innerHTML = loading
|
||||
? '<span class="spinner"></span>'
|
||||
: `<span data-i18n="${labelKey}">${t(labelKey)}</span>`;
|
||||
}
|
||||
|
||||
async function doSetup() {
|
||||
const pass1 = document.getElementById('setup-pass1').value;
|
||||
const pass2 = document.getElementById('setup-pass2').value;
|
||||
const errEl = document.getElementById('setup-error');
|
||||
errEl.style.display = 'none';
|
||||
|
||||
if (!pass1) { showErr(errEl, t('errEmpty')); return; }
|
||||
if (pass1.length < 8) { showErr(errEl, t('errShort')); return; }
|
||||
if (pass1 !== pass2) { showErr(errEl, t('errMismatch')); return; }
|
||||
|
||||
setBtnLoading('setup-btn', true, 'btnSetup');
|
||||
try {
|
||||
const salt = crypto.getRandomValues(new Uint8Array(32));
|
||||
const cryptoKey = await deriveKey(pass1, salt, true);
|
||||
const keyCheckHex = await encryptKeyCheck(cryptoKey);
|
||||
const hexKey = await exportKeyHex(cryptoKey);
|
||||
|
||||
const resp = await fetchAuth('/api/key-setup', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
salt: bytesToHex(salt),
|
||||
key_check: keyCheckHex,
|
||||
params: { alg: 'pbkdf2-sha256', iterations: PBKDF2_ITERATIONS }
|
||||
})
|
||||
});
|
||||
if (!resp.ok) throw new Error('HTTP ' + resp.status);
|
||||
|
||||
const apiKey = await fetchApiKey();
|
||||
await showUnlockedView(hexKey, apiKey);
|
||||
} catch (e) {
|
||||
showErr(errEl, 'Error: ' + e.message);
|
||||
} finally {
|
||||
setBtnLoading('setup-btn', false, 'btnSetup');
|
||||
}
|
||||
}
|
||||
|
||||
// ── Passphrase unlock ──────────────────────────────────────────────────────────
|
||||
|
||||
async function doUnlock() {
|
||||
const pass = document.getElementById('unlock-pass').value;
|
||||
const errEl = document.getElementById('unlock-error');
|
||||
errEl.style.display = 'none';
|
||||
|
||||
if (!pass) { showErr(errEl, t('errEmpty')); return; }
|
||||
|
||||
setBtnLoading('unlock-btn', true, 'btnUnlock');
|
||||
try {
|
||||
const saltResp = await fetchAuth('/api/key-salt');
|
||||
if (!saltResp.ok) throw new Error('HTTP ' + saltResp.status);
|
||||
const saltData = await saltResp.json();
|
||||
|
||||
const cryptoKey = await deriveKey(pass, hexToBytes(saltData.salt), true);
|
||||
const valid = await verifyKeyCheck(cryptoKey, saltData.key_check);
|
||||
if (!valid) { showErr(errEl, t('errWrong')); return; }
|
||||
|
||||
const hexKey = await exportKeyHex(cryptoKey);
|
||||
const apiKey = await fetchApiKey();
|
||||
await showUnlockedView(hexKey, apiKey);
|
||||
} catch (e) {
|
||||
showErr(errEl, 'Error: ' + e.message);
|
||||
} finally {
|
||||
setBtnLoading('unlock-btn', false, 'btnUnlock');
|
||||
}
|
||||
}
|
||||
|
||||
// ── Copy config ────────────────────────────────────────────────────────────────
|
||||
|
||||
async function copyFullConfig() {
|
||||
await copyWithFeedback(
|
||||
document.getElementById('real-config').textContent,
|
||||
'copy-full-btn',
|
||||
'copy-full-text',
|
||||
getCopyFullKey()
|
||||
);
|
||||
}
|
||||
|
||||
async function copySecretsConfig() {
|
||||
const snippet = configFormat === 'mcp'
|
||||
? buildSecretsConfigText(currentApiKey, currentEncKey)
|
||||
: buildOpencodeMergeSnippet(currentApiKey, currentEncKey);
|
||||
await copyWithFeedback(
|
||||
snippet,
|
||||
'copy-secrets-btn',
|
||||
'copy-secrets-text',
|
||||
getCopySecretsKey()
|
||||
);
|
||||
}
|
||||
|
||||
async function copyWithFeedback(text, btnId, textId, resetLabelKey) {
|
||||
await navigator.clipboard.writeText(text);
|
||||
const btn = document.getElementById(btnId);
|
||||
const textEl = document.getElementById(textId);
|
||||
btn.classList.add('copied');
|
||||
textEl.textContent = t('btnCopied');
|
||||
setTimeout(() => {
|
||||
btn.classList.remove('copied');
|
||||
textEl.textContent = t(resetLabelKey);
|
||||
}, 2500);
|
||||
}
|
||||
|
||||
// ── Reset API key ──────────────────────────────────────────────────────────────
|
||||
|
||||
async function confirmRegenerate() {
|
||||
if (!confirm(t('regenConfirm'))) return;
|
||||
try {
|
||||
const resp = await fetchAuth('/api/apikey/regenerate', { method: 'POST' });
|
||||
if (!resp.ok) throw new Error();
|
||||
const data = await resp.json();
|
||||
currentApiKey = data.api_key;
|
||||
renderRealConfig();
|
||||
} catch {
|
||||
alert(t('regenFailed'));
|
||||
}
|
||||
}
|
||||
|
||||
// ── Change passphrase modal ────────────────────────────────────────────────────
|
||||
|
||||
function openChangeModal() {
|
||||
document.getElementById('change-pass-old').value = '';
|
||||
document.getElementById('change-pass1').value = '';
|
||||
document.getElementById('change-pass2').value = '';
|
||||
document.getElementById('change-pass-old').type = 'password';
|
||||
document.getElementById('change-pass1').type = 'password';
|
||||
document.getElementById('change-pass2').type = 'password';
|
||||
document.getElementById('change-error').style.display = 'none';
|
||||
document.getElementById('change-modal').classList.add('open');
|
||||
syncPwToggleI18n();
|
||||
setTimeout(() => document.getElementById('change-pass-old').focus(), 50);
|
||||
}
|
||||
|
||||
function closeChangeModal() {
|
||||
document.getElementById('change-modal').classList.remove('open');
|
||||
}
|
||||
|
||||
async function doChange() {
|
||||
const passOld = document.getElementById('change-pass-old').value;
|
||||
const pass1 = document.getElementById('change-pass1').value;
|
||||
const pass2 = document.getElementById('change-pass2').value;
|
||||
const errEl = document.getElementById('change-error');
|
||||
errEl.style.display = 'none';
|
||||
|
||||
if (!passOld) { showErr(errEl, t('errEmpty')); return; }
|
||||
if (!pass1) { showErr(errEl, t('errEmpty')); return; }
|
||||
if (pass1.length < 8) { showErr(errEl, t('errShort')); return; }
|
||||
if (pass1 !== pass2) { showErr(errEl, t('errMismatch')); return; }
|
||||
|
||||
const btn = document.getElementById('change-btn');
|
||||
btn.disabled = true;
|
||||
btn.innerHTML = '<span class="spinner" style="border-top-color:#0d1117"></span>';
|
||||
try {
|
||||
// Fetch current salt to derive old key for verification
|
||||
const saltResp = await fetchAuth('/api/key-salt');
|
||||
if (!saltResp.ok) throw new Error('HTTP ' + saltResp.status);
|
||||
const saltData = await saltResp.json();
|
||||
if (!saltData.has_passphrase) throw new Error('No passphrase configured');
|
||||
|
||||
// Derive old key and verify it
|
||||
const oldCryptoKey = await deriveKey(passOld, hexToBytes(saltData.salt), true);
|
||||
const validOld = await verifyKeyCheck(oldCryptoKey, saltData.key_check);
|
||||
if (!validOld) { showErr(errEl, t('errWrongOld')); return; }
|
||||
const oldHexKey = await exportKeyHex(oldCryptoKey);
|
||||
|
||||
// Derive new key
|
||||
const newSalt = crypto.getRandomValues(new Uint8Array(32));
|
||||
const newCryptoKey = await deriveKey(pass1, newSalt, true);
|
||||
const newKeyCheckHex = await encryptKeyCheck(newCryptoKey);
|
||||
const newHexKey = await exportKeyHex(newCryptoKey);
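// Both the old and the new derived keys are sent so the backend can re-encrypt existing
// ciphertexts under the new key (inferred from the /api/key-change request shape).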
|
||||
|
||||
const resp = await fetchAuth('/api/key-change', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
old_key: oldHexKey,
|
||||
new_key: newHexKey,
|
||||
salt: bytesToHex(newSalt),
|
||||
key_check: newKeyCheckHex,
|
||||
params: { alg: 'pbkdf2-sha256', iterations: PBKDF2_ITERATIONS }
|
||||
})
|
||||
});
|
||||
if (!resp.ok) throw new Error('HTTP ' + resp.status);
|
||||
|
||||
currentEncKey = newHexKey;
|
||||
sessionStorage.setItem('enc_key', newHexKey);
|
||||
renderRealConfig();
|
||||
closeChangeModal();
|
||||
} catch (e) {
|
||||
showErr(errEl, 'Error: ' + e.message);
|
||||
} finally {
|
||||
btn.disabled = false;
|
||||
btn.textContent = t('btnChange');
|
||||
}
|
||||
}
|
||||
|
||||
// ── Fetch API key ──────────────────────────────────────────────────────────────
|
||||
|
||||
async function fetchApiKey() {
|
||||
const resp = await fetchAuth('/api/apikey');
|
||||
if (!resp.ok) throw new Error('Failed to load API key');
|
||||
const data = await resp.json();
|
||||
return data.api_key;
|
||||
}
|
||||
|
||||
// ── Helpers ────────────────────────────────────────────────────────────────────
|
||||
|
||||
function showErr(el, msg) {
|
||||
el.textContent = msg;
|
||||
el.style.display = 'block';
|
||||
}
|
||||
|
||||
// ── Keyboard shortcuts ─────────────────────────────────────────────────────────
|
||||
|
||||
document.addEventListener('keydown', e => {
|
||||
if (e.key === 'Escape') closeChangeModal();
|
||||
if (e.key === 'Enter') {
|
||||
if (document.getElementById('change-modal').classList.contains('open')) { doChange(); return; }
|
||||
if (document.getElementById('unlock-form').style.display !== 'none' &&
|
||||
document.getElementById('locked-view').style.display !== 'none') { doUnlock(); return; }
|
||||
if (document.getElementById('setup-form').style.display !== 'none' &&
|
||||
document.getElementById('locked-view').style.display !== 'none') { doSetup(); return; }
|
||||
}
|
||||
});
|
||||
|
||||
// ── Init ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
(async function init() {
|
||||
applyLang();
|
||||
try {
|
||||
const sf = sessionStorage.getItem(CONFIG_FORMAT_STORAGE);
|
||||
if (sf === 'mcp' || sf === 'opencode') configFormat = sf;
|
||||
} catch (_) { /* ignore */ }
|
||||
const savedKey = sessionStorage.getItem('enc_key');
|
||||
if (savedKey) {
|
||||
try {
|
||||
const apiKey = await fetchApiKey();
|
||||
await showUnlockedView(savedKey, apiKey);
|
||||
return;
|
||||
} catch { /* fall through to locked */ }
|
||||
}
|
||||
showLockedView();
|
||||
})();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
1312  crates/secrets-mcp/templates/entries.html  (new file; diff suppressed because it is too large)
269   crates/secrets-mcp/templates/home.html  (new file)
@@ -0,0 +1,269 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="zh-CN">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta name="description" content="Secrets MCP:基于 Model Context Protocol 的密钥与配置管理。密码短语在浏览器本地 PBKDF2 派生,密文 AES-GCM 存储,完整审计与历史版本。">
|
||||
<meta name="keywords" content="secrets management,MCP,Model Context Protocol,end-to-end encryption,AES-GCM,PBKDF2,API key,密钥管理">
|
||||
<meta name="robots" content="index, follow">
|
||||
<link rel="canonical" href="{{ base_url }}/">
|
||||
<link rel="icon" href="/favicon.svg?v={{ version }}" type="image/svg+xml">
|
||||
<title>Secrets MCP — 端到端加密的密钥管理</title>
|
||||
<meta property="og:type" content="website">
|
||||
<meta property="og:url" content="{{ base_url }}/">
|
||||
<meta property="og:title" content="Secrets MCP — 端到端加密的密钥管理">
|
||||
<meta property="og:description" content="密码短语客户端派生,密文存储;MCP API 与 Web 控制台,多租户与审计。">
|
||||
<meta name="twitter:card" content="summary">
|
||||
<meta name="twitter:title" content="Secrets MCP — 端到端加密的密钥管理">
|
||||
<meta name="twitter:description" content="密码短语客户端派生,密文存储;MCP API 与 Web 控制台,多租户与审计。">
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@500;600&family=Inter:wght@400;500;600&display=swap');
/* @import must appear before all other CSS rules to take effect. */
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
:root {
|
||||
--bg: #0d1117;
|
||||
--surface: #161b22;
|
||||
--surface2: #21262d;
|
||||
--border: #30363d;
|
||||
--text: #e6edf3;
|
||||
--text-muted: #8b949e;
|
||||
--accent: #58a6ff;
|
||||
--accent-hover: #79b8ff;
|
||||
}
|
||||
html, body { height: 100%; overflow: hidden; }
|
||||
@supports (height: 100dvh) {
|
||||
html, body { height: 100dvh; }
|
||||
}
|
||||
body {
|
||||
background: var(--bg);
|
||||
color: var(--text);
|
||||
font-family: 'Inter', sans-serif;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
.nav {
|
||||
flex-shrink: 0;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: 14px 24px;
|
||||
border-bottom: 1px solid var(--border);
|
||||
background: var(--surface);
|
||||
}
|
||||
.brand {
|
||||
font-family: 'JetBrains Mono', monospace;
|
||||
font-size: 15px;
|
||||
font-weight: 600;
|
||||
color: var(--text);
|
||||
text-decoration: none;
|
||||
}
|
||||
.brand span { color: var(--accent); }
|
||||
.nav-right { display: flex; align-items: center; gap: 14px; }
|
||||
.lang-bar { display: flex; gap: 2px; background: rgba(255,255,255,0.04); border-radius: 6px; padding: 2px; }
|
||||
.lang-btn {
|
||||
padding: 4px 10px; border: none; background: none; color: var(--text-muted);
|
||||
font-size: 12px; cursor: pointer; border-radius: 4px;
|
||||
}
|
||||
.lang-btn.active { background: var(--border); color: var(--text); }
|
||||
.cta {
|
||||
display: inline-flex; align-items: center; justify-content: center;
|
||||
padding: 8px 18px; border-radius: 8px; font-size: 13px; font-weight: 600;
|
||||
text-decoration: none; border: 1px solid var(--accent);
|
||||
background: rgba(88, 166, 255, 0.12); color: var(--accent);
|
||||
transition: background 0.15s, color 0.15s;
|
||||
}
|
||||
.cta:hover { background: var(--accent); color: var(--bg); }
|
||||
.main {
|
||||
flex: 1;
|
||||
min-height: 0;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
padding: 16px 24px 12px;
|
||||
gap: 20px;
|
||||
}
|
||||
.hero { text-align: center; max-width: 720px; }
|
||||
.hero h1 { font-size: clamp(20px, 4vw, 28px); font-weight: 600; margin-bottom: 8px; line-height: 1.25; }
|
||||
.hero .tagline { color: var(--text-muted); font-size: clamp(13px, 2vw, 15px); line-height: 1.5; }
|
||||
.grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(3, 1fr);
|
||||
gap: 12px;
|
||||
width: 100%;
|
||||
max-width: 900px;
|
||||
}
|
||||
@media (max-width: 900px) {
|
||||
.grid { grid-template-columns: repeat(2, 1fr); }
|
||||
}
|
||||
@media (max-width: 480px) {
|
||||
.grid { grid-template-columns: 1fr; gap: 8px; }
|
||||
.main { justify-content: flex-start; padding-top: 12px; }
|
||||
}
|
||||
.card {
|
||||
background: var(--surface);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 10px;
|
||||
padding: 14px 14px 12px;
|
||||
min-height: 0;
|
||||
}
|
||||
.card-icon {
|
||||
width: 32px; height: 32px; border-radius: 8px;
|
||||
background: var(--surface2);
|
||||
display: flex; align-items: center; justify-content: center;
|
||||
margin-bottom: 10px; color: var(--accent);
|
||||
}
|
||||
.card-icon svg { width: 18px; height: 18px; }
|
||||
.card h2 { font-size: 13px; font-weight: 600; margin-bottom: 6px; line-height: 1.3; }
|
||||
.card p { font-size: 12px; color: var(--text-muted); line-height: 1.45; }
|
||||
.foot {
|
||||
flex-shrink: 0;
|
||||
text-align: center;
|
||||
padding: 8px 16px 12px;
|
||||
font-size: 11px;
|
||||
color: var(--text-muted);
|
||||
border-top: 1px solid var(--border);
|
||||
background: var(--surface);
|
||||
}
|
||||
.foot a { color: var(--accent); text-decoration: none; }
|
||||
.foot a:hover { text-decoration: underline; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<header class="nav">
|
||||
<a class="brand" href="/">secrets<span>-mcp</span></a>
|
||||
<div class="nav-right">
|
||||
<div class="lang-bar">
|
||||
<button type="button" class="lang-btn" onclick="setLang('zh-CN')">简</button>
|
||||
<button type="button" class="lang-btn" onclick="setLang('zh-TW')">繁</button>
|
||||
<button type="button" class="lang-btn" onclick="setLang('en')">EN</button>
|
||||
</div>
|
||||
{% if is_logged_in %}
|
||||
<a class="cta" href="/dashboard" data-i18n="ctaDashboard">进入控制台</a>
|
||||
{% else %}
|
||||
<a class="cta" href="/login" data-i18n="ctaLogin">登录</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
</header>
|
||||
<main class="main">
|
||||
<div class="hero">
|
||||
<h1 data-i18n="heroTitle">端到端加密的密钥与配置管理</h1>
|
||||
<p class="tagline" data-i18n="heroTagline">Streamable HTTP MCP 与 Web 控制台:元数据与密文分库存储,密钥永不离开你的客户端逻辑。</p>
|
||||
</div>
|
||||
<div class="grid">
|
||||
<article class="card">
|
||||
<div class="card-icon" aria-hidden="true">
|
||||
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M12 11c1.66 0 3-1.34 3-3V5c0-1.66-1.34-3-3-3S9 3.34 9 5v3c0 1.66 1.34 3 3 3z"/><path d="M19 10v1a7 7 0 01-14 0v-1"/><path d="M12 14v7M9 18h6"/></svg>
|
||||
</div>
|
||||
<h2 data-i18n="c1t">客户端密钥派生</h2>
|
||||
<p data-i18n="c1d">PBKDF2-SHA256(约 60 万次)在浏览器本地从密码短语派生密钥;服务端仅保存盐与校验值,不持有密码或明文主密钥。</p>
|
||||
</article>
|
||||
<article class="card">
|
||||
<div class="card-icon" aria-hidden="true">
|
||||
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><rect x="3" y="11" width="18" height="11" rx="2"/><path d="M7 11V7a5 5 0 0110 0v4"/></svg>
|
||||
</div>
|
||||
<h2 data-i18n="c2t">AES-256-GCM 加密</h2>
|
||||
<p data-i18n="c2d">敏感字段以 AES-GCM 密文落库;Web 端在本地加解密,明文默认不经过服务端持久化。</p>
|
||||
</article>
|
||||
<article class="card">
|
||||
<div class="card-icon" aria-hidden="true">
|
||||
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M14 2H6a2 2 0 00-2 2v16a2 2 0 002 2h12a2 2 0 002-2V8z"/><path d="M14 2v6h6M16 13H8M16 17H8M10 9H8"/></svg>
|
||||
</div>
|
||||
<h2 data-i18n="c3t">审计与历史</h2>
|
||||
<p data-i18n="c3d">操作写入审计日志;条目与密文保留历史版本,支持按版本查看与恢复。</p>
|
||||
</article>
|
||||
</div>
|
||||
</main>
|
||||
<footer class="foot">
|
||||
<span data-i18n="versionLabel">版本</span> {{ version }} ·
|
||||
<a href="/llms.txt">llms.txt</a>
|
||||
<span data-i18n="sep"> · </span>
|
||||
<a href="https://gitea.refining.dev/refining/secrets" target="_blank" rel="noopener noreferrer" data-i18n="footRepo">源码仓库</a>
|
||||
{% if !is_logged_in %}
|
||||
<span data-i18n="sep"> · </span>
|
||||
<a href="/login" data-i18n="footLogin">登录</a>
|
||||
{% endif %}
|
||||
</footer>
|
||||
<script>
|
||||
const T = {
|
||||
'zh-CN': {
|
||||
docTitle: 'Secrets MCP — 端到端加密的密钥管理',
|
||||
ctaDashboard: '进入控制台',
|
||||
ctaLogin: '登录',
|
||||
heroTitle: '端到端加密的密钥与配置管理',
|
||||
heroTagline: 'Streamable HTTP MCP 与 Web 控制台:元数据与密文分库存储,密钥永不离开你的客户端逻辑。',
|
||||
c1t: '客户端密钥派生',
|
||||
c1d: 'PBKDF2-SHA256(约 60 万次)在浏览器本地从密码短语派生密钥;服务端仅保存盐与校验值,不持有密码或明文主密钥。',
|
||||
c2t: 'AES-256-GCM 加密',
|
||||
c2d: '敏感字段以 AES-GCM 密文落库;Web 端在本地加解密,明文默认不经过服务端持久化。',
|
||||
c3t: '审计与历史',
|
||||
c3d: '操作写入审计日志;条目与密文保留历史版本,支持按版本查看与恢复。',
|
||||
versionLabel: '版本',
|
||||
sep: ' · ',
|
||||
footRepo: '源码仓库',
|
||||
footLogin: '登录',
|
||||
},
|
||||
'zh-TW': {
|
||||
docTitle: 'Secrets MCP — 端到端加密的金鑰管理',
|
||||
ctaDashboard: '進入控制台',
|
||||
ctaLogin: '登入',
|
||||
heroTitle: '端到端加密的金鑰與設定管理',
|
||||
heroTagline: 'Streamable HTTP MCP 與 Web 控制台:中繼資料與密文分庫儲存,金鑰不離開你的用戶端邏輯。',
|
||||
c1t: '用戶端金鑰派生',
|
||||
c1d: 'PBKDF2-SHA256(約 60 萬次)在瀏覽器本地從密碼片語派生金鑰;伺服端僅保存鹽與校驗值,不持有密碼或明文主金鑰。',
|
||||
c2t: 'AES-256-GCM 加密',
|
||||
c2d: '敏感欄位以 AES-GCM 密文落庫;Web 端在本地加解密,明文預設不經伺服端持久化。',
|
||||
c3t: '稽核與歷史',
|
||||
c3d: '操作寫入稽核日誌;條目與密文保留歷史版本,支援依版本檢視與還原。',
|
||||
versionLabel: '版本',
|
||||
sep: ' · ',
|
||||
footRepo: '原始碼倉庫',
|
||||
footLogin: '登入',
|
||||
},
|
||||
'en': {
|
||||
docTitle: 'Secrets MCP — End-to-end encrypted secrets',
|
||||
ctaDashboard: 'Open dashboard',
|
||||
ctaLogin: 'Sign in',
|
||||
heroTitle: 'End-to-end encrypted secrets and configuration',
|
||||
heroTagline: 'Streamable HTTP MCP plus web console: metadata and ciphertext stored separately; keys stay on your client.',
|
||||
c1t: 'Client-side key derivation',
|
||||
c1d: 'PBKDF2-SHA256 (~600k iterations) derives keys from your passphrase in the browser; the server stores only salt and a verification blob, never your password or raw master key.',
|
||||
c2t: 'AES-256-GCM',
|
||||
c2d: 'Secret fields are stored as AES-GCM ciphertext; the web UI encrypts and decrypts locally so plaintext is not persisted server-side by default.',
|
||||
c3t: 'Audit and history',
|
||||
c3d: 'Operations are audited; entries and secrets keep version history for review and rollback.',
|
||||
versionLabel: 'Version',
|
||||
sep: ' · ',
|
||||
footRepo: 'Source repository',
|
||||
footLogin: 'Sign in',
|
||||
}
|
||||
};
|
||||
|
||||
let currentLang = localStorage.getItem('lang') || 'zh-CN';
|
||||
|
||||
function t(key) {
|
||||
return (T[currentLang] && T[currentLang][key]) || T['en'][key] || key;
|
||||
}
|
||||
|
||||
function applyLang() {
|
||||
document.documentElement.lang = currentLang;
|
||||
document.title = t('docTitle');
|
||||
document.querySelectorAll('[data-i18n]').forEach(el => {
|
||||
const key = el.getAttribute('data-i18n');
|
||||
el.textContent = t(key);
|
||||
});
|
||||
document.querySelectorAll('.lang-btn').forEach(btn => {
|
||||
const map = { 'zh-CN': '简', 'zh-TW': '繁', 'en': 'EN' };
|
||||
btn.classList.toggle('active', btn.textContent === map[currentLang]);
|
||||
});
|
||||
}
|
||||
|
||||
function setLang(lang) {
|
||||
currentLang = lang;
|
||||
localStorage.setItem('lang', lang);
|
||||
applyLang();
|
||||
}
|
||||
|
||||
applyLang();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
76  crates/secrets-mcp/templates/i18n.js  (new file)
@@ -0,0 +1,76 @@
|
||||
var I18N_SHARED = {
|
||||
'zh-CN': {
|
||||
pageTitleBase: 'Secrets',
|
||||
navMcp: 'MCP',
|
||||
navEntries: '条目',
|
||||
navAudit: '审计',
|
||||
signOut: '退出',
|
||||
mobileLabelTime: '时间',
|
||||
mobileLabelAction: '动作',
|
||||
mobileLabelTarget: '目标',
|
||||
mobileLabelDetail: '详情'
|
||||
},
|
||||
'zh-TW': {
|
||||
pageTitleBase: 'Secrets',
|
||||
navMcp: 'MCP',
|
||||
navEntries: '條目',
|
||||
navAudit: '審計',
|
||||
signOut: '登出',
|
||||
mobileLabelTime: '時間',
|
||||
mobileLabelAction: '動作',
|
||||
mobileLabelTarget: '目標',
|
||||
mobileLabelDetail: '詳情'
|
||||
},
|
||||
en: {
|
||||
pageTitleBase: 'Secrets',
|
||||
navMcp: 'MCP',
|
||||
navEntries: 'Entries',
|
||||
navAudit: 'Audit',
|
||||
signOut: 'Sign out',
|
||||
mobileLabelTime: 'Time',
|
||||
mobileLabelAction: 'Action',
|
||||
mobileLabelTarget: 'Target',
|
||||
mobileLabelDetail: 'Detail'
|
||||
}
|
||||
};
|
||||
|
||||
var currentLang = localStorage.getItem('lang') || 'zh-CN';
|
||||
var I18N_PAGE = {};
|
||||
|
||||
function t(key) {
|
||||
var dict = I18N_PAGE[currentLang] || I18N_PAGE['en'] || {};
|
||||
var val = dict[key] || (I18N_SHARED[currentLang] && I18N_SHARED[currentLang][key]) || (I18N_SHARED.en && I18N_SHARED.en[key]) || key;
|
||||
return val;
|
||||
}
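// Lookup order: page dictionary (I18N_PAGE), then shared strings for the current language,
// then English shared strings, then the raw key as a last resort.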
|
||||
|
||||
function tf(key, vars) {
|
||||
var tpl = t(key);
|
||||
return Object.keys(vars || {}).reduce(function (acc, k) {
|
||||
return acc.replace(new RegExp('\\{' + k + '\\}', 'g'), String(vars[k]));
|
||||
}, tpl);
|
||||
}
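// Example (illustrative key name): tf('deleteConfirm', { name: 'API_KEY' }) replaces every
// "{name}" placeholder in the translated template with "API_KEY".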
|
||||
|
||||
function applyLang() {
|
||||
document.documentElement.lang = currentLang;
|
||||
var title = t('pageTitle');
|
||||
if (title) document.title = title;
|
||||
document.querySelectorAll('[data-i18n]').forEach(function (el) {
|
||||
var key = el.getAttribute('data-i18n');
|
||||
el.textContent = t(key);
|
||||
});
|
||||
document.querySelectorAll('[data-i18n-ph]').forEach(function (el) {
|
||||
var key = el.getAttribute('data-i18n-ph');
|
||||
el.placeholder = t(key);
|
||||
});
|
||||
document.querySelectorAll('.lang-btn').forEach(function (btn) {
|
||||
var map = { 'zh-CN': '简', 'zh-TW': '繁', en: 'EN' };
|
||||
btn.classList.toggle('active', btn.textContent === map[currentLang]);
|
||||
});
|
||||
if (typeof applyPageLang === 'function') applyPageLang();
|
||||
}
|
||||
|
||||
window.setLang = function (lang) {
|
||||
currentLang = lang;
|
||||
localStorage.setItem('lang', lang);
|
||||
applyLang();
|
||||
};
|
||||
186  crates/secrets-mcp/templates/login.html  (new file)
@@ -0,0 +1,186 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="zh-CN">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta name="robots" content="noindex, follow">
|
||||
<meta name="description" content="登录 Secrets MCP Web 控制台,安全管理跨设备加密 secrets。">
|
||||
<meta name="keywords" content="Secrets MCP,登录,OAuth,密钥管理">
|
||||
<link rel="canonical" href="{{ base_url }}/login">
|
||||
<link rel="icon" href="/favicon.svg?v={{ version }}" type="image/svg+xml">
|
||||
<title>登录 — Secrets MCP</title>
|
||||
<meta property="og:type" content="website">
|
||||
<meta property="og:url" content="{{ base_url }}/login">
|
||||
<meta property="og:title" content="登录 — Secrets MCP">
|
||||
<meta property="og:description" content="登录 Web 控制台,管理加密存储的密钥与配置。">
|
||||
<meta name="twitter:card" content="summary">
|
||||
<meta name="twitter:title" content="登录 — Secrets MCP">
|
||||
<meta name="twitter:description" content="登录 Web 控制台,管理加密存储的密钥与配置。">
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap');
/* @import must appear before all other CSS rules to take effect. */
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
:root {
|
||||
--bg: #0d1117;
|
||||
--surface: #161b22;
|
||||
--border: #30363d;
|
||||
--text: #e6edf3;
|
||||
--text-muted: #8b949e;
|
||||
--accent: #58a6ff;
|
||||
--accent-hover: #79b8ff;
|
||||
--google: #4285f4;
|
||||
--danger: #f85149;
|
||||
}
|
||||
body { background: var(--bg); color: var(--text); font-family: 'Inter', sans-serif;
|
||||
min-height: 100vh; display: flex; align-items: center; justify-content: center; }
|
||||
.card {
|
||||
background: var(--surface); border: 1px solid var(--border); border-radius: 12px;
|
||||
padding: 48px 40px; width: 100%; max-width: 400px;
|
||||
box-shadow: 0 8px 32px rgba(0,0,0,0.4);
|
||||
}
|
||||
.topbar { display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 20px; gap: 12px; }
|
||||
.back-home {
|
||||
font-size: 13px; color: var(--accent); text-decoration: none; white-space: nowrap;
|
||||
}
|
||||
.back-home:hover { text-decoration: underline; }
|
||||
.lang-bar { display: flex; gap: 2px; background: rgba(255,255,255,0.04); border-radius: 6px; padding: 2px; flex-shrink: 0; }
|
||||
.lang-btn { padding: 3px 9px; border: none; background: none; color: var(--text-muted);
|
||||
font-size: 12px; cursor: pointer; border-radius: 4px; }
|
||||
.lang-btn.active { background: var(--border); color: var(--text); }
|
||||
.oauth-alert {
|
||||
display: none;
|
||||
margin-bottom: 16px; padding: 10px 12px; border-radius: 8px;
|
||||
font-size: 13px; line-height: 1.4;
|
||||
background: rgba(248, 81, 73, 0.12);
|
||||
border: 1px solid rgba(248, 81, 73, 0.35);
|
||||
color: #ffa198;
|
||||
}
|
||||
.oauth-alert.visible { display: block; }
|
||||
h1 { font-size: 22px; font-weight: 600; margin-bottom: 8px; }
|
||||
.subtitle { color: var(--text-muted); font-size: 14px; margin-bottom: 32px; }
|
||||
.btn {
|
||||
display: flex; align-items: center; justify-content: center; gap: 12px;
|
||||
width: 100%; padding: 12px 20px; border: 1px solid var(--border); border-radius: 8px;
|
||||
background: var(--surface); color: var(--text); font-size: 14px; font-weight: 500;
|
||||
cursor: pointer; text-decoration: none; transition: all 0.2s;
|
||||
}
|
||||
.btn:hover { background: var(--border); border-color: var(--text-muted); }
|
||||
.btn + .btn { margin-top: 12px; }
|
||||
.btn svg { flex-shrink: 0; }
|
||||
.footer { margin-top: 28px; text-align: center; color: var(--text-muted); font-size: 12px; }
|
||||
.footer a { color: var(--accent); text-decoration: none; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="card">
|
||||
<div class="topbar">
|
||||
<a class="back-home" href="/" data-i18n="backHome">返回首页</a>
|
||||
<div class="lang-bar">
|
||||
<button type="button" class="lang-btn" onclick="setLang('zh-CN')">简</button>
|
||||
<button type="button" class="lang-btn" onclick="setLang('zh-TW')">繁</button>
|
||||
<button type="button" class="lang-btn" onclick="setLang('en')">EN</button>
|
||||
</div>
|
||||
</div>
|
||||
<div id="oauth-alert" class="oauth-alert" role="alert"></div>
|
||||
<h1 data-i18n="title">登录</h1>
|
||||
<p class="subtitle" data-i18n="subtitle">安全管理你的跨设备 secrets。</p>
|
||||
|
||||
{% if has_google %}
|
||||
<a href="/auth/google" class="btn">
|
||||
<svg width="18" height="18" viewBox="0 0 18 18" fill="none">
|
||||
<path d="M17.64 9.2c0-.637-.057-1.251-.164-1.84H9v3.481h4.844a4.14 4.14 0 01-1.796 2.716v2.259h2.908c1.702-1.567 2.684-3.875 2.684-6.615z" fill="#4285F4"/>
|
||||
<path d="M9 18c2.43 0 4.467-.806 5.956-2.18l-2.908-2.259c-.806.54-1.837.86-3.048.86-2.344 0-4.328-1.584-5.036-3.711H.957v2.332A8.997 8.997 0 009 18z" fill="#34A853"/>
|
||||
<path d="M3.964 10.71A5.41 5.41 0 013.682 9c0-.593.102-1.17.282-1.71V4.958H.957A8.996 8.996 0 000 9c0 1.452.348 2.827.957 4.042l3.007-2.332z" fill="#FBBC05"/>
|
||||
<path d="M9 3.58c1.321 0 2.508.454 3.44 1.345l2.582-2.58C13.463.891 11.426 0 9 0A8.997 8.997 0 00.957 4.958L3.964 7.29C4.672 5.163 6.656 3.58 9 3.58z" fill="#EA4335"/>
|
||||
</svg>
|
||||
<span data-i18n="google">使用 Google 登录</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
|
||||
{% if !has_google %}
|
||||
<p style="text-align:center; color: var(--text-muted); font-size: 14px;" data-i18n="noProviders">
|
||||
未配置登录方式,请联系管理员。
|
||||
</p>
|
||||
{% endif %}
|
||||
</div>
|
||||
<script>
|
||||
const T = {
|
||||
'zh-CN': {
|
||||
docTitle: '登录 — Secrets MCP',
|
||||
backHome: '返回首页',
|
||||
title: '登录',
|
||||
subtitle: '安全管理你的跨设备 secrets。',
|
||||
google: '使用 Google 登录',
|
||||
noProviders: '未配置登录方式,请联系管理员。',
|
||||
err_oauth_error: '登录失败:授权提供方返回错误,请重试。',
|
||||
err_oauth_missing_code: '登录失败:未收到授权码,请重试。',
|
||||
err_oauth_missing_state: '登录失败:缺少安全校验参数,请重试。',
|
||||
err_oauth_state: '登录失败:会话校验不匹配(可能因 Cookie 策略或服务器重启)。请返回首页再试。',
|
||||
},
|
||||
'zh-TW': {
|
||||
docTitle: '登入 — Secrets MCP',
|
||||
backHome: '返回首頁',
|
||||
title: '登入',
|
||||
subtitle: '安全管理你的跨裝置 secrets。',
|
||||
google: '使用 Google 登入',
|
||||
noProviders: '尚未設定登入方式,請聯絡管理員。',
|
||||
err_oauth_error: '登入失敗:授權方回傳錯誤,請再試一次。',
|
||||
err_oauth_missing_code: '登入失敗:未取得授權碼,請再試一次。',
|
||||
err_oauth_missing_state: '登入失敗:缺少安全校驗參數,請再試一次。',
|
||||
err_oauth_state: '登入失敗:工作階段校驗不符(可能與 Cookie 政策或伺服器重啟有關)。請回到首頁再試。',
|
||||
},
|
||||
'en': {
|
||||
docTitle: 'Sign in — Secrets MCP',
|
||||
backHome: 'Back to home',
|
||||
title: 'Sign in',
|
||||
subtitle: 'Manage your cross-device secrets securely.',
|
||||
google: 'Continue with Google',
|
||||
noProviders: 'No login providers configured. Please contact your administrator.',
|
||||
err_oauth_error: 'Sign-in failed: the identity provider returned an error. Please try again.',
|
||||
err_oauth_missing_code: 'Sign-in failed: no authorization code was returned. Please try again.',
|
||||
err_oauth_missing_state: 'Sign-in failed: missing security state. Please try again.',
|
||||
err_oauth_state: 'Sign-in failed: session state mismatch (often cookies or server restart). Open the home page and try again.',
|
||||
}
|
||||
};
|
||||
|
||||
let currentLang = localStorage.getItem('lang') || 'zh-CN';
|
||||
|
||||
function t(key) { return (T[currentLang] && T[currentLang][key]) || T['en'][key] || key; } // tolerate an unknown saved language
|
||||
|
||||
function showOAuthError() {
|
||||
const params = new URLSearchParams(window.location.search);
|
||||
const code = params.get('error');
|
||||
const el = document.getElementById('oauth-alert');
|
||||
if (!code || !code.startsWith('oauth_')) {
|
||||
el.classList.remove('visible');
|
||||
el.textContent = '';
|
||||
return;
|
||||
}
|
||||
const key = 'err_' + code;
|
||||
el.textContent = T['en'][key] ? t(key) : t('err_oauth_error'); // unknown codes get the generic message
|
||||
el.classList.add('visible');
|
||||
}
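// Error codes arrive as ?error=oauth_*; each known code maps to an 'err_oauth_*' entry in T,
// and anything unrecognized falls back to the generic 'err_oauth_error' message.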
|
||||
|
||||
function applyLang() {
|
||||
document.documentElement.lang = currentLang;
|
||||
document.title = t('docTitle');
|
||||
document.querySelectorAll('[data-i18n]').forEach(el => {
|
||||
const key = el.getAttribute('data-i18n');
|
||||
el.textContent = t(key);
|
||||
});
|
||||
document.querySelectorAll('.lang-btn').forEach(btn => {
|
||||
const map = { 'zh-CN': '简', 'zh-TW': '繁', 'en': 'EN' };
|
||||
btn.classList.toggle('active', btn.textContent === map[currentLang]);
|
||||
});
|
||||
showOAuthError();
|
||||
}
|
||||
|
||||
function setLang(lang) {
|
||||
currentLang = lang;
|
||||
localStorage.setItem('lang', lang);
|
||||
applyLang();
|
||||
}
|
||||
|
||||
applyLang();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
53  deploy/.env.example  (new file)
@@ -0,0 +1,53 @@
|
||||
# Secrets MCP Server 环境变量配置
|
||||
# 复制此文件为 .env 并填写真实值
|
||||
|
||||
# ─── 数据库 ───────────────────────────────────────────────────────────
|
||||
# Web 会话(tower-sessions)与业务数据共用此库;启动时会自动 migrate 会话表,无需额外环境变量。
|
||||
SECRETS_DATABASE_URL=postgres://postgres:PASSWORD@db.refining.ltd:5432/secrets-mcp
|
||||
# 强烈建议生产使用 verify-full(至少 verify-ca)
|
||||
SECRETS_DATABASE_SSL_MODE=verify-full
|
||||
# 私有 CA 或自建链路时填写 CA 根证书路径;使用公共受信 CA 可留空
|
||||
# SECRETS_DATABASE_SSL_ROOT_CERT=/etc/secrets/pg-ca.crt
|
||||
# 当设为 prod/production 时,服务会拒绝弱 TLS 模式(prefer/disable/allow/require)
|
||||
SECRETS_ENV=production
|
||||
|
||||
# ─── 服务地址 ─────────────────────────────────────────────────────────
|
||||
# 内网监听地址(Cloudflare / Nginx 反代时填内网端口)
|
||||
SECRETS_MCP_BIND=127.0.0.1:9315
|
||||
|
||||
# 对外 HTTPS 地址(用于 OAuth 回调 URL 拼接)
|
||||
BASE_URL=https://secrets.example.com
|
||||
|
||||
# ─── Google OAuth ─────────────────────────────────────────────────────
|
||||
# Google Cloud Console → APIs & Services → Credentials
|
||||
# 授权回调 URI 须配置为:${BASE_URL}/auth/google/callback
|
||||
GOOGLE_CLIENT_ID=
|
||||
GOOGLE_CLIENT_SECRET=
|
||||
|
||||
# ─── 微信登录(暂未开放,预留)───────────────────────────────────────
|
||||
# WECHAT_APP_CLIENT_ID=
|
||||
# WECHAT_APP_CLIENT_SECRET=
|
||||
|
||||
# ─── 日志(可选)──────────────────────────────────────────────────────
|
||||
# RUST_LOG=secrets_mcp=debug
|
||||
|
||||
# ─── 数据库连接池(可选)──────────────────────────────────────────────
|
||||
# 最大连接数,默认 10
|
||||
# SECRETS_DATABASE_POOL_SIZE=10
|
||||
# 获取连接超时秒数,默认 5
|
||||
# SECRETS_DATABASE_ACQUIRE_TIMEOUT=5
|
||||
|
||||
# ─── 限流(可选)──────────────────────────────────────────────────────
|
||||
# 全局限流速率(req/s),默认 100
|
||||
# RATE_LIMIT_GLOBAL_PER_SECOND=100
|
||||
# 全局限流突发量,默认 200
|
||||
# RATE_LIMIT_GLOBAL_BURST=200
|
||||
# 单 IP 限流速率(req/s),默认 20
|
||||
# RATE_LIMIT_IP_PER_SECOND=20
|
||||
# 单 IP 限流突发量,默认 40
|
||||
# RATE_LIMIT_IP_BURST=40
|
||||
|
||||
# ─── 代理信任(可选)─────────────────────────────────────────────────
|
||||
# 设为 1/true/yes 时从 X-Forwarded-For / X-Real-IP 提取客户端 IP
|
||||
# 仅在反代环境下启用,否则客户端可伪造 IP 绕过限流
|
||||
# TRUST_PROXY=1
|
||||
92
deploy/postgres-tls-hardening.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# PostgreSQL TLS Hardening Runbook
|
||||
|
||||
This runbook applies to:
|
||||
|
||||
- PostgreSQL server: `47.117.131.22` (`db.refining.ltd`)
|
||||
- `secrets-mcp` app server: `47.238.146.244` (`secrets.refining.app`)
|
||||
|
||||
## 1) Issue certificate for `db.refining.ltd` (Let's Encrypt + Cloudflare DNS-01)
|
||||
|
||||
Install `acme.sh` on the PostgreSQL server and use a Cloudflare API token with DNS edit permission for the target zone.
|
||||
|
||||
```bash
|
||||
curl https://get.acme.sh | sh -s email=ops@refining.ltd
|
||||
export CF_Token="your_cloudflare_dns_token"
|
||||
export CF_Zone_ID="your_zone_id"
|
||||
~/.acme.sh/acme.sh --issue --dns dns_cf -d db.refining.ltd --keylength ec-256
|
||||
```
|
||||
|
||||
Install cert/key into a PostgreSQL-readable path:
|
||||
|
||||
```bash
|
||||
sudo mkdir -p /etc/postgresql/tls
|
||||
sudo ~/.acme.sh/acme.sh --install-cert -d db.refining.ltd --ecc \
|
||||
--fullchain-file /etc/postgresql/tls/fullchain.pem \
|
||||
--key-file /etc/postgresql/tls/privkey.pem \
|
||||
--reloadcmd "systemctl reload postgresql || systemctl restart postgresql"
|
||||
sudo chown -R postgres:postgres /etc/postgresql/tls
|
||||
sudo chmod 600 /etc/postgresql/tls/privkey.pem
|
||||
sudo chmod 644 /etc/postgresql/tls/fullchain.pem
|
||||
```
|
||||
|
||||
## 2) Configure PostgreSQL TLS and access rules
|
||||
|
||||
In `postgresql.conf`:
|
||||
|
||||
```conf
|
||||
ssl = on
|
||||
ssl_cert_file = '/etc/postgresql/tls/fullchain.pem'
|
||||
ssl_key_file = '/etc/postgresql/tls/privkey.pem'
|
||||
```
|
||||
|
||||
In `pg_hba.conf`, allow app traffic via TLS only (example):
|
||||
|
||||
```conf
|
||||
hostssl secrets-mcp postgres 47.238.146.244/32 scram-sha-256
|
||||
```
|
||||
|
||||
Keep a safe admin path (`local` socket or restricted source CIDR) before removing old plaintext `host` rules.
|
||||
|
||||
Reload PostgreSQL:
|
||||
|
||||
```bash
|
||||
sudo systemctl reload postgresql
|
||||
```
|
||||
|
||||
## 3) Verify server-side TLS
|
||||
|
||||
```bash
|
||||
openssl s_client -starttls postgres -connect db.refining.ltd:5432 -servername db.refining.ltd
|
||||
```
|
||||
|
||||
The handshake should succeed and the certificate should match `db.refining.ltd`.
|
||||
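To also confirm the certificate's subject and validity window, the same `openssl s_client` handshake can be piped into `openssl x509` (a small sketch; the only assumption is that the OpenSSL CLI is available on the machine running the check):

```bash
# Dump the certificate presented during the STARTTLS handshake
# and print who it was issued to/by and when it expires.
openssl s_client -starttls postgres \
  -connect db.refining.ltd:5432 -servername db.refining.ltd </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer -dates
```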
|
||||
## 4) Update `secrets-mcp` app server env
|
||||
|
||||
Use environment values like:
|
||||
|
||||
```bash
|
||||
SECRETS_DATABASE_URL=postgres://postgres:***@db.refining.ltd:5432/secrets-mcp
|
||||
SECRETS_DATABASE_SSL_MODE=verify-full
|
||||
SECRETS_ENV=production
|
||||
```
|
||||
|
||||
If you use a private CA instead of a public CA, also set:
|
||||
|
||||
```bash
|
||||
SECRETS_DATABASE_SSL_ROOT_CERT=/etc/secrets/pg-ca.crt
|
||||
```
|
||||
|
||||
Restart `secrets-mcp` after updating env.
|
||||
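A minimal restart sketch, assuming the service runs under the `secrets-mcp` systemd unit from `deploy/secrets-mcp.service` (which loads `/opt/secrets-mcp/.env`):

```bash
# Pick up the updated /opt/secrets-mcp/.env and restart the service.
sudo systemctl restart secrets-mcp
# Confirm it came back up and check the startup logs for TLS / migration errors.
sudo systemctl status secrets-mcp --no-pager
sudo journalctl -u secrets-mcp -n 50 --no-pager
```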
|
||||
## 5) Verify from app server
|
||||
|
||||
Run positive and negative checks:
|
||||
|
||||
- Positive: app starts, migrations pass, dashboard + MCP API work.
|
||||
- Negative:
|
||||
- wrong hostname -> connection fails
|
||||
- wrong CA file -> connection fails
|
||||
- disable TLS on DB -> connection fails
|
||||
|
||||
This ensures no silent downgrade to weak TLS in production.
|
||||
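A minimal sketch of these checks with `psql` from the app server. Assumptions: the PostgreSQL client is installed, the `***` placeholder is replaced with the real password from `.env`, and a CA bundle is available to libpq (the default is `~/.postgresql/root.crt`, not the OS trust store, so older clients may need an explicit `sslrootcert` path; newer libpq (16+) also accepts `sslrootcert=system`):

```bash
# Positive: verify-full against the real hostname must succeed.
psql "postgresql://postgres:***@db.refining.ltd:5432/secrets-mcp?sslmode=verify-full" \
  -c 'SELECT version();'

# Negative: connecting by IP breaks hostname verification and must fail.
psql "postgresql://postgres:***@47.117.131.22:5432/secrets-mcp?sslmode=verify-full" \
  -c 'SELECT 1;' && echo "UNEXPECTED: should have failed"

# Negative: a deliberately wrong CA bundle must be rejected.
psql "postgresql://postgres:***@db.refining.ltd:5432/secrets-mcp?sslmode=verify-full&sslrootcert=/tmp/wrong-ca.crt" \
  -c 'SELECT 1;' && echo "UNEXPECTED: should have failed"
```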
27
deploy/secrets-mcp.service
Normal file
@@ -0,0 +1,27 @@
|
||||
[Unit]
|
||||
Description=Secrets MCP Server
|
||||
After=network.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=secrets-mcp
|
||||
Group=secrets-mcp
|
||||
WorkingDirectory=/opt/secrets-mcp
|
||||
EnvironmentFile=/opt/secrets-mcp/.env
|
||||
ExecStart=/opt/secrets-mcp/secrets-mcp
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=secrets-mcp
|
||||
|
||||
# 安全加固
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
ReadWritePaths=/opt/secrets-mcp
|
||||
PrivateTmp=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
3
rust-toolchain.toml
Normal file
@@ -0,0 +1,3 @@
|
||||
[toolchain]
|
||||
channel = "1.94.0"
|
||||
components = ["rustfmt", "clippy"]
|
||||
@@ -5,19 +5,20 @@ set -euo pipefail
|
||||
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
cd "$repo_root"
|
||||
|
||||
version="$(grep -m1 '^version' Cargo.toml | sed 's/.*"\(.*\)".*/\1/')"
|
||||
tag="secrets-${version}"
|
||||
version="$(grep -m1 '^version' crates/secrets-mcp/Cargo.toml | sed 's/.*"\(.*\)".*/\1/')"
|
||||
tag="secrets-mcp-${version}"
|
||||
|
||||
echo "==> 当前版本: ${version}"
|
||||
echo "==> 当前 secrets-mcp 版本: ${version}"
|
||||
echo "==> 检查是否已存在 tag: ${tag}"
|
||||
|
||||
if git rev-parse "refs/tags/${tag}" >/dev/null 2>&1; then
|
||||
echo "错误: 已存在 tag ${tag}"
|
||||
echo "请先 bump Cargo.toml 中的 version,再执行 cargo build 同步 Cargo.lock。"
|
||||
exit 1
|
||||
if jj log --no-graph --revisions "tag(${tag})" --limit 1 >/dev/null 2>&1; then
|
||||
echo "提示: 已存在 tag ${tag},将按重复构建处理,不阻断检查。"
|
||||
echo "如需创建新的发布版本,请先 bump crates/secrets-mcp/Cargo.toml 中的 version。"
|
||||
else
|
||||
echo "==> 未发现重复 tag,将创建新版本"
|
||||
fi
|
||||
|
||||
echo "==> 未发现重复 tag,开始执行检查"
|
||||
echo "==> 开始执行检查"
|
||||
cargo fmt -- --check
|
||||
cargo clippy --locked -- -D warnings
|
||||
cargo test --locked
|
||||
|
||||
@@ -4,17 +4,24 @@
|
||||
# 参考: .gitea/workflows/secrets.yml
|
||||
#
|
||||
# 所需配置:
|
||||
# - secrets.RELEASE_TOKEN (必选) Release 上传用,值为 Gitea PAT
|
||||
# - secrets.RELEASE_TOKEN (可选,推荐) Gitea PAT;未配置则工作流跳过 Release 创建与产物上传
|
||||
# - vars.WEBHOOK_URL (可选) 飞书通知
|
||||
# - vars.DEPLOY_HOST (可选) 部署目标 SSH 主机(IP 或域名)
|
||||
# - vars.DEPLOY_USER (可选) SSH 用户名
|
||||
# - secrets.DEPLOY_SSH_KEY (可选) SSH 私钥 PEM 全文(原始字符,含 BEGIN/END 行);通过 DEPLOY_SSH_KEY_FILE 写入 API
|
||||
#
|
||||
# 注意:
|
||||
# - Gitea 不允许 secret/variable 名以 GITEA_ 或 GITHUB_ 开头,故使用 RELEASE_TOKEN
|
||||
# - Secret/Variable 的 data/value 字段需传入原始值,不要使用 base64 编码
|
||||
# - Gitea Actions 的 secrets(API 的 data 字段,及网页里粘贴的值)必须是未经 base64 的原始值。
|
||||
# 若事先 base64 再写入,工作流里拿到的仍是「一串 base64 文本」,SSH/OpenSSH 无法识别,部署会失败。
|
||||
# DEPLOY_SSH_KEY 须与 .pem 文件内容一致:本脚本用 jq --rawfile 按原文上传。
|
||||
# - Variables 的 value 字段同样为原始字符串,不要 base64。
|
||||
#
|
||||
# 用法:
|
||||
# 1. 从 ~/.config/gitea/config.env 读取 GITEA_URL, GITEA_TOKEN, GITEA_WEBHOOK_URL
|
||||
# 2. 或通过环境变量覆盖: GITEA_TOKEN(作为 RELEASE_TOKEN 的值), WEBHOOK_URL
|
||||
# 3. 或使用 secrets CLI 获取: 需 DATABASE_URL,从 refining/service gitea 读取
|
||||
# 2. 或通过环境变量覆盖: GITEA_TOKEN(作为 RELEASE_TOKEN 的值), WEBHOOK_URL,
|
||||
# DEPLOY_HOST, DEPLOY_USER, DEPLOY_SSH_KEY_FILE(部署到 ECS)
|
||||
# 3. 凭据勿用 base64;部署私钥路径见 DEPLOY_SSH_KEY_FILE
|
||||
#
|
||||
|
||||
set -e
|
||||
@@ -23,26 +30,41 @@ OWNER="refining"
|
||||
REPO="secrets"
|
||||
|
||||
# 解析参数
|
||||
USE_SECRETS_CLI=false
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--from-secrets) USE_SECRETS_CLI=true; shift ;;
|
||||
--from-secrets)
|
||||
echo "❌ --from-secrets 尚未实现,请使用 ~/.config/gitea/config.env 或环境变量" >&2
|
||||
exit 1
|
||||
;;
|
||||
-h|--help)
|
||||
echo "用法: $0 [--from-secrets]"
|
||||
echo "用法: $0"
|
||||
echo ""
|
||||
echo " --from-secrets 从 secrets CLI (refining/service gitea) 获取 token 和 webhook_url"
|
||||
echo " 否则从 ~/.config/gitea/config.env 读取"
|
||||
echo "从 ~/.config/gitea/config.env 读取,或由环境变量覆盖。"
|
||||
echo ""
|
||||
echo "环境变量覆盖:"
|
||||
echo " GITEA_URL Gitea 实例地址"
|
||||
echo " GITEA_TOKEN 用于 Release 上传的 PAT (创建 RELEASE_TOKEN secret)"
|
||||
echo " WEBHOOK_URL 飞书 Webhook URL (创建 variable,可选)"
|
||||
echo "环境变量:"
|
||||
echo " GITEA_URL Gitea 实例根地址(可误带尾部 /api/v1,脚本会规范化后拼接)"
|
||||
echo " GITEA_TOKEN 用于 Release 的 PAT → secrets.RELEASE_TOKEN"
|
||||
echo " WEBHOOK_URL 或 GITEA_WEBHOOK_URL → vars.WEBHOOK_URL(可选)"
|
||||
echo " DEPLOY_HOST 部署 SSH 主机(可选,须与下面两项同时设置)"
|
||||
echo " DEPLOY_USER 部署 SSH 用户"
|
||||
echo " DEPLOY_SSH_KEY_FILE 本地 PEM 路径 → secrets.DEPLOY_SSH_KEY(原文上传,勿 base64)"
|
||||
exit 0
|
||||
;;
|
||||
*) shift ;;
|
||||
*)
|
||||
echo "❌ 未知参数: $1" >&2
|
||||
echo " 使用 $0 --help 查看用法" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
for cmd in curl jq; do
|
||||
if ! command -v "$cmd" &>/dev/null; then
|
||||
echo "❌ 未找到命令: $cmd(本脚本依赖 curl 与 jq)" >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# 加载配置
|
||||
load_config() {
|
||||
local config="$HOME/.config/gitea/config.env"
|
||||
@@ -52,26 +74,6 @@ load_config() {
|
||||
fi
|
||||
}
|
||||
|
||||
# 从 secrets CLI 获取 gitea 凭据
|
||||
fetch_from_secrets() {
|
||||
if ! command -v secrets &>/dev/null; then
|
||||
echo "❌ secrets CLI 未找到,请先构建: cargo build --release" >&2
|
||||
return 1
|
||||
fi
|
||||
# 输出 JSON 格式便于解析;需要 --show-secrets
|
||||
# secrets 当前无 JSON 输出,用简单解析
|
||||
local out
|
||||
out=$(secrets search -n refining --kind service -q gitea --show-secrets 2>/dev/null || true)
|
||||
if [[ -z "$out" ]]; then
|
||||
echo "❌ 未找到 refining/service gitea 记录" >&2
|
||||
return 1
|
||||
fi
|
||||
# 简化:从 metadata 和 secrets 中提取,实际格式需根据 search 输出调整
|
||||
# 此处仅作占位,实际解析较复杂;建议用户优先用 config.env
|
||||
echo "⚠️ --from-secrets 暂不支持自动解析,请使用 config.env 或环境变量" >&2
|
||||
return 1
|
||||
}
|
||||
|
||||
load_config
|
||||
|
||||
# 优先使用环境变量
|
||||
@@ -86,18 +88,17 @@ if [[ -z "$GITEA_URL" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 去掉 URL 尾部斜杠
|
||||
# 规范为实例根 URL:去尾部斜杠,并去掉重复的 .../api/v1 后缀(避免拼成 .../api/v1/api/v1)
|
||||
GITEA_URL="${GITEA_URL%/}"
|
||||
# 确保使用 /api/v1 基础路径(若用户只写了根 URL)
|
||||
[[ "$GITEA_URL" != *"/api/v1"* ]] || true
|
||||
while [[ "$GITEA_URL" == */api/v1 ]]; do
|
||||
GITEA_URL="${GITEA_URL%/api/v1}"
|
||||
GITEA_URL="${GITEA_URL%/}"
|
||||
done
|
||||
|
||||
API_BASE="${GITEA_URL}/api/v1"
|
||||
|
||||
# 获取 GITEA_TOKEN(作为 workflow 中 secrets.RELEASE_TOKEN 的值)
|
||||
if [[ -z "$GITEA_TOKEN" ]]; then
|
||||
if $USE_SECRETS_CLI; then
|
||||
fetch_from_secrets || exit 1
|
||||
fi
|
||||
echo "❌ GITEA_TOKEN 未配置"
|
||||
echo " 在 ~/.config/gitea/config.env 中设置,或 export GITEA_TOKEN=xxx" >&2
|
||||
echo " Token 需具备 repo 写权限(创建 Release、上传附件)" >&2
|
||||
@@ -109,8 +110,7 @@ echo "配置 Gitea Actions: $OWNER/$REPO"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo ""
|
||||
|
||||
# 1. 创建 Secret: RELEASE_TOKEN
|
||||
# 注意: Gitea Actions API 的 data 字段需传入原始值,不要使用 base64 编码
|
||||
# 1. 创建 Secret: RELEASE_TOKEN(data = PAT 原文,勿 base64)
|
||||
echo "1. 创建 Secret: RELEASE_TOKEN"
|
||||
secret_payload=$(jq -n --arg t "$GITEA_TOKEN" '{data: $t}')
|
||||
resp=$(curl -s -w "\n%{http_code}" -X PUT \
|
||||
@@ -129,8 +129,7 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 2. 创建/更新 Variable: WEBHOOK_URL(可选)
|
||||
# 注意: Secret 和 Variable 均使用原始值,不要 base64 编码
|
||||
# 2. 创建/更新 Variable: WEBHOOK_URL(可选,value 为原始 URL 字符串,勿 base64)
|
||||
WEBHOOK_VALUE="${WEBHOOK_URL:-$GITEA_WEBHOOK_URL}"
|
||||
if [[ -n "$WEBHOOK_VALUE" ]]; then
|
||||
echo ""
|
||||
@@ -168,6 +167,68 @@ else
|
||||
echo " 飞书通知将不可用;如需可后续在仓库 Settings → Variables 中添加"
|
||||
fi
|
||||
|
||||
# 3. 部署用 Variable + Secret(与 .gitea/workflows/secrets.yml 中 deploy-mcp 一致)
|
||||
upsert_repo_variable() {
|
||||
local var_name="$1" var_value="$2"
|
||||
local var_payload http_code body resp
|
||||
var_payload=$(jq -n --arg v "$var_value" '{value: $v}')
|
||||
resp=$(curl -s -w "\n%{http_code}" -X POST \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$var_payload" \
|
||||
"${API_BASE}/repos/${OWNER}/${REPO}/actions/variables/${var_name}")
|
||||
http_code=$(echo "$resp" | tail -n1)
|
||||
if [[ "$http_code" == "200" || "$http_code" == "201" || "$http_code" == "204" ]]; then
|
||||
return 0
|
||||
fi
|
||||
if [[ "$http_code" == "409" ]]; then
|
||||
resp=$(curl -s -w "\n%{http_code}" -X PUT \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$var_payload" \
|
||||
"${API_BASE}/repos/${OWNER}/${REPO}/actions/variables/${var_name}")
|
||||
http_code=$(echo "$resp" | tail -n1)
|
||||
[[ "$http_code" == "200" || "$http_code" == "204" ]]
|
||||
return
|
||||
fi
|
||||
body=$(echo "$resp" | sed '$d')
|
||||
echo " ❌ 变量 ${var_name} 失败 (HTTP $http_code)" >&2
|
||||
echo "$body" | jq -r '.message // .' 2>/dev/null || echo "$body" >&2
|
||||
return 1
|
||||
}
|
||||
|
||||
if [[ -n "$DEPLOY_HOST" && -n "$DEPLOY_USER" && -n "$DEPLOY_SSH_KEY_FILE" ]]; then
|
||||
echo ""
|
||||
echo "3. 部署目标: vars.DEPLOY_HOST / vars.DEPLOY_USER + secrets.DEPLOY_SSH_KEY"
|
||||
if [[ ! -f "$DEPLOY_SSH_KEY_FILE" ]]; then
|
||||
echo " ❌ DEPLOY_SSH_KEY_FILE 不是文件: $DEPLOY_SSH_KEY_FILE" >&2
|
||||
exit 1
|
||||
fi
|
||||
upsert_repo_variable DEPLOY_HOST "$DEPLOY_HOST" || exit 1
|
||||
echo " ✓ DEPLOY_HOST"
|
||||
upsert_repo_variable DEPLOY_USER "$DEPLOY_USER" || exit 1
|
||||
echo " ✓ DEPLOY_USER"
|
||||
# PEM 原文写入 secret.data;勿对文件先做 base64,否则 runner 侧 ssh 无法解析密钥
|
||||
secret_payload=$(jq -n --rawfile k "$DEPLOY_SSH_KEY_FILE" '{data: $k}')
|
||||
resp=$(curl -s -w "\n%{http_code}" -X PUT \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$secret_payload" \
|
||||
"${API_BASE}/repos/${OWNER}/${REPO}/actions/secrets/DEPLOY_SSH_KEY")
|
||||
http_code=$(echo "$resp" | tail -n1)
|
||||
body=$(echo "$resp" | sed '$d')
|
||||
if [[ "$http_code" == "200" || "$http_code" == "201" || "$http_code" == "204" ]]; then
|
||||
echo " ✓ DEPLOY_SSH_KEY"
|
||||
else
|
||||
echo " ❌ DEPLOY_SSH_KEY 失败 (HTTP $http_code)" >&2
|
||||
echo "$body" | jq -r '.message // .' 2>/dev/null || echo "$body" >&2
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo ""
|
||||
echo "3. 跳过部署配置(需同时设置 DEPLOY_HOST、DEPLOY_USER、DEPLOY_SSH_KEY_FILE)"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "✓ 配置完成"
|
||||
@@ -176,6 +237,7 @@ echo ""
|
||||
echo "Workflow 将使用:"
|
||||
echo " - secrets.RELEASE_TOKEN 创建 Release 并上传二进制"
|
||||
echo " - vars.WEBHOOK_URL 发送飞书通知(如已配置)"
|
||||
echo " - vars.DEPLOY_* / secrets.DEPLOY_SSH_KEY deploy-mcp(如已配置)"
|
||||
echo ""
|
||||
echo "推送代码触发构建:"
|
||||
echo " git push origin main"
|
||||
|
||||
37
src/audit.rs
@@ -1,37 +0,0 @@
|
||||
use serde_json::Value;
|
||||
use sqlx::{Postgres, Transaction};
|
||||
|
||||
/// Return the current OS user as the audit actor (falls back to empty string).
|
||||
pub fn current_actor() -> String {
|
||||
std::env::var("USER").unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Write an audit entry within an existing transaction.
|
||||
pub async fn log_tx(
|
||||
tx: &mut Transaction<'_, Postgres>,
|
||||
action: &str,
|
||||
namespace: &str,
|
||||
kind: &str,
|
||||
name: &str,
|
||||
detail: Value,
|
||||
) {
|
||||
let actor = current_actor();
|
||||
let result: Result<_, sqlx::Error> = sqlx::query(
|
||||
"INSERT INTO audit_log (action, namespace, kind, name, detail, actor) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6)",
|
||||
)
|
||||
.bind(action)
|
||||
.bind(namespace)
|
||||
.bind(kind)
|
||||
.bind(name)
|
||||
.bind(&detail)
|
||||
.bind(&actor)
|
||||
.execute(&mut **tx)
|
||||
.await;
|
||||
|
||||
if let Err(e) = result {
|
||||
tracing::warn!(error = %e, "failed to write audit log");
|
||||
} else {
|
||||
tracing::debug!(action, namespace, kind, name, actor, "audit logged");
|
||||
}
|
||||
}
|
||||
@@ -1,459 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::{Map, Value, json};
|
||||
use sqlx::PgPool;
|
||||
use std::fs;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::db;
|
||||
use crate::models::EntryRow;
|
||||
use crate::output::{OutputMode, print_json};
|
||||
|
||||
// ── Key/value parsing helpers (shared with update.rs) ───────────────────────
|
||||
|
||||
/// Parse secret / metadata entries into a nested key path and JSON value.
|
||||
/// - `key=value` → stores the literal string `value`
|
||||
/// - `key:=<json>` → parses `<json>` as a typed JSON value
|
||||
/// - `key=@file` → reads the file content as a string
|
||||
/// - `a:b=value` → writes nested fields: `{ "a": { "b": "value" } }`
|
||||
/// - `a:b@./file.txt` → shorthand for nested file reads without manual JSON escaping
|
||||
pub(crate) fn parse_kv(entry: &str) -> Result<(Vec<String>, Value)> {
|
||||
// Typed JSON form: key:=<json>
|
||||
if let Some((key, json_str)) = entry.split_once(":=") {
|
||||
let val: Value = serde_json::from_str(json_str).map_err(|e| {
|
||||
anyhow::anyhow!(
|
||||
"Invalid JSON value for key '{}': {} (use key=value for plain strings)",
|
||||
key,
|
||||
e
|
||||
)
|
||||
})?;
|
||||
return Ok((parse_key_path(key)?, val));
|
||||
}
|
||||
|
||||
// Plain string form: key=value or key=@file
|
||||
if let Some((key, raw_val)) = entry.split_once('=') {
|
||||
let value = if let Some(path) = raw_val.strip_prefix('@') {
|
||||
fs::read_to_string(path)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?
|
||||
} else {
|
||||
raw_val.to_string()
|
||||
};
|
||||
|
||||
return Ok((parse_key_path(key)?, Value::String(value)));
|
||||
}
|
||||
|
||||
// Shorthand file form: nested:key@file
|
||||
if let Some((key, path)) = entry.split_once('@') {
|
||||
let value = fs::read_to_string(path)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?;
|
||||
return Ok((parse_key_path(key)?, Value::String(value)));
|
||||
}
|
||||
|
||||
anyhow::bail!(
|
||||
"Invalid format '{}'. Expected: key=value, key=@file, nested:key@file, or key:=<json>",
|
||||
entry
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn build_json(entries: &[String]) -> Result<Value> {
|
||||
let mut map = Map::new();
|
||||
for entry in entries {
|
||||
let (path, value) = parse_kv(entry)?;
|
||||
insert_path(&mut map, &path, value)?;
|
||||
}
|
||||
Ok(Value::Object(map))
|
||||
}
|
||||
|
||||
pub(crate) fn key_path_to_string(path: &[String]) -> String {
|
||||
path.join(":")
|
||||
}
|
||||
|
||||
pub(crate) fn collect_key_paths(entries: &[String]) -> Result<Vec<String>> {
|
||||
entries
|
||||
.iter()
|
||||
.map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn collect_field_paths(entries: &[String]) -> Result<Vec<String>> {
|
||||
entries
|
||||
.iter()
|
||||
.map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn parse_key_path(key: &str) -> Result<Vec<String>> {
|
||||
let path: Vec<String> = key
|
||||
.split(':')
|
||||
.map(str::trim)
|
||||
.map(ToOwned::to_owned)
|
||||
.collect();
|
||||
|
||||
if path.is_empty() || path.iter().any(|part| part.is_empty()) {
|
||||
anyhow::bail!(
|
||||
"Invalid key path '{}'. Use non-empty segments like 'credentials:content'.",
|
||||
key
|
||||
);
|
||||
}
|
||||
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
pub(crate) fn insert_path(
|
||||
map: &mut Map<String, Value>,
|
||||
path: &[String],
|
||||
value: Value,
|
||||
) -> Result<()> {
|
||||
if path.is_empty() {
|
||||
anyhow::bail!("Key path cannot be empty");
|
||||
}
|
||||
|
||||
if path.len() == 1 {
|
||||
map.insert(path[0].clone(), value);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let head = path[0].clone();
|
||||
let tail = &path[1..];
|
||||
|
||||
match map.entry(head.clone()) {
|
||||
serde_json::map::Entry::Vacant(entry) => {
|
||||
let mut child = Map::new();
|
||||
insert_path(&mut child, tail, value)?;
|
||||
entry.insert(Value::Object(child));
|
||||
}
|
||||
serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() {
|
||||
Value::Object(child) => insert_path(child, tail, value)?,
|
||||
_ => {
|
||||
anyhow::bail!(
|
||||
"Cannot set nested key '{}' because '{}' is already a non-object value",
|
||||
key_path_to_string(path),
|
||||
head
|
||||
);
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Result<bool> {
|
||||
if path.is_empty() {
|
||||
anyhow::bail!("Key path cannot be empty");
|
||||
}
|
||||
|
||||
if path.len() == 1 {
|
||||
return Ok(map.remove(&path[0]).is_some());
|
||||
}
|
||||
|
||||
let Some(value) = map.get_mut(&path[0]) else {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
let Value::Object(child) = value else {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
let removed = remove_path(child, &path[1..])?;
|
||||
if child.is_empty() {
|
||||
map.remove(&path[0]);
|
||||
}
|
||||
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
/// Flatten a (potentially nested) JSON object into dot-separated field entries.
|
||||
/// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` →
|
||||
/// `[("credentials.type", "ssh"), ("credentials.content", "...")]`
|
||||
/// Top-level non-object values are emitted directly.
|
||||
pub(crate) fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> {
|
||||
match value {
|
||||
Value::Object(map) => {
|
||||
let mut out = Vec::new();
|
||||
for (k, v) in map {
|
||||
let full_key = if prefix.is_empty() {
|
||||
k.clone()
|
||||
} else {
|
||||
format!("{}.{}", prefix, k)
|
||||
};
|
||||
out.extend(flatten_json_fields(&full_key, v));
|
||||
}
|
||||
out
|
||||
}
|
||||
other => vec![(prefix.to_string(), other.clone())],
|
||||
}
|
||||
}
|
||||
|
||||
// ── Add command ──────────────────────────────────────────────────────────────
|
||||
|
||||
pub struct AddArgs<'a> {
|
||||
pub namespace: &'a str,
|
||||
pub kind: &'a str,
|
||||
pub name: &'a str,
|
||||
pub tags: &'a [String],
|
||||
pub meta_entries: &'a [String],
|
||||
pub secret_entries: &'a [String],
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
||||
let metadata = build_json(args.meta_entries)?;
|
||||
let secret_json = build_json(args.secret_entries)?;
|
||||
|
||||
tracing::debug!(args.namespace, args.kind, args.name, "upserting entry");
|
||||
|
||||
let meta_keys = collect_key_paths(args.meta_entries)?;
|
||||
let secret_keys = collect_key_paths(args.secret_entries)?;
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
// Upsert the entry row (tags + metadata).
|
||||
let existing: Option<EntryRow> = sqlx::query_as(
|
||||
"SELECT id, version, tags, metadata FROM entries \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3",
|
||||
)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
// Snapshot the current entry state before overwriting.
|
||||
if let Some(ref ex) = existing
|
||||
&& let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: ex.id,
|
||||
namespace: args.namespace,
|
||||
kind: args.kind,
|
||||
name: args.name,
|
||||
version: ex.version,
|
||||
action: "add",
|
||||
tags: &ex.tags,
|
||||
metadata: &ex.metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history before upsert");
|
||||
}
|
||||
|
||||
let entry_id: uuid::Uuid = sqlx::query_scalar(
|
||||
r#"
|
||||
INSERT INTO entries (namespace, kind, name, tags, metadata, version, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, 1, NOW())
|
||||
ON CONFLICT (namespace, kind, name)
|
||||
DO UPDATE SET
|
||||
tags = EXCLUDED.tags,
|
||||
metadata = EXCLUDED.metadata,
|
||||
version = entries.version + 1,
|
||||
updated_at = NOW()
|
||||
RETURNING id
|
||||
"#,
|
||||
)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.bind(args.tags)
|
||||
.bind(&metadata)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?;
|
||||
|
||||
let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1")
|
||||
.bind(entry_id)
|
||||
.fetch_one(&mut *tx)
|
||||
.await?;
|
||||
|
||||
// Snapshot existing secret fields before replacing.
|
||||
if existing.is_some() {
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct ExistingField {
|
||||
id: uuid::Uuid,
|
||||
field_name: String,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let existing_fields: Vec<ExistingField> = sqlx::query_as(
|
||||
"SELECT id, field_name, encrypted \
|
||||
FROM secrets WHERE entry_id = $1",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?;
|
||||
|
||||
for f in &existing_fields {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
entry_id,
|
||||
secret_id: f.id,
|
||||
entry_version: new_entry_version - 1,
|
||||
field_name: &f.field_name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "add",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field history");
|
||||
}
|
||||
}
|
||||
|
||||
// Delete existing secret fields so we can re-insert the full set.
|
||||
sqlx::query("DELETE FROM secrets WHERE entry_id = $1")
|
||||
.bind(entry_id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Insert new secret fields.
|
||||
let flat_fields = flatten_json_fields("", &secret_json);
|
||||
for (field_name, field_value) in &flat_fields {
|
||||
let encrypted = crypto::encrypt_json(master_key, field_value)?;
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO secrets (entry_id, field_name, encrypted) \
|
||||
VALUES ($1, $2, $3)",
|
||||
)
|
||||
.bind(entry_id)
|
||||
.bind(field_name)
|
||||
.bind(&encrypted)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
"add",
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name,
|
||||
json!({
|
||||
"tags": args.tags,
|
||||
"meta_keys": meta_keys,
|
||||
"secret_keys": secret_keys,
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
let result_json = json!({
|
||||
"action": "added",
|
||||
"namespace": args.namespace,
|
||||
"kind": args.kind,
|
||||
"name": args.name,
|
||||
"tags": args.tags,
|
||||
"meta_keys": meta_keys,
|
||||
"secret_keys": secret_keys,
|
||||
});
|
||||
|
||||
match args.output {
|
||||
OutputMode::Json | OutputMode::JsonCompact => {
|
||||
print_json(&result_json, &args.output)?;
|
||||
}
|
||||
_ => {
|
||||
println!("Added: [{}/{}] {}", args.namespace, args.kind, args.name);
|
||||
if !args.tags.is_empty() {
|
||||
println!(" tags: {}", args.tags.join(", "));
|
||||
}
|
||||
if !args.meta_entries.is_empty() {
|
||||
println!(" metadata: {}", meta_keys.join(", "));
|
||||
}
|
||||
if !args.secret_entries.is_empty() {
|
||||
println!(" secrets: {}", secret_keys.join(", "));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{build_json, flatten_json_fields, key_path_to_string, parse_kv, remove_path};
|
||||
use serde_json::Value;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn temp_file_path(name: &str) -> PathBuf {
|
||||
let nanos = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("clock should be after unix epoch")
|
||||
.as_nanos();
|
||||
std::env::temp_dir().join(format!("secrets-{name}-{nanos}.txt"))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_nested_file_shorthand() {
|
||||
let path = temp_file_path("ssh-key");
|
||||
fs::write(&path, "line1\nline2\n").expect("should write temp file");
|
||||
|
||||
let entry = format!("credentials:content@{}", path.display());
|
||||
let (path_parts, value) = parse_kv(&entry).expect("should parse nested file shorthand");
|
||||
|
||||
assert_eq!(key_path_to_string(&path_parts), "credentials:content");
|
||||
assert_eq!(value, serde_json::Value::String("line1\nline2\n".into()));
|
||||
|
||||
fs::remove_file(path).expect("should remove temp file");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn build_nested_json_from_mixed_entries() {
|
||||
let payload = vec![
|
||||
"credentials:type=ssh".to_string(),
|
||||
"credentials:enabled:=true".to_string(),
|
||||
"username=root".to_string(),
|
||||
];
|
||||
|
||||
let value = build_json(&payload).expect("should build nested json");
|
||||
|
||||
assert_eq!(
|
||||
value,
|
||||
serde_json::json!({
|
||||
"credentials": {
|
||||
"type": "ssh",
|
||||
"enabled": true
|
||||
},
|
||||
"username": "root"
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn remove_nested_path_prunes_empty_parents() {
|
||||
let mut value = serde_json::json!({
|
||||
"credentials": {
|
||||
"content": "pem-data"
|
||||
},
|
||||
"username": "root"
|
||||
});
|
||||
|
||||
let map = match &mut value {
|
||||
Value::Object(map) => map,
|
||||
_ => panic!("expected object"),
|
||||
};
|
||||
|
||||
let removed = remove_path(map, &["credentials".to_string(), "content".to_string()])
|
||||
.expect("should remove nested field");
|
||||
|
||||
assert!(removed);
|
||||
assert_eq!(value, serde_json::json!({ "username": "root" }));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flatten_json_fields_nested() {
|
||||
let v = serde_json::json!({
|
||||
"username": "root",
|
||||
"credentials": {
|
||||
"type": "ssh",
|
||||
"content": "pem-data"
|
||||
}
|
||||
});
|
||||
let mut fields = flatten_json_fields("", &v);
|
||||
fields.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
|
||||
assert_eq!(fields[0].0, "credentials.content");
|
||||
assert_eq!(fields[1].0, "credentials.type");
|
||||
assert_eq!(fields[2].0, "username");
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
use crate::config::{self, Config, config_path};
|
||||
use anyhow::Result;
|
||||
|
||||
pub async fn run(action: crate::ConfigAction) -> Result<()> {
|
||||
match action {
|
||||
crate::ConfigAction::SetDb { url } => {
|
||||
// Verify connection before writing config
|
||||
let pool = crate::db::create_pool(&url)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Database connection failed: {}", e))?;
|
||||
drop(pool);
|
||||
println!("Database connection successful.");
|
||||
|
||||
let cfg = Config {
|
||||
database_url: Some(url.clone()),
|
||||
};
|
||||
config::save_config(&cfg)?;
|
||||
println!("Database URL saved to: {}", config_path()?.display());
|
||||
println!(" {}", mask_password(&url));
|
||||
}
|
||||
crate::ConfigAction::Show => {
|
||||
let cfg = config::load_config()?;
|
||||
match cfg.database_url {
|
||||
Some(url) => {
|
||||
println!("database_url = {}", mask_password(&url));
|
||||
println!("config file: {}", config_path()?.display());
|
||||
}
|
||||
None => {
|
||||
println!("Database URL not configured.");
|
||||
println!("Run: secrets config set-db <DATABASE_URL>");
|
||||
}
|
||||
}
|
||||
}
|
||||
crate::ConfigAction::Path => {
|
||||
println!("{}", config_path()?.display());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Mask the password in a postgres://user:password@host/db URL.
|
||||
fn mask_password(url: &str) -> String {
|
||||
if let Some(at_pos) = url.rfind('@')
|
||||
&& let Some(scheme_end) = url.find("://")
|
||||
{
|
||||
let prefix = &url[..scheme_end + 3];
|
||||
let credentials = &url[scheme_end + 3..at_pos];
|
||||
let rest = &url[at_pos..];
|
||||
if let Some(colon_pos) = credentials.find(':') {
|
||||
let user = &credentials[..colon_pos];
|
||||
return format!("{}{}:***{}", prefix, user, rest);
|
||||
}
|
||||
}
|
||||
url.to_string()
|
||||
}
|
||||
@@ -1,291 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::json;
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::db;
|
||||
use crate::models::{EntryRow, SecretFieldRow};
|
||||
use crate::output::{OutputMode, print_json};
|
||||
|
||||
pub struct DeleteArgs<'a> {
|
||||
pub namespace: &'a str,
|
||||
/// Kind filter. Required when --name is given; optional for bulk deletes.
|
||||
pub kind: Option<&'a str>,
|
||||
/// Exact record name. When None, bulk-delete all matching records.
|
||||
pub name: Option<&'a str>,
|
||||
/// Preview without writing to the database (bulk mode only).
|
||||
pub dry_run: bool,
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
// ── Internal row type used for bulk queries ────────────────────────────────
|
||||
|
||||
#[derive(Debug, sqlx::FromRow)]
|
||||
struct FullEntryRow {
|
||||
pub id: Uuid,
|
||||
pub version: i64,
|
||||
pub kind: String,
|
||||
pub name: String,
|
||||
pub metadata: serde_json::Value,
|
||||
pub tags: Vec<String>,
|
||||
}
|
||||
|
||||
// ── Entry point ────────────────────────────────────────────────────────────
|
||||
|
||||
pub async fn run(pool: &PgPool, args: DeleteArgs<'_>) -> Result<()> {
|
||||
match args.name {
|
||||
Some(name) => {
|
||||
let kind = args
|
||||
.kind
|
||||
.ok_or_else(|| anyhow::anyhow!("--kind is required when --name is specified"))?;
|
||||
delete_one(pool, args.namespace, kind, name, args.output).await
|
||||
}
|
||||
None => delete_bulk(pool, args.namespace, args.kind, args.dry_run, args.output).await,
|
||||
}
|
||||
}
|
||||
|
||||
// ── Single-record delete (original behaviour) ─────────────────────────────
|
||||
|
||||
async fn delete_one(
|
||||
pool: &PgPool,
|
||||
namespace: &str,
|
||||
kind: &str,
|
||||
name: &str,
|
||||
output: OutputMode,
|
||||
) -> Result<()> {
|
||||
tracing::debug!(namespace, kind, name, "deleting entry");
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
let row: Option<EntryRow> = sqlx::query_as(
|
||||
"SELECT id, version, tags, metadata FROM entries \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 \
|
||||
FOR UPDATE",
|
||||
)
|
||||
.bind(namespace)
|
||||
.bind(kind)
|
||||
.bind(name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
let Some(row) = row else {
|
||||
tx.rollback().await?;
|
||||
tracing::warn!(namespace, kind, name, "entry not found for deletion");
|
||||
let v = json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name});
|
||||
match output {
|
||||
OutputMode::Text => println!("Not found: [{}/{}] {}", namespace, kind, name),
|
||||
ref mode => print_json(&v, mode)?,
|
||||
}
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
snapshot_and_delete(&mut tx, namespace, kind, name, &row).await?;
|
||||
|
||||
crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await;
|
||||
tx.commit().await?;
|
||||
|
||||
let v = json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name});
|
||||
match output {
|
||||
OutputMode::Text => println!("Deleted: [{}/{}] {}", namespace, kind, name),
|
||||
ref mode => print_json(&v, mode)?,
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Bulk delete by namespace (+ optional kind filter) ─────────────────────
|
||||
|
||||
async fn delete_bulk(
|
||||
pool: &PgPool,
|
||||
namespace: &str,
|
||||
kind: Option<&str>,
|
||||
dry_run: bool,
|
||||
output: OutputMode,
|
||||
) -> Result<()> {
|
||||
tracing::debug!(namespace, ?kind, dry_run, "bulk-deleting entries");
|
||||
|
||||
let rows: Vec<FullEntryRow> = if let Some(k) = kind {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, kind, name, metadata, tags FROM entries \
|
||||
WHERE namespace = $1 AND kind = $2 \
|
||||
ORDER BY name",
|
||||
)
|
||||
.bind(namespace)
|
||||
.bind(k)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT id, version, kind, name, metadata, tags FROM entries \
|
||||
WHERE namespace = $1 \
|
||||
ORDER BY kind, name",
|
||||
)
|
||||
.bind(namespace)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
};
|
||||
|
||||
if rows.is_empty() {
|
||||
let v = json!({
|
||||
"action": "noop",
|
||||
"namespace": namespace,
|
||||
"kind": kind,
|
||||
"deleted": 0,
|
||||
"dry_run": dry_run
|
||||
});
|
||||
match output {
|
||||
OutputMode::Text => println!(
|
||||
"No records found in namespace \"{}\"{}.",
|
||||
namespace,
|
||||
kind.map(|k| format!(" with kind \"{}\"", k))
|
||||
.unwrap_or_default()
|
||||
),
|
||||
ref mode => print_json(&v, mode)?,
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if dry_run {
|
||||
let count = rows.len();
|
||||
match output {
|
||||
OutputMode::Text => {
|
||||
println!(
|
||||
"dry-run: would delete {} record(s) in namespace \"{}\":",
|
||||
count, namespace
|
||||
);
|
||||
for r in &rows {
|
||||
println!(" [{}/{}] {}", namespace, r.kind, r.name);
|
||||
}
|
||||
}
|
||||
ref mode => {
|
||||
let items: Vec<_> = rows
|
||||
.iter()
|
||||
.map(|r| json!({"namespace": namespace, "kind": r.kind, "name": r.name}))
|
||||
.collect();
|
||||
print_json(
|
||||
&json!({
|
||||
"action": "dry_run",
|
||||
"namespace": namespace,
|
||||
"kind": kind,
|
||||
"would_delete": count,
|
||||
"entries": items
|
||||
}),
|
||||
mode,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut deleted = Vec::with_capacity(rows.len());
|
||||
|
||||
for row in &rows {
|
||||
let entry_row = EntryRow {
|
||||
id: row.id,
|
||||
version: row.version,
|
||||
tags: row.tags.clone(),
|
||||
metadata: row.metadata.clone(),
|
||||
};
|
||||
let mut tx = pool.begin().await?;
|
||||
snapshot_and_delete(&mut tx, namespace, &row.kind, &row.name, &entry_row).await?;
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
"delete",
|
||||
namespace,
|
||||
&row.kind,
|
||||
&row.name,
|
||||
json!({"bulk": true}),
|
||||
)
|
||||
.await;
|
||||
tx.commit().await?;
|
||||
|
||||
deleted.push(json!({"namespace": namespace, "kind": row.kind, "name": row.name}));
|
||||
tracing::info!(namespace, kind = %row.kind, name = %row.name, "bulk deleted");
|
||||
}
|
||||
|
||||
let count = deleted.len();
|
||||
match output {
|
||||
OutputMode::Text => {
|
||||
for item in &deleted {
|
||||
println!(
|
||||
"Deleted: [{}/{}] {}",
|
||||
item["namespace"].as_str().unwrap_or(""),
|
||||
item["kind"].as_str().unwrap_or(""),
|
||||
item["name"].as_str().unwrap_or("")
|
||||
);
|
||||
}
|
||||
println!("Total: {} record(s) deleted.", count);
|
||||
}
|
||||
ref mode => print_json(
|
||||
&json!({
|
||||
"action": "deleted",
|
||||
"namespace": namespace,
|
||||
"kind": kind,
|
||||
"deleted": count,
|
||||
"entries": deleted
|
||||
}),
|
||||
mode,
|
||||
)?,
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Shared helper: snapshot history then DELETE ────────────────────────────
|
||||
|
||||
async fn snapshot_and_delete(
|
||||
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
||||
namespace: &str,
|
||||
kind: &str,
|
||||
name: &str,
|
||||
row: &EntryRow,
|
||||
) -> Result<()> {
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: row.id,
|
||||
namespace,
|
||||
kind,
|
||||
name,
|
||||
version: row.version,
|
||||
action: "delete",
|
||||
tags: &row.tags,
|
||||
metadata: &row.metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history before delete");
|
||||
}
|
||||
|
||||
let fields: Vec<SecretFieldRow> = sqlx::query_as(
|
||||
"SELECT id, field_name, encrypted \
|
||||
FROM secrets WHERE entry_id = $1",
|
||||
)
|
||||
.bind(row.id)
|
||||
.fetch_all(&mut **tx)
|
||||
.await?;
|
||||
|
||||
for f in &fields {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
tx,
|
||||
db::SecretSnapshotParams {
|
||||
entry_id: row.id,
|
||||
secret_id: f.id,
|
||||
entry_version: row.version,
|
||||
field_name: &f.field_name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "delete",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret history before delete");
|
||||
}
|
||||
}
|
||||
|
||||
sqlx::query("DELETE FROM entries WHERE id = $1")
|
||||
.bind(row.id)
|
||||
.execute(&mut **tx)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,109 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::BTreeMap;
|
||||
use std::io::Write;
|
||||
|
||||
use crate::commands::search::{fetch_entries, fetch_secrets_for_entries};
|
||||
use crate::crypto;
|
||||
use crate::models::{ExportData, ExportEntry, ExportFormat};
|
||||
|
||||
pub struct ExportArgs<'a> {
|
||||
pub namespace: Option<&'a str>,
|
||||
pub kind: Option<&'a str>,
|
||||
pub name: Option<&'a str>,
|
||||
pub tags: &'a [String],
|
||||
pub query: Option<&'a str>,
|
||||
/// Output file path. None means write to stdout.
|
||||
pub file: Option<&'a str>,
|
||||
/// Explicit format override (e.g. from --format flag).
|
||||
pub format: Option<&'a str>,
|
||||
/// When true, secrets are omitted and master_key is not used.
|
||||
pub no_secrets: bool,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, args: ExportArgs<'_>, master_key: Option<&[u8; 32]>) -> Result<()> {
|
||||
// Determine output format: --format > file extension > default JSON.
|
||||
let format = if let Some(fmt_str) = args.format {
|
||||
ExportFormat::from_str(fmt_str)?
|
||||
} else if let Some(path) = args.file {
|
||||
ExportFormat::from_extension(path).unwrap_or(ExportFormat::Json)
|
||||
} else {
|
||||
ExportFormat::Json
|
||||
};
|
||||
|
||||
let entries = fetch_entries(
|
||||
pool,
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name,
|
||||
args.tags,
|
||||
args.query,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let entry_ids: Vec<uuid::Uuid> = entries.iter().map(|e| e.id).collect();
|
||||
|
||||
let secrets_map = if !args.no_secrets && !entry_ids.is_empty() {
|
||||
fetch_secrets_for_entries(pool, &entry_ids).await?
|
||||
} else {
|
||||
std::collections::HashMap::new()
|
||||
};
|
||||
|
||||
let key = if !args.no_secrets { master_key } else { None };
|
||||
|
||||
let mut export_entries: Vec<ExportEntry> = Vec::with_capacity(entries.len());
|
||||
for entry in &entries {
|
||||
let secrets = if args.no_secrets {
|
||||
None
|
||||
} else {
|
||||
let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]);
|
||||
if fields.is_empty() {
|
||||
Some(BTreeMap::new())
|
||||
} else {
|
||||
let mk =
|
||||
key.ok_or_else(|| anyhow::anyhow!("master key required to decrypt secrets"))?;
|
||||
let mut map = BTreeMap::new();
|
||||
for f in fields {
|
||||
let decrypted = crypto::decrypt_json(mk, &f.encrypted)?;
|
||||
map.insert(f.field_name.clone(), decrypted);
|
||||
}
|
||||
Some(map)
|
||||
}
|
||||
};
|
||||
|
||||
export_entries.push(ExportEntry {
|
||||
namespace: entry.namespace.clone(),
|
||||
kind: entry.kind.clone(),
|
||||
name: entry.name.clone(),
|
||||
tags: entry.tags.clone(),
|
||||
metadata: entry.metadata.clone(),
|
||||
secrets,
|
||||
});
|
||||
}
|
||||
|
||||
let data = ExportData {
|
||||
version: 1,
|
||||
exported_at: chrono::Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
entries: export_entries,
|
||||
};
|
||||
|
||||
let serialized = format.serialize(&data)?;
|
||||
|
||||
if let Some(path) = args.file {
|
||||
std::fs::write(path, &serialized)?;
|
||||
println!(
|
||||
"Exported {} record(s) to {} ({:?})",
|
||||
data.entries.len(),
|
||||
path,
|
||||
format
|
||||
);
|
||||
} else {
|
||||
std::io::stdout().write_all(serialized.as_bytes())?;
|
||||
// Ensure trailing newline on stdout.
|
||||
if !serialized.ends_with('\n') {
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::{Value, json};
|
||||
use sqlx::{FromRow, PgPool};
|
||||
|
||||
use crate::output::{OutputMode, format_local_time, print_json};
|
||||
|
||||
pub struct HistoryArgs<'a> {
|
||||
pub namespace: &'a str,
|
||||
pub kind: &'a str,
|
||||
pub name: &'a str,
|
||||
pub limit: u32,
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
/// List history entries for an entry.
|
||||
pub async fn run(pool: &PgPool, args: HistoryArgs<'_>) -> Result<()> {
|
||||
#[derive(FromRow)]
|
||||
struct HistorySummary {
|
||||
version: i64,
|
||||
action: String,
|
||||
actor: String,
|
||||
created_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
let rows: Vec<HistorySummary> = sqlx::query_as(
|
||||
"SELECT version, action, actor, created_at FROM entries_history \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 \
|
||||
ORDER BY id DESC LIMIT $4",
|
||||
)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.bind(args.limit as i64)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
match args.output {
|
||||
OutputMode::Json | OutputMode::JsonCompact => {
|
||||
let arr: Vec<Value> = rows
|
||||
.iter()
|
||||
.map(|r| {
|
||||
json!({
|
||||
"version": r.version,
|
||||
"action": r.action,
|
||||
"actor": r.actor,
|
||||
"created_at": r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
print_json(&Value::Array(arr), &args.output)?;
|
||||
}
|
||||
_ => {
|
||||
if rows.is_empty() {
|
||||
println!(
|
||||
"No history found for [{}/{}] {}.",
|
||||
args.namespace, args.kind, args.name
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
println!(
|
||||
"History for [{}/{}] {}:",
|
||||
args.namespace, args.kind, args.name
|
||||
);
|
||||
for r in &rows {
|
||||
println!(
|
||||
" v{:<4} {:8} {} {}",
|
||||
r.version,
|
||||
r.action,
|
||||
r.actor,
|
||||
format_local_time(r.created_at)
|
||||
);
|
||||
}
|
||||
println!(" (use `secrets rollback --to-version <N>` to restore)");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,217 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use crate::commands::add::{self, AddArgs};
|
||||
use crate::models::ExportFormat;
|
||||
use crate::output::{OutputMode, print_json};
|
||||
|
||||
pub struct ImportArgs<'a> {
|
||||
pub file: &'a str,
|
||||
/// Overwrite existing records when there is a conflict (upsert).
|
||||
/// Without this flag, the import aborts on the first conflict.
|
||||
/// A future `--skip` flag could allow silently skipping conflicts and continuing.
|
||||
pub force: bool,
|
||||
/// Check and preview operations without writing to the database.
|
||||
pub dry_run: bool,
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
||||
let format = ExportFormat::from_extension(args.file)?;
|
||||
let content = std::fs::read_to_string(args.file)
|
||||
.map_err(|e| anyhow::anyhow!("Cannot read file '{}': {}", args.file, e))?;
|
||||
let data = format.deserialize(&content)?;
|
||||
|
||||
if data.version != 1 {
|
||||
anyhow::bail!(
|
||||
"Unsupported export version {}. Only version 1 is supported.",
|
||||
data.version
|
||||
);
|
||||
}
|
||||
|
||||
let total = data.entries.len();
|
||||
let mut inserted = 0usize;
|
||||
let mut skipped = 0usize;
|
||||
let mut failed = 0usize;
|
||||
|
||||
for entry in &data.entries {
|
||||
// Check if record already exists.
|
||||
let exists: bool = sqlx::query_scalar(
|
||||
"SELECT EXISTS(SELECT 1 FROM entries \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3)",
|
||||
)
|
||||
.bind(&entry.namespace)
|
||||
.bind(&entry.kind)
|
||||
.bind(&entry.name)
|
||||
.fetch_one(pool)
|
||||
.await
|
||||
.unwrap_or(false);
|
||||
|
||||
if exists && !args.force {
|
||||
let v = serde_json::json!({
|
||||
"action": "conflict",
|
||||
"namespace": entry.namespace,
|
||||
"kind": entry.kind,
|
||||
"name": entry.name,
|
||||
});
|
||||
match args.output {
|
||||
OutputMode::Text => eprintln!(
|
||||
"[{}/{}/{}] conflict — record already exists (use --force to overwrite)",
|
||||
entry.namespace, entry.kind, entry.name
|
||||
),
|
||||
ref mode => {
|
||||
// Write conflict notice to stderr so it does not mix with summary JSON.
|
||||
eprint!(
|
||||
"{}",
|
||||
if *mode == OutputMode::Json {
|
||||
serde_json::to_string_pretty(&v)?
|
||||
} else {
|
||||
serde_json::to_string(&v)?
|
||||
}
|
||||
);
|
||||
eprintln!();
|
||||
}
|
||||
}
|
||||
return Err(anyhow::anyhow!(
|
||||
"Import aborted: conflict on [{}/{}/{}]",
|
||||
entry.namespace,
|
||||
entry.kind,
|
||||
entry.name
|
||||
));
|
||||
}
|
||||
|
||||
let action = if exists { "upsert" } else { "insert" };
|
||||
|
||||
if args.dry_run {
|
||||
let v = serde_json::json!({
|
||||
"action": action,
|
||||
"namespace": entry.namespace,
|
||||
"kind": entry.kind,
|
||||
"name": entry.name,
|
||||
"dry_run": true,
|
||||
});
|
||||
match args.output {
|
||||
OutputMode::Text => println!(
|
||||
"[dry-run] {} [{}/{}/{}]",
|
||||
action, entry.namespace, entry.kind, entry.name
|
||||
),
|
||||
ref mode => print_json(&v, mode)?,
|
||||
}
|
||||
if exists {
|
||||
skipped += 1;
|
||||
} else {
|
||||
inserted += 1;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Build secret_entries: convert BTreeMap<String, Value> to Vec<String> ("key:=json")
|
||||
let secret_entries = build_secret_entries(entry.secrets.as_ref());
|
||||
|
||||
// Build meta_entries from metadata JSON object.
|
||||
let meta_entries = build_meta_entries(&entry.metadata);
|
||||
|
||||
match add::run(
|
||||
pool,
|
||||
AddArgs {
|
||||
namespace: &entry.namespace,
|
||||
kind: &entry.kind,
|
||||
name: &entry.name,
|
||||
tags: &entry.tags,
|
||||
meta_entries: &meta_entries,
|
||||
secret_entries: &secret_entries,
|
||||
output: OutputMode::Text,
|
||||
},
|
||||
master_key,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(()) => {
|
||||
let v = serde_json::json!({
|
||||
"action": action,
|
||||
"namespace": entry.namespace,
|
||||
"kind": entry.kind,
|
||||
"name": entry.name,
|
||||
});
|
||||
match args.output {
|
||||
OutputMode::Text => println!(
|
||||
"Imported [{}/{}/{}]",
|
||||
entry.namespace, entry.kind, entry.name
|
||||
),
|
||||
ref mode => print_json(&v, mode)?,
|
||||
}
|
||||
inserted += 1;
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!(
|
||||
"Error importing [{}/{}/{}]: {}",
|
||||
entry.namespace, entry.kind, entry.name, e
|
||||
);
|
||||
failed += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let summary = serde_json::json!({
|
||||
"total": total,
|
||||
"inserted": inserted,
|
||||
"skipped": skipped,
|
||||
"failed": failed,
|
||||
"dry_run": args.dry_run,
|
||||
});
|
||||
match args.output {
|
||||
OutputMode::Text => {
|
||||
if args.dry_run {
|
||||
println!(
|
||||
"\n[dry-run] {} total: {} would insert, {} would skip, {} would fail",
|
||||
total, inserted, skipped, failed
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"\nImport done: {} total — {} inserted, {} skipped, {} failed",
|
||||
total, inserted, skipped, failed
|
||||
);
|
||||
}
|
||||
}
|
||||
ref mode => print_json(&summary, mode)?,
|
||||
}
|
||||
|
||||
if failed > 0 {
|
||||
anyhow::bail!("{} record(s) failed to import", failed);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Convert metadata JSON object into Vec<String> of "key:=json_value" entries.
|
||||
fn build_meta_entries(metadata: &Value) -> Vec<String> {
|
||||
let mut entries = Vec::new();
|
||||
if let Some(obj) = metadata.as_object() {
|
||||
for (k, v) in obj {
|
||||
entries.push(value_to_kv_entry(k, v));
|
||||
}
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
/// Convert a BTreeMap<String, Value> (secrets) into Vec<String> of "key:=json_value" entries.
|
||||
fn build_secret_entries(secrets: Option<&BTreeMap<String, Value>>) -> Vec<String> {
|
||||
let mut entries = Vec::new();
|
||||
if let Some(map) = secrets {
|
||||
for (k, v) in map {
|
||||
entries.push(value_to_kv_entry(k, v));
|
||||
}
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
/// Convert a key/value pair to a CLI-style entry string.
|
||||
/// Strings use `key=value`; everything else uses `key:=<json>`.
|
||||
fn value_to_kv_entry(key: &str, value: &Value) -> String {
|
||||
match value {
|
||||
Value::String(s) => format!("{}={}", key, s),
|
||||
other => format!("{}:={}", key, other),
|
||||
}
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
use anyhow::{Context, Result};
|
||||
use rand::RngExt;
|
||||
use sqlx::PgPool;
|
||||
|
||||
use crate::{crypto, db};
|
||||
|
||||
const MIN_MASTER_PASSWORD_LEN: usize = 8;
|
||||
|
||||
pub async fn run(pool: &PgPool) -> Result<()> {
|
||||
println!("Initializing secrets master key...");
|
||||
println!();
|
||||
|
||||
// Read password (no echo)
|
||||
let password = rpassword::prompt_password(format!(
|
||||
"Enter master password (at least {} characters): ",
|
||||
MIN_MASTER_PASSWORD_LEN
|
||||
))
|
||||
.context("failed to read password")?;
|
||||
if password.chars().count() < MIN_MASTER_PASSWORD_LEN {
|
||||
anyhow::bail!(
|
||||
"Master password must be at least {} characters.",
|
||||
MIN_MASTER_PASSWORD_LEN
|
||||
);
|
||||
}
|
||||
let confirm = rpassword::prompt_password("Confirm master password: ")
|
||||
.context("failed to read password confirmation")?;
|
||||
if password != confirm {
|
||||
anyhow::bail!("Passwords do not match.");
|
||||
}
|
||||
|
||||
// Get or create Argon2id salt
|
||||
let salt = match db::load_argon2_salt(pool).await? {
|
||||
Some(existing) => {
|
||||
println!("Found existing salt in database (not the first device).");
|
||||
existing
|
||||
}
|
||||
None => {
|
||||
println!("Generating new Argon2id salt and storing in database...");
|
||||
let mut salt = vec![0u8; 16];
|
||||
rand::rng().fill(&mut salt[..]);
|
||||
db::store_argon2_salt(pool, &salt).await?;
|
||||
salt
|
||||
}
|
||||
};
|
||||
|
||||
// Derive master key
|
||||
print!("Deriving master key (Argon2id, this takes a moment)... ");
|
||||
let master_key = crypto::derive_master_key(&password, &salt)?;
|
||||
println!("done.");
|
||||
|
||||
// Store in OS Keychain
|
||||
crypto::store_master_key(&master_key)?;
|
||||
|
||||
// Self-test: encrypt and decrypt a canary value
|
||||
let canary = b"secrets-cli-canary";
|
||||
let enc = crypto::encrypt(&master_key, canary)?;
|
||||
let dec = crypto::decrypt(&master_key, &enc)?;
|
||||
if dec != canary {
|
||||
anyhow::bail!("Self-test failed: encryption roundtrip mismatch");
|
||||
}
|
||||
|
||||
println!();
|
||||
println!("Master key stored in OS Keychain.");
|
||||
println!("You can now use `secrets add` / `secrets search` commands.");
|
||||
println!();
|
||||
println!("IMPORTANT: Remember your master password — it is not stored anywhere.");
|
||||
println!(" On a new device, run `secrets init` with the same password.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
pub mod add;
|
||||
pub mod config;
|
||||
pub mod delete;
|
||||
pub mod export_cmd;
|
||||
pub mod history;
|
||||
pub mod import_cmd;
|
||||
pub mod init;
|
||||
pub mod rollback;
|
||||
pub mod run;
|
||||
pub mod search;
|
||||
pub mod update;
|
||||
pub mod upgrade;
|
||||
@@ -1,256 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::{Value, json};
|
||||
use sqlx::{FromRow, PgPool};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::db;
|
||||
use crate::output::{OutputMode, print_json};
|
||||
|
||||
pub struct RollbackArgs<'a> {
|
||||
pub namespace: &'a str,
|
||||
pub kind: &'a str,
|
||||
pub name: &'a str,
|
||||
/// Target entry version to restore. None → restore the most recent history entry.
|
||||
pub to_version: Option<i64>,
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
||||
// ── Find the target entry history snapshot ────────────────────────────────
|
||||
#[derive(FromRow)]
|
||||
struct EntryHistoryRow {
|
||||
entry_id: Uuid,
|
||||
version: i64,
|
||||
action: String,
|
||||
tags: Vec<String>,
|
||||
metadata: Value,
|
||||
}
|
||||
|
||||
let snap: Option<EntryHistoryRow> = if let Some(ver) = args.to_version {
|
||||
sqlx::query_as(
|
||||
"SELECT entry_id, version, action, tags, metadata \
|
||||
FROM entries_history \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 AND version = $4 \
|
||||
ORDER BY id DESC LIMIT 1",
|
||||
)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.bind(ver)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as(
|
||||
"SELECT entry_id, version, action, tags, metadata \
|
||||
FROM entries_history \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 \
|
||||
ORDER BY id DESC LIMIT 1",
|
||||
)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
};
|
||||
|
||||
let snap = snap.ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"No history found for [{}/{}] {}{}.",
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name,
|
||||
args.to_version
|
||||
.map(|v| format!(" at version {}", v))
|
||||
.unwrap_or_default()
|
||||
)
|
||||
})?;
|
||||
|
||||
// ── Find the matching secret field snapshots ──────────────────────────────
|
||||
#[derive(FromRow)]
|
||||
struct SecretHistoryRow {
|
||||
secret_id: Uuid,
|
||||
field_name: String,
|
||||
encrypted: Vec<u8>,
|
||||
action: String,
|
||||
}
|
||||
|
||||
let field_snaps: Vec<SecretHistoryRow> = sqlx::query_as(
|
||||
"SELECT secret_id, field_name, encrypted, action \
|
||||
FROM secrets_history \
|
||||
WHERE entry_id = $1 AND entry_version = $2 \
|
||||
ORDER BY field_name",
|
||||
)
|
||||
.bind(snap.entry_id)
|
||||
.bind(snap.version)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
// Validate: try decrypting all encrypted fields before writing anything.
|
||||
for f in &field_snaps {
|
||||
if f.action != "delete" && !f.encrypted.is_empty() {
|
||||
crypto::decrypt_json(master_key, &f.encrypted).map_err(|e| {
|
||||
anyhow::anyhow!(
|
||||
"Cannot decrypt snapshot for field '{}': {}",
|
||||
f.field_name,
|
||||
e
|
||||
)
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
// ── Snapshot the current live state before overwriting ────────────────────
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct LiveEntry {
|
||||
id: Uuid,
|
||||
version: i64,
|
||||
tags: Vec<String>,
|
||||
metadata: Value,
|
||||
}
|
||||
let live: Option<LiveEntry> = sqlx::query_as(
|
||||
"SELECT id, version, tags, metadata FROM entries \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 FOR UPDATE",
|
||||
)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some(ref lr) = live {
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: lr.id,
|
||||
namespace: args.namespace,
|
||||
kind: args.kind,
|
||||
name: args.name,
|
||||
version: lr.version,
|
||||
action: "rollback",
|
||||
tags: &lr.tags,
|
||||
metadata: &lr.metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry before rollback");
|
||||
}
|
||||
|
||||
// Snapshot existing secret fields.
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct LiveField {
|
||||
id: Uuid,
|
||||
field_name: String,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let live_fields: Vec<LiveField> = sqlx::query_as(
|
||||
"SELECT id, field_name, encrypted \
|
||||
FROM secrets WHERE entry_id = $1",
|
||||
)
|
||||
.bind(lr.id)
|
||||
.fetch_all(&mut *tx)
|
||||
.await?;
|
||||
|
||||
for f in &live_fields {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
entry_id: lr.id,
|
||||
secret_id: f.id,
|
||||
entry_version: lr.version,
|
||||
field_name: &f.field_name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "rollback",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field before rollback");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Restore entry row ─────────────────────────────────────────────────────
|
||||
sqlx::query(
|
||||
"INSERT INTO entries (id, namespace, kind, name, tags, metadata, version, updated_at) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, NOW()) \
|
||||
ON CONFLICT (namespace, kind, name) DO UPDATE SET \
|
||||
tags = EXCLUDED.tags, \
|
||||
metadata = EXCLUDED.metadata, \
|
||||
version = entries.version + 1, \
|
||||
updated_at = NOW()",
|
||||
)
|
||||
.bind(snap.entry_id)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.bind(&snap.tags)
|
||||
.bind(&snap.metadata)
|
||||
.bind(snap.version)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
// ── Restore secret fields ─────────────────────────────────────────────────
|
||||
// Delete all current fields and re-insert from snapshot
|
||||
// (only non-deleted fields from the snapshot are restored).
|
||||
sqlx::query("DELETE FROM secrets WHERE entry_id = $1")
|
||||
.bind(snap.entry_id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
for f in &field_snaps {
|
||||
if f.action == "delete" {
|
||||
// Field was deleted at this snapshot point — don't restore it.
|
||||
continue;
|
||||
}
|
||||
sqlx::query(
|
||||
"INSERT INTO secrets (id, entry_id, field_name, encrypted) \
|
||||
VALUES ($1, $2, $3, $4) \
|
||||
ON CONFLICT (entry_id, field_name) DO UPDATE SET \
|
||||
encrypted = EXCLUDED.encrypted, \
|
||||
version = secrets.version + 1, \
|
||||
updated_at = NOW()",
|
||||
)
|
||||
.bind(f.secret_id)
|
||||
.bind(snap.entry_id)
|
||||
.bind(&f.field_name)
|
||||
.bind(&f.encrypted)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
"rollback",
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name,
|
||||
json!({
|
||||
"restored_version": snap.version,
|
||||
"original_action": snap.action,
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
let result_json = json!({
|
||||
"action": "rolled_back",
|
||||
"namespace": args.namespace,
|
||||
"kind": args.kind,
|
||||
"name": args.name,
|
||||
"restored_version": snap.version,
|
||||
});
|
||||
|
||||
match args.output {
|
||||
OutputMode::Text => println!(
|
||||
"Rolled back: [{}/{}] {} → version {}",
|
||||
args.namespace, args.kind, args.name, snap.version
|
||||
),
|
||||
ref mode => print_json(&result_json, mode)?,
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,143 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::commands::search::{build_injected_env_map, fetch_entries, fetch_secrets_for_entries};
|
||||
use crate::output::OutputMode;
|
||||
|
||||
pub struct InjectArgs<'a> {
|
||||
pub namespace: Option<&'a str>,
|
||||
pub kind: Option<&'a str>,
|
||||
pub name: Option<&'a str>,
|
||||
pub tags: &'a [String],
|
||||
pub prefix: &'a str,
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
pub struct RunArgs<'a> {
|
||||
pub namespace: Option<&'a str>,
|
||||
pub kind: Option<&'a str>,
|
||||
pub name: Option<&'a str>,
|
||||
pub tags: &'a [String],
|
||||
pub prefix: &'a str,
|
||||
pub command: &'a [String],
|
||||
}
|
||||
|
||||
/// Fetch entries matching the filter and build a flat env map (decrypted secrets only, no metadata).
|
||||
pub async fn collect_env_map(
|
||||
pool: &PgPool,
|
||||
namespace: Option<&str>,
|
||||
kind: Option<&str>,
|
||||
name: Option<&str>,
|
||||
tags: &[String],
|
||||
prefix: &str,
|
||||
master_key: &[u8; 32],
|
||||
) -> Result<HashMap<String, String>> {
|
||||
if namespace.is_none() && kind.is_none() && name.is_none() && tags.is_empty() {
|
||||
anyhow::bail!(
|
||||
"At least one filter (--namespace, --kind, --name, or --tag) is required for inject/run"
|
||||
);
|
||||
}
|
||||
let entries = fetch_entries(pool, namespace, kind, name, tags, None).await?;
|
||||
if entries.is_empty() {
|
||||
anyhow::bail!("No records matched the given filters.");
|
||||
}
|
||||
|
||||
let entry_ids: Vec<uuid::Uuid> = entries.iter().map(|e| e.id).collect();
|
||||
let fields_map = fetch_secrets_for_entries(pool, &entry_ids).await?;
|
||||
|
||||
let mut map = HashMap::new();
|
||||
for entry in &entries {
|
||||
let empty = vec![];
|
||||
let fields = fields_map.get(&entry.id).unwrap_or(&empty);
|
||||
let row_map = build_injected_env_map(pool, entry, prefix, master_key, fields).await?;
|
||||
for (k, v) in row_map {
|
||||
map.insert(k, v);
|
||||
}
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
/// `inject` command: print env vars to stdout.
|
||||
pub async fn run_inject(pool: &PgPool, args: InjectArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
||||
let env_map = collect_env_map(
|
||||
pool,
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name,
|
||||
args.tags,
|
||||
args.prefix,
|
||||
master_key,
|
||||
)
|
||||
.await?;
|
||||
|
||||
match args.output {
|
||||
OutputMode::Json => {
|
||||
let obj: serde_json::Map<String, Value> = env_map
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, Value::String(v)))
|
||||
.collect();
|
||||
println!("{}", serde_json::to_string_pretty(&Value::Object(obj))?);
|
||||
}
|
||||
OutputMode::JsonCompact => {
|
||||
let obj: serde_json::Map<String, Value> = env_map
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, Value::String(v)))
|
||||
.collect();
|
||||
println!("{}", serde_json::to_string(&Value::Object(obj))?);
|
||||
}
|
||||
_ => {
|
||||
let mut pairs: Vec<(String, String)> = env_map.into_iter().collect();
|
||||
pairs.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
for (k, v) in pairs {
|
||||
println!("{}={}", k, shell_quote(&v));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// `run` command: inject secrets into a child process environment and execute.
|
||||
pub async fn run_exec(pool: &PgPool, args: RunArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
||||
if args.command.is_empty() {
|
||||
anyhow::bail!(
|
||||
"No command specified. Usage: secrets run [filter flags] -- <command> [args]"
|
||||
);
|
||||
}
|
||||
|
||||
let env_map = collect_env_map(
|
||||
pool,
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name,
|
||||
args.tags,
|
||||
args.prefix,
|
||||
master_key,
|
||||
)
|
||||
.await?;
|
||||
|
||||
tracing::debug!(
|
||||
vars = env_map.len(),
|
||||
cmd = args.command[0].as_str(),
|
||||
"injecting secrets into child process"
|
||||
);
|
||||
|
||||
let status = std::process::Command::new(&args.command[0])
|
||||
.args(&args.command[1..])
|
||||
.envs(&env_map)
|
||||
.status()
|
||||
.map_err(|e| anyhow::anyhow!("Failed to execute '{}': {}", args.command[0], e))?;
|
||||
|
||||
if !status.success() {
|
||||
let code = status.code().unwrap_or(1);
|
||||
std::process::exit(code);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
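For orientation, a typical pair of invocations for the two commands above, using only the filter flags named in this file's error messages (the full CLI surface is defined elsewhere and is not shown in this diff):

    secrets inject --namespace refining --tag prod
    secrets run --namespace refining --name gitea.main -- ./deploy.sh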
|
||||
|
||||
fn shell_quote(s: &str) -> String {
|
||||
format!("'{}'", s.replace('\'', "'\\''"))
|
||||
}
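A quick sanity check of the quoting scheme (illustrative only, not part of the original file): an embedded single quote is closed, backslash-escaped, and reopened, which is the POSIX-safe form.

    assert_eq!(shell_quote("it's"), r#"'it'\''s'"#);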
|
||||
@@ -1,568 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::{Value, json};
|
||||
use sqlx::PgPool;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::models::{Entry, SecretField};
|
||||
use crate::output::{OutputMode, format_local_time};
|
||||
|
||||
pub struct SearchArgs<'a> {
|
||||
pub namespace: Option<&'a str>,
|
||||
pub kind: Option<&'a str>,
|
||||
pub name: Option<&'a str>,
|
||||
pub tags: &'a [String],
|
||||
pub query: Option<&'a str>,
|
||||
pub fields: &'a [String],
|
||||
pub summary: bool,
|
||||
pub limit: u32,
|
||||
pub offset: u32,
|
||||
pub sort: &'a str,
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> {
|
||||
validate_safe_search_args(args.fields)?;
|
||||
|
||||
let rows = fetch_entries_paged(
|
||||
pool,
|
||||
PagedFetchArgs {
|
||||
namespace: args.namespace,
|
||||
kind: args.kind,
|
||||
name: args.name,
|
||||
tags: args.tags,
|
||||
query: args.query,
|
||||
sort: args.sort,
|
||||
limit: args.limit,
|
||||
offset: args.offset,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
// -f/--field: extract specific metadata field values directly
|
||||
if !args.fields.is_empty() {
|
||||
return print_fields(&rows, args.fields);
|
||||
}
|
||||
|
||||
// Fetch secret schemas for all returned entries (no master key needed).
|
||||
let entry_ids: Vec<uuid::Uuid> = rows.iter().map(|r| r.id).collect();
|
||||
let schema_map = if !args.summary && !entry_ids.is_empty() {
|
||||
fetch_secret_schemas(pool, &entry_ids).await?
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
match args.output {
|
||||
OutputMode::Json | OutputMode::JsonCompact => {
|
||||
let arr: Vec<Value> = rows
|
||||
.iter()
|
||||
.map(|r| to_json(r, args.summary, schema_map.get(&r.id).map(Vec::as_slice)))
|
||||
.collect();
|
||||
let out = if args.output == OutputMode::Json {
|
||||
serde_json::to_string_pretty(&arr)?
|
||||
} else {
|
||||
serde_json::to_string(&arr)?
|
||||
};
|
||||
println!("{}", out);
|
||||
}
|
||||
OutputMode::Text => {
|
||||
if rows.is_empty() {
|
||||
println!("No records found.");
|
||||
return Ok(());
|
||||
}
|
||||
for row in &rows {
|
||||
print_text(
|
||||
row,
|
||||
args.summary,
|
||||
schema_map.get(&row.id).map(Vec::as_slice),
|
||||
)?;
|
||||
}
|
||||
println!("{} record(s) found.", rows.len());
|
||||
if rows.len() == args.limit as usize {
|
||||
println!(
|
||||
" (showing up to {}; use --offset {} to see more)",
|
||||
args.limit,
|
||||
args.offset + args.limit
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn validate_safe_search_args(fields: &[String]) -> Result<()> {
|
||||
if let Some(field) = fields.iter().find(|field| is_secret_field(field)) {
|
||||
anyhow::bail!(
|
||||
"Field '{}' is sensitive. `search -f` only supports metadata.* fields; use `secrets inject` or `secrets run` for secrets.",
|
||||
field
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_secret_field(field: &str) -> bool {
|
||||
matches!(
|
||||
field.split_once('.').map(|(section, _)| section),
|
||||
Some("secret" | "secrets" | "encrypted")
|
||||
)
|
||||
}
|
||||
|
||||
// ── Entry fetching ────────────────────────────────────────────────────────────
|
||||
|
||||
struct PagedFetchArgs<'a> {
|
||||
namespace: Option<&'a str>,
|
||||
kind: Option<&'a str>,
|
||||
name: Option<&'a str>,
|
||||
tags: &'a [String],
|
||||
query: Option<&'a str>,
|
||||
sort: &'a str,
|
||||
limit: u32,
|
||||
offset: u32,
|
||||
}
|
||||
|
||||
/// A very large limit used when callers need all matching records (export, inject, run).
|
||||
/// Postgres will stop scanning when this many rows are found; adjust if needed.
|
||||
pub const FETCH_ALL_LIMIT: u32 = 100_000;
|
||||
|
||||
/// Fetch entries matching the given filters (used by search, inject, run).
|
||||
/// `limit` caps the result set; pass `FETCH_ALL_LIMIT` when you need all matching records.
|
||||
pub async fn fetch_entries(
|
||||
pool: &PgPool,
|
||||
namespace: Option<&str>,
|
||||
kind: Option<&str>,
|
||||
name: Option<&str>,
|
||||
tags: &[String],
|
||||
query: Option<&str>,
|
||||
) -> Result<Vec<Entry>> {
|
||||
fetch_entries_with_limit(pool, namespace, kind, name, tags, query, FETCH_ALL_LIMIT).await
|
||||
}
|
||||
|
||||
/// Like `fetch_entries` but with an explicit limit. Used internally by `search`.
|
||||
pub(crate) async fn fetch_entries_with_limit(
|
||||
pool: &PgPool,
|
||||
namespace: Option<&str>,
|
||||
kind: Option<&str>,
|
||||
name: Option<&str>,
|
||||
tags: &[String],
|
||||
query: Option<&str>,
|
||||
limit: u32,
|
||||
) -> Result<Vec<Entry>> {
|
||||
fetch_entries_paged(
|
||||
pool,
|
||||
PagedFetchArgs {
|
||||
namespace,
|
||||
kind,
|
||||
name,
|
||||
tags,
|
||||
query,
|
||||
sort: "name",
|
||||
limit,
|
||||
offset: 0,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn fetch_entries_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Entry>> {
|
||||
let mut conditions: Vec<String> = Vec::new();
|
||||
let mut idx: i32 = 1;
|
||||
|
||||
if a.namespace.is_some() {
|
||||
conditions.push(format!("namespace = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if a.kind.is_some() {
|
||||
conditions.push(format!("kind = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if a.name.is_some() {
|
||||
conditions.push(format!("name = ${}", idx));
|
||||
idx += 1;
|
||||
}
|
||||
if !a.tags.is_empty() {
|
||||
let placeholders: Vec<String> = a
|
||||
.tags
|
||||
.iter()
|
||||
.map(|_| {
|
||||
let p = format!("${}", idx);
|
||||
idx += 1;
|
||||
p
|
||||
})
|
||||
.collect();
|
||||
conditions.push(format!("tags @> ARRAY[{}]", placeholders.join(", ")));
|
||||
}
|
||||
if a.query.is_some() {
|
||||
conditions.push(format!(
|
||||
"(name ILIKE ${i} ESCAPE '\\' OR namespace ILIKE ${i} ESCAPE '\\' OR kind ILIKE ${i} ESCAPE '\\' OR metadata::text ILIKE ${i} ESCAPE '\\' OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))",
|
||||
i = idx
|
||||
));
|
||||
idx += 1;
|
||||
}
|
||||
|
||||
let where_clause = if conditions.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
format!("WHERE {}", conditions.join(" AND "))
|
||||
};
|
||||
|
||||
let order = match a.sort {
|
||||
"updated" => "updated_at DESC",
|
||||
"created" => "created_at DESC",
|
||||
_ => "namespace, kind, name",
|
||||
};
|
||||
|
||||
let sql = format!(
|
||||
"SELECT * FROM entries {} ORDER BY {} LIMIT ${} OFFSET ${}",
|
||||
where_clause,
|
||||
order,
|
||||
idx,
|
||||
idx + 1
|
||||
);
|
||||
|
||||
tracing::debug!(sql, "executing search query");
|
||||
|
||||
let mut q = sqlx::query_as::<_, Entry>(&sql);
|
||||
if let Some(v) = a.namespace {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.kind {
|
||||
q = q.bind(v);
|
||||
}
|
||||
if let Some(v) = a.name {
|
||||
q = q.bind(v);
|
||||
}
|
||||
for v in a.tags {
|
||||
q = q.bind(v.as_str());
|
||||
}
|
||||
if let Some(v) = a.query {
|
||||
q = q.bind(format!(
|
||||
"%{}%",
|
||||
v.replace('\\', "\\\\")
|
||||
.replace('%', "\\%")
|
||||
.replace('_', "\\_")
|
||||
));
|
||||
}
|
||||
q = q.bind(a.limit as i64).bind(a.offset as i64);
|
||||
|
||||
Ok(q.fetch_all(pool).await?)
|
||||
}
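To make the dynamic statement concrete, here is a sketch of what the builder above assembles for one namespace filter, one tag, and a free-text query under the default name sort (ESCAPE clauses elided; parameter numbers follow the push order in the code):

    SELECT * FROM entries
    WHERE namespace = $1
      AND tags @> ARRAY[$2]
      AND (name ILIKE $3 OR namespace ILIKE $3 OR kind ILIKE $3 OR metadata::text ILIKE $3 OR ...)
    ORDER BY namespace, kind, name
    LIMIT $4 OFFSET $5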
|
||||
|
||||
// ── Secret schema fetching (no master key) ───────────────────────────────────
|
||||
|
||||
/// Fetch secret field names for a set of entry ids.
|
||||
/// Returns a map from entry_id to list of SecretField.
|
||||
async fn fetch_secret_schemas(
|
||||
pool: &PgPool,
|
||||
entry_ids: &[uuid::Uuid],
|
||||
) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
|
||||
if entry_ids.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
|
||||
let fields: Vec<SecretField> = sqlx::query_as(
|
||||
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
|
||||
)
|
||||
.bind(entry_ids)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
|
||||
for f in fields {
|
||||
map.entry(f.entry_id).or_default().push(f);
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
/// Fetch all secret fields (including encrypted bytes) for a set of entry ids.
|
||||
pub async fn fetch_secrets_for_entries(
|
||||
pool: &PgPool,
|
||||
entry_ids: &[uuid::Uuid],
|
||||
) -> Result<HashMap<uuid::Uuid, Vec<SecretField>>> {
|
||||
if entry_ids.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
|
||||
let fields: Vec<SecretField> = sqlx::query_as(
|
||||
"SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name",
|
||||
)
|
||||
.bind(entry_ids)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let mut map: HashMap<uuid::Uuid, Vec<SecretField>> = HashMap::new();
|
||||
for f in fields {
|
||||
map.entry(f.entry_id).or_default().push(f);
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
// ── Display helpers ───────────────────────────────────────────────────────────
|
||||
|
||||
fn env_prefix(entry: &Entry, prefix: &str) -> String {
|
||||
let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_");
|
||||
if prefix.is_empty() {
|
||||
name_part
|
||||
} else {
|
||||
format!(
|
||||
"{}_{}",
|
||||
prefix.to_uppercase().replace(['-', '.', ' '], "_"),
|
||||
name_part
|
||||
)
|
||||
}
|
||||
}
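A hypothetical test for the naming rule above (not in the original diff; it reuses the sample_entry() helper defined in the tests module at the bottom of this file, whose entry name is "gitea.main"):

    #[test]
    fn env_prefix_examples() {
        let entry = sample_entry();
        // Dots, dashes, and spaces become underscores; everything is uppercased.
        assert_eq!(env_prefix(&entry, ""), "GITEA_MAIN");
        assert_eq!(env_prefix(&entry, "app"), "APP_GITEA_MAIN");
    }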
|
||||
|
||||
/// Build a flat KEY=VALUE map from decrypted secret fields only.
|
||||
/// Resolves key_ref: if metadata.key_ref is set, merges secret fields from that key entry.
|
||||
pub async fn build_injected_env_map(
|
||||
pool: &PgPool,
|
||||
entry: &Entry,
|
||||
prefix: &str,
|
||||
master_key: &[u8; 32],
|
||||
fields: &[SecretField],
|
||||
) -> Result<HashMap<String, String>> {
|
||||
let effective_prefix = env_prefix(entry, prefix);
|
||||
let mut map = HashMap::new();
|
||||
|
||||
// Decrypt each secret field and add to env map.
|
||||
for f in fields {
|
||||
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
|
||||
let key = format!(
|
||||
"{}_{}",
|
||||
effective_prefix,
|
||||
f.field_name.to_uppercase().replace(['-', '.'], "_")
|
||||
);
|
||||
map.insert(key, json_value_to_env_string(&decrypted));
|
||||
}
|
||||
|
||||
// Resolve key_ref: merge secrets from the referenced key entry.
|
||||
if let Some(key_ref) = entry.metadata.get("key_ref").and_then(|v| v.as_str()) {
|
||||
let key_entries = fetch_entries(
|
||||
pool,
|
||||
Some(&entry.namespace),
|
||||
Some("key"),
|
||||
Some(key_ref),
|
||||
&[],
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if let Some(key_entry) = key_entries.first() {
|
||||
let key_ids = vec![key_entry.id];
|
||||
let key_fields_map = fetch_secrets_for_entries(pool, &key_ids).await?;
|
||||
let empty = vec![];
|
||||
let key_fields = key_fields_map.get(&key_entry.id).unwrap_or(&empty);
|
||||
|
||||
let key_prefix = env_prefix(key_entry, prefix);
|
||||
for f in key_fields {
|
||||
let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?;
|
||||
let key_var = format!(
|
||||
"{}_{}",
|
||||
key_prefix,
|
||||
f.field_name.to_uppercase().replace(['-', '.'], "_")
|
||||
);
|
||||
map.insert(key_var, json_value_to_env_string(&decrypted));
|
||||
}
|
||||
} else {
|
||||
tracing::warn!(key_ref, "key_ref target not found");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
fn json_value_to_env_string(v: &Value) -> String {
|
||||
match v {
|
||||
Value::String(s) => s.clone(),
|
||||
Value::Null => String::new(),
|
||||
other => other.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn to_json(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Value {
|
||||
if summary {
|
||||
let desc = entry
|
||||
.metadata
|
||||
.get("desc")
|
||||
.or_else(|| entry.metadata.get("url"))
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
return json!({
|
||||
"namespace": entry.namespace,
|
||||
"kind": entry.kind,
|
||||
"name": entry.name,
|
||||
"tags": entry.tags,
|
||||
"desc": desc,
|
||||
"updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
let secrets_val: Value = match schema {
|
||||
Some(fields) if !fields.is_empty() => {
|
||||
let schema_arr: Vec<Value> = fields
|
||||
.iter()
|
||||
.map(|f| {
|
||||
json!({
|
||||
"field_name": f.field_name,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
Value::Array(schema_arr)
|
||||
}
|
||||
_ => Value::Array(vec![]),
|
||||
};
|
||||
|
||||
json!({
|
||||
"id": entry.id,
|
||||
"namespace": entry.namespace,
|
||||
"kind": entry.kind,
|
||||
"name": entry.name,
|
||||
"tags": entry.tags,
|
||||
"metadata": entry.metadata,
|
||||
"secrets": secrets_val,
|
||||
"version": entry.version,
|
||||
"created_at": entry.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
"updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
fn print_text(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Result<()> {
|
||||
println!("[{}/{}] {}", entry.namespace, entry.kind, entry.name);
|
||||
if summary {
|
||||
let desc = entry
|
||||
.metadata
|
||||
.get("desc")
|
||||
.or_else(|| entry.metadata.get("url"))
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("-");
|
||||
if !entry.tags.is_empty() {
|
||||
println!(" tags: [{}]", entry.tags.join(", "));
|
||||
}
|
||||
println!(" desc: {}", desc);
|
||||
println!(" updated: {}", format_local_time(entry.updated_at));
|
||||
} else {
|
||||
println!(" id: {}", entry.id);
|
||||
if !entry.tags.is_empty() {
|
||||
println!(" tags: [{}]", entry.tags.join(", "));
|
||||
}
|
||||
if entry.metadata.as_object().is_some_and(|m| !m.is_empty()) {
|
||||
println!(
|
||||
" metadata: {}",
|
||||
serde_json::to_string_pretty(&entry.metadata)?
|
||||
);
|
||||
}
|
||||
match schema {
|
||||
Some(fields) if !fields.is_empty() => {
|
||||
let schema_str: Vec<String> = fields.iter().map(|f| f.field_name.clone()).collect();
|
||||
println!(" secrets: {}", schema_str.join(", "));
|
||||
println!(" (use `secrets inject` or `secrets run` to get values)");
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
println!(" version: {}", entry.version);
|
||||
println!(" created: {}", format_local_time(entry.created_at));
|
||||
}
|
||||
println!();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Extract one or more metadata field paths like `metadata.url`.
|
||||
fn print_fields(rows: &[Entry], fields: &[String]) -> Result<()> {
|
||||
for row in rows {
|
||||
for field in fields {
|
||||
let val = extract_field(row, field)?;
|
||||
println!("{}", val);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn extract_field(entry: &Entry, field: &str) -> Result<String> {
|
||||
let (section, key) = field
|
||||
.split_once('.')
|
||||
.ok_or_else(|| anyhow::anyhow!("Invalid field path '{}'. Use metadata.<key>.", field))?;
|
||||
|
||||
let obj = match section {
|
||||
"metadata" | "meta" => &entry.metadata,
|
||||
other => anyhow::bail!("Unknown field section '{}'. Use 'metadata'.", other),
|
||||
};
|
||||
|
||||
obj.get(key)
|
||||
.and_then(|v| {
|
||||
v.as_str()
|
||||
.map(|s| s.to_string())
|
||||
.or_else(|| Some(v.to_string()))
|
||||
})
|
||||
.ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"Field '{}' not found in record [{}/{}/{}]",
|
||||
field,
|
||||
entry.namespace,
|
||||
entry.kind,
|
||||
entry.name
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use chrono::Utc;
|
||||
use serde_json::json;
|
||||
use uuid::Uuid;
|
||||
|
||||
fn sample_entry() -> Entry {
|
||||
Entry {
|
||||
id: Uuid::nil(),
|
||||
namespace: "refining".to_string(),
|
||||
kind: "service".to_string(),
|
||||
name: "gitea.main".to_string(),
|
||||
tags: vec!["prod".to_string()],
|
||||
metadata: json!({"url": "https://code.example.com", "enabled": true}),
|
||||
version: 1,
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
}
|
||||
}
|
||||
|
||||
fn sample_fields() -> Vec<SecretField> {
|
||||
let key = [0x42u8; 32];
|
||||
let enc = crypto::encrypt_json(&key, &json!("abc123")).unwrap();
|
||||
vec![SecretField {
|
||||
id: Uuid::nil(),
|
||||
entry_id: Uuid::nil(),
|
||||
field_name: "token".to_string(),
|
||||
encrypted: enc,
|
||||
version: 1,
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
}]
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_secret_field_extraction() {
|
||||
let fields = vec!["secret.token".to_string()];
|
||||
let err = validate_safe_search_args(&fields).unwrap_err();
|
||||
assert!(err.to_string().contains("sensitive"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn to_json_full_includes_secrets_schema() {
|
||||
let entry = sample_entry();
|
||||
let fields = sample_fields();
|
||||
let v = to_json(&entry, false, Some(&fields));
|
||||
|
||||
let secrets = v.get("secrets").unwrap().as_array().unwrap();
|
||||
assert_eq!(secrets.len(), 1);
|
||||
assert_eq!(secrets[0]["field_name"], "token");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn to_json_summary_omits_secrets_schema() {
|
||||
let entry = sample_entry();
|
||||
let fields = sample_fields();
|
||||
let v = to_json(&entry, true, Some(&fields));
|
||||
assert!(v.get("secrets").is_none());
|
||||
}
|
||||
}
|
||||
@@ -1,293 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::{Map, Value, json};
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use super::add::{
|
||||
collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path,
|
||||
parse_kv, remove_path,
|
||||
};
|
||||
use crate::crypto;
|
||||
use crate::db;
|
||||
use crate::models::EntryRow;
|
||||
use crate::output::{OutputMode, print_json};
|
||||
|
||||
pub struct UpdateArgs<'a> {
|
||||
pub namespace: &'a str,
|
||||
pub kind: &'a str,
|
||||
pub name: &'a str,
|
||||
pub add_tags: &'a [String],
|
||||
pub remove_tags: &'a [String],
|
||||
pub meta_entries: &'a [String],
|
||||
pub remove_meta: &'a [String],
|
||||
pub secret_entries: &'a [String],
|
||||
pub remove_secrets: &'a [String],
|
||||
pub output: OutputMode,
|
||||
}
|
||||
|
||||
pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
||||
let mut tx = pool.begin().await?;
|
||||
|
||||
let row: Option<EntryRow> = sqlx::query_as(
|
||||
"SELECT id, version, tags, metadata \
|
||||
FROM entries \
|
||||
WHERE namespace = $1 AND kind = $2 AND name = $3 \
|
||||
FOR UPDATE",
|
||||
)
|
||||
.bind(args.namespace)
|
||||
.bind(args.kind)
|
||||
.bind(args.name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
let row = row.ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"Not found: [{}/{}] {}. Use `add` to create it first.",
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name
|
||||
)
|
||||
})?;
|
||||
|
||||
// Snapshot current entry state before modifying.
|
||||
if let Err(e) = db::snapshot_entry_history(
|
||||
&mut tx,
|
||||
db::EntrySnapshotParams {
|
||||
entry_id: row.id,
|
||||
namespace: args.namespace,
|
||||
kind: args.kind,
|
||||
name: args.name,
|
||||
version: row.version,
|
||||
action: "update",
|
||||
tags: &row.tags,
|
||||
metadata: &row.metadata,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot entry history before update");
|
||||
}
|
||||
|
||||
// ── Merge tags ────────────────────────────────────────────────────────────
|
||||
let mut tags: Vec<String> = row.tags;
|
||||
for t in args.add_tags {
|
||||
if !tags.contains(t) {
|
||||
tags.push(t.clone());
|
||||
}
|
||||
}
|
||||
tags.retain(|t| !args.remove_tags.contains(t));
|
||||
|
||||
// ── Merge metadata ────────────────────────────────────────────────────────
|
||||
let mut meta_map: Map<String, Value> = match row.metadata {
|
||||
Value::Object(m) => m,
|
||||
_ => Map::new(),
|
||||
};
|
||||
for entry in args.meta_entries {
|
||||
let (path, value) = parse_kv(entry)?;
|
||||
insert_path(&mut meta_map, &path, value)?;
|
||||
}
|
||||
for key in args.remove_meta {
|
||||
let path = parse_key_path(key)?;
|
||||
remove_path(&mut meta_map, &path)?;
|
||||
}
|
||||
let metadata = Value::Object(meta_map);
|
||||
|
||||
// CAS update of the entry row.
|
||||
let result = sqlx::query(
|
||||
"UPDATE entries \
|
||||
SET tags = $1, metadata = $2, version = version + 1, updated_at = NOW() \
|
||||
WHERE id = $3 AND version = $4",
|
||||
)
|
||||
.bind(&tags)
|
||||
.bind(&metadata)
|
||||
.bind(row.id)
|
||||
.bind(row.version)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if result.rows_affected() == 0 {
|
||||
tx.rollback().await?;
|
||||
anyhow::bail!(
|
||||
"Concurrent modification detected for [{}/{}] {}. Please retry.",
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name
|
||||
);
|
||||
}
|
||||
|
||||
let new_version = row.version + 1;
|
||||
|
||||
// ── Update secret fields ──────────────────────────────────────────────────
|
||||
for entry in args.secret_entries {
|
||||
let (path, field_value) = parse_kv(entry)?;
|
||||
|
||||
// For nested paths (e.g. credentials:type), flatten into dot-separated names
|
||||
// and treat the sub-value as the individual field to store.
|
||||
let flat = flatten_json_fields("", &{
|
||||
let mut m = Map::new();
|
||||
insert_path(&mut m, &path, field_value)?;
|
||||
Value::Object(m)
|
||||
});
|
||||
|
||||
for (field_name, fv) in &flat {
|
||||
let encrypted = crypto::encrypt_json(master_key, fv)?;
|
||||
|
||||
// Snapshot existing field before replacing.
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct ExistingField {
|
||||
id: Uuid,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let existing_field: Option<ExistingField> = sqlx::query_as(
|
||||
"SELECT id, encrypted \
|
||||
FROM secrets WHERE entry_id = $1 AND field_name = $2",
|
||||
)
|
||||
.bind(row.id)
|
||||
.bind(field_name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some(ef) = &existing_field
|
||||
&& let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
entry_id: row.id,
|
||||
secret_id: ef.id,
|
||||
entry_version: row.version,
|
||||
field_name,
|
||||
encrypted: &ef.encrypted,
|
||||
action: "update",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field history");
|
||||
}
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO secrets (entry_id, field_name, encrypted) \
|
||||
VALUES ($1, $2, $3) \
|
||||
ON CONFLICT (entry_id, field_name) DO UPDATE SET \
|
||||
encrypted = EXCLUDED.encrypted, \
|
||||
version = secrets.version + 1, \
|
||||
updated_at = NOW()",
|
||||
)
|
||||
.bind(row.id)
|
||||
.bind(field_name)
|
||||
.bind(&encrypted)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
// ── Remove secret fields ──────────────────────────────────────────────────
|
||||
for key in args.remove_secrets {
|
||||
let path = parse_key_path(key)?;
|
||||
// Dot-join the path to match flattened field_name storage.
|
||||
let field_name = path.join(".");
|
||||
|
||||
// Snapshot before delete.
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct FieldToDelete {
|
||||
id: Uuid,
|
||||
encrypted: Vec<u8>,
|
||||
}
|
||||
let field: Option<FieldToDelete> = sqlx::query_as(
|
||||
"SELECT id, encrypted \
|
||||
FROM secrets WHERE entry_id = $1 AND field_name = $2",
|
||||
)
|
||||
.bind(row.id)
|
||||
.bind(&field_name)
|
||||
.fetch_optional(&mut *tx)
|
||||
.await?;
|
||||
|
||||
if let Some(f) = field {
|
||||
if let Err(e) = db::snapshot_secret_history(
|
||||
&mut tx,
|
||||
db::SecretSnapshotParams {
|
||||
entry_id: row.id,
|
||||
secret_id: f.id,
|
||||
entry_version: new_version,
|
||||
field_name: &field_name,
|
||||
encrypted: &f.encrypted,
|
||||
action: "delete",
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to snapshot secret field history before delete");
|
||||
}
|
||||
|
||||
sqlx::query("DELETE FROM secrets WHERE id = $1")
|
||||
.bind(f.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
let meta_keys = collect_key_paths(args.meta_entries)?;
|
||||
let remove_meta_keys = collect_field_paths(args.remove_meta)?;
|
||||
let secret_keys = collect_key_paths(args.secret_entries)?;
|
||||
let remove_secret_keys = collect_field_paths(args.remove_secrets)?;
|
||||
|
||||
crate::audit::log_tx(
|
||||
&mut tx,
|
||||
"update",
|
||||
args.namespace,
|
||||
args.kind,
|
||||
args.name,
|
||||
json!({
|
||||
"add_tags": args.add_tags,
|
||||
"remove_tags": args.remove_tags,
|
||||
"meta_keys": meta_keys,
|
||||
"remove_meta": remove_meta_keys,
|
||||
"secret_keys": secret_keys,
|
||||
"remove_secrets": remove_secret_keys,
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
tx.commit().await?;
|
||||
|
||||
let result_json = json!({
|
||||
"action": "updated",
|
||||
"namespace": args.namespace,
|
||||
"kind": args.kind,
|
||||
"name": args.name,
|
||||
"add_tags": args.add_tags,
|
||||
"remove_tags": args.remove_tags,
|
||||
"meta_keys": meta_keys,
|
||||
"remove_meta": remove_meta_keys,
|
||||
"secret_keys": secret_keys,
|
||||
"remove_secrets": remove_secret_keys,
|
||||
});
|
||||
|
||||
match args.output {
|
||||
OutputMode::Json | OutputMode::JsonCompact => {
|
||||
print_json(&result_json, &args.output)?;
|
||||
}
|
||||
_ => {
|
||||
println!("Updated: [{}/{}] {}", args.namespace, args.kind, args.name);
|
||||
if !args.add_tags.is_empty() {
|
||||
println!(" +tags: {}", args.add_tags.join(", "));
|
||||
}
|
||||
if !args.remove_tags.is_empty() {
|
||||
println!(" -tags: {}", args.remove_tags.join(", "));
|
||||
}
|
||||
if !args.meta_entries.is_empty() {
|
||||
println!(" +metadata: {}", meta_keys.join(", "));
|
||||
}
|
||||
if !args.remove_meta.is_empty() {
|
||||
println!(" -metadata: {}", remove_meta_keys.join(", "));
|
||||
}
|
||||
if !args.secret_entries.is_empty() {
|
||||
println!(" +secrets: {}", secret_keys.join(", "));
|
||||
}
|
||||
if !args.remove_secrets.is_empty() {
|
||||
println!(" -secrets: {}", remove_secret_keys.join(", "));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,411 +0,0 @@
|
||||
use anyhow::{Context, Result, bail};
|
||||
use flate2::read::GzDecoder;
|
||||
use serde::Deserialize;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::io::{Cursor, Read, Write};
|
||||
use std::time::Duration;
|
||||
|
||||
const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
/// Build-time config via `option_env!("SECRETS_UPGRADE_URL")`. Set during `cargo build`, e.g.:
|
||||
/// SECRETS_UPGRADE_URL=https://... cargo build --release
|
||||
const BUILD_UPGRADE_URL: Option<&'static str> = option_env!("SECRETS_UPGRADE_URL");
|
||||
|
||||
fn upgrade_api_url() -> Result<String> {
|
||||
if let Some(url) = BUILD_UPGRADE_URL.filter(|s| !s.trim().is_empty()) {
|
||||
return Ok(url.to_string());
|
||||
}
|
||||
let url = std::env::var("SECRETS_UPGRADE_URL").context(
|
||||
"SECRETS_UPGRADE_URL is not set at build or runtime. Set it when building: \
|
||||
SECRETS_UPGRADE_URL=https://... cargo build, or export before running secrets upgrade.",
|
||||
)?;
|
||||
if url.trim().is_empty() {
|
||||
anyhow::bail!("SECRETS_UPGRADE_URL is empty.");
|
||||
}
|
||||
Ok(url)
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Release {
|
||||
tag_name: String,
|
||||
assets: Vec<Asset>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Asset {
|
||||
name: String,
|
||||
browser_download_url: String,
|
||||
}
|
||||
|
||||
fn available_assets(assets: &[Asset]) -> String {
|
||||
assets
|
||||
.iter()
|
||||
.map(|a| a.name.as_str())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
}
|
||||
|
||||
fn release_asset_name(tag_name: &str, suffix: &str) -> String {
|
||||
format!("secrets-{tag_name}-{suffix}")
|
||||
}
|
||||
|
||||
fn find_asset_by_name<'a>(assets: &'a [Asset], name: &str) -> Result<&'a Asset> {
|
||||
assets.iter().find(|a| a.name == name).with_context(|| {
|
||||
format!(
|
||||
"no matching release asset found: {name}\navailable: {}",
|
||||
available_assets(assets)
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Detect the asset suffix for the current platform/arch at compile time.
|
||||
fn platform_asset_suffix() -> Result<&'static str> {
|
||||
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
|
||||
{
|
||||
Ok("x86_64-linux-musl.tar.gz")
|
||||
}
|
||||
|
||||
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
|
||||
{
|
||||
Ok("aarch64-macos.tar.gz")
|
||||
}
|
||||
|
||||
#[cfg(all(target_os = "macos", target_arch = "x86_64"))]
|
||||
{
|
||||
Ok("x86_64-macos.tar.gz")
|
||||
}
|
||||
|
||||
#[cfg(all(target_os = "windows", target_arch = "x86_64"))]
|
||||
{
|
||||
Ok("x86_64-windows.zip")
|
||||
}
|
||||
|
||||
#[cfg(not(any(
|
||||
all(target_os = "linux", target_arch = "x86_64"),
|
||||
all(target_os = "macos", target_arch = "aarch64"),
|
||||
all(target_os = "macos", target_arch = "x86_64"),
|
||||
all(target_os = "windows", target_arch = "x86_64"),
|
||||
)))]
|
||||
bail!(
|
||||
"Unsupported platform: {}/{}",
|
||||
std::env::consts::OS,
|
||||
std::env::consts::ARCH
|
||||
)
|
||||
}
|
||||
|
||||
/// Strip the "secrets-" prefix from the tag and parse as semver.
|
||||
fn parse_tag_version(tag: &str) -> Result<semver::Version> {
|
||||
let ver_str = tag
|
||||
.strip_prefix("secrets-")
|
||||
.with_context(|| format!("unexpected tag format: {tag}"))?;
|
||||
semver::Version::parse(ver_str)
|
||||
.with_context(|| format!("failed to parse version from tag: {tag}"))
|
||||
}
|
||||
|
||||
fn sha256_hex(bytes: &[u8]) -> String {
|
||||
let digest = Sha256::digest(bytes);
|
||||
format!("{digest:x}")
|
||||
}
|
||||
|
||||
fn verify_checksum(asset_name: &str, archive: &[u8], checksum_contents: &str) -> Result<String> {
|
||||
let expected_checksum = parse_checksum_file(checksum_contents)?;
|
||||
let actual_checksum = sha256_hex(archive);
|
||||
|
||||
if actual_checksum != expected_checksum {
|
||||
bail!(
|
||||
"checksum verification failed for {}: expected {}, got {}",
|
||||
asset_name,
|
||||
expected_checksum,
|
||||
actual_checksum
|
||||
);
|
||||
}
|
||||
|
||||
Ok(actual_checksum)
|
||||
}
|
||||
|
||||
fn parse_checksum_file(contents: &str) -> Result<String> {
|
||||
let checksum = contents
|
||||
.split_whitespace()
|
||||
.next()
|
||||
.context("checksum file is empty")?
|
||||
.trim()
|
||||
.to_ascii_lowercase();
|
||||
|
||||
if checksum.len() != 64 || !checksum.bytes().all(|b| b.is_ascii_hexdigit()) {
|
||||
bail!("invalid SHA-256 checksum format")
|
||||
}
|
||||
|
||||
Ok(checksum)
|
||||
}
|
||||
|
||||
async fn download_bytes(client: &reqwest::Client, url: &str, context: &str) -> Result<Vec<u8>> {
|
||||
Ok(client
|
||||
.get(url)
|
||||
.send()
|
||||
.await
|
||||
.with_context(|| format!("{context}: request failed"))?
|
||||
.error_for_status()
|
||||
.with_context(|| format!("{context}: server returned an error"))?
|
||||
.bytes()
|
||||
.await
|
||||
.with_context(|| format!("{context}: failed to read response body"))?
|
||||
.to_vec())
|
||||
}
|
||||
|
||||
/// Extract the binary from a tar.gz archive (first file whose name == "secrets").
|
||||
fn extract_from_targz(bytes: &[u8]) -> Result<Vec<u8>> {
|
||||
let gz = GzDecoder::new(Cursor::new(bytes));
|
||||
let mut archive = tar::Archive::new(gz);
|
||||
for entry in archive.entries().context("failed to read tar entries")? {
|
||||
let mut entry = entry.context("bad tar entry")?;
|
||||
let path = entry.path().context("bad tar entry path")?.into_owned();
|
||||
let fname = path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or_default();
|
||||
if fname == "secrets" || fname == "secrets.exe" {
|
||||
let mut buf = Vec::new();
|
||||
entry.read_to_end(&mut buf).context("read tar entry")?;
|
||||
return Ok(buf);
|
||||
}
|
||||
}
|
||||
bail!("binary not found inside tar.gz archive")
|
||||
}
|
||||
|
||||
/// Extract the binary from a zip archive (first file whose name matches).
|
||||
#[cfg(target_os = "windows")]
|
||||
fn extract_from_zip(bytes: &[u8]) -> Result<Vec<u8>> {
|
||||
let reader = Cursor::new(bytes);
|
||||
let mut archive = zip::ZipArchive::new(reader).context("failed to open zip archive")?;
|
||||
for i in 0..archive.len() {
|
||||
let mut file = archive.by_index(i).context("bad zip entry")?;
|
||||
let fname = file.name().to_owned();
|
||||
if fname.ends_with("secrets.exe") || fname.ends_with("secrets") {
|
||||
let mut buf = Vec::new();
|
||||
file.read_to_end(&mut buf).context("read zip entry")?;
|
||||
return Ok(buf);
|
||||
}
|
||||
}
|
||||
bail!("binary not found inside zip archive")
|
||||
}
|
||||
|
||||
pub async fn run(check_only: bool) -> Result<()> {
|
||||
let current = semver::Version::parse(CURRENT_VERSION).context("invalid current version")?;
|
||||
|
||||
println!("Current version: v{current}");
|
||||
println!("Checking for updates...");
|
||||
|
||||
let client = reqwest::Client::builder()
|
||||
.user_agent(format!("secrets-cli/{CURRENT_VERSION}"))
|
||||
.connect_timeout(Duration::from_secs(10))
|
||||
.timeout(Duration::from_secs(120))
|
||||
.build()
|
||||
.context("failed to build HTTP client")?;
|
||||
|
||||
let api_url = upgrade_api_url()?;
|
||||
let release: Release = client
|
||||
.get(&api_url)
|
||||
.send()
|
||||
.await
|
||||
.context("failed to fetch release info")?
|
||||
.error_for_status()
|
||||
.context("release API returned an error")?
|
||||
.json()
|
||||
.await
|
||||
.context("failed to parse release JSON")?;
|
||||
|
||||
let latest = parse_tag_version(&release.tag_name)?;
|
||||
|
||||
if latest <= current {
|
||||
println!("Already up to date (v{current})");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
println!("New version available: v{latest}");
|
||||
|
||||
if check_only {
|
||||
println!("Run `secrets upgrade` to update.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let suffix = platform_asset_suffix()?;
|
||||
let asset_name = release_asset_name(&release.tag_name, suffix);
|
||||
let asset = find_asset_by_name(&release.assets, &asset_name)?;
|
||||
let checksum_name = format!("{}.sha256", asset.name);
|
||||
let checksum_asset = find_asset_by_name(&release.assets, &checksum_name)?;
|
||||
|
||||
println!("Downloading {}...", asset.name);
|
||||
|
||||
let archive = download_bytes(&client, &asset.browser_download_url, "archive download").await?;
|
||||
let checksum_contents = download_bytes(
|
||||
&client,
|
||||
&checksum_asset.browser_download_url,
|
||||
"checksum download",
|
||||
)
|
||||
.await?;
|
||||
let actual_checksum = verify_checksum(
|
||||
&asset.name,
|
||||
&archive,
|
||||
std::str::from_utf8(&checksum_contents).context("checksum file is not valid UTF-8")?,
|
||||
)?;
|
||||
|
||||
println!("Verified SHA-256: {actual_checksum}");
|
||||
|
||||
println!("Extracting...");
|
||||
|
||||
let binary = if suffix.ends_with(".tar.gz") {
|
||||
extract_from_targz(&archive)?
|
||||
} else {
|
||||
#[cfg(target_os = "windows")]
|
||||
{
|
||||
extract_from_zip(&archive)?
|
||||
}
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
bail!("zip extraction is only supported on Windows")
|
||||
};
|
||||
|
||||
// Write to a temporary file, set executable permission, then atomically replace.
|
||||
let mut tmp = tempfile::NamedTempFile::new().context("failed to create temp file")?;
|
||||
tmp.write_all(&binary)
|
||||
.context("failed to write temp binary")?;
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let perms = std::fs::Permissions::from_mode(0o755);
|
||||
std::fs::set_permissions(tmp.path(), perms).context("failed to chmod temp binary")?;
|
||||
}
|
||||
|
||||
self_replace::self_replace(tmp.path()).context("failed to replace current binary")?;
|
||||
|
||||
println!("Updated: v{current} → v{latest}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use flate2::Compression;
|
||||
use flate2::write::GzEncoder;
|
||||
use tar::Builder;
|
||||
|
||||
#[test]
|
||||
fn parse_tag_version_accepts_release_tag() {
|
||||
let version = parse_tag_version("secrets-0.6.1").expect("version should parse");
|
||||
assert_eq!(version, semver::Version::new(0, 6, 1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_tag_version_rejects_invalid_tag() {
|
||||
let err = parse_tag_version("v0.6.1").expect_err("tag should be rejected");
|
||||
assert!(err.to_string().contains("unexpected tag format"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_checksum_file_accepts_sha256sum_format() {
|
||||
let checksum = parse_checksum_file(
|
||||
"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef secrets.tar.gz",
|
||||
)
|
||||
.expect("checksum should parse");
|
||||
assert_eq!(
|
||||
checksum,
|
||||
"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_checksum_file_rejects_invalid_checksum() {
|
||||
let err = parse_checksum_file("not-a-sha256").expect_err("checksum should be rejected");
|
||||
assert!(err.to_string().contains("invalid SHA-256 checksum format"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn release_asset_name_matches_release_tag() {
|
||||
assert_eq!(
|
||||
release_asset_name("secrets-0.7.0", "x86_64-linux-musl.tar.gz"),
|
||||
"secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn find_asset_by_name_rejects_stale_platform_match() {
|
||||
let assets = vec![
|
||||
Asset {
|
||||
name: "secrets-secrets-0.6.9-x86_64-linux-musl.tar.gz".into(),
|
||||
browser_download_url: "https://example.invalid/old".into(),
|
||||
},
|
||||
Asset {
|
||||
name: "secrets-secrets-0.7.0-aarch64-macos.tar.gz".into(),
|
||||
browser_download_url: "https://example.invalid/other".into(),
|
||||
},
|
||||
];
|
||||
|
||||
let err = find_asset_by_name(&assets, "secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz")
|
||||
.expect_err("stale asset should not match");
|
||||
|
||||
assert!(err.to_string().contains("no matching release asset found"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sha256_hex_matches_known_value() {
|
||||
assert_eq!(
|
||||
sha256_hex(b"abc"),
|
||||
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn verify_checksum_rejects_mismatch() {
|
||||
let err = verify_checksum(
|
||||
"secrets.tar.gz",
|
||||
b"abc",
|
||||
"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef secrets.tar.gz",
|
||||
)
|
||||
.expect_err("checksum mismatch should fail");
|
||||
|
||||
assert!(err.to_string().contains("checksum verification failed"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_from_targz_reads_binary() {
|
||||
let payload = b"fake-secrets-binary";
|
||||
let archive = make_test_targz("secrets", payload);
|
||||
let extracted = extract_from_targz(&archive).expect("binary should extract");
|
||||
assert_eq!(extracted, payload);
|
||||
}
|
||||
|
||||
fn make_test_targz(name: &str, payload: &[u8]) -> Vec<u8> {
|
||||
let encoder = GzEncoder::new(Vec::new(), Compression::default());
|
||||
let mut builder = Builder::new(encoder);
|
||||
|
||||
let mut header = tar::Header::new_gnu();
|
||||
header.set_mode(0o755);
|
||||
header.set_size(payload.len() as u64);
|
||||
header.set_cksum();
|
||||
builder
|
||||
.append_data(&mut header, name, payload)
|
||||
.expect("append tar entry");
|
||||
|
||||
let encoder = builder.into_inner().expect("finish tar builder");
|
||||
encoder.finish().expect("finish gzip")
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
#[test]
|
||||
fn extract_from_zip_reads_binary() {
|
||||
use zip::write::SimpleFileOptions;
|
||||
|
||||
let cursor = Cursor::new(Vec::<u8>::new());
|
||||
let mut writer = zip::ZipWriter::new(cursor);
|
||||
writer
|
||||
.start_file("secrets.exe", SimpleFileOptions::default())
|
||||
.expect("start zip file");
|
||||
writer
|
||||
.write_all(b"fake-secrets-binary")
|
||||
.expect("write zip payload");
|
||||
let bytes = writer.finish().expect("finish zip").into_inner();
|
||||
|
||||
let extracted = extract_from_zip(&bytes).expect("binary should extract");
|
||||
assert_eq!(extracted, b"fake-secrets-binary");
|
||||
}
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
use anyhow::{Context, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default)]
|
||||
pub struct Config {
|
||||
pub database_url: Option<String>,
|
||||
}
|
||||
|
||||
pub fn config_dir() -> Result<PathBuf> {
|
||||
let dir = dirs::config_dir()
|
||||
.or_else(|| dirs::home_dir().map(|h| h.join(".config")))
|
||||
.context(
|
||||
"Cannot determine config directory: \
|
||||
neither XDG_CONFIG_HOME nor HOME is set",
|
||||
)?
|
||||
.join("secrets");
|
||||
Ok(dir)
|
||||
}
|
||||
|
||||
pub fn config_path() -> Result<PathBuf> {
|
||||
Ok(config_dir()?.join("config.toml"))
|
||||
}
|
||||
|
||||
pub fn load_config() -> Result<Config> {
|
||||
let path = config_path()?;
|
||||
if !path.exists() {
|
||||
return Ok(Config::default());
|
||||
}
|
||||
let content = fs::read_to_string(&path)
|
||||
.with_context(|| format!("failed to read config file: {}", path.display()))?;
|
||||
let config: Config = toml::from_str(&content)
|
||||
.with_context(|| format!("failed to parse config file: {}", path.display()))?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn save_config(config: &Config) -> Result<()> {
|
||||
let dir = config_dir()?;
|
||||
fs::create_dir_all(&dir)
|
||||
.with_context(|| format!("failed to create config dir: {}", dir.display()))?;
|
||||
|
||||
let path = dir.join("config.toml");
|
||||
let content = toml::to_string_pretty(config).context("failed to serialize config")?;
|
||||
fs::write(&path, &content)
|
||||
.with_context(|| format!("failed to write config file: {}", path.display()))?;
|
||||
|
||||
// Set file permissions to 0600 (owner read/write only)
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let perms = fs::Permissions::from_mode(0o600);
|
||||
fs::set_permissions(&path, perms)
|
||||
.with_context(|| format!("failed to set file permissions: {}", path.display()))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Resolve database URL by priority:
|
||||
/// 1. --db-url CLI flag (if non-empty)
|
||||
/// 2. database_url in ~/.config/secrets/config.toml
|
||||
/// 3. Error with setup instructions
|
||||
pub fn resolve_db_url(cli_db_url: &str) -> Result<String> {
|
||||
if !cli_db_url.is_empty() {
|
||||
return Ok(cli_db_url.to_string());
|
||||
}
|
||||
|
||||
let config = load_config()?;
|
||||
if let Some(url) = config.database_url
|
||||
&& !url.is_empty()
|
||||
{
|
||||
return Ok(url);
|
||||
}
|
||||
|
||||
anyhow::bail!("Database not configured. Run:\n\n secrets config set-db <DATABASE_URL>\n")
|
||||
}
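Illustrative resolution order under the priority rules documented above (assumes no config file is present unless noted):

    // 1. A non-empty --db-url flag always wins, regardless of config contents:
    assert_eq!(resolve_db_url("postgres://cli-flag").unwrap(), "postgres://cli-flag");
    // 2. With an empty flag, the `database_url` key in ~/.config/secrets/config.toml is used;
    // 3. if that is also missing or empty, the call fails with setup instructions.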
|
||||
src/db.rs
@@ -1,215 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use serde_json::Value;
|
||||
use sqlx::PgPool;
|
||||
use sqlx::postgres::PgPoolOptions;
|
||||
|
||||
use crate::audit::current_actor;
|
||||
|
||||
pub async fn create_pool(database_url: &str) -> Result<PgPool> {
|
||||
tracing::debug!("connecting to database");
|
||||
let pool = PgPoolOptions::new()
|
||||
.max_connections(5)
|
||||
.acquire_timeout(std::time::Duration::from_secs(5))
.connect(database_url)
.await?;
tracing::debug!("database connection established");
Ok(pool)
}

pub async fn migrate(pool: &PgPool) -> Result<()> {
tracing::debug!("running migrations");
sqlx::raw_sql(
r#"
-- ── entries: top-level entities (server, service, key, …) ──────────────
CREATE TABLE IF NOT EXISTS entries (
id UUID PRIMARY KEY DEFAULT uuidv7(),
namespace VARCHAR(64) NOT NULL,
kind VARCHAR(64) NOT NULL,
name VARCHAR(256) NOT NULL,
tags TEXT[] NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
version BIGINT NOT NULL DEFAULT 1,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(namespace, kind, name)
);

CREATE INDEX IF NOT EXISTS idx_entries_namespace ON entries(namespace);
CREATE INDEX IF NOT EXISTS idx_entries_kind ON entries(kind);
CREATE INDEX IF NOT EXISTS idx_entries_tags ON entries USING GIN(tags);
CREATE INDEX IF NOT EXISTS idx_entries_metadata ON entries USING GIN(metadata jsonb_path_ops);

-- ── secrets: one row per encrypted field, plaintext schema metadata ────
CREATE TABLE IF NOT EXISTS secrets (
id UUID PRIMARY KEY DEFAULT uuidv7(),
entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE,
field_name VARCHAR(256) NOT NULL,
encrypted BYTEA NOT NULL DEFAULT '\x',
version BIGINT NOT NULL DEFAULT 1,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
UNIQUE(entry_id, field_name)
);

CREATE INDEX IF NOT EXISTS idx_secrets_entry_id ON secrets(entry_id);

-- ── kv_config: global key-value store (Argon2id salt, etc.) ────────────
CREATE TABLE IF NOT EXISTS kv_config (
key TEXT PRIMARY KEY,
value BYTEA NOT NULL
);

-- ── audit_log: append-only operation log ────────────────────────────────
CREATE TABLE IF NOT EXISTS audit_log (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
action VARCHAR(32) NOT NULL,
namespace VARCHAR(64) NOT NULL,
kind VARCHAR(64) NOT NULL,
name VARCHAR(256) NOT NULL,
detail JSONB NOT NULL DEFAULT '{}',
actor VARCHAR(128) NOT NULL DEFAULT '',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_audit_log_ns_kind ON audit_log(namespace, kind);

-- ── entries_history: entry-level snapshot (tags + metadata) ─────────────
CREATE TABLE IF NOT EXISTS entries_history (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
entry_id UUID NOT NULL,
namespace VARCHAR(64) NOT NULL,
kind VARCHAR(64) NOT NULL,
name VARCHAR(256) NOT NULL,
version BIGINT NOT NULL,
action VARCHAR(16) NOT NULL,
tags TEXT[] NOT NULL DEFAULT '{}',
metadata JSONB NOT NULL DEFAULT '{}',
actor VARCHAR(128) NOT NULL DEFAULT '',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_entries_history_entry_id
ON entries_history(entry_id, version DESC);
CREATE INDEX IF NOT EXISTS idx_entries_history_ns_kind_name
ON entries_history(namespace, kind, name, version DESC);

-- ── secrets_history: field-level snapshot ───────────────────────────────
CREATE TABLE IF NOT EXISTS secrets_history (
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
entry_id UUID NOT NULL,
secret_id UUID NOT NULL,
entry_version BIGINT NOT NULL,
field_name VARCHAR(256) NOT NULL,
encrypted BYTEA NOT NULL DEFAULT '\x',
action VARCHAR(16) NOT NULL,
actor VARCHAR(128) NOT NULL DEFAULT '',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_secrets_history_entry_id
ON secrets_history(entry_id, entry_version DESC);
CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id
ON secrets_history(secret_id);
"#,
)
.execute(pool)
.await?;
tracing::debug!("migrations complete");
Ok(())
}
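The pool/migration pair above is the whole database bootstrap: main.rs calls db::create_pool and then db::migrate on every start, and the migration is safe to re-run because every statement is IF NOT EXISTS. A minimal sketch of that call sequence, assuming a DATABASE_URL environment variable as a stand-in for the CLI's own config resolution:

// Sketch only: the real CLI resolves the URL via config::resolve_db_url, not an env var.
async fn bootstrap() -> anyhow::Result<sqlx::PgPool> {
    let database_url = std::env::var("DATABASE_URL")?; // placeholder source for the URL
    let pool = create_pool(&database_url).await?;      // defined above in this module
    migrate(&pool).await?;                             // idempotent: CREATE ... IF NOT EXISTS
    Ok(pool)
}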

// ── Entry-level history snapshot ────────────────────────────────────────────

pub struct EntrySnapshotParams<'a> {
pub entry_id: uuid::Uuid,
pub namespace: &'a str,
pub kind: &'a str,
pub name: &'a str,
pub version: i64,
pub action: &'a str,
pub tags: &'a [String],
pub metadata: &'a Value,
}

/// Snapshot an entry row into `entries_history` before a write operation.
pub async fn snapshot_entry_history(
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
p: EntrySnapshotParams<'_>,
) -> Result<()> {
let actor = current_actor();
sqlx::query(
"INSERT INTO entries_history \
(entry_id, namespace, kind, name, version, action, tags, metadata, actor) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
)
.bind(p.entry_id)
.bind(p.namespace)
.bind(p.kind)
.bind(p.name)
.bind(p.version)
.bind(p.action)
.bind(p.tags)
.bind(p.metadata)
.bind(&actor)
.execute(&mut **tx)
.await?;
Ok(())
}

// ── Secret field-level history snapshot ─────────────────────────────────────

pub struct SecretSnapshotParams<'a> {
pub entry_id: uuid::Uuid,
pub secret_id: uuid::Uuid,
pub entry_version: i64,
pub field_name: &'a str,
pub encrypted: &'a [u8],
pub action: &'a str,
}

/// Snapshot a single secret field into `secrets_history`.
pub async fn snapshot_secret_history(
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
p: SecretSnapshotParams<'_>,
) -> Result<()> {
let actor = current_actor();
sqlx::query(
"INSERT INTO secrets_history \
(entry_id, secret_id, entry_version, field_name, encrypted, action, actor) \
VALUES ($1, $2, $3, $4, $5, $6, $7)",
)
.bind(p.entry_id)
.bind(p.secret_id)
.bind(p.entry_version)
.bind(p.field_name)
.bind(p.encrypted)
.bind(p.action)
.bind(&actor)
.execute(&mut **tx)
.await?;
Ok(())
}
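Both snapshot helpers take the write's own transaction, so the history rows are rolled back together with a failed update. A minimal sketch of that pattern with placeholder values; this is not code from the command modules, and the namespace/kind/name/field literals are illustrative only:

// Sketch: capture the current entry and one field before applying an update, all in one tx.
async fn snapshot_before_update(
    pool: &PgPool,
    entry_id: uuid::Uuid,
    secret_id: uuid::Uuid,
    version: i64,
    tags: &[String],
    metadata: &Value,
    encrypted: &[u8],
) -> Result<()> {
    let mut tx = pool.begin().await?;
    snapshot_entry_history(&mut tx, EntrySnapshotParams {
        entry_id,
        namespace: "refining", // placeholder
        kind: "service",       // placeholder
        name: "gitea",         // placeholder
        version,
        action: "update",
        tags,
        metadata,
    })
    .await?;
    snapshot_secret_history(&mut tx, SecretSnapshotParams {
        entry_id,
        secret_id,
        entry_version: version,
        field_name: "token",   // placeholder
        encrypted,
        action: "update",
    })
    .await?;
    // ... perform the actual UPDATE here, then commit so data and history land together.
    tx.commit().await?;
    Ok(())
}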

// ── Argon2 salt helpers ──────────────────────────────────────────────────────

/// Load the Argon2id salt from the database.
pub async fn load_argon2_salt(pool: &PgPool) -> Result<Option<Vec<u8>>> {
let row: Option<(Vec<u8>,)> =
sqlx::query_as("SELECT value FROM kv_config WHERE key = 'argon2_salt'")
.fetch_optional(pool)
.await?;
Ok(row.map(|(v,)| v))
}

/// Store the Argon2id salt in the database (only called once on first device init).
pub async fn store_argon2_salt(pool: &PgPool, salt: &[u8]) -> Result<()> {
sqlx::query(
"INSERT INTO kv_config (key, value) VALUES ('argon2_salt', $1) \
ON CONFLICT (key) DO NOTHING",
)
.bind(salt)
.execute(pool)
.await?;
Ok(())
}
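Together these two helpers give the get-or-create behaviour described in the init help text: the first device generates a salt, later devices reuse it, and ON CONFLICT DO NOTHING lets whichever device wrote first win a race. A sketch of that flow; the rand-generated 32-byte salt is an illustrative choice, not necessarily what secrets init actually does:

// Sketch only: salt generation here is a placeholder, not the project's init command.
async fn get_or_create_salt(pool: &PgPool) -> Result<Vec<u8>> {
    if let Some(salt) = load_argon2_salt(pool).await? {
        return Ok(salt); // another device already initialised the shared salt
    }
    let salt: [u8; 32] = rand::random(); // illustrative source of randomness
    store_argon2_salt(pool, &salt).await?;
    // DO NOTHING means a concurrent init may have won; re-read so every device agrees.
    Ok(load_argon2_salt(pool).await?.unwrap_or_else(|| salt.to_vec()))
}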
src/main.rs
@@ -1,879 +0,0 @@
mod audit;
mod commands;
mod config;
mod crypto;
mod db;
mod models;
mod output;

use anyhow::Result;

/// Load .env from current or parent directories (best-effort, no error if missing).
fn load_dotenv() {
let _ = dotenvy::dotenv();
}
use clap::{Parser, Subcommand};
use tracing_subscriber::EnvFilter;

use output::resolve_output_mode;

#[derive(Parser)]
#[command(
name = "secrets",
version,
about = "Secrets & config manager backed by PostgreSQL — optimised for AI agents",
after_help = "QUICK START:
# 1. Configure database (once per device)
secrets config set-db \"postgres://postgres:<password>@<host>:<port>/secrets\"

# 2. Initialize master key (once per device)
secrets init

# Discover what namespaces / kinds exist
secrets search --summary --limit 20

# Precise lookup (JSON output for easy parsing)
secrets search -n refining --kind service --name gitea -o json

# Extract a single metadata field directly
secrets search -n refining --kind service --name gitea -f metadata.url

# Pipe-friendly (non-TTY defaults to json-compact automatically)
secrets search -n refining --kind service | jq '.[].name'

# Inject secrets into environment variables when you really need them
secrets inject -n refining --kind service --name gitea"
)]
struct Cli {
/// Database URL, overrides saved config (one-time override)
#[arg(long, global = true, default_value = "")]
db_url: String,

/// Enable verbose debug output
#[arg(long, short, global = true)]
verbose: bool,

#[command(subcommand)]
command: Commands,
}

#[derive(Subcommand)]
enum Commands {
/// Initialize master key on this device (run once per device).
///
/// Prompts for a master password, derives a key with Argon2id, and stores
/// it in the OS Keychain. Use the same password on every device.
///
/// NOTE: Run `secrets config set-db <URL>` first if database is not configured.
#[command(after_help = "PREREQUISITE:
Database must be configured first. Run: secrets config set-db <DATABASE_URL>

EXAMPLES:
# First device: generates a new Argon2id salt and stores master key
secrets init

# Subsequent devices: reuses existing salt from the database
secrets init")]
Init,

/// Add or update a record (upsert). Use -m for plaintext metadata, -s for secrets.
#[command(after_help = "EXAMPLES:
# Add a server
secrets add -n refining --kind server --name my-server \\
--tag aliyun --tag shanghai \\
-m ip=10.0.0.1 -m desc=\"Example ECS\" \\
-s username=root -s ssh_key=@./keys/server.pem

# Add a service credential
secrets add -n refining --kind service --name gitea \\
--tag gitea \\
-m url=https://code.example.com -m default_org=myorg \\
-s token=<token>

# Add typed JSON metadata
secrets add -n refining --kind service --name gitea \\
-m port:=3000 \\
-m enabled:=true \\
-m domains:='[\"code.example.com\",\"git.example.com\"]' \\
-m tls:='{\"enabled\":true,\"redirect_http\":true}'

# Add with token read from a file
secrets add -n ricnsmart --kind service --name mqtt \\
-m host=mqtt.example.com -m port=1883 \\
-s password=@./mqtt_password.txt

# Add typed JSON secrets
secrets add -n refining --kind service --name deploy-bot \\
-s enabled:=true \\
-s retry_count:=3 \\
-s scopes:='[\"repo\",\"workflow\"]' \\
-s extra:='{\"region\":\"ap-east-1\",\"verify_tls\":true}'

# Write a multiline file into a nested secret field
secrets add -n refining --kind server --name my-server \\
-s credentials:content@./keys/server.pem

# Shared PEM (key_ref): store key once, reference from multiple servers
secrets add -n refining --kind key --name my-shared-key \\
--tag aliyun -s content=@./keys/shared.pem
secrets add -n refining --kind server --name i-abc123 \\
-m ip=10.0.0.1 -m key_ref=my-shared-key -s username=ecs-user")]
Add {
/// Namespace, e.g. refining, ricnsmart
#[arg(short, long)]
namespace: String,
/// Kind of record: server, service, key, ...
#[arg(long)]
kind: String,
/// Human-readable unique name, e.g. gitea, i-example0abcd1234efgh
#[arg(long)]
name: String,
/// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong
#[arg(long = "tag")]
tags: Vec<String>,
/// Plaintext metadata: key=value, key:=<json>, key=@file, or nested:path@file.
/// Use key_ref=<name> to reference a shared key entry (kind=key); inject/run merge its secrets.
#[arg(long = "meta", short = 'm')]
meta: Vec<String>,
/// Secret entry: key=value, key:=<json>, key=@file, or nested:path@file
#[arg(long = "secret", short = 's')]
secrets: Vec<String>,
/// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},

/// Search / read records. This is the primary read command for AI agents.
///
/// Supports fuzzy search (-q), exact lookup (--name), field extraction (-f),
/// summary view (--summary), pagination (--limit / --offset), and structured
/// output (-o json / json-compact). When stdout is not a TTY, output
/// defaults to json-compact automatically.
#[command(after_help = "EXAMPLES:
# Discover all records (summary, safe default limit)
secrets search --summary --limit 20

# Filter by namespace and kind
secrets search -n refining --kind service

# Exact lookup — returns 0 or 1 record
secrets search -n refining --kind service --name gitea

# Fuzzy keyword search (matches name, namespace, kind, tags, metadata)
secrets search -q mqtt

# Extract a single metadata field value
secrets search -n refining --kind service --name gitea -f metadata.url

# Multiple fields at once
secrets search -n refining --kind service --name gitea \\
-f metadata.url -f metadata.default_org

# Inject decrypted secrets only when needed
secrets inject -n refining --kind service --name gitea
secrets run -n refining --kind service --name gitea -- printenv

# Paginate large result sets
secrets search -n refining --summary --limit 10 --offset 0
secrets search -n refining --summary --limit 10 --offset 10

# Sort by most recently updated
secrets search --sort updated --limit 5 --summary

# Non-TTY / pipe: output is json-compact by default
secrets search -n refining --kind service | jq '.[].name'")]
Search {
/// Filter by namespace, e.g. refining, ricnsmart
#[arg(short, long)]
namespace: Option<String>,
/// Filter by kind, e.g. server, service
#[arg(long)]
kind: Option<String>,
/// Exact name filter, e.g. gitea, i-example0abcd1234efgh
#[arg(long)]
name: Option<String>,
/// Filter by tag, e.g. --tag aliyun (repeatable for AND intersection)
#[arg(long)]
tag: Vec<String>,
/// Fuzzy keyword (matches name, namespace, kind, tags, metadata text)
#[arg(short, long)]
query: Option<String>,
/// Extract metadata field value(s) directly: metadata.<key> (repeatable)
#[arg(short = 'f', long = "field")]
fields: Vec<String>,
/// Return lightweight summary only (namespace, kind, name, tags, desc, updated_at)
#[arg(long)]
summary: bool,
/// Maximum number of records to return [default: 50]
#[arg(long, default_value = "50")]
limit: u32,
/// Skip this many records (for pagination)
#[arg(long, default_value = "0")]
offset: u32,
/// Sort order: name (default), updated, created
#[arg(long, default_value = "name")]
sort: String,
/// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},

/// Delete one record precisely, or bulk-delete by namespace.
///
/// With --name: deletes exactly that record (--kind also required).
/// Without --name: bulk-deletes all records matching namespace + optional --kind.
/// Use --dry-run to preview bulk deletes before committing.
#[command(after_help = "EXAMPLES:
# Delete a single record (exact match)
secrets delete -n refining --kind service --name legacy-mqtt

# Preview what a bulk delete would remove (no writes)
secrets delete -n refining --dry-run

# Bulk-delete all records in a namespace
secrets delete -n ricnsmart

# Bulk-delete only server records in a namespace
secrets delete -n ricnsmart --kind server

# JSON output
secrets delete -n refining --kind service -o json")]
Delete {
/// Namespace, e.g. refining
#[arg(short, long)]
namespace: String,
/// Kind filter, e.g. server, service (required with --name; optional for bulk)
#[arg(long)]
kind: Option<String>,
/// Exact name of the record to delete (omit for bulk delete)
#[arg(long)]
name: Option<String>,
/// Preview what would be deleted without making any changes (bulk mode only)
#[arg(long)]
dry_run: bool,
/// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},

/// Incrementally update an existing record (merge semantics; record must exist).
///
/// Only the fields you pass are changed — everything else is preserved.
/// Use --add-tag / --remove-tag to modify tags without touching other fields.
#[command(after_help = "EXAMPLES:
# Update a single metadata field (all other fields unchanged)
secrets update -n refining --kind server --name my-server -m ip=10.0.0.1

# Rotate a secret token
secrets update -n refining --kind service --name gitea -s token=<new-token>

# Update typed JSON metadata
secrets update -n refining --kind service --name gitea \\
-m deploy:strategy:='{\"type\":\"rolling\",\"batch\":2}' \\
-m runtime:max_open_conns:=20

# Add a tag and rotate password at the same time
secrets update -n refining --kind service --name gitea \\
--add-tag production -s token=<new-token>

# Remove a deprecated metadata field and a stale secret key
secrets update -n refining --kind service --name mqtt \\
--remove-meta old_port --remove-secret old_password

# Remove a nested field
secrets update -n refining --kind server --name my-server \\
--remove-secret credentials:content

# Remove a tag
secrets update -n refining --kind service --name gitea --remove-tag staging

# Update a nested secret field from a file
secrets update -n refining --kind server --name my-server \\
-s credentials:content@./keys/server.pem

# Update nested typed JSON fields
secrets update -n refining --kind service --name deploy-bot \\
-s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\
-s auth:retry:=5

# Rotate shared PEM (all servers with key_ref=my-shared-key get the new key)
secrets update -n refining --kind key --name my-shared-key \\
-s content=@./keys/new-shared.pem")]
Update {
/// Namespace, e.g. refining, ricnsmart
#[arg(short, long)]
namespace: String,
/// Kind of record: server, service, key, ...
#[arg(long)]
kind: String,
/// Human-readable unique name
#[arg(long)]
name: String,
/// Add a tag (repeatable; does not affect existing tags)
#[arg(long = "add-tag")]
add_tags: Vec<String>,
/// Remove a tag (repeatable)
#[arg(long = "remove-tag")]
remove_tags: Vec<String>,
/// Set or overwrite a metadata field: key=value, key:=<json>, key=@file, or nested:path@file.
/// Use key_ref=<name> to reference a shared key entry (kind=key).
#[arg(long = "meta", short = 'm')]
meta: Vec<String>,
/// Delete a metadata field by key or nested path, e.g. old_port or credentials:content
#[arg(long = "remove-meta")]
remove_meta: Vec<String>,
/// Set or overwrite a secret field: key=value, key:=<json>, key=@file, or nested:path@file
#[arg(long = "secret", short = 's')]
secrets: Vec<String>,
/// Delete a secret field by key or nested path, e.g. old_password or credentials:content
#[arg(long = "remove-secret")]
remove_secrets: Vec<String>,
/// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},

/// Manage CLI configuration (database connection, etc.)
#[command(after_help = "EXAMPLES:
# Configure the database URL (run once per device; persisted to config file)
secrets config set-db \"postgres://postgres:<password>@<host>:<port>/secrets\"

# Show current config (password is masked)
secrets config show

# Print path to the config file
secrets config path")]
Config {
#[command(subcommand)]
action: ConfigAction,
},

/// Show the change history for a record.
#[command(after_help = "EXAMPLES:
# Show last 20 versions for a service record
secrets history -n refining --kind service --name gitea

# Show last 5 versions
secrets history -n refining --kind service --name gitea --limit 5")]
History {
#[arg(short, long)]
namespace: String,
#[arg(long)]
kind: String,
#[arg(long)]
name: String,
/// Number of history entries to show [default: 20]
#[arg(long, default_value = "20")]
limit: u32,
/// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},

/// Roll back a record to a previous version.
#[command(after_help = "EXAMPLES:
# Roll back to the most recent snapshot (undo last change)
secrets rollback -n refining --kind service --name gitea

# Roll back to a specific version number
secrets rollback -n refining --kind service --name gitea --to-version 3")]
Rollback {
#[arg(short, long)]
namespace: String,
#[arg(long)]
kind: String,
#[arg(long)]
name: String,
/// Target version to restore. Omit to restore the most recent snapshot.
#[arg(long)]
to_version: Option<i64>,
/// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},

/// Print secrets as environment variables (stdout only, nothing persisted).
///
/// Outputs KEY=VALUE pairs for all matched records. Safe to pipe or eval.
#[command(after_help = "EXAMPLES:
# Print env vars for a single service
secrets inject -n refining --kind service --name gitea

# With a custom prefix
secrets inject -n refining --kind service --name gitea --prefix GITEA

# JSON output (all vars as a JSON object)
secrets inject -n refining --kind service --name gitea -o json

# Eval into current shell (use with caution)
eval $(secrets inject -n refining --kind service --name gitea)

# For entries with metadata.key_ref, referenced key's secrets are merged automatically")]
Inject {
#[arg(short, long)]
namespace: Option<String>,
#[arg(long)]
kind: Option<String>,
#[arg(long)]
name: Option<String>,
#[arg(long)]
tag: Vec<String>,
/// Prefix to prepend to every variable name (uppercased automatically)
#[arg(long, default_value = "")]
prefix: String,
/// Output format: text/KEY=VALUE (default), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},

/// Run a command with secrets injected as environment variables.
///
/// Secrets are available only to the child process; the current shell
/// environment is not modified. The process exit code is propagated.
#[command(after_help = "EXAMPLES:
# Run a script with a single service's secrets injected
secrets run -n refining --kind service --name gitea -- ./deploy.sh

# Run with a tag filter (all matched records merged)
secrets run --tag production -- env | grep GITEA

# With prefix
secrets run -n refining --kind service --name gitea --prefix GITEA -- printenv

# metadata.key_ref entries get key secrets merged (e.g. server + shared PEM)")]
Run {
#[arg(short, long)]
namespace: Option<String>,
#[arg(long)]
kind: Option<String>,
#[arg(long)]
name: Option<String>,
#[arg(long)]
tag: Vec<String>,
/// Prefix to prepend to every variable name (uppercased automatically)
#[arg(long, default_value = "")]
prefix: String,
/// Command and arguments to execute with injected environment
#[arg(last = true, required = true)]
command: Vec<String>,
},

/// Check for a newer version and update the binary in-place.
///
/// Downloads the latest release and replaces the current binary. No database connection or master key required.
/// Release URL defaults to the upstream server; override via SECRETS_UPGRADE_URL for self-hosted or fork.
#[command(after_help = "EXAMPLES:
# Check for updates only (no download)
secrets upgrade --check

# Download and install the latest version
secrets upgrade")]
Upgrade {
/// Only check if a newer version is available; do not download
#[arg(long)]
check: bool,
},

/// Export records to a file (JSON, TOML, or YAML).
///
/// Decrypts and exports all matched records. Requires master key unless --no-secrets is used.
#[command(after_help = "EXAMPLES:
# Export everything to JSON
secrets export --file backup.json

# Export a specific namespace to TOML
secrets export -n refining --file refining.toml

# Export a specific kind
secrets export -n refining --kind service --file services.yaml

# Export by tag
secrets export --tag production --file prod.json

# Export schema only (no decryption needed)
secrets export --no-secrets --file schema.json

# Print to stdout in YAML
secrets export -n refining --format yaml")]
Export {
/// Filter by namespace
#[arg(short, long)]
namespace: Option<String>,
/// Filter by kind, e.g. server, service
#[arg(long)]
kind: Option<String>,
/// Exact name filter
#[arg(long)]
name: Option<String>,
/// Filter by tag (repeatable)
#[arg(long)]
tag: Vec<String>,
/// Fuzzy keyword search
#[arg(short, long)]
query: Option<String>,
/// Output file path (format inferred from extension: .json / .toml / .yaml / .yml)
#[arg(long)]
file: Option<String>,
/// Explicit format: json, toml, or yaml (overrides file extension; required for stdout)
#[arg(long)]
format: Option<String>,
/// Omit secrets from output (no master key required)
#[arg(long)]
no_secrets: bool,
},

/// Import records from a file (JSON, TOML, or YAML).
///
/// Reads an export file and inserts or updates entries. Requires master key to re-encrypt secrets.
#[command(after_help = "EXAMPLES:
# Import a JSON backup (conflict = error by default)
secrets import backup.json

# Import and overwrite existing records
secrets import --force refining.toml

# Preview what would be imported (no writes)
secrets import --dry-run backup.yaml

# JSON output for the import summary
secrets import backup.json -o json")]
Import {
/// Input file path (format inferred from extension: .json / .toml / .yaml / .yml)
file: String,
/// Overwrite existing records on conflict (default: error and abort)
#[arg(long)]
force: bool,
/// Preview operations without writing to the database
#[arg(long)]
dry_run: bool,
/// Output format: text (default on TTY), json, json-compact
#[arg(short, long = "output")]
output: Option<String>,
},
}

#[derive(Subcommand)]
enum ConfigAction {
/// Save database URL to config file (~/.config/secrets/config.toml)
SetDb {
/// PostgreSQL connection string, e.g. postgres://user:pass@<host>:<port>/dbname
url: String,
},
/// Show current configuration (password masked)
Show,
/// Print path to the config file
Path,
}

#[tokio::main]
async fn main() -> Result<()> {
load_dotenv();
let cli = Cli::parse();

let filter = if cli.verbose {
EnvFilter::new("secrets=debug")
} else {
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("secrets=warn"))
};
tracing_subscriber::fmt()
.with_env_filter(filter)
.with_target(false)
.init();

// config subcommand needs no database or master key
if let Commands::Config { action } = cli.command {
return commands::config::run(action).await;
}

// upgrade needs no database or master key either
if let Commands::Upgrade { check } = cli.command {
return commands::upgrade::run(check).await;
}

let db_url = config::resolve_db_url(&cli.db_url)?;
let pool = db::create_pool(&db_url).await?;
db::migrate(&pool).await?;

// init needs a pool but sets up the master key — handle before loading it
if let Commands::Init = cli.command {
return commands::init::run(&pool).await;
}

// All remaining commands require the master key from the OS Keychain,
// except delete which operates on plaintext metadata only.

match cli.command {
Commands::Init | Commands::Config { .. } | Commands::Upgrade { .. } => unreachable!(),

Commands::Add {
namespace,
kind,
name,
tags,
meta,
secrets,
output,
} => {
let master_key = crypto::load_master_key()?;
let _span =
tracing::info_span!("cmd", command = "add", %namespace, %kind, %name).entered();
let out = resolve_output_mode(output.as_deref())?;
commands::add::run(
&pool,
commands::add::AddArgs {
namespace: &namespace,
kind: &kind,
name: &name,
tags: &tags,
meta_entries: &meta,
secret_entries: &secrets,
output: out,
},
&master_key,
)
.await?;
}

Commands::Search {
namespace,
kind,
name,
tag,
query,
fields,
summary,
limit,
offset,
sort,
output,
} => {
let _span = tracing::info_span!("cmd", command = "search").entered();
let out = resolve_output_mode(output.as_deref())?;
commands::search::run(
&pool,
commands::search::SearchArgs {
namespace: namespace.as_deref(),
kind: kind.as_deref(),
name: name.as_deref(),
tags: &tag,
query: query.as_deref(),
fields: &fields,
summary,
limit,
offset,
sort: &sort,
output: out,
},
)
.await?;
}

Commands::Delete {
namespace,
kind,
name,
dry_run,
output,
} => {
let _span =
tracing::info_span!("cmd", command = "delete", %namespace, ?kind, ?name).entered();
let out = resolve_output_mode(output.as_deref())?;
commands::delete::run(
&pool,
commands::delete::DeleteArgs {
namespace: &namespace,
kind: kind.as_deref(),
name: name.as_deref(),
dry_run,
output: out,
},
)
.await?;
}

Commands::Update {
namespace,
kind,
name,
add_tags,
remove_tags,
meta,
remove_meta,
secrets,
remove_secrets,
output,
} => {
let master_key = crypto::load_master_key()?;
let _span =
tracing::info_span!("cmd", command = "update", %namespace, %kind, %name).entered();
let out = resolve_output_mode(output.as_deref())?;
commands::update::run(
&pool,
commands::update::UpdateArgs {
namespace: &namespace,
kind: &kind,
name: &name,
add_tags: &add_tags,
remove_tags: &remove_tags,
meta_entries: &meta,
remove_meta: &remove_meta,
secret_entries: &secrets,
remove_secrets: &remove_secrets,
output: out,
},
&master_key,
)
.await?;
}

Commands::History {
namespace,
kind,
name,
limit,
output,
} => {
let out = resolve_output_mode(output.as_deref())?;
commands::history::run(
&pool,
commands::history::HistoryArgs {
namespace: &namespace,
kind: &kind,
name: &name,
limit,
output: out,
},
)
.await?;
}

Commands::Rollback {
namespace,
kind,
name,
to_version,
output,
} => {
let master_key = crypto::load_master_key()?;
let out = resolve_output_mode(output.as_deref())?;
commands::rollback::run(
&pool,
commands::rollback::RollbackArgs {
namespace: &namespace,
kind: &kind,
name: &name,
to_version,
output: out,
},
&master_key,
)
.await?;
}

Commands::Inject {
namespace,
kind,
name,
tag,
prefix,
output,
} => {
let master_key = crypto::load_master_key()?;
let out = resolve_output_mode(output.as_deref())?;
commands::run::run_inject(
&pool,
commands::run::InjectArgs {
namespace: namespace.as_deref(),
kind: kind.as_deref(),
name: name.as_deref(),
tags: &tag,
prefix: &prefix,
output: out,
},
&master_key,
)
.await?;
}

Commands::Run {
namespace,
kind,
name,
tag,
prefix,
command,
} => {
let master_key = crypto::load_master_key()?;
commands::run::run_exec(
&pool,
commands::run::RunArgs {
namespace: namespace.as_deref(),
kind: kind.as_deref(),
name: name.as_deref(),
tags: &tag,
prefix: &prefix,
command: &command,
},
&master_key,
)
.await?;
}

Commands::Export {
namespace,
kind,
name,
tag,
query,
file,
format,
no_secrets,
} => {
let master_key = if no_secrets {
None
} else {
Some(crypto::load_master_key()?)
};
let _span = tracing::info_span!("cmd", command = "export").entered();
commands::export_cmd::run(
&pool,
commands::export_cmd::ExportArgs {
namespace: namespace.as_deref(),
kind: kind.as_deref(),
name: name.as_deref(),
tags: &tag,
query: query.as_deref(),
file: file.as_deref(),
format: format.as_deref(),
no_secrets,
},
master_key.as_ref(),
)
.await?;
}

Commands::Import {
file,
force,
dry_run,
output,
} => {
let master_key = crypto::load_master_key()?;
let _span = tracing::info_span!("cmd", command = "import").entered();
let out = resolve_output_mode(output.as_deref())?;
commands::import_cmd::run(
&pool,
commands::import_cmd::ImportArgs {
file: &file,
force,
dry_run,
output: out,
},
&master_key,
)
.await?;
}
}

Ok(())
}
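The Run subcommand's doc comment above promises two things: decrypted fields reach only the child process, and the child's exit code is propagated. A standalone sketch of that contract (this is not the project's commands::run::run_exec; vars and command are hypothetical inputs):

// Sketch: inject env vars into the child only and return its exit code to the caller,
// which would then pass it to std::process::exit.
use std::collections::HashMap;
use std::process::Command;

fn run_with_env(command: &[String], vars: &HashMap<String, String>) -> anyhow::Result<i32> {
    let (program, args) = command
        .split_first()
        .ok_or_else(|| anyhow::anyhow!("empty command"))?;
    let status = Command::new(program)
        .args(args)
        .envs(vars) // added to the child's environment; the parent shell is untouched
        .status()?;
    Ok(status.code().unwrap_or(1))
}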
@@ -1,65 +0,0 @@
use chrono::{DateTime, Local, Utc};
use std::io::IsTerminal;
use std::str::FromStr;

/// Output format for all commands.
#[derive(Debug, Clone, Default, PartialEq)]
pub enum OutputMode {
/// Human-readable text (default when stdout is a TTY)
#[default]
Text,
/// Pretty-printed JSON
Json,
/// Single-line JSON (default when stdout is NOT a TTY, e.g. piped to jq)
JsonCompact,
}

impl FromStr for OutputMode {
type Err = anyhow::Error;

fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"text" => Ok(Self::Text),
"json" => Ok(Self::Json),
"json-compact" => Ok(Self::JsonCompact),
other => Err(anyhow::anyhow!(
"Unknown output format '{}'. Valid: text, json, json-compact",
other
)),
}
}
}

/// Resolve the effective output mode.
/// - Explicit value from `--output` takes priority.
/// - TTY → text; non-TTY (piped/redirected) → json-compact.
pub fn resolve_output_mode(explicit: Option<&str>) -> anyhow::Result<OutputMode> {
if let Some(s) = explicit {
return s.parse();
}
if std::io::stdout().is_terminal() {
Ok(OutputMode::Text)
} else {
Ok(OutputMode::JsonCompact)
}
}

/// Format a UTC timestamp for local human-readable output.
pub fn format_local_time(dt: DateTime<Utc>) -> String {
dt.with_timezone(&Local)
.format("%Y-%m-%d %H:%M:%S %:z")
.to_string()
}

/// Print a JSON value to stdout in the requested output mode.
/// - `Json` → pretty-printed
/// - `JsonCompact` → single line
/// - `Text` → no-op (caller is responsible for the text branch)
pub fn print_json(value: &serde_json::Value, mode: &OutputMode) -> anyhow::Result<()> {
match mode {
OutputMode::Json => println!("{}", serde_json::to_string_pretty(value)?),
OutputMode::JsonCompact => println!("{}", serde_json::to_string(value)?),
OutputMode::Text => {}
}
Ok(())
}
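A minimal usage sketch for this module, with placeholder values: resolve the mode once per command, keep the text branch in the caller, and route both JSON modes through print_json:

// Sketch only; the record value and text line are placeholders.
fn report(explicit: Option<&str>) -> anyhow::Result<()> {
    let mode = resolve_output_mode(explicit)?; // e.g. Some("json") from --output, or None
    let value = serde_json::json!({ "name": "gitea", "namespace": "refining" });
    if mode == OutputMode::Text {
        println!("gitea (refining)");          // human-readable branch stays with the caller
    } else {
        print_json(&value, &mode)?;            // pretty or compact JSON
    }
    Ok(())
}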
@@ -1,3 +0,0 @@
-----BEGIN EXAMPLE KEY PLACEHOLDER-----
This file is for local dev/testing. Replace with a real key when needed.
-----END EXAMPLE KEY PLACEHOLDER-----