Compare commits
100 Commits
secrets-0.
...
v3
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
57c3efb70e | ||
|
|
e6bd2225cd | ||
|
|
328962706b | ||
|
|
763d99b15e | ||
|
|
0374899dab | ||
|
|
cb5865b958 | ||
|
|
34093b0e23 | ||
|
|
0bf06bbc73 | ||
|
|
f86d12b80e | ||
|
|
43d6164a15 | ||
|
|
1b2fbdae4d | ||
|
|
ab1e3329b9 | ||
|
|
c3b1a0df1a | ||
|
|
d772066210 | ||
|
|
2c7dbf890b | ||
|
|
8c49316923 | ||
|
|
cf93488c6a | ||
| 137a4d42b0 | |||
|
|
ff2ea91e72 | ||
|
|
574c1c9967 | ||
|
|
98d69f5f12 | ||
|
|
089d0b4b58 | ||
|
|
10da51c203 | ||
|
|
bc8995cf71 | ||
|
|
5333b863c5 | ||
|
|
6fde982c20 | ||
|
|
a2a80a1744 | ||
| dfe282095c | |||
|
|
59084a409d | ||
|
|
b0fcb83592 | ||
|
|
8942718641 | ||
|
|
53d53ff96a | ||
|
|
cab234cfcb | ||
|
|
e0fee639c1 | ||
|
|
7c53bfb782 | ||
|
|
63cb3a8216 | ||
|
|
2b994141b8 | ||
|
|
9d6ac5c13a | ||
|
|
1860cce86c | ||
| dd24f7cc44 | |||
|
|
aefad33870 | ||
|
|
0ffb81e57f | ||
|
|
4a1654c820 | ||
|
|
a15e2eaf4a | ||
|
|
1518388374 | ||
| b99d821644 | |||
|
|
32f275f88a | ||
|
|
c6fb457734 | ||
| df701f21b9 | |||
| c3c536200e | |||
| 7909f7102d | |||
| 87a29af82d | |||
| 1b11f7e976 | |||
| 08e81363c9 | |||
|
|
beade4503d | ||
|
|
409fd78a35 | ||
|
|
f7afd7f819 | ||
|
|
719bdd7e08 | ||
|
|
1e597559a2 | ||
|
|
e3ca43ca3f | ||
|
|
0b57605103 | ||
|
|
8b191937cd | ||
|
|
11c936a5b8 | ||
|
|
b6349dd1c8 | ||
|
|
f720983328 | ||
|
|
7bd0603dc6 | ||
|
|
17a95bea5b | ||
|
|
a42db62702 | ||
|
|
2edb970cba | ||
|
|
17f8ac0dbc | ||
|
|
259fbe10a6 | ||
|
|
c815fb4cc8 | ||
|
|
90cd1eca15 | ||
|
|
da007348ea | ||
|
|
f2344b7543 | ||
|
|
ee028d45c3 | ||
|
|
a44c8ebf08 | ||
|
|
a595081c4c | ||
|
|
0a8b14211a | ||
|
|
9cebbd7587 | ||
|
|
4d136a5a20 | ||
|
|
7ce4aaf835 | ||
|
|
bce01a0f2b | ||
|
|
8cd4dbf592 | ||
|
|
ad3c8d1672 | ||
|
|
8d6b9f0368 | ||
|
|
ce9e089348 | ||
|
|
786675ce42 | ||
|
|
5df4141935 | ||
|
|
49fb7430a8 | ||
|
|
ff9767ff95 | ||
|
|
955acfe9ec | ||
|
|
3a5ec92bf0 | ||
|
|
854720f10c | ||
|
|
62a1df316b | ||
|
|
d0796e9c9a | ||
|
|
66b6417faa | ||
|
|
56a28e8cf7 | ||
|
|
12aec6675a | ||
|
|
e1cd6e736c |
@@ -1,12 +1,13 @@
|
|||||||
name: Secrets CLI - Build & Release
|
name: Secrets v3 CI
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches: [main]
|
|
||||||
paths:
|
paths:
|
||||||
- 'src/**'
|
- 'crates/**'
|
||||||
- 'Cargo.toml'
|
- 'Cargo.toml'
|
||||||
- 'Cargo.lock'
|
- 'Cargo.lock'
|
||||||
|
- 'deploy/**'
|
||||||
|
- '.gitea/workflows/**'
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
@@ -16,139 +17,38 @@ permissions:
|
|||||||
contents: write
|
contents: write
|
||||||
|
|
||||||
env:
|
env:
|
||||||
BINARY_NAME: secrets
|
RUST_TOOLCHAIN: 1.94.0
|
||||||
CARGO_INCREMENTAL: 0
|
CARGO_INCREMENTAL: 0
|
||||||
CARGO_NET_RETRY: 10
|
CARGO_NET_RETRY: 10
|
||||||
CARGO_TERM_COLOR: always
|
CARGO_TERM_COLOR: always
|
||||||
RUST_BACKTRACE: short
|
RUST_BACKTRACE: short
|
||||||
|
MUSL_TARGET: x86_64-unknown-linux-musl
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
version:
|
ci:
|
||||||
name: 版本 & Release
|
name: 检查
|
||||||
runs-on: debian
|
runs-on: debian
|
||||||
outputs:
|
timeout-minutes: 40
|
||||||
version: ${{ steps.ver.outputs.version }}
|
|
||||||
tag: ${{ steps.ver.outputs.tag }}
|
|
||||||
tag_exists: ${{ steps.ver.outputs.tag_exists }}
|
|
||||||
release_id: ${{ steps.release.outputs.release_id }}
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: 解析版本
|
# ── Rust 工具链 ──────────────────────────────────────────────────────
|
||||||
id: ver
|
- name: 安装 Rust 与 musl 工具链
|
||||||
run: |
|
run: |
|
||||||
version=$(grep -m1 '^version' Cargo.toml | sed 's/.*"\(.*\)".*/\1/')
|
sudo apt-get update -qq
|
||||||
tag="secrets-${version}"
|
sudo apt-get install -y -qq pkg-config musl-tools binutils jq
|
||||||
previous_tag=$(git tag --list 'secrets-*' --sort=-v:refname | awk -v tag="$tag" '$0 != tag { print; exit }')
|
if ! command -v rustup >/dev/null 2>&1; then
|
||||||
|
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain "${RUST_TOOLCHAIN}"
|
||||||
echo "version=${version}" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "tag=${tag}" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "previous_tag=${previous_tag}" >> "$GITHUB_OUTPUT"
|
|
||||||
|
|
||||||
if git rev-parse "refs/tags/${tag}" >/dev/null 2>&1; then
|
|
||||||
echo "tag_exists=true" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "版本 ${tag} 已存在"
|
|
||||||
else
|
|
||||||
echo "tag_exists=false" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "将创建新版本 ${tag}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: 严格拦截重复版本
|
|
||||||
if: steps.ver.outputs.tag_exists == 'true'
|
|
||||||
run: |
|
|
||||||
echo "错误: 版本 ${{ steps.ver.outputs.tag }} 已存在,禁止重复发版。"
|
|
||||||
echo "请先 bump Cargo.toml 中的 version,并执行 cargo build 同步 Cargo.lock。"
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
- name: 创建 Tag
|
|
||||||
if: steps.ver.outputs.tag_exists == 'false'
|
|
||||||
run: |
|
|
||||||
git config user.name "github-actions[bot]"
|
|
||||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
|
||||||
git tag -a "${{ steps.ver.outputs.tag }}" -m "Release ${{ steps.ver.outputs.tag }}"
|
|
||||||
git push origin "${{ steps.ver.outputs.tag }}"
|
|
||||||
|
|
||||||
- name: 解析或创建 Release
|
|
||||||
id: release
|
|
||||||
env:
|
|
||||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
|
||||||
run: |
|
|
||||||
if [ -z "$RELEASE_TOKEN" ]; then
|
|
||||||
echo "release_id=" >> "$GITHUB_OUTPUT"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
command -v jq >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq jq)
|
|
||||||
|
|
||||||
tag="${{ steps.ver.outputs.tag }}"
|
|
||||||
version="${{ steps.ver.outputs.version }}"
|
|
||||||
release_api="${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases"
|
|
||||||
|
|
||||||
http_code=$(curl -sS -o /tmp/release.json -w '%{http_code}' \
|
|
||||||
-H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
"${release_api}/tags/${tag}")
|
|
||||||
|
|
||||||
if [ "$http_code" = "200" ]; then
|
|
||||||
release_id=$(jq -r '.id // empty' /tmp/release.json)
|
|
||||||
if [ -n "$release_id" ]; then
|
|
||||||
echo "已找到现有 Release: ${release_id}"
|
|
||||||
echo "release_id=${release_id}" >> "$GITHUB_OUTPUT"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
previous_tag="${{ steps.ver.outputs.previous_tag }}"
|
|
||||||
if [ -n "$previous_tag" ]; then
|
|
||||||
changes=$(git log --pretty=format:'- %s (%h)' "${previous_tag}..HEAD")
|
|
||||||
else
|
|
||||||
changes=$(git log --pretty=format:'- %s (%h)')
|
|
||||||
fi
|
|
||||||
[ -z "$changes" ] && changes="- 首次发布"
|
|
||||||
|
|
||||||
body=$(printf '## 变更日志\n\n%s' "$changes")
|
|
||||||
|
|
||||||
payload=$(jq -n \
|
|
||||||
--arg tag "$tag" \
|
|
||||||
--arg name "${{ env.BINARY_NAME }} ${version}" \
|
|
||||||
--arg body "$body" \
|
|
||||||
'{tag_name: $tag, name: $name, body: $body, draft: true}')
|
|
||||||
|
|
||||||
http_code=$(curl -sS -o /tmp/create-release.json -w '%{http_code}' \
|
|
||||||
-H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-X POST "$release_api" \
|
|
||||||
-d "$payload")
|
|
||||||
|
|
||||||
if [ "$http_code" = "201" ] || [ "$http_code" = "200" ]; then
|
|
||||||
release_id=$(jq -r '.id // empty' /tmp/create-release.json)
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$release_id" ]; then
|
|
||||||
echo "已创建草稿 Release: ${release_id}"
|
|
||||||
echo "release_id=${release_id}" >> "$GITHUB_OUTPUT"
|
|
||||||
else
|
|
||||||
echo "⚠ 创建 Release 失败 (HTTP ${http_code}),跳过产物上传"
|
|
||||||
cat /tmp/create-release.json 2>/dev/null || true
|
|
||||||
echo "release_id=" >> "$GITHUB_OUTPUT"
|
|
||||||
fi
|
|
||||||
|
|
||||||
check:
|
|
||||||
name: 质量检查 (fmt / clippy / test)
|
|
||||||
runs-on: debian
|
|
||||||
timeout-minutes: 10
|
|
||||||
steps:
|
|
||||||
- name: 安装 Rust
|
|
||||||
run: |
|
|
||||||
if ! command -v cargo >/dev/null 2>&1; then
|
|
||||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
|
||||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
||||||
fi
|
fi
|
||||||
source "$HOME/.cargo/env" 2>/dev/null || true
|
source "$HOME/.cargo/env" 2>/dev/null || true
|
||||||
rustup component add rustfmt clippy
|
rustup toolchain install "${RUST_TOOLCHAIN}" --profile minimal \
|
||||||
|
--component rustfmt --component clippy
|
||||||
- uses: actions/checkout@v4
|
rustup default "${RUST_TOOLCHAIN}"
|
||||||
|
rustup target add "${MUSL_TARGET}" --toolchain "${RUST_TOOLCHAIN}"
|
||||||
|
rustc -V && cargo -V
|
||||||
|
|
||||||
- name: 缓存 Cargo
|
- name: 缓存 Cargo
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
@@ -158,325 +58,44 @@ jobs:
|
|||||||
~/.cargo/registry/cache
|
~/.cargo/registry/cache
|
||||||
~/.cargo/git/db
|
~/.cargo/git/db
|
||||||
target
|
target
|
||||||
key: cargo-check-${{ hashFiles('Cargo.lock') }}
|
key: cargo-${{ env.MUSL_TARGET }}-${{ env.RUST_TOOLCHAIN }}-${{ hashFiles('Cargo.lock') }}
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
cargo-check-
|
cargo-${{ env.MUSL_TARGET }}-${{ env.RUST_TOOLCHAIN }}-
|
||||||
|
cargo-${{ env.MUSL_TARGET }}-
|
||||||
|
|
||||||
- run: cargo fmt -- --check
|
# ── 质量检查(先于构建,失败即止)──────────────────────────────────
|
||||||
- run: cargo clippy --locked -- -D warnings
|
- name: fmt
|
||||||
- run: cargo test --locked
|
run: cargo fmt -- --check
|
||||||
|
|
||||||
build-linux:
|
- name: clippy
|
||||||
name: Build (x86_64-unknown-linux-musl)
|
run: cargo clippy --locked -- -D warnings
|
||||||
needs: [version, check]
|
|
||||||
runs-on: debian
|
- name: test
|
||||||
timeout-minutes: 15
|
run: cargo test --locked
|
||||||
steps:
|
|
||||||
- name: 安装依赖
|
- name: 构建 secrets-api
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
cargo build --release --locked -p secrets-api
|
||||||
sudo apt-get install -y pkg-config musl-tools binutils curl
|
|
||||||
if ! command -v cargo >/dev/null 2>&1; then
|
|
||||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
|
||||||
fi
|
|
||||||
source "$HOME/.cargo/env" 2>/dev/null || true
|
|
||||||
rustup target add x86_64-unknown-linux-musl
|
|
||||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
- name: 构建 secrets-desktop-daemon
|
||||||
|
|
||||||
- name: 缓存 Cargo
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/registry/index
|
|
||||||
~/.cargo/registry/cache
|
|
||||||
~/.cargo/git/db
|
|
||||||
target
|
|
||||||
key: cargo-x86_64-unknown-linux-musl-${{ hashFiles('Cargo.lock') }}
|
|
||||||
restore-keys: |
|
|
||||||
cargo-x86_64-unknown-linux-musl-
|
|
||||||
|
|
||||||
- run: cargo build --release --locked --target x86_64-unknown-linux-musl
|
|
||||||
- run: strip target/x86_64-unknown-linux-musl/release/${{ env.BINARY_NAME }}
|
|
||||||
|
|
||||||
- name: 上传 Release 产物
|
|
||||||
if: needs.version.outputs.release_id != ''
|
|
||||||
env:
|
|
||||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
|
||||||
run: |
|
run: |
|
||||||
[ -z "$RELEASE_TOKEN" ] && exit 0
|
cargo build --release --locked -p secrets-desktop-daemon
|
||||||
tag="${{ needs.version.outputs.tag }}"
|
|
||||||
bin="target/x86_64-unknown-linux-musl/release/${{ env.BINARY_NAME }}"
|
|
||||||
archive="${{ env.BINARY_NAME }}-${tag}-x86_64-linux-musl.tar.gz"
|
|
||||||
tar -czf "$archive" -C "$(dirname "$bin")" "$(basename "$bin")"
|
|
||||||
sha256sum "$archive" > "${archive}.sha256"
|
|
||||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-F "attachment=@${archive}" \
|
|
||||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets"
|
|
||||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-F "attachment=@${archive}.sha256" \
|
|
||||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets"
|
|
||||||
|
|
||||||
|
# ── 飞书汇总通知 ─────────────────────────────────────────────────────
|
||||||
- name: 飞书通知
|
- name: 飞书通知
|
||||||
if: always()
|
if: always()
|
||||||
env:
|
env:
|
||||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
||||||
run: |
|
run: |
|
||||||
[ -z "$WEBHOOK_URL" ] && exit 0
|
[ -z "$WEBHOOK_URL" ] && exit 0
|
||||||
command -v jq >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq jq)
|
commit="${{ github.event.head_commit.message }}"
|
||||||
tag="${{ needs.version.outputs.tag }}"
|
[ -z "$commit" ] && commit="${{ github.sha }}"
|
||||||
commit=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "N/A")
|
|
||||||
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
||||||
result="${{ job.status }}"
|
result="${{ job.status }}"
|
||||||
if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi
|
if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi
|
||||||
msg="secrets linux 构建${icon}
|
msg="secrets v3 CI ${icon}
|
||||||
版本:${tag}
|
|
||||||
提交:${commit}
|
提交:${commit}
|
||||||
作者:${{ github.actor }}
|
作者:${{ github.actor }}
|
||||||
详情:${url}"
|
详情:${url}"
|
||||||
payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}')
|
payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}')
|
||||||
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
||||||
|
|
||||||
build-macos:
|
|
||||||
name: Build (macOS aarch64 + x86_64)
|
|
||||||
needs: [version, check]
|
|
||||||
runs-on: darwin-arm64
|
|
||||||
timeout-minutes: 15
|
|
||||||
steps:
|
|
||||||
- name: 安装依赖
|
|
||||||
run: |
|
|
||||||
if ! command -v cargo >/dev/null 2>&1; then
|
|
||||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
|
||||||
fi
|
|
||||||
source "$HOME/.cargo/env" 2>/dev/null || true
|
|
||||||
rustup target add aarch64-apple-darwin
|
|
||||||
rustup target add x86_64-apple-darwin
|
|
||||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: 缓存 Cargo
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/registry/index
|
|
||||||
~/.cargo/registry/cache
|
|
||||||
~/.cargo/git/db
|
|
||||||
target
|
|
||||||
key: cargo-macos-${{ hashFiles('Cargo.lock') }}
|
|
||||||
restore-keys: |
|
|
||||||
cargo-macos-
|
|
||||||
|
|
||||||
- run: cargo build --release --locked --target aarch64-apple-darwin
|
|
||||||
- run: cargo build --release --locked --target x86_64-apple-darwin
|
|
||||||
- run: strip -x target/aarch64-apple-darwin/release/${{ env.BINARY_NAME }}
|
|
||||||
- run: strip -x target/x86_64-apple-darwin/release/${{ env.BINARY_NAME }}
|
|
||||||
|
|
||||||
- name: 上传 Release 产物
|
|
||||||
if: needs.version.outputs.release_id != ''
|
|
||||||
env:
|
|
||||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
|
||||||
run: |
|
|
||||||
[ -z "$RELEASE_TOKEN" ] && exit 0
|
|
||||||
tag="${{ needs.version.outputs.tag }}"
|
|
||||||
release_id="${{ needs.version.outputs.release_id }}"
|
|
||||||
|
|
||||||
arm_bin="target/aarch64-apple-darwin/release/${{ env.BINARY_NAME }}"
|
|
||||||
arm_archive="${{ env.BINARY_NAME }}-${tag}-aarch64-macos.tar.gz"
|
|
||||||
tar -czf "$arm_archive" -C "$(dirname "$arm_bin")" "$(basename "$arm_bin")"
|
|
||||||
shasum -a 256 "$arm_archive" > "${arm_archive}.sha256"
|
|
||||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-F "attachment=@${arm_archive}" \
|
|
||||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
|
||||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-F "attachment=@${arm_archive}.sha256" \
|
|
||||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
|
||||||
|
|
||||||
intel_bin="target/x86_64-apple-darwin/release/${{ env.BINARY_NAME }}"
|
|
||||||
intel_archive="${{ env.BINARY_NAME }}-${tag}-x86_64-macos.tar.gz"
|
|
||||||
tar -czf "$intel_archive" -C "$(dirname "$intel_bin")" "$(basename "$intel_bin")"
|
|
||||||
shasum -a 256 "$intel_archive" > "${intel_archive}.sha256"
|
|
||||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-F "attachment=@${intel_archive}" \
|
|
||||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
|
||||||
curl -fsS -H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-F "attachment=@${intel_archive}.sha256" \
|
|
||||||
"${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets"
|
|
||||||
|
|
||||||
- name: 飞书通知
|
|
||||||
if: always()
|
|
||||||
env:
|
|
||||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
|
||||||
run: |
|
|
||||||
[ -z "$WEBHOOK_URL" ] && exit 0
|
|
||||||
tag="${{ needs.version.outputs.tag }}"
|
|
||||||
commit=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "N/A")
|
|
||||||
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
|
||||||
result="${{ job.status }}"
|
|
||||||
if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi
|
|
||||||
msg="secrets macOS 双架构构建${icon}
|
|
||||||
版本:${tag}
|
|
||||||
目标:aarch64-apple-darwin, x86_64-apple-darwin
|
|
||||||
提交:${commit}
|
|
||||||
作者:${{ github.actor }}
|
|
||||||
详情:${url}"
|
|
||||||
payload=$(python3 -c "import json,sys; print(json.dumps({'msg_type':'text','content':{'text':sys.argv[1]}}))" "$msg")
|
|
||||||
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
|
||||||
|
|
||||||
build-windows:
|
|
||||||
name: Build (x86_64-pc-windows-msvc)
|
|
||||||
needs: [version, check]
|
|
||||||
runs-on: windows
|
|
||||||
timeout-minutes: 15
|
|
||||||
steps:
|
|
||||||
- name: 安装依赖
|
|
||||||
shell: pwsh
|
|
||||||
run: |
|
|
||||||
$cargoBin = Join-Path $env:USERPROFILE ".cargo\bin"
|
|
||||||
if (-not (Get-Command cargo -ErrorAction SilentlyContinue)) {
|
|
||||||
Invoke-WebRequest -Uri "https://win.rustup.rs/x86_64" -OutFile rustup-init.exe
|
|
||||||
.\rustup-init.exe -y --default-toolchain stable
|
|
||||||
Remove-Item rustup-init.exe
|
|
||||||
}
|
|
||||||
$env:Path = "$cargoBin;$env:Path"
|
|
||||||
Add-Content -Path $env:GITHUB_PATH -Value $cargoBin
|
|
||||||
rustup target add x86_64-pc-windows-msvc
|
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: 缓存 Cargo
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/registry/index
|
|
||||||
~/.cargo/registry/cache
|
|
||||||
~/.cargo/git/db
|
|
||||||
target
|
|
||||||
key: cargo-x86_64-pc-windows-msvc-${{ hashFiles('Cargo.lock') }}
|
|
||||||
restore-keys: |
|
|
||||||
cargo-x86_64-pc-windows-msvc-
|
|
||||||
|
|
||||||
- name: 构建
|
|
||||||
shell: pwsh
|
|
||||||
run: cargo build --release --locked --target x86_64-pc-windows-msvc
|
|
||||||
|
|
||||||
- name: 上传 Release 产物
|
|
||||||
if: needs.version.outputs.release_id != ''
|
|
||||||
shell: pwsh
|
|
||||||
env:
|
|
||||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
|
||||||
run: |
|
|
||||||
if (-not $env:RELEASE_TOKEN) { exit 0 }
|
|
||||||
$tag = "${{ needs.version.outputs.tag }}"
|
|
||||||
$bin = "target\x86_64-pc-windows-msvc\release\${{ env.BINARY_NAME }}.exe"
|
|
||||||
$archive = "${{ env.BINARY_NAME }}-${tag}-x86_64-windows.zip"
|
|
||||||
Compress-Archive -Path $bin -DestinationPath $archive -Force
|
|
||||||
$hash = (Get-FileHash -Algorithm SHA256 $archive).Hash.ToLower()
|
|
||||||
Set-Content -Path "${archive}.sha256" -Value "$hash $archive" -NoNewline
|
|
||||||
$url = "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets"
|
|
||||||
Invoke-RestMethod -Uri $url -Method Post `
|
|
||||||
-Headers @{ "Authorization" = "token $env:RELEASE_TOKEN" } `
|
|
||||||
-Form @{ attachment = Get-Item $archive }
|
|
||||||
Invoke-RestMethod -Uri $url -Method Post `
|
|
||||||
-Headers @{ "Authorization" = "token $env:RELEASE_TOKEN" } `
|
|
||||||
-Form @{ attachment = Get-Item "${archive}.sha256" }
|
|
||||||
|
|
||||||
- name: 飞书通知
|
|
||||||
if: always()
|
|
||||||
shell: pwsh
|
|
||||||
env:
|
|
||||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
|
||||||
run: |
|
|
||||||
if (-not $env:WEBHOOK_URL) { exit 0 }
|
|
||||||
$tag = "${{ needs.version.outputs.tag }}"
|
|
||||||
$commit = (git log -1 --pretty=format:"%s" 2>$null) ?? "N/A"
|
|
||||||
$url = "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
|
||||||
$result = "${{ job.status }}"
|
|
||||||
$icon = if ($result -eq "success") { "✅" } else { "❌" }
|
|
||||||
$msg = "secrets windows 构建${icon}`n版本:${tag}`n提交:${commit}`n作者:${{ github.actor }}`n详情:${url}"
|
|
||||||
$payload = @{ msg_type = "text"; content = @{ text = $msg } } | ConvertTo-Json
|
|
||||||
Invoke-RestMethod -Uri $env:WEBHOOK_URL -Method Post `
|
|
||||||
-ContentType "application/json" -Body $payload
|
|
||||||
|
|
||||||
publish-release:
|
|
||||||
name: 发布草稿 Release
|
|
||||||
needs: [version, build-linux, build-macos, build-windows]
|
|
||||||
if: always() && needs.version.outputs.release_id != ''
|
|
||||||
runs-on: debian
|
|
||||||
timeout-minutes: 5
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: 发布草稿
|
|
||||||
env:
|
|
||||||
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
|
||||||
run: |
|
|
||||||
[ -z "$RELEASE_TOKEN" ] && exit 0
|
|
||||||
|
|
||||||
linux_r="${{ needs.build-linux.result }}"
|
|
||||||
macos_r="${{ needs.build-macos.result }}"
|
|
||||||
windows_r="${{ needs.build-windows.result }}"
|
|
||||||
if [ "$linux_r" != "success" ] || [ "$macos_r" != "success" ] || [ "$windows_r" != "success" ]; then
|
|
||||||
echo "存在未成功的构建任务,保留草稿 Release"
|
|
||||||
echo "linux=${linux_r} macos=${macos_r} windows=${windows_r}"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
release_api="${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}"
|
|
||||||
http_code=$(curl -sS -o /tmp/publish-release.json -w '%{http_code}' \
|
|
||||||
-H "Authorization: token $RELEASE_TOKEN" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-X PATCH "$release_api" \
|
|
||||||
-d '{"draft":false}')
|
|
||||||
|
|
||||||
if [ "$http_code" != "200" ]; then
|
|
||||||
echo "发布草稿 Release 失败 (HTTP ${http_code})"
|
|
||||||
cat /tmp/publish-release.json 2>/dev/null || true
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo "Release 已发布"
|
|
||||||
|
|
||||||
- name: 飞书汇总通知
|
|
||||||
if: always()
|
|
||||||
env:
|
|
||||||
WEBHOOK_URL: ${{ vars.WEBHOOK_URL }}
|
|
||||||
run: |
|
|
||||||
[ -z "$WEBHOOK_URL" ] && exit 0
|
|
||||||
command -v jq >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq jq)
|
|
||||||
|
|
||||||
tag="${{ needs.version.outputs.tag }}"
|
|
||||||
tag_exists="${{ needs.version.outputs.tag_exists }}"
|
|
||||||
commit=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "N/A")
|
|
||||||
url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}"
|
|
||||||
|
|
||||||
linux_r="${{ needs.build-linux.result }}"
|
|
||||||
macos_r="${{ needs.build-macos.result }}"
|
|
||||||
windows_r="${{ needs.build-windows.result }}"
|
|
||||||
publish_r="${{ job.status }}"
|
|
||||||
|
|
||||||
icon() { case "$1" in success) echo "✅";; skipped) echo "⏭";; *) echo "❌";; esac; }
|
|
||||||
|
|
||||||
if [ "$linux_r" = "success" ] && [ "$macos_r" = "success" ] && [ "$windows_r" = "success" ] && [ "$publish_r" = "success" ]; then
|
|
||||||
status="发布成功 ✅"
|
|
||||||
elif [ "$linux_r" != "success" ] || [ "$macos_r" != "success" ] || [ "$windows_r" != "success" ]; then
|
|
||||||
status="构建失败 ❌"
|
|
||||||
else
|
|
||||||
status="发布失败 ❌"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$tag_exists" = "false" ]; then
|
|
||||||
version_line="🆕 新版本 ${tag}"
|
|
||||||
else
|
|
||||||
version_line="🔄 重复构建 ${tag}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
msg="secrets ${status}
|
|
||||||
${version_line}
|
|
||||||
linux $(icon "$linux_r") | macOS $(icon "$macos_r") | windows $(icon "$windows_r") | Release $(icon "$publish_r")
|
|
||||||
提交:${commit}
|
|
||||||
作者:${{ github.actor }}
|
|
||||||
详情:${url}"
|
|
||||||
|
|
||||||
payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}')
|
|
||||||
curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL"
|
|
||||||
|
|||||||
11
.gitignore
vendored
11
.gitignore
vendored
@@ -2,3 +2,14 @@
|
|||||||
.env
|
.env
|
||||||
.DS_Store
|
.DS_Store
|
||||||
.cursor/
|
.cursor/
|
||||||
|
*.pem
|
||||||
|
tmp/
|
||||||
|
client_secret_*.apps.googleusercontent.com.json
|
||||||
|
node_modules/
|
||||||
|
*.pyc
|
||||||
|
|
||||||
|
# Tauri app icon pack: generated by `cargo tauri icon apps/desktop/src-tauri/icons/icon.png`
|
||||||
|
# Version control only the 1024×1024 master; regenerate the rest locally or in release builds.
|
||||||
|
apps/desktop/src-tauri/icons/**
|
||||||
|
!apps/desktop/src-tauri/icons/
|
||||||
|
!apps/desktop/src-tauri/icons/icon.png
|
||||||
149
.vscode/tasks.json
vendored
149
.vscode/tasks.json
vendored
@@ -1,149 +0,0 @@
|
|||||||
{
|
|
||||||
"version": "2.0.0",
|
|
||||||
"tasks": [
|
|
||||||
{
|
|
||||||
"label": "build",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "cargo build",
|
|
||||||
"group": { "kind": "build", "isDefault": true }
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "cli: version",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets -V",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "cli: help",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets --help",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "cli: help add",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets help add",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "cli: help config",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets help config",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "cli: config path",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets config path",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "cli: config show",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets config show",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search all",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search all (verbose)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets --verbose search",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search by namespace (refining)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search -n refining",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search by namespace (ricnsmart)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search -n ricnsmart",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search servers",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search --kind server",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search services",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search --kind service",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search keys",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search --kind key",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search by tag (aliyun)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search --tag aliyun",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search by tag (hongkong)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search --tag hongkong",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: search keyword (gitea)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search -q gitea",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: inject service secrets",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets inject -n refining --kind service --name gitea",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: combined search (ricnsmart + server + shanghai)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "./target/debug/secrets search -n ricnsmart --kind server --tag shanghai",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: add + delete roundtrip",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name roundtrip-test --tag test -m foo=bar -s password=secret123 && echo '--- search metadata ---' && ./target/debug/secrets search -n test && echo '--- inject secrets ---' && ./target/debug/secrets inject -n test --kind demo --name roundtrip-test && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name roundtrip-test && echo '--- verify deleted ---' && ./target/debug/secrets search -n test",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: add + delete roundtrip (verbose)",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "echo '--- add (verbose) ---' && ./target/debug/secrets --verbose add -n test --kind demo --name roundtrip-verbose --tag test -m foo=bar -s password=secret123 && echo '--- delete (verbose) ---' && ./target/debug/secrets --verbose delete -n test --kind demo --name roundtrip-verbose",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: update roundtrip",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name update-test --tag v1 -m env=staging && echo '--- update ---' && ./target/debug/secrets update -n test --kind demo --name update-test --add-tag v2 --remove-tag v1 -m env=production && echo '--- verify ---' && ./target/debug/secrets search -n test --kind demo && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind demo --name update-test",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: audit log",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name audit-test -m foo=bar -s key=val && echo '--- update ---' && ./target/debug/secrets update -n test --kind demo --name audit-test -m foo=baz && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name audit-test && echo '--- audit log (last 5) ---' && psql $DATABASE_URL -c \"SELECT action, namespace, kind, name, actor, detail, created_at FROM audit_log ORDER BY created_at DESC LIMIT 5;\"",
|
|
||||||
"dependsOn": "build"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "test: add with file secret",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "echo '--- add key from file ---' && ./target/debug/secrets add -n test --kind key --name test-key --tag test -s content=@./refining/keys/Vultr && echo '--- verify metadata ---' && ./target/debug/secrets search -n test --kind key && echo '--- verify inject ---' && ./target/debug/secrets inject -n test --kind key --name test-key && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind key --name test-key",
|
|
||||||
"dependsOn": "build"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
714
AGENTS.md
714
AGENTS.md
@@ -1,540 +1,240 @@
|
|||||||
# Secrets CLI — AGENTS.md
|
# Secrets — AGENTS.md
|
||||||
|
|
||||||
## 提交 / 发版硬规则(优先于下文其他说明)
|
本仓库当前为 **v3 桌面端架构**:
|
||||||
|
|
||||||
1. 涉及 `src/**`、`Cargo.toml`、`Cargo.lock`、CLI 行为变更的提交,默认视为**需要发版**,除非用户明确说明“本次不发版”。
|
- `apps/api`:远端 JSON API
|
||||||
2. 发版前必须先检查 `Cargo.toml` 中的 `version`,再检查是否已存在对应 tag:`git tag -l 'secrets-*'`。
|
- `apps/desktop/src-tauri`:桌面客户端
|
||||||
3. 若当前版本对应 tag 已存在,必须先 bump `Cargo.toml` 的 `version`,再执行 `cargo build` 同步 `Cargo.lock`,然后才能提交。
|
- `crates/desktop-daemon`:本地 MCP daemon
|
||||||
4. 提交前优先运行 `./scripts/release-check.sh`;该脚本会检查重复版本并执行 `cargo fmt -- --check && cargo clippy --locked -- -D warnings && cargo test --locked`。
|
- `crates/application` / `domain` / `infrastructure-db`:v3 业务与数据层
|
||||||
|
|
||||||
跨设备密钥与配置管理 CLI 工具,将 refining / ricnsmart 两个项目的服务器信息、服务凭据存储到 PostgreSQL 18,供 AI 工具读取上下文。敏感数据(encrypted 字段)使用 AES-256-GCM 加密,主密钥由 Argon2id 从主密码派生并存入平台安全存储(macOS Keychain / Windows Credential Manager / Linux keyutils)。
|
旧 `secrets-core` / `secrets-mcp` / `secrets-mcp-local` 已移除,不再作为开发入口。
|
||||||
|
|
||||||
## 项目结构
|
## 版本控制
|
||||||
|
|
||||||
```
|
本仓库使用 **[Jujutsu (jj)](https://jj-vcs.dev/)** 作为版本控制系统(纯 jj 模式,无 `.git` 目录)。
|
||||||
secrets/
|
|
||||||
src/
|
|
||||||
main.rs # CLI 入口,clap 命令定义,auto-migrate,--verbose 全局参数
|
|
||||||
output.rs # OutputMode 枚举 + TTY 检测(TTY→text,非 TTY→json-compact)
|
|
||||||
config.rs # 配置读写:~/.config/secrets/config.toml(database_url)
|
|
||||||
db.rs # PgPool 创建 + 建表/索引(幂等,含 audit_log + kv_config + secrets_history)
|
|
||||||
crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串
|
|
||||||
models.rs # Secret 结构体(sqlx::FromRow + serde,含 version 字段)
|
|
||||||
audit.rs # 审计写入:log_tx(事务内)/ log(池,保留备用)
|
|
||||||
commands/
|
|
||||||
init.rs # init 命令:主密钥初始化(每台设备一次)
|
|
||||||
add.rs # add 命令:upsert,事务化,含历史快照,支持 key:=json 类型化值与嵌套路径写入
|
|
||||||
config.rs # config 命令:set-db / show / path(持久化 database_url)
|
|
||||||
search.rs # search 命令:多条件查询,公开 fetch_rows / build_env_map
|
|
||||||
delete.rs # delete 命令:事务化,含历史快照
|
|
||||||
update.rs # update 命令:增量更新,CAS 并发保护,含历史快照
|
|
||||||
rollback.rs # rollback / history 命令:版本回滚与历史查看
|
|
||||||
run.rs # inject / run 命令:临时环境变量注入
|
|
||||||
upgrade.rs # upgrade 命令:检查、校验摘要并下载最新版本,自动替换二进制
|
|
||||||
scripts/
|
|
||||||
release-check.sh # 发版前检查版本号/tag 是否重复,并执行 fmt/clippy/test
|
|
||||||
setup-gitea-actions.sh # 配置 Gitea Actions 变量与 Secrets
|
|
||||||
.gitea/workflows/
|
|
||||||
secrets.yml # CI:fmt + clippy + musl 构建 + Release 上传 + 飞书通知
|
|
||||||
.vscode/tasks.json # 本地测试任务(build / config / search / add+delete / update / audit 等)
|
|
||||||
```
|
|
||||||
|
|
||||||
## 数据库
|
### 常用 jj 命令对照
|
||||||
|
|
||||||
- **Host**: `<host>:<port>`
|
| 操作 | jj 命令 |
|
||||||
- **Database**: `secrets`
|
|------|---------|
|
||||||
- **连接串**: `postgres://postgres:<password>@<host>:<port>/secrets`
|
| 查看历史 | `jj log` / `jj log 'all()'` |
|
||||||
- **表**: `secrets`(主表)+ `audit_log`(审计表)+ `kv_config`(Argon2 salt 等),首次连接自动建表(auto-migrate)
|
| 查看状态 | `jj status` |
|
||||||
|
| 新建提交 | `jj commit` |
|
||||||
|
| 创建新变更 | `jj new` |
|
||||||
|
| 变基 | `jj rebase` |
|
||||||
|
| 合并提交 | `jj squash` |
|
||||||
|
| 撤销操作 | `jj undo` |
|
||||||
|
| 查看标签 | `jj tag list` |
|
||||||
|
| 查看分支 | `jj bookmark list` |
|
||||||
|
| 推送远端 | `jj git push` |
|
||||||
|
| 拉取远端 | `jj git fetch` |
|
||||||
|
|
||||||
### 表结构
|
### 注意事项
|
||||||
|
|
||||||
```sql
|
- 本仓库为纯 `jj` 模式,本地不要使用 `git` 命令。
|
||||||
secrets (
|
- CI Runner 侧仍可能使用 `git` 拉代码,这不影响本地开发。
|
||||||
id UUID PRIMARY KEY DEFAULT uuidv7(), -- PG18 时间有序 UUID
|
- 检查 tag 是否存在时,使用 `jj log --no-graph --revisions "tag(${tag})"`。
|
||||||
namespace VARCHAR(64) NOT NULL, -- 一级隔离: "refining" | "ricnsmart"
|
|
||||||
kind VARCHAR(64) NOT NULL, -- 类型: "server" | "service"(可扩展)
|
|
||||||
name VARCHAR(256) NOT NULL, -- 人类可读标识
|
|
||||||
tags TEXT[] NOT NULL DEFAULT '{}', -- 灵活标签: ["aliyun","hongkong"]
|
|
||||||
metadata JSONB NOT NULL DEFAULT '{}', -- 明文描述: ip, desc, domains, location...
|
|
||||||
encrypted BYTEA NOT NULL DEFAULT '\x', -- AES-256-GCM 密文: nonce(12B)||ciphertext+tag
|
|
||||||
version BIGINT NOT NULL DEFAULT 1, -- 乐观锁版本号,每次写操作自增
|
|
||||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
||||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
||||||
UNIQUE(namespace, kind, name)
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
```sql
|
## 提交前检查
|
||||||
secrets_history (
|
|
||||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
|
||||||
secret_id UUID NOT NULL, -- 对应 secrets.id
|
|
||||||
namespace VARCHAR(64) NOT NULL,
|
|
||||||
kind VARCHAR(64) NOT NULL,
|
|
||||||
name VARCHAR(256) NOT NULL,
|
|
||||||
version BIGINT NOT NULL, -- 被快照时的版本号
|
|
||||||
action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback'
|
|
||||||
tags TEXT[] NOT NULL DEFAULT '{}',
|
|
||||||
metadata JSONB NOT NULL DEFAULT '{}',
|
|
||||||
encrypted BYTEA NOT NULL DEFAULT '\x', -- 快照时的加密密文
|
|
||||||
actor VARCHAR(128) NOT NULL DEFAULT '',
|
|
||||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
```sql
|
每次提交前至少运行:
|
||||||
kv_config (
|
|
||||||
key TEXT PRIMARY KEY, -- 如 'argon2_salt'
|
|
||||||
value BYTEA NOT NULL -- Argon2id salt,首台设备 init 时生成
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### audit_log 表结构
|
|
||||||
|
|
||||||
```sql
|
|
||||||
audit_log (
|
|
||||||
id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
|
||||||
action VARCHAR(32) NOT NULL, -- 'add' | 'update' | 'delete'
|
|
||||||
namespace VARCHAR(64) NOT NULL,
|
|
||||||
kind VARCHAR(64) NOT NULL,
|
|
||||||
name VARCHAR(256) NOT NULL,
|
|
||||||
detail JSONB NOT NULL DEFAULT '{}', -- 变更摘要(tags/meta keys/secret keys,不含 value)
|
|
||||||
actor VARCHAR(128) NOT NULL DEFAULT '', -- 操作者($USER 环境变量)
|
|
||||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### 字段职责划分
|
|
||||||
|
|
||||||
| 字段 | 存什么 | 示例 |
|
|
||||||
|------|--------|------|
|
|
||||||
| `namespace` | 项目/团队隔离 | `refining`, `ricnsmart` |
|
|
||||||
| `kind` | 记录类型 | `server`, `service` |
|
|
||||||
| `name` | 唯一标识名 | `i-uf63f2uookgs5uxmrdyc`, `gitea` |
|
|
||||||
| `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` |
|
|
||||||
| `metadata` | 明文非敏感信息 | `{"ip":"47.243.154.187","desc":"Grafana","domains":["..."]}` |
|
|
||||||
| `encrypted` | 敏感凭据,AES-256-GCM 加密存储 | 二进制密文,解密后为 `{"ssh_key":"...","password":"..."}` |
|
|
||||||
|
|
||||||
## 数据库配置
|
|
||||||
|
|
||||||
首次使用需显式配置数据库连接,设置一次后在该设备上持久生效:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
cargo fmt -- --check
|
||||||
secrets config show # 查看当前配置(密码脱敏)
|
cargo clippy --locked -- -D warnings
|
||||||
secrets config path # 打印配置文件路径
|
cargo test --locked
|
||||||
```
|
```
|
||||||
|
|
||||||
`set-db` 会先验证连接可用,成功后才写入配置文件;连接失败时提示 "Database connection failed" 且不修改配置。
|
也可以直接运行:
|
||||||
|
|
||||||
配置文件:`~/.config/secrets/config.toml`,权限 0600。`--db-url` 参数可一次性覆盖。
|
|
||||||
|
|
||||||
## 主密钥与加密
|
|
||||||
|
|
||||||
首次使用(每台设备各执行一次):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
|
||||||
secrets init # 提示输入主密码,Argon2id 派生主密钥后存入 OS 钥匙串
|
|
||||||
```
|
|
||||||
|
|
||||||
主密码不存储;salt 存于 `kv_config`,首台设备生成后共享,确保同一主密码在所有设备派生出相同主密钥。
|
|
||||||
|
|
||||||
主密钥存储后端:macOS Keychain、Windows Credential Manager、Linux keyutils(会话级,重启后需再次 `secrets init`)。
|
|
||||||
|
|
||||||
**从旧版(明文 JSONB)升级**:升级后执行 `secrets init` 即可(明文记录需手动重新 add 或通过 update 更新)。
|
|
||||||
|
|
||||||
## CLI 命令
|
|
||||||
|
|
||||||
### AI 使用主路径
|
|
||||||
|
|
||||||
**读取一律用 `search`,写入用 `add` / `update`,避免反复查帮助。**
|
|
||||||
|
|
||||||
输出格式规则:
|
|
||||||
- TTY(终端直接运行)→ 默认 `text`
|
|
||||||
- 非 TTY(管道/重定向/AI 调用)→ 自动 `json-compact`
|
|
||||||
- 显式 `-o json` → 美化 JSON
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### init — 主密钥初始化(每台设备一次)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 首次设备:生成 Argon2id salt 并存库,派生主密钥后存 OS 钥匙串
|
|
||||||
secrets init
|
|
||||||
|
|
||||||
# 后续设备:复用已有 salt,派生主密钥后存钥匙串(主密码需与首台相同)
|
|
||||||
secrets init
|
|
||||||
```
|
|
||||||
|
|
||||||
### search — 发现与读取
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明(带典型值)
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name gitea | i-uf63f2uookgs5uxmrdyc | mqtt
|
|
||||||
# --tag aliyun | hongkong | production
|
|
||||||
# -q / --query mqtt | grafana | gitea (模糊匹配 name/namespace/kind/tags/metadata)
|
|
||||||
# --show-secrets 已弃用;search 不再直接展示 secrets
|
|
||||||
# -f / --field metadata.ip | metadata.url | metadata.default_org
|
|
||||||
# --summary 不带值的 flag,仅返回摘要(name/tags/desc/updated_at)
|
|
||||||
# --limit 20 | 50(默认 50)
|
|
||||||
# --offset 0 | 10 | 20(分页偏移)
|
|
||||||
# --sort name(默认)| updated | created
|
|
||||||
# -o / --output text | json | json-compact
|
|
||||||
|
|
||||||
# 发现概览(起步推荐)
|
|
||||||
secrets search --summary --limit 20
|
|
||||||
secrets search -n refining --summary --limit 20
|
|
||||||
secrets search --sort updated --limit 10 --summary
|
|
||||||
|
|
||||||
# 精确定位单条记录
|
|
||||||
secrets search -n refining --kind service --name gitea
|
|
||||||
secrets search -n refining --kind server --name i-uf63f2uookgs5uxmrdyc
|
|
||||||
|
|
||||||
# 精确定位并获取完整内容(secrets 保持加密占位)
|
|
||||||
secrets search -n refining --kind service --name gitea -o json
|
|
||||||
|
|
||||||
# 直接提取 metadata 字段值(最短路径)
|
|
||||||
secrets search -n refining --kind service --name gitea -f metadata.url
|
|
||||||
secrets search -n refining --kind service --name gitea \
|
|
||||||
-f metadata.url -f metadata.default_org
|
|
||||||
|
|
||||||
# 需要 secrets 时,改用 inject / run
|
|
||||||
secrets inject -n refining --kind service --name gitea
|
|
||||||
secrets run -n refining --kind service --name gitea -- printenv
|
|
||||||
|
|
||||||
# 模糊关键词搜索
|
|
||||||
secrets search -q mqtt
|
|
||||||
secrets search -q grafana
|
|
||||||
secrets search -q 47.117
|
|
||||||
|
|
||||||
# 按条件过滤
|
|
||||||
secrets search -n refining --kind service
|
|
||||||
secrets search -n ricnsmart --kind server
|
|
||||||
secrets search --tag hongkong
|
|
||||||
secrets search --tag aliyun --summary
|
|
||||||
|
|
||||||
# 分页
|
|
||||||
secrets search -n refining --summary --limit 10 --offset 0
|
|
||||||
secrets search -n refining --summary --limit 10 --offset 10
|
|
||||||
|
|
||||||
# 管道 / AI 调用(非 TTY 自动 json-compact)
|
|
||||||
secrets search -n refining --kind service | jq '.[].name'
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### add — 新增或全量覆盖(upsert)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明(带典型值)
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name gitea | i-uf63f2uookgs5uxmrdyc
|
|
||||||
# --tag aliyun | hongkong(可重复)
|
|
||||||
# -m / --meta ip=47.117.131.22 | desc="Aliyun ECS" | url=https://... | tls:cert@./cert.pem(可重复)
|
|
||||||
# -s / --secret token=<value> | ssh_key=@./key.pem | password=secret123 | credentials:content@./key.pem(可重复)
|
|
||||||
|
|
||||||
# 添加服务器
|
|
||||||
secrets add -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \
|
|
||||||
--tag aliyun --tag shanghai \
|
|
||||||
-m ip=47.117.131.22 -m desc="Aliyun Shanghai ECS" \
|
|
||||||
-s username=root -s ssh_key=@./keys/voson_shanghai_e.pem
|
|
||||||
|
|
||||||
# 添加服务凭据
|
|
||||||
secrets add -n refining --kind service --name gitea \
|
|
||||||
--tag gitea \
|
|
||||||
-m url=https://gitea.refining.dev -m default_org=refining -m username=voson \
|
|
||||||
-s token=<token> -s runner_token=<runner_token>
|
|
||||||
|
|
||||||
# 从文件读取 token
|
|
||||||
secrets add -n ricnsmart --kind service --name mqtt \
|
|
||||||
-m host=mqtt.ricnsmart.com -m port=1883 \
|
|
||||||
-s password=@./mqtt_password.txt
|
|
||||||
|
|
||||||
# 多行文件直接写入嵌套 secret 字段
|
|
||||||
secrets add -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \
|
|
||||||
-s credentials:content@./keys/voson_shanghai_e.pem
|
|
||||||
|
|
||||||
# 使用类型化值(key:=<json>)存储非字符串类型
|
|
||||||
secrets add -n refining --kind service --name prometheus \
|
|
||||||
-m scrape_interval:=15 \
|
|
||||||
-m enabled:=true \
|
|
||||||
-m labels:='["prod","metrics"]' \
|
|
||||||
-s api_key=abc123
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### update — 增量更新(记录必须已存在)
|
|
||||||
|
|
||||||
只有传入的字段才会变动,其余全部保留。
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明(带典型值)
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name gitea | i-uf63f2uookgs5uxmrdyc
|
|
||||||
# --add-tag production | backup(不影响已有 tag,可重复)
|
|
||||||
# --remove-tag staging | deprecated(可重复)
|
|
||||||
# -m / --meta ip=10.0.0.1 | desc="新描述" | credentials:username=root(新增或覆盖,可重复)
|
|
||||||
# --remove-meta old_port | legacy_key | credentials:content(删除 metadata 字段,可重复)
|
|
||||||
# -s / --secret token=<new> | ssh_key=@./new.pem | credentials:content@./new.pem(新增或覆盖,可重复)
|
|
||||||
# --remove-secret old_password | deprecated_key | credentials:content(删除 secret 字段,可重复)
|
|
||||||
|
|
||||||
# 更新单个 metadata 字段
|
|
||||||
secrets update -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \
|
|
||||||
-m ip=10.0.0.1
|
|
||||||
|
|
||||||
# 轮换 token
|
|
||||||
secrets update -n refining --kind service --name gitea \
|
|
||||||
-s token=<new-token>
|
|
||||||
|
|
||||||
# 新增 tag 并轮换 token
|
|
||||||
secrets update -n refining --kind service --name gitea \
|
|
||||||
--add-tag production \
|
|
||||||
-s token=<new-token>
|
|
||||||
|
|
||||||
# 移除废弃字段
|
|
||||||
secrets update -n refining --kind service --name mqtt \
|
|
||||||
--remove-meta old_port --remove-secret old_password
|
|
||||||
|
|
||||||
# 从文件更新嵌套 secret 字段
|
|
||||||
secrets update -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \
|
|
||||||
-s credentials:content@./keys/voson_shanghai_e.pem
|
|
||||||
|
|
||||||
# 删除嵌套字段
|
|
||||||
secrets update -n refining --kind server --name i-uf63f2uookgs5uxmrdyc \
|
|
||||||
--remove-secret credentials:content
|
|
||||||
|
|
||||||
# 移除 tag
|
|
||||||
secrets update -n refining --kind service --name gitea --remove-tag staging
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### delete — 删除记录
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明(带典型值)
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name gitea | i-uf63f2uookgs5uxmrdyc(必须精确匹配)
|
|
||||||
|
|
||||||
# 删除服务凭据
|
|
||||||
secrets delete -n refining --kind service --name legacy-mqtt
|
|
||||||
|
|
||||||
# 删除服务器记录
|
|
||||||
secrets delete -n ricnsmart --kind server --name i-old-server-id
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### history — 查看变更历史
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name 记录名
|
|
||||||
# --limit 返回条数(默认 20)
|
|
||||||
|
|
||||||
# 查看某条记录的历史版本列表
|
|
||||||
secrets history -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# 查最近 5 条
|
|
||||||
secrets history -n refining --kind service --name gitea --limit 5
|
|
||||||
|
|
||||||
# JSON 输出
|
|
||||||
secrets history -n refining --kind service --name gitea -o json
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### rollback — 回滚到指定版本
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name 记录名
|
|
||||||
# --to-version <N> 目标版本号(省略则恢复最近一次快照)
|
|
||||||
|
|
||||||
# 撤销上次修改(回滚到最近一次快照)
|
|
||||||
secrets rollback -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# 回滚到版本 3
|
|
||||||
secrets rollback -n refining --kind service --name gitea --to-version 3
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### inject — 输出临时环境变量
|
|
||||||
|
|
||||||
敏感值仅打印到 stdout,不持久化、不写入当前 shell。
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name 记录名
|
|
||||||
# --tag 按 tag 过滤(可重复)
|
|
||||||
# --prefix 变量名前缀(留空则以记录 name 作前缀)
|
|
||||||
# -o / --output text(默认 KEY=VALUE)| json | json-compact
|
|
||||||
|
|
||||||
# 打印单条记录的所有变量(KEY=VALUE 格式)
|
|
||||||
secrets inject -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# 自定义前缀
|
|
||||||
secrets inject -n refining --kind service --name gitea --prefix GITEA
|
|
||||||
|
|
||||||
# JSON 格式(适合管道或脚本解析)
|
|
||||||
secrets inject -n refining --kind service --name gitea -o json
|
|
||||||
|
|
||||||
# eval 注入当前 shell(谨慎使用)
|
|
||||||
eval $(secrets inject -n refining --kind service --name gitea)
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### run — 向子进程注入 secrets 并执行命令
|
|
||||||
|
|
||||||
secrets 仅作用于子进程环境,不修改当前 shell,进程退出码透传。
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 参数说明
|
|
||||||
# -n / --namespace refining | ricnsmart
|
|
||||||
# --kind server | service
|
|
||||||
# --name 记录名
|
|
||||||
# --tag 按 tag 过滤(可重复)
|
|
||||||
# --prefix 变量名前缀
|
|
||||||
# -- <command> 执行的命令及参数
|
|
||||||
|
|
||||||
# 向脚本注入单条记录的 secrets
|
|
||||||
secrets run -n refining --kind service --name gitea -- ./deploy.sh
|
|
||||||
|
|
||||||
# 按 tag 批量注入(多条记录合并)
|
|
||||||
secrets run --tag production -- env | grep -i token
|
|
||||||
|
|
||||||
# 验证注入了哪些变量
|
|
||||||
secrets run -n refining --kind service --name gitea -- printenv
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### upgrade — 自动更新 CLI 二进制
|
|
||||||
|
|
||||||
从 Gitea Release 下载最新版本,校验对应 `.sha256` 摘要后替换当前二进制,无需数据库连接或主密钥。
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 检查是否有新版本(不下载)
|
|
||||||
secrets upgrade --check
|
|
||||||
|
|
||||||
# 下载、校验 SHA-256 并安装最新版本
|
|
||||||
secrets upgrade
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### config — 配置管理(无需主密钥)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 设置数据库连接(每台设备执行一次,之后永久生效;先验证连接可用再写入)
|
|
||||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
|
||||||
|
|
||||||
# 查看当前配置(密码脱敏)
|
|
||||||
secrets config show
|
|
||||||
|
|
||||||
# 打印配置文件路径
|
|
||||||
secrets config path
|
|
||||||
# 输出: /Users/<user>/.config/secrets/config.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 全局参数
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# debug 日志(位于子命令之前)
|
|
||||||
secrets --verbose search -q mqtt
|
|
||||||
secrets -v add -n refining --kind service --name gitea -m url=xxx -s token=yyy
|
|
||||||
|
|
||||||
# 或通过环境变量精细控制
|
|
||||||
RUST_LOG=secrets=trace secrets search
|
|
||||||
|
|
||||||
# 一次性覆盖数据库连接
|
|
||||||
secrets --db-url "postgres://..." search -n refining
|
|
||||||
```
|
|
||||||
|
|
||||||
## 代码规范
|
|
||||||
|
|
||||||
- 错误处理:统一使用 `anyhow::Result`,不用 `unwrap()`
|
|
||||||
- 异步:全程 `tokio`,数据库操作 `sqlx` async
|
|
||||||
- SQL:使用 `sqlx::query` / `sqlx::query_as` 绑定参数,禁止字符串拼接(搜索的动态 WHERE 子句除外,需使用参数绑定 `$1/$2`)
|
|
||||||
- 新增 `kind` 类型时:只需在 `add` 调用时传入,无需改代码
|
|
||||||
- 字段命名:CLI 短标志 `-n`=namespace,`-m`=meta,`-s`=secret,`-q`=query,`-v`=verbose,`-f`=field,`-o`=output
|
|
||||||
- 日志:用户可见输出用 `println!`;调试/运维信息用 `tracing::debug!`/`info!`/`warn!`/`error!`
|
|
||||||
- 审计:`add`/`update`/`delete` 成功后调用 `audit::log_tx`,写入 `audit_log` 表;失败只 warn 不中断
|
|
||||||
- 加密:`encrypted` 列存储 AES-256-GCM 密文;`add`/`update`/`search`/`delete` 需主密钥(`secrets init` 后从 OS 钥匙串加载)
|
|
||||||
- 输出:读命令通过 `OutputMode` 支持 text/json/json-compact/env;写命令 `add` 同样支持 `-o json`
|
|
||||||
|
|
||||||
## 提交前检查(必须全部通过)
|
|
||||||
|
|
||||||
每次提交代码前,请在本地依次执行以下检查,**全部通过后再 push**:
|
|
||||||
|
|
||||||
优先使用:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./scripts/release-check.sh
|
./scripts/release-check.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
它等价于先检查版本号 / tag,再执行下面的格式、Lint、测试。
|
## 项目结构
|
||||||
|
|
||||||
### 1. 版本号(按需)
|
```text
|
||||||
|
secrets/
|
||||||
若本次改动需要发版,请先确认 `Cargo.toml` 中的 `version` 已提升,避免 CI 打出的 Tag 与已有版本重复。**升级版本后需同时更新 `Cargo.lock`**(运行 `cargo build` 即可自动同步),否则 CI 中 `cargo clippy --locked` 会因 lock 与 manifest 不一致而失败。可通过 git tag 判断:
|
Cargo.toml
|
||||||
|
apps/
|
||||||
```bash
|
api/ # 远端 JSON API
|
||||||
# 查看当前 Cargo.toml 版本
|
desktop/src-tauri/ # 桌面端
|
||||||
grep '^version' Cargo.toml
|
crates/
|
||||||
|
application/ # v3 应用服务
|
||||||
# 查看是否已存在该版本对应的 tag(CI 使用格式 secrets-<version>)
|
client-integrations/ # Cursor / Claude Code 配置注入
|
||||||
git tag -l 'secrets-*'
|
crypto/ # 通用加密辅助
|
||||||
|
desktop-daemon/ # 本地 MCP daemon
|
||||||
|
device-auth/ # 设备登录 / Desktop OAuth 辅助
|
||||||
|
domain/ # v3 领域模型
|
||||||
|
infrastructure-db/ # 数据库与迁移
|
||||||
|
deploy/
|
||||||
|
scripts/
|
||||||
|
.gitea/workflows/
|
||||||
|
.vscode/tasks.json
|
||||||
```
|
```
|
||||||
|
|
||||||
若当前版本已被 tag(例如已有 `secrets-0.3.0` 且 `Cargo.toml` 仍为 `0.3.0`),则应在 `Cargo.toml` 中 bump 版本号,再执行 `cargo build` 同步 `Cargo.lock`,最后一并提交,以便 CI 自动打新 Tag 并发布 Release。
|
## 数据库
|
||||||
|
|
||||||
### 2. 格式、Lint、测试
|
- 建议数据库名:`secrets-v3`
|
||||||
|
- 连接串:`SECRETS_DATABASE_URL`
|
||||||
|
- 首次连接会自动运行 `secrets-infrastructure-db::migrate_current_schema`
|
||||||
|
|
||||||
```bash
|
当前 v3 主要表:
|
||||||
cargo fmt -- --check # 格式检查(不通过则运行 cargo fmt 修复)
|
|
||||||
cargo clippy -- -D warnings # Lint 检查(消除所有 warning)
|
- `users`
|
||||||
cargo test # 单元/集成测试
|
- `oauth_accounts`
|
||||||
|
- `devices`
|
||||||
|
- `device_login_tokens`
|
||||||
|
- `auth_events`
|
||||||
|
- `vault_objects`
|
||||||
|
- `vault_object_revisions`
|
||||||
|
|
||||||
|
### 当前模型约束
|
||||||
|
|
||||||
|
- 服务端只保存同步所需的密文对象与版本信息
|
||||||
|
- 搜索、详情、reveal、history 主要在 desktop 本地 vault 中完成
|
||||||
|
- 删除通过对象级 `deleted_at` / tombstone 传播
|
||||||
|
- 历史服务端保留在 `vault_object_revisions`,本地另有 `vault_object_history`
|
||||||
|
|
||||||
|
### 字段职责
|
||||||
|
|
||||||
|
| 字段 | 含义 | 示例 |
|
||||||
|
|------|------|------|
|
||||||
|
| `object_id` | 同步对象标识 | `UUID` |
|
||||||
|
| `object_kind` | 当前对象类别 | `cipher` |
|
||||||
|
| `revision` | 对象版本号 | `12` |
|
||||||
|
| `cipher_version` | 密文封装版本 | `1` |
|
||||||
|
| `ciphertext` | 密文对象载荷 | AES-GCM 密文 |
|
||||||
|
| `content_hash` | 密文内容摘要 | `sha256:...` |
|
||||||
|
| `deleted_at` | 对象删除时间 | `2026-04-14T12:00:00Z` |
|
||||||
|
|
||||||
|
## Google 登录
|
||||||
|
|
||||||
|
当前登录流为 **Google Desktop OAuth**:
|
||||||
|
|
||||||
|
- 桌面端使用系统浏览器拉起 Google 授权
|
||||||
|
- API 服务端持有 Google OAuth client 配置并处理 callback / token exchange
|
||||||
|
- desktop 创建一次性 login session,打开托管登录页后轮询状态
|
||||||
|
- API 校验 Google userinfo 后发放本地 device token
|
||||||
|
|
||||||
|
官网 DMG 正式分发时,服务端至少需要配置:
|
||||||
|
|
||||||
|
- `SECRETS_PUBLIC_BASE_URL`
|
||||||
|
- `GOOGLE_OAUTH_CLIENT_ID`
|
||||||
|
- `GOOGLE_OAUTH_CLIENT_SECRET`
|
||||||
|
- `GOOGLE_OAUTH_REDIRECT_URI`
|
||||||
|
|
||||||
|
推荐约束:
|
||||||
|
|
||||||
|
- `SECRETS_PUBLIC_BASE_URL` 使用用户浏览器实际访问的 HTTPS 官网地址
|
||||||
|
- `GOOGLE_OAUTH_REDIRECT_URI` 配置为 `${SECRETS_PUBLIC_BASE_URL}/auth/google/callback`
|
||||||
|
- `GOOGLE_OAUTH_CLIENT_SECRET` 只保留在服务端环境变量或密钥管理系统中,不入库
|
||||||
|
- Google Cloud Console 中登记的 callback URL 必须与 `GOOGLE_OAUTH_REDIRECT_URI` 完全一致
|
||||||
|
|
||||||
|
## MCP
|
||||||
|
|
||||||
|
本地 MCP 入口由 `crates/desktop-daemon` 提供,默认地址:
|
||||||
|
|
||||||
|
```text
|
||||||
|
http://127.0.0.1:9515/mcp
|
||||||
```
|
```
|
||||||
|
|
||||||
或一次性执行:
|
当前暴露的工具:
|
||||||
|
|
||||||
```bash
|
- `secrets_entry_find`
|
||||||
cargo fmt -- --check && cargo clippy -- -D warnings && cargo test
|
- `secrets_entry_get`
|
||||||
```
|
- `secrets_entry_add`
|
||||||
|
- `secrets_entry_update`
|
||||||
|
- `secrets_entry_delete`
|
||||||
|
- `secrets_entry_restore`
|
||||||
|
- `secrets_secret_add`
|
||||||
|
- `secrets_secret_update`
|
||||||
|
- `secrets_secret_delete`
|
||||||
|
- `secrets_secret_history`
|
||||||
|
- `secrets_secret_rollback`
|
||||||
|
- `target_exec`
|
||||||
|
|
||||||
## CI/CD
|
当前不保留:
|
||||||
|
|
||||||
- Gitea Actions(runners: debian / darwin-arm64 / windows)
|
- `secrets_env_map`
|
||||||
- 触发:`src/**`、`Cargo.toml`、`Cargo.lock` 变更推送到 main
|
|
||||||
- 构建目标:`x86_64-unknown-linux-musl`、`aarch64-apple-darwin`、`x86_64-apple-darwin`(由 ARM mac runner 交叉编译)、`x86_64-pc-windows-msvc`
|
|
||||||
- 新版本自动打 Tag(格式 `secrets-<version>`)并上传二进制与对应 `.sha256` 摘要到 Gitea Release
|
|
||||||
- Release 仅在 Linux/macOS/Windows 构建全部成功后才会从 draft 发布
|
|
||||||
- 通知:飞书 Webhook(`vars.WEBHOOK_URL`)
|
|
||||||
- 所需 secrets/vars:`RELEASE_TOKEN`(Release 上传,Gitea PAT)、`vars.WEBHOOK_URL`(通知,可选)
|
|
||||||
- **注意**:Gitea Actions 的 Secret/Variable 创建时,`data`/`value` 字段需传入**原始值**,不要使用 base64 编码
|
|
||||||
|
|
||||||
## 环境变量
|
### `target_exec`
|
||||||
|
|
||||||
| 变量 | 说明 |
|
`target_exec` 会显式读取 entry 当前 secrets 的真实值,并从 metadata / secrets 派生标准环境变量,例如:
|
||||||
|------|------|
|
|
||||||
| `RUST_LOG` | 日志级别,如 `secrets=debug`、`secrets=trace`(默认 warn) |
|
|
||||||
| `USER` | 审计日志 actor 字段来源,Shell 自动设置,通常无需手动配置 |
|
|
||||||
|
|
||||||
数据库连接通过 `secrets config set-db` 持久化到 `~/.config/secrets/config.toml`,不支持环境变量。
|
- `TARGET_ENTRY_ID`
|
||||||
|
- `TARGET_NAME`
|
||||||
|
- `TARGET_FOLDER`
|
||||||
|
- `TARGET_TYPE`
|
||||||
|
- `TARGET_HOST`
|
||||||
|
- `TARGET_PORT`
|
||||||
|
- `TARGET_USER`
|
||||||
|
- `TARGET_BASE_URL`
|
||||||
|
- `TARGET_API_KEY`
|
||||||
|
- `TARGET_TOKEN`
|
||||||
|
- `TARGET_SSH_KEY`
|
||||||
|
|
||||||
|
## 桌面端
|
||||||
|
|
||||||
|
桌面端当前支持:
|
||||||
|
|
||||||
|
- Google 登录
|
||||||
|
- 自动写入 `Cursor` / `Claude Code` 的 `mcp.json`
|
||||||
|
- 新建条目
|
||||||
|
- 搜索、按 type 筛选
|
||||||
|
- 右侧原地编辑
|
||||||
|
- secret 新增、编辑、删除
|
||||||
|
- secret 明文显示 / 复制
|
||||||
|
- secret 历史查看与回滚
|
||||||
|
- 删除到最近删除与恢复
|
||||||
|
- 登录态仅在当前 desktop 进程内有效,不做自动恢复登录
|
||||||
|
- desktop 进程退出后,本地 daemon 所有工具不可用
|
||||||
|
|
||||||
|
### 配置注入
|
||||||
|
|
||||||
|
桌面端会把本地 daemon 配置写入:
|
||||||
|
|
||||||
|
- `~/.cursor/mcp.json`
|
||||||
|
- `~/.claude/mcp.json`
|
||||||
|
|
||||||
|
写入策略:
|
||||||
|
|
||||||
|
- 保留现有其它 `mcpServers`
|
||||||
|
- 仅覆盖同名 `secrets` 节点
|
||||||
|
|
||||||
|
### 图标与前端 dist(本地 / CI)
|
||||||
|
|
||||||
|
版本库为减小噪音,**不提交** Tauri 生成的多尺寸图标包;但 **`apps/desktop/dist/`** 现在作为桌面端前端静态资源目录,**需要提交到版本库**,以保证新机器 clone 后可直接运行 Tauri desktop。
|
||||||
|
|
||||||
|
- **图标**:仅跟踪 `apps/desktop/src-tauri/icons/icon.png` 作为源图(建议 **1024×1024** PNG)。检出代码后,若需要完整 `icons/`(例如打包、验证窗口/托盘图标),在 **`apps/desktop/src-tauri`** 下执行:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd apps/desktop/src-tauri
|
||||||
|
cargo tauri icon icons/icon.png
|
||||||
|
```
|
||||||
|
|
||||||
|
需已安装 **Tauri CLI**(例如 `cargo install tauri-cli`,或与项目一致的 `cargo-tauri` 版本)。
|
||||||
|
|
||||||
|
- **前端 dist**:`tauri.conf.json` 中 `build.frontendDist` 指向 `../dist`。当前仓库直接跟踪 **`apps/desktop/dist/`** 下的静态页面资源,因此新机器 clone 后无需额外生成前端产物即可运行 `cargo run -p secrets-desktop`。若后续引入独立前端构建链,再单独把这部分切回构建产物管理。
|
||||||
|
|
||||||
|
## 代码规范
|
||||||
|
|
||||||
|
- 业务层优先使用 `anyhow::Result`
|
||||||
|
- 避免生产路径 `unwrap()`
|
||||||
|
- 使用 `tokio` + `sqlx` async
|
||||||
|
- SQL 使用参数绑定,不要手拼用户输入
|
||||||
|
- 运维日志使用 `tracing`
|
||||||
|
- 变更后优先跑最小必要验证,不要只改不测
|
||||||
|
|
||||||
|
## CI / 脚本
|
||||||
|
|
||||||
|
- `.gitea/workflows/secrets.yml` 现在是 v3 workspace 级 CI
|
||||||
|
- `scripts/release-check.sh` 只做 workspace 质量检查
|
||||||
|
- `deploy/.env.example` 反映当前 v3 API / daemon / desktop 登录配置
|
||||||
|
|
||||||
|
## 安全约束
|
||||||
|
|
||||||
|
- 不要把 Google `client_secret` 提交到受版本控制的配置文件中
|
||||||
|
- 不要把 device token、数据库密码、真实生产密钥提交入库
|
||||||
|
- 数据库生产环境优先使用 `verify-full`
|
||||||
|
- AI 审查时,不要把“随机高熵 token 明文存储”机械地当成密码学问题处理,必须结合当前架构和威胁模型判断
|
||||||
|
|||||||
56
CONTRIBUTING.md
Normal file
56
CONTRIBUTING.md
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
# Contributing
|
||||||
|
|
||||||
|
## 版本控制
|
||||||
|
|
||||||
|
本仓库使用 **[Jujutsu (jj)](https://jj-vcs.dev/)**。请勿使用 `git` 命令。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
jj log # 查看历史
|
||||||
|
jj status # 查看状态
|
||||||
|
jj new # 创建新变更
|
||||||
|
jj commit # 提交
|
||||||
|
jj rebase # 变基
|
||||||
|
jj squash # 合并提交
|
||||||
|
jj git push # 推送到远端
|
||||||
|
```
|
||||||
|
|
||||||
|
详见 [AGENTS.md](AGENTS.md) 的「版本控制」章节。
|
||||||
|
|
||||||
|
## 本地开发
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 复制环境变量
|
||||||
|
cp deploy/.env.example .env
|
||||||
|
|
||||||
|
# 填写数据库连接等配置后
|
||||||
|
cargo build
|
||||||
|
cargo test --locked
|
||||||
|
```
|
||||||
|
|
||||||
|
## 提交前检查
|
||||||
|
|
||||||
|
每次提交前必须通过:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo fmt -- --check
|
||||||
|
cargo clippy --locked -- -D warnings
|
||||||
|
cargo test --locked
|
||||||
|
```
|
||||||
|
|
||||||
|
或使用脚本:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/release-check.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## 发版规则
|
||||||
|
|
||||||
|
当前仓库已切换到 v3 架构,不再围绕 `secrets-mcp` 做单独发版。
|
||||||
|
|
||||||
|
提交前请至少保证:
|
||||||
|
|
||||||
|
1. `cargo fmt -- --check`
|
||||||
|
2. `cargo clippy --locked -- -D warnings`
|
||||||
|
3. `cargo test --locked`
|
||||||
|
|
||||||
|
详见 [AGENTS.md](AGENTS.md) 中最新的仓库说明。
|
||||||
3815
Cargo.lock
generated
3815
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
81
Cargo.toml
81
Cargo.toml
@@ -1,31 +1,54 @@
|
|||||||
[package]
|
[workspace]
|
||||||
name = "secrets"
|
members = [
|
||||||
version = "0.7.5"
|
"apps/api",
|
||||||
|
"apps/desktop/src-tauri",
|
||||||
|
"crates/application",
|
||||||
|
"crates/client-integrations",
|
||||||
|
"crates/crypto",
|
||||||
|
"crates/desktop-daemon",
|
||||||
|
"crates/device-auth",
|
||||||
|
"crates/domain",
|
||||||
|
"crates/infrastructure-db",
|
||||||
|
]
|
||||||
|
resolver = "2"
|
||||||
|
|
||||||
|
[workspace.package]
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
|
|
||||||
[dependencies]
|
[workspace.dependencies]
|
||||||
aes-gcm = "0.10.3"
|
# Async runtime
|
||||||
anyhow = "1.0.102"
|
tokio = { version = "^1.50.0", features = ["rt-multi-thread", "macros", "fs", "io-util", "process", "signal"] }
|
||||||
argon2 = { version = "0.5.3", features = ["std"] }
|
|
||||||
chrono = { version = "0.4.44", features = ["serde"] }
|
# Database
|
||||||
clap = { version = "4.6.0", features = ["derive"] }
|
sqlx = { version = "^0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "sqlite", "uuid", "json", "chrono"] }
|
||||||
dirs = "6.0.0"
|
|
||||||
flate2 = "1.1.9"
|
# Serialization
|
||||||
keyring = { version = "3.6.3", features = ["apple-native", "windows-native", "linux-native"] }
|
serde = { version = "^1.0.228", features = ["derive"] }
|
||||||
rand = "0.10.0"
|
serde_json = "^1.0.149"
|
||||||
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "json"] }
|
serde_yaml = "^0.9"
|
||||||
rpassword = "7.4.0"
|
toml = "^1.0.7"
|
||||||
self-replace = "1.5.0"
|
|
||||||
semver = "1.0.27"
|
# Crypto
|
||||||
serde = { version = "1.0.228", features = ["derive"] }
|
aes-gcm = "^0.10.3"
|
||||||
serde_json = "1.0.149"
|
sha2 = "^0.10.9"
|
||||||
sha2 = "0.10.9"
|
rand = "^0.10.0"
|
||||||
sqlx = { version = "0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] }
|
hex = "0.4"
|
||||||
tar = "0.4.44"
|
|
||||||
tempfile = "3.19"
|
# Utils
|
||||||
tokio = { version = "1.50.0", features = ["full"] }
|
anyhow = "^1.0.102"
|
||||||
toml = "1.0.7"
|
thiserror = "^2"
|
||||||
tracing = "0.1"
|
chrono = { version = "^0.4.44", features = ["serde"] }
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
uuid = { version = "^1.22.0", features = ["serde", "v4"] }
|
||||||
uuid = { version = "1.22.0", features = ["serde"] }
|
tracing = "^0.1"
|
||||||
zip = { version = "8.2.0", default-features = false, features = ["deflate"] }
|
tracing-subscriber = { version = "^0.3", features = ["env-filter"] }
|
||||||
|
dotenvy = "^0.15"
|
||||||
|
|
||||||
|
# HTTP
|
||||||
|
# system-proxy:与浏览器一致,读取 macOS/Windows 系统代理(禁用 default 后须显式开启,否则 OAuth 出站不走 Clash 等)
|
||||||
|
reqwest = { version = "^0.12", default-features = false, features = ["rustls-tls", "json", "system-proxy"] }
|
||||||
|
axum = "0.8"
|
||||||
|
http = "1"
|
||||||
|
url = "2"
|
||||||
|
rmcp = { version = "1", features = ["server", "macros", "transport-streamable-http-server", "schemars"] }
|
||||||
|
tauri = { version = "2", features = [] }
|
||||||
|
tauri-build = { version = "2", features = [] }
|
||||||
|
|||||||
421
README.md
421
README.md
@@ -1,320 +1,223 @@
|
|||||||
# secrets
|
# Secrets
|
||||||
|
|
||||||
跨设备密钥与配置管理 CLI,基于 Rust + PostgreSQL 18。
|
这是 v3 架构的仓库,当前主路径已经收敛为:
|
||||||
|
|
||||||
将服务器信息、服务凭据统一存入数据库,供本地工具和 AI 读取上下文。敏感数据(`encrypted` 字段)使用 AES-256-GCM 加密存储,主密钥由 Argon2id 从主密码派生并存入系统钥匙串。
|
- `apps/api`:远端 JSON API
|
||||||
|
- `apps/desktop/src-tauri`:桌面客户端
|
||||||
|
- `crates/desktop-daemon`:本地 MCP 入口
|
||||||
|
- `crates/application` / `domain` / `infrastructure-db`:业务与数据层
|
||||||
|
|
||||||
## 安装
|
## 本地开发
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cargo build --release
|
cp deploy/.env.example .env
|
||||||
# 或从 Release 页面下载预编译二进制
|
|
||||||
|
# 远端 API
|
||||||
|
cargo run -p secrets-api --bin secrets-api
|
||||||
|
|
||||||
|
# 本地 daemon
|
||||||
|
cargo run -p secrets-desktop-daemon
|
||||||
|
|
||||||
|
# 桌面客户端
|
||||||
|
cargo run -p secrets-desktop
|
||||||
```
|
```
|
||||||
|
|
||||||
已有旧版本时,可执行 `secrets upgrade` 自动下载最新版并替换。该命令会校验 Release 附带的 `.sha256` 摘要后再安装。
|
说明:
|
||||||
|
|
||||||
## 首次使用(每台设备各执行一次)
|
- `apps/desktop/src-tauri/tauri.conf.json` 中 `build.frontendDist` 指向 `apps/desktop/dist`
|
||||||
|
- 当前仓库会直接提交 `apps/desktop/dist/` 下的桌面端静态资源
|
||||||
|
- 因此新机器 clone 后,无需额外前端构建步骤即可启动 desktop
|
||||||
|
- 官网 DMG 正式分发不依赖本地 `client_secret_*.json`
|
||||||
|
- Google OAuth 凭据只配置在 API 服务端,desktop 通过浏览器完成托管登录
|
||||||
|
|
||||||
|
## 官网 DMG 的服务端 OAuth 配置
|
||||||
|
|
||||||
|
官网 DMG 正式分发时,**Google OAuth 只配置在 API 服务端**。桌面端不需要本地 `client_secret_*.json`,也不直接向 Google 换 token。
|
||||||
|
|
||||||
|
建议先复制 `deploy/.env.example` 为 `.env`,然后至少配置以下变量:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 1. 配置数据库连接(会先验证连接可用再写入)
|
SECRETS_PUBLIC_BASE_URL=https://secrets.example.com
|
||||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets"
|
GOOGLE_OAUTH_CLIENT_ID=your-google-oauth-client-id.apps.googleusercontent.com
|
||||||
|
GOOGLE_OAUTH_CLIENT_SECRET=your-google-oauth-client-secret
|
||||||
# 2. 初始化主密钥(提示输入至少 8 位的主密码,派生后存入 OS 钥匙串)
|
GOOGLE_OAUTH_REDIRECT_URI=https://secrets.example.com/auth/google/callback
|
||||||
secrets init
|
|
||||||
```
|
```
|
||||||
|
|
||||||
主密码不会存储,仅用于派生主密钥,且至少需 8 位。同一主密码在所有设备上会得到相同主密钥(salt 存于数据库,首台设备生成后共享)。
|
变量含义:
|
||||||
|
|
||||||
**主密钥存储**:macOS → Keychain;Windows → Credential Manager;Linux → keyutils(会话级,重启后需再次 `secrets init`)。
|
- `SECRETS_PUBLIC_BASE_URL`:桌面端打开浏览器时访问的 API 外网基地址,必须是用户浏览器能访问到的公开地址
|
||||||
|
- `GOOGLE_OAUTH_CLIENT_ID`:Google Cloud Console 中为服务端登录流程配置的 OAuth Client ID
|
||||||
|
- `GOOGLE_OAUTH_CLIENT_SECRET`:对应的 Client Secret,只能保留在服务端
|
||||||
|
- `GOOGLE_OAUTH_REDIRECT_URI`:Google 登录完成后回调到 API 的地址,必须与 Google Console 中登记的回调地址完全一致
|
||||||
|
|
||||||
**从旧版(明文存储)升级**:升级后首次运行需执行 `secrets init` 即可(明文记录需手动重新 add 或通过 update 更新)。
|
配置步骤建议:
|
||||||
|
|
||||||
## AI Agent 快速指南
|
1. 在 Google Cloud Console 创建或选择 OAuth Client
|
||||||
|
2. 把授权回调地址加入允许列表,例如 `https://secrets.example.com/auth/google/callback`
|
||||||
|
3. 把上面的 4 个变量配置到 API 服务的运行环境中
|
||||||
|
4. 确认 `SECRETS_PUBLIC_BASE_URL` 与 `GOOGLE_OAUTH_REDIRECT_URI` 使用同一公开域名
|
||||||
|
5. 重启 API 服务后,再用 desktop / DMG 验证浏览器登录流程
|
||||||
|
|
||||||
这个 CLI 以 AI 使用优先设计。核心路径只有一条:**读取用 `search`,写入用 `add` / `update`**。
|
注意:
|
||||||
|
|
||||||
### 第一步:发现有哪些数据
|
- `GOOGLE_OAUTH_CLIENT_SECRET` 不要提交到仓库
|
||||||
|
- `GOOGLE_OAUTH_REDIRECT_URI` 不要写成 `localhost`,正式分发应使用官网可访问域名
|
||||||
|
- 如果 API 部署在反向代理后面,`SECRETS_PUBLIC_BASE_URL` 应填写用户实际访问的 HTTPS 地址,而不是内网监听地址
|
||||||
|
|
||||||
|
## 当前能力
|
||||||
|
|
||||||
|
- 桌面端使用系统浏览器完成 Google Desktop OAuth 登录
|
||||||
|
- 登录成功后向 API 注册设备,并在当前桌面进程内维护登录会话
|
||||||
|
- 本地 daemon 提供显式拆分的 MCP 工具:
|
||||||
|
- `secrets_entry_find` / `secrets_entry_get`
|
||||||
|
- `secrets_entry_add` / `secrets_entry_update` / `secrets_entry_delete` / `secrets_entry_restore`
|
||||||
|
- `secrets_secret_add` / `secrets_secret_update` / `secrets_secret_delete`
|
||||||
|
- `secrets_secret_history` / `secrets_secret_rollback`
|
||||||
|
- `target_exec`
|
||||||
|
- 桌面端会自动把本地 daemon MCP 配置写入 `Cursor` 与 `Claude Code`
|
||||||
|
- 桌面端支持条目新建、搜索、按 type 筛选、元数据编辑、最近删除与恢复
|
||||||
|
- 桌面端支持 secret 新增、编辑、删除、明文显示、真实复制、历史查看与回滚
|
||||||
|
- 不保留 `secrets_env_map`
|
||||||
|
- 不做自动恢复登录;重启 app 后必须重新登录
|
||||||
|
|
||||||
|
## 提交前检查
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 列出所有记录摘要(默认最多 50 条,安全起步)
|
cargo fmt -- --check
|
||||||
secrets search --summary --limit 20
|
cargo clippy --locked -- -D warnings
|
||||||
|
cargo test --locked
|
||||||
# 按 namespace 过滤
|
|
||||||
secrets search -n refining --summary --limit 20
|
|
||||||
|
|
||||||
# 按最近更新排序
|
|
||||||
secrets search --sort updated --limit 10 --summary
|
|
||||||
```
|
```
|
||||||
|
|
||||||
`--summary` 只返回轻量字段(namespace、kind、name、tags、desc、updated_at),不含完整 metadata 和 secrets。
|
## PostgreSQL TLS 加固
|
||||||
|
|
||||||
### 第二步:精确读取单条记录
|
- 推荐将数据库域名单独设置为 `db.refining.ltd`,服务域名保持 `secrets.refining.app`。
|
||||||
|
- 数据库证书建议使用可校验链路(如 Let's Encrypt 或私有 CA),并保证证书 `SAN` 包含 `db.refining.ltd`。
|
||||||
|
- PostgreSQL 侧建议使用 `hostssl` 规则限制应用来源(如 `47.238.146.244/32`),逐步移除公网明文 `host` 访问。
|
||||||
|
- 应用端推荐 `SECRETS_DATABASE_SSL_MODE=verify-full`;仅在过渡阶段可临时用 `verify-ca`。
|
||||||
|
- 可执行运维步骤见 `[deploy/postgres-tls-hardening.md](deploy/postgres-tls-hardening.md)`。
|
||||||
|
|
||||||
```bash
|
## MCP 与 AI 工作流(v3)
|
||||||
# 精确定位(namespace + kind + name 三元组)
|
|
||||||
secrets search -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# 获取完整记录(secrets 保持加密占位)
|
当前 v3 以 **桌面端 + 本地 daemon** 为主路径:
|
||||||
secrets search -n refining --kind service --name gitea -o json
|
|
||||||
|
|
||||||
# 直接提取单个 metadata 字段值(最短路径)
|
- 桌面端登录态仅在当前进程内有效,不持久化 `device token`
|
||||||
secrets search -n refining --kind service --name gitea -f metadata.url
|
- 本地 daemon 默认监听 `http://127.0.0.1:9515/mcp`
|
||||||
|
- daemon 通过活跃 desktop 进程提供的本地会话转发访问 API;desktop 进程退出后所有工具不可用
|
||||||
|
- `target_exec` 会显式读取真实 secret 值后再生成 `TARGET_`* 环境变量
|
||||||
|
- 不保留 `secrets_env_map`
|
||||||
|
|
||||||
# 同时提取多个 metadata 字段
|
### Canonical MCP 工具
|
||||||
secrets search -n refining --kind service --name gitea \
|
|
||||||
-f metadata.url -f metadata.default_org
|
|
||||||
|
|
||||||
# 需要 secrets 时,改用 inject / run
|
|
||||||
secrets inject -n refining --kind service --name gitea
|
|
||||||
secrets run -n refining --kind service --name gitea -- printenv
|
|
||||||
```
|
|
||||||
|
|
||||||
`search` 只负责发现、定位和读取 metadata,不直接展示 secrets。
|
| 工具 | 说明 |
|
||||||
|
| ------------------------- | --------------------------------------------------------- |
|
||||||
|
| `secrets_entry_find` | 从 desktop 已解锁本地 vault 搜索对象,支持 `query` / `folder` / `type` |
|
||||||
|
| `secrets_entry_get` | 读取单条本地对象,并返回当前 secrets 的真实值 |
|
||||||
|
| `secrets_entry_add` | 在本地 vault 创建对象,可选附带初始 secrets |
|
||||||
|
| `secrets_entry_update` | 更新本地对象的 folder / type / name / metadata |
|
||||||
|
| `secrets_entry_delete` | 将本地对象标记为删除 |
|
||||||
|
| `secrets_entry_restore` | 恢复本地已删除对象 |
|
||||||
|
| `secrets_secret_add` | 向已有本地对象新增 secret |
|
||||||
|
| `secrets_secret_update` | 更新本地 secret 名称、类型或内容 |
|
||||||
|
| `secrets_secret_delete` | 删除单个本地 secret |
|
||||||
|
| `secrets_secret_history` | 查看单个本地 secret 的历史版本 |
|
||||||
|
| `secrets_secret_rollback` | 将单个本地 secret 回滚到指定版本 |
|
||||||
|
| `target_exec` | 用本地对象的 metadata 和 secrets 生成 `TARGET_`* 环境变量并执行本地命令 |
|
||||||
|
|
||||||
### 输出格式
|
|
||||||
|
|
||||||
| 场景 | 推荐命令 |
|
## AI 客户端配置
|
||||||
|------|----------|
|
|
||||||
| AI 解析 / 管道处理 | `-o json` 或 `-o json-compact` |
|
|
||||||
| 注入 secrets 到环境变量 | `inject` / `run` |
|
|
||||||
| 人类查看 | 默认 `text`(TTY 下自动启用) |
|
|
||||||
| 非 TTY(管道/重定向) | 自动 `json-compact` |
|
|
||||||
|
|
||||||
说明:`text` 输出中的时间会按当前机器本地时区显示;`json/json-compact` 继续使用 UTC(RFC3339 风格)以便脚本和 AI 稳定解析。
|
桌面端会自动把本地 daemon 写入以下配置:
|
||||||
|
|
||||||
```bash
|
- `~/.cursor/mcp.json`
|
||||||
# 管道直接 jq 解析(非 TTY 自动 json-compact)
|
- `~/.claude/mcp.json`
|
||||||
secrets search -n refining --kind service | jq '.[].name'
|
|
||||||
|
|
||||||
# 需要 secrets 时,使用 inject / run
|
写入示例:
|
||||||
secrets inject -n refining --kind service --name gitea > ~/.config/gitea/secrets.env
|
|
||||||
secrets run -n refining --kind service --name gitea -- ./deploy.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
## 完整命令参考
|
```json
|
||||||
|
{
|
||||||
```bash
|
"mcpServers": {
|
||||||
# 查看帮助(包含各子命令 EXAMPLES)
|
"secrets": {
|
||||||
secrets --help
|
"url": "http://127.0.0.1:9515/mcp"
|
||||||
secrets init --help # 主密钥初始化
|
}
|
||||||
secrets search --help
|
}
|
||||||
secrets add --help
|
}
|
||||||
secrets update --help
|
|
||||||
secrets delete --help
|
|
||||||
secrets config --help
|
|
||||||
secrets upgrade --help # 检查并更新 CLI 版本
|
|
||||||
|
|
||||||
# ── search ──────────────────────────────────────────────────────────────────
|
|
||||||
secrets search --summary --limit 20 # 发现概览
|
|
||||||
secrets search -n refining --kind service # 按 namespace + kind
|
|
||||||
secrets search -n refining --kind service --name gitea # 精确查找
|
|
||||||
secrets search -q mqtt # 关键词模糊搜索
|
|
||||||
secrets search --tag hongkong # 按 tag 过滤
|
|
||||||
secrets search -n refining --kind service --name gitea -f metadata.url # 提取 metadata 字段
|
|
||||||
secrets search -n refining --kind service --name gitea -o json # 完整记录(secrets 保持占位)
|
|
||||||
secrets search --sort updated --limit 10 --summary # 最近改动
|
|
||||||
secrets search -n refining --summary --limit 10 --offset 10 # 翻页
|
|
||||||
|
|
||||||
# ── add ──────────────────────────────────────────────────────────────────────
|
|
||||||
secrets add -n refining --kind server --name my-server \
|
|
||||||
--tag aliyun --tag shanghai \
|
|
||||||
-m ip=47.117.131.22 -m desc="Aliyun Shanghai ECS" \
|
|
||||||
-s username=root -s ssh_key=@./keys/server.pem
|
|
||||||
|
|
||||||
# 多行文件直接写入嵌套 secret 字段
|
|
||||||
secrets add -n refining --kind server --name my-server \
|
|
||||||
-s credentials:content@./keys/server.pem
|
|
||||||
|
|
||||||
# 使用 typed JSON 写入 secret(布尔、数字、数组、对象)
|
|
||||||
secrets add -n refining --kind service --name deploy-bot \
|
|
||||||
-s enabled:=true \
|
|
||||||
-s retry_count:=3 \
|
|
||||||
-s scopes:='["repo","workflow"]' \
|
|
||||||
-s extra:='{"region":"ap-east-1","verify_tls":true}'
|
|
||||||
|
|
||||||
secrets add -n refining --kind service --name gitea \
|
|
||||||
--tag gitea \
|
|
||||||
-m url=https://gitea.refining.dev -m default_org=refining \
|
|
||||||
-s token=<token>
|
|
||||||
|
|
||||||
# ── update ───────────────────────────────────────────────────────────────────
|
|
||||||
secrets update -n refining --kind server --name my-server -m ip=10.0.0.1
|
|
||||||
secrets update -n refining --kind service --name gitea --add-tag production -s token=<new>
|
|
||||||
secrets update -n refining --kind service --name mqtt --remove-meta old_port --remove-secret old_key
|
|
||||||
secrets update -n refining --kind server --name my-server --remove-secret credentials:content
|
|
||||||
|
|
||||||
# ── delete ───────────────────────────────────────────────────────────────────
|
|
||||||
secrets delete -n refining --kind service --name legacy-mqtt
|
|
||||||
|
|
||||||
# ── init ─────────────────────────────────────────────────────────────────────
|
|
||||||
secrets init # 主密钥初始化(每台设备一次,主密码至少 8 位,派生后存钥匙串)
|
|
||||||
|
|
||||||
# ── config ───────────────────────────────────────────────────────────────────
|
|
||||||
secrets config set-db "postgres://postgres:<password>@<host>:<port>/secrets" # 先验证再写入
|
|
||||||
secrets config show # 密码脱敏展示
|
|
||||||
secrets config path # 打印配置文件路径
|
|
||||||
|
|
||||||
# ── upgrade ──────────────────────────────────────────────────────────────────
|
|
||||||
secrets upgrade --check # 仅检查是否有新版本
|
|
||||||
secrets upgrade # 下载、校验 SHA-256 并安装最新版(从 Gitea Release)
|
|
||||||
|
|
||||||
# ── 调试 ──────────────────────────────────────────────────────────────────────
|
|
||||||
secrets --verbose search -q mqtt
|
|
||||||
RUST_LOG=secrets=trace secrets search
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## 数据模型
|
## 数据模型
|
||||||
|
|
||||||
单张 `secrets` 表,首次连接自动建表;同时自动创建 `audit_log` 表,记录所有写操作。
|
当前 v3 已切到**零知识同步模型**:
|
||||||
|
|
||||||
| 字段 | 说明 |
|
- 服务端保存 `vault_objects` 与 `vault_object_revisions`
|
||||||
|------|------|
|
- desktop 本地保存 `vault_objects`、`vault_object_history`、`pending_changes`、`sync_state`
|
||||||
| `namespace` | 一级隔离,如 `refining`、`ricnsmart` |
|
- 搜索、详情、reveal、history 主要在本地已解锁 vault 上完成
|
||||||
| `kind` | 记录类型,如 `server`、`service`(可自由扩展) |
|
- 服务端负责 `auth/device` 与 `/sync/`*,不再承担明文搜索与明文 reveal
|
||||||
| `name` | 人类可读唯一标识 |
|
|
||||||
| `tags` | 多维标签,如 `["aliyun","hongkong"]` |
|
|
||||||
| `metadata` | 明文描述信息(ip、desc、domains 等) |
|
|
||||||
| `encrypted` | 敏感凭据(ssh_key、password、token 等),AES-256-GCM 加密存储 |
|
|
||||||
|
|
||||||
`-m` / `--meta` 写入 `metadata`,`-s` / `--secret` 写入 `encrypted`。支持 `key=value`、`key=@file`、`key:=<json>`,也支持 `credentials:content@./key.pem` 这种嵌套字段文件写入语法,避免手动转义多行文本;删除时也支持 `--remove-secret credentials:content` 和 `--remove-meta credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。
|
主要表:
|
||||||
|
|
||||||
### `-m` / `--meta` JSON 语法速查
|
- `users`
|
||||||
|
- `oauth_accounts`
|
||||||
|
- `devices`
|
||||||
|
- `device_login_tokens`
|
||||||
|
- `auth_events`
|
||||||
|
- `vault_objects`
|
||||||
|
- `vault_object_revisions`
|
||||||
|
|
||||||
`-m` 和 `-s` 走的是同一套解析规则,只是写入位置不同:`-m` 写到明文 `metadata`,适合端口、开关、标签、描述性配置等非敏感信息。
|
字段职责:
|
||||||
|
|
||||||
| 目标值 | 写法示例 | 实际存入 |
|
|
||||||
|------|------|------|
|
|
||||||
| 普通字符串 | `-m url=https://gitea.refining.dev` | `"https://gitea.refining.dev"` |
|
|
||||||
| 文件内容字符串 | `-m notes=@./service-notes.txt` | `"..."` |
|
|
||||||
| 布尔值 | `-m enabled:=true` | `true` |
|
|
||||||
| 数字 | `-m port:=3000` | `3000` |
|
|
||||||
| `null` | `-m deprecated_at:=null` | `null` |
|
|
||||||
| 数组 | `-m domains:='["gitea.refining.dev","git.refining.dev"]'` | `["gitea.refining.dev","git.refining.dev"]` |
|
|
||||||
| 对象 | `-m tls:='{"enabled":true,"redirect_http":true}'` | `{"enabled":true,"redirect_http":true}` |
|
|
||||||
| 嵌套路径 + JSON | `-m deploy:strategy:='{"type":"rolling","batch":2}'` | `{"deploy":{"strategy":{"type":"rolling","batch":2}}}` |
|
|
||||||
|
|
||||||
常见规则:
|
| 位置 | 字段 | 说明 |
|
||||||
|
| ------------------------ | ------------------------- | --------------------- |
|
||||||
|
| `vault_objects` | `object_id` | 同步对象标识 |
|
||||||
|
| `vault_objects` | `object_kind` | 当前对象类别,当前主要为 `cipher` |
|
||||||
|
| `vault_objects` | `revision` | 服务端对象版本 |
|
||||||
|
| `vault_objects` | `ciphertext` | 密文对象载荷 |
|
||||||
|
| `vault_objects` | `content_hash` | 密文摘要 |
|
||||||
|
| `vault_objects` | `deleted_at` | 对象级删除标记 |
|
||||||
|
| `vault_object_revisions` | `revision` / `ciphertext` | 服务端对象历史版本 |
|
||||||
|
|
||||||
- `=` 表示按字符串存储。
|
|
||||||
- `:=` 表示按 JSON 解析。
|
|
||||||
- shell 中数组和对象建议整体用单引号包住。
|
|
||||||
- 嵌套字段继续用冒号分隔:`-m runtime:max_open_conns:=20`。
|
|
||||||
|
|
||||||
示例:新增一条带 typed metadata 的记录
|
## 认证与事件
|
||||||
|
|
||||||
```bash
|
当前登录流为 Google Desktop OAuth:
|
||||||
secrets add -n refining --kind service --name gitea \
|
|
||||||
-m url=https://gitea.refining.dev \
|
|
||||||
-m port:=3000 \
|
|
||||||
-m enabled:=true \
|
|
||||||
-m domains:='["gitea.refining.dev","git.refining.dev"]' \
|
|
||||||
-m tls:='{"enabled":true,"redirect_http":true}'
|
|
||||||
```
|
|
||||||
|
|
||||||
示例:更新已有记录中的嵌套 metadata
|
- 桌面端使用系统浏览器拉起 Google 授权
|
||||||
|
- API 服务端负责发起 OAuth、处理 callback、校验 Google userinfo
|
||||||
```bash
|
- desktop 通过创建一次性 login session 并轮询状态获取 `device token`
|
||||||
secrets update -n refining --kind service --name gitea \
|
- 登录与设备活动写入 `auth_events`
|
||||||
-m deploy:strategy:='{"type":"rolling","batch":2}' \
|
|
||||||
-m runtime:max_open_conns:=20
|
|
||||||
```
|
|
||||||
|
|
||||||
### `-s` / `--secret` JSON 语法速查
|
|
||||||
|
|
||||||
当你希望写入的不是普通字符串,而是 `true`、`123`、`null`、数组或对象时,用 `:=`,右侧按 JSON 解析。
|
|
||||||
|
|
||||||
| 目标值 | 写法示例 | 实际存入 |
|
|
||||||
|------|------|------|
|
|
||||||
| 普通字符串 | `-s token=abc123` | `"abc123"` |
|
|
||||||
| 文件内容字符串 | `-s ssh_key=@./id_ed25519` | `"-----BEGIN ..."` |
|
|
||||||
| 布尔值 | `-s enabled:=true` | `true` |
|
|
||||||
| 数字 | `-s retry_count:=3` | `3` |
|
|
||||||
| `null` | `-s deprecated_at:=null` | `null` |
|
|
||||||
| 数组 | `-s scopes:='["repo","workflow"]'` | `["repo","workflow"]` |
|
|
||||||
| 对象 | `-s extra:='{"region":"ap-east-1","verify_tls":true}'` | `{"region":"ap-east-1","verify_tls":true}` |
|
|
||||||
| 嵌套路径 + JSON | `-s auth:policy:='{"mfa":true,"ttl":3600}'` | `{"auth":{"policy":{"mfa":true,"ttl":3600}}}` |
|
|
||||||
|
|
||||||
常见规则:
|
|
||||||
|
|
||||||
- `=` 表示按字符串存储,不做 JSON 解析。
|
|
||||||
- `:=` 表示按 JSON 解析,适合布尔、数字、数组、对象、`null`。
|
|
||||||
- shell 里对象和数组通常要整体加引号,推荐单引号:`-s flags:='["a","b"]'`。
|
|
||||||
- 嵌套字段继续用冒号分隔:`-s credentials:enabled:=true`。
|
|
||||||
- 如果你就是想存一个“JSON 字符串字面量”,可以写成 `-s note:='"hello"'`,但大多数字符串场景直接用 `=` 更直观。
|
|
||||||
|
|
||||||
示例:新增一条同时包含字符串、文件、布尔、数组、对象的记录
|
|
||||||
|
|
||||||
```bash
|
|
||||||
secrets add -n refining --kind service --name deploy-bot \
|
|
||||||
-s token=abc123 \
|
|
||||||
-s ssh_key=@./keys/deploy-bot.pem \
|
|
||||||
-s enabled:=true \
|
|
||||||
-s scopes:='["repo","workflow"]' \
|
|
||||||
-s policy:='{"ttl":3600,"mfa":true}'
|
|
||||||
```
|
|
||||||
|
|
||||||
示例:更新已有记录中的嵌套 JSON 字段
|
|
||||||
|
|
||||||
```bash
|
|
||||||
secrets update -n refining --kind service --name deploy-bot \
|
|
||||||
-s auth:config:='{"issuer":"gitea","rotate":true}' \
|
|
||||||
-s auth:retry:=5
|
|
||||||
```
|
|
||||||
|
|
||||||
## 审计日志
|
|
||||||
|
|
||||||
`add`、`update`、`delete` 操作成功后自动向 `audit_log` 表写入一条记录,包含操作类型、操作对象和变更摘要(不含 secret 值)。操作者取自 `$USER` 环境变量。
|
|
||||||
|
|
||||||
```sql
|
|
||||||
-- 查看最近 20 条审计记录
|
|
||||||
SELECT action, namespace, kind, name, actor, detail, created_at
|
|
||||||
FROM audit_log
|
|
||||||
ORDER BY created_at DESC
|
|
||||||
LIMIT 20;
|
|
||||||
```
|
|
||||||
|
|
||||||
## 项目结构
|
## 项目结构
|
||||||
|
|
||||||
```
|
```text
|
||||||
src/
|
Cargo.toml
|
||||||
main.rs # CLI 入口(clap),含各子命令 after_help 示例
|
apps/
|
||||||
output.rs # OutputMode 枚举 + TTY 检测
|
api/ # 远端 JSON API
|
||||||
config.rs # 配置读写(~/.config/secrets/config.toml)
|
desktop/src-tauri/ # Tauri 桌面端
|
||||||
db.rs # 连接池 + auto-migrate(secrets + audit_log + kv_config)
|
crates/
|
||||||
crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串
|
application/ # v3 应用服务
|
||||||
models.rs # Secret 结构体
|
client-integrations/ # Cursor / Claude Code mcp.json 注入
|
||||||
audit.rs # 审计日志写入(audit_log 表)
|
crypto/ # 通用加密辅助
|
||||||
commands/
|
desktop-daemon/ # 本地 MCP daemon
|
||||||
init.rs # 主密钥初始化(首次/新设备)
|
device-auth/ # Desktop OAuth / device token 辅助
|
||||||
add.rs # upsert,支持 -o json
|
domain/ # 领域模型
|
||||||
config.rs # config set-db/show/path
|
infrastructure-db/ # PostgreSQL 连接与迁移
|
||||||
search.rs # 多条件查询,支持 -f/-o/--summary/--limit/--offset/--sort
|
deploy/
|
||||||
delete.rs # 删除
|
.env.example
|
||||||
update.rs # 增量更新(合并 tags/metadata/encrypted)
|
secrets-mcp.service
|
||||||
upgrade.rs # 从 Gitea Release 自更新
|
postgres-tls-hardening.md
|
||||||
scripts/
|
scripts/
|
||||||
setup-gitea-actions.sh # 配置 Gitea Actions 变量与 Secrets
|
release-check.sh
|
||||||
|
setup-gitea-actions.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
## CI/CD(Gitea Actions)
|
## CI/CD(Gitea Actions)
|
||||||
|
|
||||||
推送 `main` 分支时自动:fmt/clippy/test 检查 → Linux/macOS/Windows 构建 → 上传二进制与 `.sha256` 摘要 → 所有平台成功后发布 Release。
|
当前以 workspace 级检查为主,见 `[.gitea/workflows/secrets.yml](.gitea/workflows/secrets.yml)`。
|
||||||
|
|
||||||
**首次使用需配置 Actions 变量和 Secrets:**
|
提交前建议直接运行:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 需有 ~/.config/gitea/config.env(GITEA_URL、GITEA_TOKEN、GITEA_WEBHOOK_URL)
|
./scripts/release-check.sh
|
||||||
./scripts/setup-gitea-actions.sh
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- `RELEASE_TOKEN`(Secret):Gitea PAT,用于创建 Release 上传二进制
|
详见 [AGENTS.md](AGENTS.md)(发版规则、代码规范)。
|
||||||
- `WEBHOOK_URL`(Variable):飞书通知,可选
|
|
||||||
- **注意**:Secret/Variable 的 `data`/`value` 字段需传入原始值,不要 base64 编码
|
|
||||||
|
|
||||||
当前 Release 预编译产物覆盖:
|
|
||||||
- Linux `x86_64-unknown-linux-musl`
|
|
||||||
- macOS Apple Silicon `aarch64-apple-darwin`
|
|
||||||
- macOS Intel `x86_64-apple-darwin`(由 ARM mac runner 交叉编译)
|
|
||||||
- Windows `x86_64-pc-windows-msvc`
|
|
||||||
|
|
||||||
详见 [AGENTS.md](AGENTS.md)。
|
|
||||||
30
apps/api/Cargo.toml
Normal file
30
apps/api/Cargo.toml
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-api"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "secrets-api"
|
||||||
|
path = "src/main.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
axum.workspace = true
|
||||||
|
dotenvy.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
|
sqlx.workspace = true
|
||||||
|
tokio.workspace = true
|
||||||
|
tracing.workspace = true
|
||||||
|
tracing-subscriber.workspace = true
|
||||||
|
uuid.workspace = true
|
||||||
|
chrono.workspace = true
|
||||||
|
reqwest.workspace = true
|
||||||
|
sha2.workspace = true
|
||||||
|
url.workspace = true
|
||||||
|
base64 = "0.22.1"
|
||||||
|
|
||||||
|
secrets-application = { path = "../../crates/application" }
|
||||||
|
secrets-device-auth = { path = "../../crates/device-auth" }
|
||||||
|
secrets-domain = { path = "../../crates/domain" }
|
||||||
|
secrets-infrastructure-db = { path = "../../crates/infrastructure-db" }
|
||||||
15
apps/api/src/bin/secrets-api-migrate.rs
Normal file
15
apps/api/src/bin/secrets-api-migrate.rs
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
use anyhow::{Context, Result};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<()> {
|
||||||
|
let _ = dotenvy::dotenv();
|
||||||
|
|
||||||
|
let database_url = secrets_infrastructure_db::load_database_url()?;
|
||||||
|
let pool = secrets_infrastructure_db::create_pool(&database_url).await?;
|
||||||
|
secrets_infrastructure_db::migrate_current_schema(&pool)
|
||||||
|
.await
|
||||||
|
.context("failed to initialize current database schema")?;
|
||||||
|
|
||||||
|
println!("current database schema initialized");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
1099
apps/api/src/main.rs
Normal file
1099
apps/api/src/main.rs
Normal file
File diff suppressed because it is too large
Load Diff
6
apps/desktop/README.md
Normal file
6
apps/desktop/README.md
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
# apps/desktop
|
||||||
|
|
||||||
|
This directory is reserved for the v3 Tauri desktop shell.
|
||||||
|
|
||||||
|
The desktop UI is intentionally kept separate from `crates/desktop-daemon` so
|
||||||
|
that closing the main window does not terminate the local MCP process.
|
||||||
208
apps/desktop/design/DESIGN.md
Normal file
208
apps/desktop/design/DESIGN.md
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
# Secrets Design System
|
||||||
|
|
||||||
|
## 1. Visual Theme & Atmosphere
|
||||||
|
|
||||||
|
- Primary inspiration: Raycast desktop UI.
|
||||||
|
- Secondary influence: Linear information density and list discipline.
|
||||||
|
- Product personality: secure, local-first, developer-facing, restrained, trustworthy.
|
||||||
|
- Default mood: dark utility app, not a marketing site and not a glossy consumer app.
|
||||||
|
- The interface should feel like a native desktop control surface for secrets and MCP integrations.
|
||||||
|
- Use calm contrast, clean edges, compact spacing, and intentional empty space.
|
||||||
|
- Prefer precision over decoration. Visual polish should come from alignment, spacing, and hierarchy.
|
||||||
|
|
||||||
|
## 2. Color Palette & Roles
|
||||||
|
|
||||||
|
### Core Surfaces
|
||||||
|
|
||||||
|
- `bg.app`: `#0A0A0B` - app background, deepest canvas.
|
||||||
|
- `bg.panel`: `#111113` - main panel and modal background.
|
||||||
|
- `bg.panelElevated`: `#17171A` - cards, selected rows, input shells.
|
||||||
|
- `bg.panelHover`: `#1D1D22` - hover state for rows and controls.
|
||||||
|
- `bg.input`: `#141418` - text inputs, code blocks, secret fields.
|
||||||
|
- `border.subtle`: `#26262C` - default panel borders.
|
||||||
|
- `border.strong`: `#34343D` - active borders and high-emphasis outlines.
|
||||||
|
|
||||||
|
### Text
|
||||||
|
|
||||||
|
- `text.primary`: `#F5F5F7` - primary labels and values.
|
||||||
|
- `text.secondary`: `#B3B3BD` - supporting metadata.
|
||||||
|
- `text.tertiary`: `#7C7C88` - placeholders and low-emphasis copy.
|
||||||
|
- `text.inverse`: `#0B0B0D` - text on bright accents.
|
||||||
|
|
||||||
|
### Accents
|
||||||
|
|
||||||
|
- `accent.blue`: `#3B82F6` - login CTA, toggles, focus ring, trust signals.
|
||||||
|
- `accent.blueHover`: `#4C8DFF` - hover state for primary interactions.
|
||||||
|
- `accent.purple`: `#8B5CF6` - secondary accent for selected count pills or light emphasis.
|
||||||
|
- `accent.amber`: `#D97706` - local warnings or pending states.
|
||||||
|
- `accent.red`: `#EF4444` - destructive actions.
|
||||||
|
- `accent.green`: `#22C55E` - success or enabled state when stronger signal is required.
|
||||||
|
|
||||||
|
### Semantic Use
|
||||||
|
|
||||||
|
- Blue is the main action color. Keep it rare and meaningful.
|
||||||
|
- Purple can appear in subtle badges or selected-count chips, never as a second primary CTA.
|
||||||
|
- Red is reserved for delete, revoke, sign-out danger, and destructive confirmations.
|
||||||
|
- Avoid bright gradients as a dominant surface treatment.
|
||||||
|
|
||||||
|
## 3. Typography Rules
|
||||||
|
|
||||||
|
- Font stack: `Inter`, `SF Pro Text`, `SF Pro Display`, `Segoe UI`, system sans-serif.
|
||||||
|
- Use system-friendly text rendering. This is a desktop tool, not a display-heavy website.
|
||||||
|
- Chinese UI copy is allowed and should feel natural beside English identifiers like `host`, `token`, `MCP`.
|
||||||
|
- Keep tracking neutral. Avoid wide uppercase spacing except tiny overline labels.
|
||||||
|
|
||||||
|
### Type Scale
|
||||||
|
|
||||||
|
- App title / page title: 30-34px, weight 700.
|
||||||
|
- Section title: 18-22px, weight 650-700.
|
||||||
|
- Card title / row title: 15-17px, weight 600.
|
||||||
|
- Body text: 13-14px, weight 400-500.
|
||||||
|
- Caption / metadata label: 11-12px, weight 500, uppercase allowed with modest tracking.
|
||||||
|
- Monospace values: `SF Mono`, `JetBrains Mono`, `Menlo`, monospace; 12-13px.
|
||||||
|
|
||||||
|
## 4. Component Stylings
|
||||||
|
|
||||||
|
### App Shell
|
||||||
|
|
||||||
|
- Use a three-pane desktop layout for the main screen: left navigation, middle list, right detail pane.
|
||||||
|
- Pane separation should rely on subtle borders, not strong shadows.
|
||||||
|
- Sidebar should feel slightly darker than the center list pane.
|
||||||
|
- The detail pane can be the most open surface, with larger top padding and calmer spacing.
|
||||||
|
|
||||||
|
### Login Card
|
||||||
|
|
||||||
|
- Centered card on a dark canvas.
|
||||||
|
- Width: compact, roughly 420-520px.
|
||||||
|
- Rounded corners: 24-28px.
|
||||||
|
- Include one lock/trust mark, one clear product title, one short support sentence, one primary Google login button.
|
||||||
|
- Login should feel calm and premium, never busy.
|
||||||
|
|
||||||
|
### Buttons
|
||||||
|
|
||||||
|
- Primary button: dark app shell with blue fill, white text, medium radius.
|
||||||
|
- Secondary button: dark raised surface with subtle border.
|
||||||
|
- Destructive button: same structure as secondary, with red text or red-emphasis border only when needed.
|
||||||
|
- Button height should feel desktop-like, not mobile oversized.
|
||||||
|
- Avoid flashy gradients and oversized glows.
|
||||||
|
|
||||||
|
### Inputs
|
||||||
|
|
||||||
|
- Inputs use dark filled surfaces, subtle inset feel, 12-14px radius.
|
||||||
|
- Border should be nearly invisible at rest and stronger on hover/focus.
|
||||||
|
- Placeholders should be quiet and low-contrast.
|
||||||
|
- Search and filter inputs should visually align and share the same height.
|
||||||
|
|
||||||
|
### Lists and Rows
|
||||||
|
|
||||||
|
- Entry rows should be compact, crisp, and easy to scan.
|
||||||
|
- Selected row: slightly brighter dark card, subtle border, no heavy glow.
|
||||||
|
- Support a two-line rhythm: primary name and smaller type/folder metadata.
|
||||||
|
- Counts in the sidebar should use muted rounded chips.
|
||||||
|
|
||||||
|
### Detail Pane
|
||||||
|
|
||||||
|
- Use strong top title hierarchy with restrained action buttons on the right.
|
||||||
|
- Metadata should be presented in structured blocks or columns, not loose paragraphs.
|
||||||
|
- Secret values should live inside dedicated protected field cards.
|
||||||
|
- Secret field rows should include icon, masked value, reveal action, and copy action.
|
||||||
|
- Sensitive content must look controlled and deliberate, not playful.
|
||||||
|
|
||||||
|
### Modals
|
||||||
|
|
||||||
|
- Modal cards should feel like elevated control panels.
|
||||||
|
- MCP integration modal should support stacked integration rows with trailing toggles.
|
||||||
|
- Embedded JSON/config blocks should use a darker, code-oriented surface with monospace text.
|
||||||
|
- Large modal width is acceptable for configuration-heavy content.
|
||||||
|
|
||||||
|
### Toggles
|
||||||
|
|
||||||
|
- Use blue enabled state by default.
|
||||||
|
- Toggle track should be compact and clean, avoiding iOS-like softness.
|
||||||
|
- Align toggles flush right in integration lists.
|
||||||
|
|
||||||
|
### Badges and Status Pills
|
||||||
|
|
||||||
|
- Use small rounded pills for folder counts, archived state, or recent-delete state.
|
||||||
|
- Prefer muted purple, gray, or amber fills over saturated color blocks.
|
||||||
|
|
||||||
|
## 5. Layout Principles
|
||||||
|
|
||||||
|
- Use an 8px spacing system.
|
||||||
|
- Typical paddings:
|
||||||
|
- Sidebars: 16-20px.
|
||||||
|
- List and toolbar: 12-18px.
|
||||||
|
- Detail pane: 24-32px.
|
||||||
|
- Modals: 20-28px.
|
||||||
|
- Favor even vertical rhythm over decorative separators.
|
||||||
|
- Keep left edges aligned aggressively across sections.
|
||||||
|
- Avoid oversized hero spacing inside application surfaces.
|
||||||
|
- The main app should feel dense enough for productivity but never cramped.
|
||||||
|
|
||||||
|
## 6. Depth & Elevation
|
||||||
|
|
||||||
|
- Most separation should come from tone shifts and borders.
|
||||||
|
- Base panels: no shadow or extremely soft shadow.
|
||||||
|
- Elevated cards and modals: subtle shadow only, with low blur and low opacity.
|
||||||
|
- Do not use neon bloom, oversized backdrop blur, or glassmorphism.
|
||||||
|
- Focus states should use border color and a faint blue outer ring.
|
||||||
|
|
||||||
|
## 7. Do's and Don'ts
|
||||||
|
|
||||||
|
### Do
|
||||||
|
|
||||||
|
- Keep the UI dark, crisp, and desktop-native.
|
||||||
|
- Preserve strong information hierarchy in the detail pane.
|
||||||
|
- Make security-sensitive actions feel explicit and carefully gated.
|
||||||
|
- Use compact controls and disciplined spacing.
|
||||||
|
- Let alignment and typography carry most of the visual quality.
|
||||||
|
- Keep MCP integration screens structured like settings panels.
|
||||||
|
|
||||||
|
### Don't
|
||||||
|
|
||||||
|
- Do not turn the app into a landing page aesthetic.
|
||||||
|
- Do not use giant gradients, colorful illustrations, or soft SaaS cards.
|
||||||
|
- Do not over-round every surface.
|
||||||
|
- Do not mix many accent colors in one screen.
|
||||||
|
- Do not make secret fields look like casual form inputs.
|
||||||
|
- Do not use bright white backgrounds in the desktop app.
|
||||||
|
|
||||||
|
## 8. Responsive Behavior
|
||||||
|
|
||||||
|
- Primary target is desktop widths from 1280px upward.
|
||||||
|
- The three-pane shell should remain stable on desktop.
|
||||||
|
- At narrower widths, collapse from three panes to two panes before using stacked mobile behavior.
|
||||||
|
- The MCP modal can reduce width but should keep readable row spacing and code block legibility.
|
||||||
|
- Buttons and toggles should remain mouse-first, with minimum 32px touch-friendly height where practical.
|
||||||
|
|
||||||
|
## 9. Screen-Specific Guidance
|
||||||
|
|
||||||
|
### Login Screen
|
||||||
|
|
||||||
|
- Centered trust card.
|
||||||
|
- One focal icon or emblem above the title.
|
||||||
|
- Keep copy short.
|
||||||
|
- The Google login button should be the visual anchor.
|
||||||
|
|
||||||
|
### Main Secrets Screen
|
||||||
|
|
||||||
|
- Left sidebar: user card, folder navigation, utility actions near the bottom.
|
||||||
|
- Middle pane: search, type filter, result list.
|
||||||
|
- Right pane: selected entry title, metadata grid, secret cards, edit actions.
|
||||||
|
- The selected item should be immediately obvious but understated.
|
||||||
|
|
||||||
|
### MCP Integration Screen
|
||||||
|
|
||||||
|
- Treat as a settings modal.
|
||||||
|
- Integration rows should read like desktop preferences, not marketing feature cards.
|
||||||
|
- JSON config block should feel developer-native and copy-friendly.
|
||||||
|
|
||||||
|
## 10. Agent Prompt Guide
|
||||||
|
|
||||||
|
- Keywords: `dark desktop utility`, `Raycast-inspired`, `Linear-density`, `secure control panel`, `developer tool`, `restrained premium`, `MCP settings modal`.
|
||||||
|
- When generating screens, preserve: dark surfaces, subtle borders, compact controls, right-aligned actions, clean typography, muted status pills.
|
||||||
|
- If unsure, bias toward less decoration and tighter structure.
|
||||||
|
|
||||||
|
## 11. Quick Summary for Agents
|
||||||
|
|
||||||
|
Build Secrets like a polished desktop utility: mostly Raycast in atmosphere, a little Linear in density, with dark layered panels, precise typography, subtle borders, blue-only primary actions, and security-sensitive detail cards that feel calm, serious, and highly usable.
|
||||||
6300
apps/desktop/design/secrets-client.pen
Normal file
6300
apps/desktop/design/secrets-client.pen
Normal file
File diff suppressed because it is too large
Load Diff
41
apps/desktop/dist/disable-features.js
vendored
Normal file
41
apps/desktop/dist/disable-features.js
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
(() => {
  // Invoke handle exposed by the Tauri runtime (absent in a plain browser).
  const tauriInvoke = window.__TAURI_INTERNALS__?.invoke;

  // Block text selection everywhere except editable form controls.
  document.addEventListener("selectstart", (event) => {
    const { target } = event;
    const isEditableControl =
      target instanceof HTMLInputElement || target instanceof HTMLTextAreaElement;
    if (!isEditableControl) {
      event.preventDefault();
    }
  });

  // In non-debug builds, disable the context menu and common devtools shortcuts.
  async function applyProductionGuards() {
    // Not running under Tauri (e.g. opened in a browser): nothing to do.
    if (!tauriInvoke) {
      return;
    }

    let isDebugBuild = false;
    try {
      isDebugBuild = await tauriInvoke("is_debug_build");
    } catch {
      // If the backend command is unavailable, leave guards off.
      return;
    }

    // Debug builds keep devtools and the context menu available.
    if (isDebugBuild) {
      return;
    }

    document.addEventListener("contextmenu", (event) => event.preventDefault());
    document.addEventListener("keydown", (event) => {
      // F12 opens devtools directly; Ctrl/Cmd+Shift+I/C/J are the
      // inspect / element-picker / console shortcuts.
      const devtoolsCombo =
        (event.ctrlKey || event.metaKey) &&
        event.shiftKey &&
        ["I", "C", "J"].includes(event.key.toUpperCase());
      if (event.key === "F12" || devtoolsCombo) {
        event.preventDefault();
      }
    });
  }

  void applyProductionGuards();
})();
|
||||||
BIN
apps/desktop/dist/favicon.png
vendored
Normal file
BIN
apps/desktop/dist/favicon.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.2 KiB |
279
apps/desktop/dist/index.html
vendored
Normal file
279
apps/desktop/dist/index.html
vendored
Normal file
@@ -0,0 +1,279 @@
|
|||||||
|
<!doctype html>
|
||||||
|
<html lang="zh-CN">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||||
|
<title>Secrets</title>
|
||||||
|
<link rel="stylesheet" href="./styles.css" />
|
||||||
|
<script src="./disable-features.js"></script>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div id="login-view" class="login-screen hidden">
|
||||||
|
<div class="window-titlebar login-titlebar" data-tauri-drag-region aria-hidden="true"></div>
|
||||||
|
<div class="login-card">
|
||||||
|
<div class="login-main">
|
||||||
|
<div class="login-emblem" aria-hidden="true">
|
||||||
|
<svg class="login-lock-icon" viewBox="0 0 24 24" fill="none" aria-hidden="true">
|
||||||
|
<circle cx="12" cy="16" r="1"></circle>
|
||||||
|
<rect x="3" y="10" width="18" height="12" rx="2"></rect>
|
||||||
|
<path d="M7 10V7a5 5 0 0 1 10 0v3"></path>
|
||||||
|
</svg>
|
||||||
|
</div>
|
||||||
|
<div class="login-title-block">
|
||||||
|
<h1>Secrets</h1>
|
||||||
|
<p class="login-subtle">用 AI 安全地管理和使用密钥</p>
|
||||||
|
</div>
|
||||||
|
<div class="login-actions">
|
||||||
|
<button id="login-button" class="primary login-google-button">
|
||||||
|
<svg class="login-google-mark" viewBox="0 0 24 24" fill="currentColor" aria-hidden="true">
|
||||||
|
<path d="M22.56 12.25c0-.78-.07-1.53-.2-2.25H12v4.26h5.92c-.26 1.37-1.04 2.53-2.21 3.31v2.77h3.57c2.08-1.92 3.28-4.74 3.28-8.09z" />
|
||||||
|
<path d="M12 23c2.97 0 5.46-.98 7.28-2.66l-3.57-2.77c-.98.66-2.23 1.06-3.71 1.06-2.86 0-5.29-1.93-6.16-4.53H2.18v2.84C3.99 20.53 7.7 23 12 23z" />
|
||||||
|
<path d="M5.84 14.09c-.22-.66-.35-1.36-.35-2.09s.13-1.43.35-2.09V7.07H2.18C1.43 8.55 1 10.22 1 12s.43 3.45 1.18 4.93l2.85-2.22.81-.62z" />
|
||||||
|
<path d="M12 5.38c1.62 0 3.06.56 4.21 1.64l3.15-3.15C17.45 2.09 14.97 1 12 1 7.7 1 3.99 3.47 2.18 7.07l3.66 2.84c.87-2.6 3.3-4.53 6.16-4.53z" />
|
||||||
|
</svg>
|
||||||
|
<span>前往浏览器登录</span>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
<p id="login-error" class="error-text hidden"></p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="vault-modal" class="modal hidden">
|
||||||
|
<div class="modal-card">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h3 id="vault-modal-title">解锁本地 Vault</h3>
|
||||||
|
</div>
|
||||||
|
<p id="vault-modal-copy" class="subtle modal-copy">请输入本地 vault 主密码。</p>
|
||||||
|
<div class="modal-form">
|
||||||
|
<label class="field-label">
|
||||||
|
<span>主密码</span>
|
||||||
|
<input id="vault-password-input" type="password" class="detail-input" placeholder="输入主密码" />
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<p id="vault-modal-error" class="error-text hidden"></p>
|
||||||
|
<div class="modal-actions">
|
||||||
|
<button id="vault-modal-save" class="primary small">继续</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="app-shell" class="shell hidden">
|
||||||
|
<div class="window-titlebar shell-titlebar" data-tauri-drag-region aria-hidden="true"></div>
|
||||||
|
<aside class="sidebar">
|
||||||
|
<div class="user-block">
|
||||||
|
<button id="user-trigger" class="user-trigger">
|
||||||
|
<div class="avatar">V</div>
|
||||||
|
<div class="user-copy">
|
||||||
|
<div id="user-name" class="user-name">-</div>
|
||||||
|
<div id="user-email" class="user-email">-</div>
|
||||||
|
</div>
|
||||||
|
<span class="caret">▾</span>
|
||||||
|
</button>
|
||||||
|
<div id="user-menu" class="user-menu hidden">
|
||||||
|
<button id="manage-devices" class="menu-item">管理设备</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="folder-list" class="folder-list"></div>
|
||||||
|
|
||||||
|
<div class="sidebar-spacer"></div>
|
||||||
|
|
||||||
|
<div class="sidebar-footer">
|
||||||
|
<button id="open-mcp-modal" class="sidebar-utility">
|
||||||
|
<span class="sidebar-utility-icon" aria-hidden="true">⌁</span>
|
||||||
|
<span>MCP</span>
|
||||||
|
</button>
|
||||||
|
<button id="logout-button" class="sidebar-utility">
|
||||||
|
<span class="sidebar-utility-icon" aria-hidden="true">↩</span>
|
||||||
|
<span>退出登录</span>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</aside>
|
||||||
|
|
||||||
|
<main class="main-shell">
|
||||||
|
<section class="list-column">
|
||||||
|
<div class="searchbar-shell">
|
||||||
|
<input id="search-input" class="search-input global-search" placeholder="按名称模糊搜索" />
|
||||||
|
</div>
|
||||||
|
<section class="list-pane">
|
||||||
|
<div class="toolbar">
|
||||||
|
<button id="new-entry-button" class="secondary-button small">
|
||||||
|
<span class="button-icon" aria-hidden="true">+</span>
|
||||||
|
<span class="button-label">新建条目</span>
|
||||||
|
</button>
|
||||||
|
<select id="type-filter" class="filter-select">
|
||||||
|
<option value="">全部类型</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div id="entry-list" class="entry-list"></div>
|
||||||
|
</section>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section class="detail-pane">
|
||||||
|
<div class="detail-header">
|
||||||
|
<div class="detail-title-stack">
|
||||||
|
<div id="detail-folder-label" class="detail-folder-label">-</div>
|
||||||
|
<div class="detail-title-block">
|
||||||
|
<h2 id="entry-title">-</h2>
|
||||||
|
<div id="detail-badge" class="detail-badge hidden">最近删除</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="detail-actions">
|
||||||
|
<button id="edit-entry-button" class="secondary-button small action-button">
|
||||||
|
<span class="button-icon" aria-hidden="true">✎</span>
|
||||||
|
<span class="button-label">编辑</span>
|
||||||
|
</button>
|
||||||
|
<button id="delete-entry-button" class="secondary-button small danger action-button hidden">
|
||||||
|
<span class="button-icon" aria-hidden="true">⌫</span>
|
||||||
|
<span class="button-label">删除</span>
|
||||||
|
</button>
|
||||||
|
<button id="restore-entry-button" class="secondary-button small action-button hidden">
|
||||||
|
<span class="button-icon" aria-hidden="true">↺</span>
|
||||||
|
<span class="button-label">恢复</span>
|
||||||
|
</button>
|
||||||
|
<button id="save-entry-button" class="primary small action-button hidden">
|
||||||
|
<span class="button-icon" aria-hidden="true">✓</span>
|
||||||
|
<span class="button-label">保存</span>
|
||||||
|
</button>
|
||||||
|
<button id="cancel-edit-button" class="secondary-button small action-button hidden">
|
||||||
|
<span class="button-icon" aria-hidden="true">×</span>
|
||||||
|
<span class="button-label">取消</span>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="name-section" class="detail-section detail-edit-section hidden">
|
||||||
|
<h3>名称</h3>
|
||||||
|
<div id="name-view" class="detail-inline-value">-</div>
|
||||||
|
<input id="name-input" class="detail-input hidden" />
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="detail-section">
|
||||||
|
<h3>元数据</h3>
|
||||||
|
<div id="metadata-list" class="detail-fields"></div>
|
||||||
|
<div id="metadata-editor" class="metadata-editor hidden"></div>
|
||||||
|
<button id="add-metadata-button" class="secondary-button small hidden">新增元数据</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="detail-section">
|
||||||
|
<div class="section-header-row">
|
||||||
|
<h3>密钥</h3>
|
||||||
|
<button id="add-secret-button" class="secondary-button small hidden">
|
||||||
|
<span class="button-icon" aria-hidden="true">+</span>
|
||||||
|
<span class="button-label">新增密钥</span>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
<div id="secret-list" class="secret-list"></div>
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
</main>
|
||||||
|
|
||||||
|
<div id="device-modal" class="modal hidden">
|
||||||
|
<div class="modal-card">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h3>设备在线列表</h3>
|
||||||
|
<button id="close-device-modal" class="icon-button">×</button>
|
||||||
|
</div>
|
||||||
|
<p class="subtle modal-copy">查看已登录设备的在线情况与最近活动。</p>
|
||||||
|
<div id="device-list" class="device-list"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="mcp-modal" class="modal hidden">
|
||||||
|
<div class="modal-card wide">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h3>MCP 集成</h3>
|
||||||
|
<button id="close-mcp-modal" class="icon-button">×</button>
|
||||||
|
</div>
|
||||||
|
<p class="subtle modal-copy">查看当前 AI 工具的 MCP 集成情况,并一键写入本地 daemon 配置。</p>
|
||||||
|
<section class="modal-section">
|
||||||
|
<div id="mcp-integration-list" class="integration-list"></div>
|
||||||
|
<p class="modal-footnote">启动 Secrets 桌面端时,可按选择自动为上述工具写入 MCP 配置。</p>
|
||||||
|
</section>
|
||||||
|
<section class="detail-section compact modal-section">
|
||||||
|
<div class="mcp-json-header">
|
||||||
|
<h4>自定义 MCP 配置</h4>
|
||||||
|
<button id="copy-mcp-config" class="secondary-button small">
|
||||||
|
<span class="button-icon" aria-hidden="true">⧉</span>
|
||||||
|
<span class="button-label">复制</span>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
<pre id="mcp-config" class="mcp-config"></pre>
|
||||||
|
</section>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="entry-modal" class="modal hidden">
|
||||||
|
<div class="modal-card">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h3>新建条目</h3>
|
||||||
|
<button id="close-entry-modal" class="icon-button">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-form">
|
||||||
|
<label class="field-label">
|
||||||
|
<span>项目</span>
|
||||||
|
<input id="entry-modal-folder" class="detail-input" placeholder="例如:Refining" />
|
||||||
|
</label>
|
||||||
|
<label class="field-label">
|
||||||
|
<span>名称</span>
|
||||||
|
<input id="entry-modal-title" class="detail-input" placeholder="例如:secrets-local" />
|
||||||
|
</label>
|
||||||
|
<label class="field-label">
|
||||||
|
<span>类型</span>
|
||||||
|
<input id="entry-modal-type" class="detail-input" placeholder="例如:service" />
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div class="modal-actions">
|
||||||
|
<button id="entry-modal-cancel" class="secondary-button small">取消</button>
|
||||||
|
<button id="entry-modal-save" class="primary small">创建</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="secret-modal" class="modal hidden">
|
||||||
|
<div class="modal-card">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h3 id="secret-modal-title">新增密钥</h3>
|
||||||
|
<button id="close-secret-modal" class="icon-button">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-form">
|
||||||
|
<label class="field-label">
|
||||||
|
<span>名称</span>
|
||||||
|
<input id="secret-name-input" class="detail-input" placeholder="例如:token" />
|
||||||
|
</label>
|
||||||
|
<label class="field-label">
|
||||||
|
<span>类型</span>
|
||||||
|
<select id="secret-type-input" class="filter-select">
|
||||||
|
<option value="text">text</option>
|
||||||
|
<option value="password">password</option>
|
||||||
|
<option value="key">key</option>
|
||||||
|
</select>
|
||||||
|
</label>
|
||||||
|
<label class="field-label">
|
||||||
|
<span>内容</span>
|
||||||
|
<textarea id="secret-value-input" class="detail-input detail-textarea" placeholder="输入密钥内容"></textarea>
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div class="modal-actions">
|
||||||
|
<button id="secret-modal-cancel" class="secondary-button small">取消</button>
|
||||||
|
<button id="secret-modal-save" class="primary small">保存</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="history-modal" class="modal hidden">
|
||||||
|
<div class="modal-card wide">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h3>密钥历史</h3>
|
||||||
|
<button id="close-history-modal" class="icon-button">×</button>
|
||||||
|
</div>
|
||||||
|
<p id="history-modal-copy" class="subtle modal-copy">查看版本历史并回滚到指定版本。</p>
|
||||||
|
<div id="history-list" class="history-list"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script src="./main.js"></script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
1020
apps/desktop/dist/main.js
vendored
Normal file
1020
apps/desktop/dist/main.js
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1072
apps/desktop/dist/styles.css
vendored
Normal file
1072
apps/desktop/dist/styles.css
vendored
Normal file
File diff suppressed because it is too large
Load Diff
32
apps/desktop/src-tauri/Cargo.toml
Normal file
32
apps/desktop/src-tauri/Cargo.toml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-desktop"
|
||||||
|
version = "3.0.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
tauri-build.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
axum.workspace = true
|
||||||
|
chrono.workspace = true
|
||||||
|
hex.workspace = true
|
||||||
|
sqlx.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
|
tauri.workspace = true
|
||||||
|
tokio.workspace = true
|
||||||
|
reqwest.workspace = true
|
||||||
|
sha2.workspace = true
|
||||||
|
url.workspace = true
|
||||||
|
uuid.workspace = true
|
||||||
|
base64 = "0.22.1"
|
||||||
|
|
||||||
|
secrets-client-integrations = { path = "../../../crates/client-integrations" }
|
||||||
|
secrets-crypto = { path = "../../../crates/crypto" }
|
||||||
|
secrets-device-auth = { path = "../../../crates/device-auth" }
|
||||||
|
secrets-domain = { path = "../../../crates/domain" }
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "Secrets"
|
||||||
|
path = "src/main.rs"
|
||||||
3
apps/desktop/src-tauri/build.rs
Normal file
3
apps/desktop/src-tauri/build.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
// Tauri build script: `tauri_build::build()` generates the platform glue
// (resource embedding, manifests, capability schemas) that the `tauri`
// runtime crate expects at compile time.
fn main() {
    tauri_build::build()
}
|
||||||
2
apps/desktop/src-tauri/check_png_center.js
Normal file
2
apps/desktop/src-tauri/check_png_center.js
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
// Scratch/diagnostic script intended to check whether the app icon PNG is
// visually centered. Currently a stub: it only loads `fs` and does no work.
const fs = require('fs');
// Very simple check: read the first few bytes, maybe we can use an image library to find the bounding box
|
||||||
1
apps/desktop/src-tauri/gen/schemas/acl-manifests.json
Normal file
1
apps/desktop/src-tauri/gen/schemas/acl-manifests.json
Normal file
File diff suppressed because one or more lines are too long
1
apps/desktop/src-tauri/gen/schemas/capabilities.json
Normal file
1
apps/desktop/src-tauri/gen/schemas/capabilities.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{}
|
||||||
2244
apps/desktop/src-tauri/gen/schemas/desktop-schema.json
Normal file
2244
apps/desktop/src-tauri/gen/schemas/desktop-schema.json
Normal file
File diff suppressed because it is too large
Load Diff
2244
apps/desktop/src-tauri/gen/schemas/macOS-schema.json
Normal file
2244
apps/desktop/src-tauri/gen/schemas/macOS-schema.json
Normal file
File diff suppressed because it is too large
Load Diff
BIN
apps/desktop/src-tauri/icons/icon.png
Normal file
BIN
apps/desktop/src-tauri/icons/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 6.3 KiB |
1427
apps/desktop/src-tauri/src/local_vault.rs
Normal file
1427
apps/desktop/src-tauri/src/local_vault.rs
Normal file
File diff suppressed because it is too large
Load Diff
1080
apps/desktop/src-tauri/src/main.rs
Normal file
1080
apps/desktop/src-tauri/src/main.rs
Normal file
File diff suppressed because it is too large
Load Diff
356
apps/desktop/src-tauri/src/session_api.rs
Normal file
356
apps/desktop/src-tauri/src/session_api.rs
Normal file
@@ -0,0 +1,356 @@
|
|||||||
|
use anyhow::{Context, Result as AnyResult};
|
||||||
|
use axum::{
|
||||||
|
Router,
|
||||||
|
body::{Body, to_bytes},
|
||||||
|
extract::{Request, State as AxumState},
|
||||||
|
http::{StatusCode as AxumStatusCode, header},
|
||||||
|
response::Response,
|
||||||
|
routing::{any, get, post},
|
||||||
|
};
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
|
use crate::local_vault::{
|
||||||
|
LocalEntryQuery, bootstrap as vault_bootstrap, create_entry as vault_create_entry,
|
||||||
|
create_secret as vault_create_secret, delete_entry as vault_delete_entry,
|
||||||
|
delete_secret as vault_delete_secret, entry_detail as vault_entry_detail,
|
||||||
|
list_entries as vault_list_entries, restore_entry as vault_restore_entry,
|
||||||
|
reveal_secret_value as vault_reveal_secret_value, rollback_secret as vault_rollback_secret,
|
||||||
|
secret_history as vault_secret_history, update_entry as vault_update_entry,
|
||||||
|
update_secret as vault_update_secret,
|
||||||
|
};
|
||||||
|
use crate::{
|
||||||
|
DesktopState, EntryDetail, EntryDraft, EntryListItem, EntryListQuery, SecretDraft,
|
||||||
|
SecretUpdateDraft, current_device_token, map_entry_detail_to_local, map_entry_draft_to_local,
|
||||||
|
map_local_entry_detail, map_local_history_item, map_local_secret_value,
|
||||||
|
map_secret_draft_to_local, map_secret_update_to_local, split_secret_ref_for_ui,
|
||||||
|
sync_local_vault,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub async fn desktop_session_health(
|
||||||
|
AxumState(state): AxumState<DesktopState>,
|
||||||
|
) -> Result<&'static str, AxumStatusCode> {
|
||||||
|
current_device_token(&state)
|
||||||
|
.map(|_| "ok")
|
||||||
|
.map_err(|_| AxumStatusCode::UNAUTHORIZED)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn desktop_session_api(
|
||||||
|
AxumState(state): AxumState<DesktopState>,
|
||||||
|
request: Request<Body>,
|
||||||
|
) -> Response {
|
||||||
|
let (parts, body) = request.into_parts();
|
||||||
|
let path_and_query = parts
|
||||||
|
.uri
|
||||||
|
.path_and_query()
|
||||||
|
.map(|value| value.as_str())
|
||||||
|
.unwrap_or("/");
|
||||||
|
|
||||||
|
let body_bytes = match to_bytes(body, 1024 * 1024).await {
|
||||||
|
Ok(bytes) => bytes,
|
||||||
|
Err(_) => {
|
||||||
|
return Response::builder()
|
||||||
|
.status(AxumStatusCode::BAD_REQUEST)
|
||||||
|
.body(Body::from("failed to read relay request body"))
|
||||||
|
.expect("build relay bad request");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
handle_local_session_request(&state, parts.method.as_str(), path_and_query, &body_bytes)
|
||||||
|
.await
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
Response::builder()
|
||||||
|
.status(AxumStatusCode::NOT_FOUND)
|
||||||
|
.header(header::CONTENT_TYPE, "application/json; charset=utf-8")
|
||||||
|
.body(Body::from(
|
||||||
|
r#"{"error":"desktop local vault route not found"}"#,
|
||||||
|
))
|
||||||
|
.expect("build local session not found response")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Route a buffered relay request to the matching local-vault operation.
///
/// Returns `None` both when no route matches (the caller answers 404) and
/// when a vault call or (de)serialization fails, because every fallible step
/// is unwrapped with `.ok()?`.
/// NOTE(review): this collapses internal errors into 404 responses — confirm
/// that is intentional before surfacing richer errors to the UI.
///
/// Mutating routes fire a best-effort `sync_local_vault` whose result is
/// deliberately ignored (`let _ = ...`): local writes succeed even when the
/// remote sync fails.
async fn handle_local_session_request(
    state: &DesktopState,
    method: &str,
    path_and_query: &str,
    body_bytes: &[u8],
) -> Option<Response> {
    // Routing matches on the bare path; the query string is only re-parsed
    // by the entry-listing route below.
    let path = path_and_query.split('?').next().unwrap_or(path_and_query);
    // Helper: build a JSON response with a fixed content type.
    let make_json = |status: AxumStatusCode, value: serde_json::Value| {
        Response::builder()
            .status(status)
            .header(header::CONTENT_TYPE, "application/json; charset=utf-8")
            .body(Body::from(value.to_string()))
            .expect("build local session response")
    };

    // Guard order matters: suffix routes (`/delete`, `/restore`, ...) are
    // listed before the bare prefix matches that would otherwise swallow them.
    match (method, path) {
        // Report whether the vault exists and is currently unlocked.
        ("GET", "/vault/status") => {
            let status = vault_bootstrap(&state.local_vault).await.ok()?;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::json!({
                    "unlocked": status.unlocked,
                    "has_master_password": status.has_master_password
                }),
            ))
        }
        // List entries, honoring folder / entry_type / query / deleted_only
        // filters from the query string.
        ("GET", "/vault/entries") => {
            // Parse query parameters by grafting the path onto a dummy base URL.
            let url = format!("http://localhost{path_and_query}");
            let parsed = Url::parse(&url).ok()?;
            let mut query = EntryListQuery {
                folder: None,
                entry_type: None,
                query: None,
                deleted_only: false,
            };
            for (key, value) in parsed.query_pairs() {
                match key.as_ref() {
                    "folder" => query.folder = Some(value.into_owned()),
                    "entry_type" => query.entry_type = Some(value.into_owned()),
                    "query" => query.query = Some(value.into_owned()),
                    // Anything other than the literal "true" is treated as false.
                    "deleted_only" => query.deleted_only = value == "true",
                    _ => {}
                }
            }
            // UI-facing `entry_type` maps onto the vault's `cipher_type` field.
            let entries = vault_list_entries(
                &state.local_vault,
                &LocalEntryQuery {
                    folder: query.folder,
                    cipher_type: query.entry_type,
                    query: query.query,
                    deleted_only: query.deleted_only,
                },
            )
            .await
            .ok()?;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(
                    entries
                        .into_iter()
                        .map(|entry| EntryListItem {
                            id: entry.id,
                            title: entry.name,
                            subtitle: entry.cipher_type,
                            folder: entry.folder,
                            deleted: entry.deleted,
                        })
                        .collect::<Vec<_>>(),
                )
                .ok()?,
            ))
        }
        // Fetch one entry's detail: GET /vault/entries/{id}.
        _ if method == "GET" && path.starts_with("/vault/entries/") => {
            let entry_id = path.trim_start_matches("/vault/entries/");
            let detail = vault_entry_detail(&state.local_vault, entry_id)
                .await
                .ok()?;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(map_local_entry_detail(detail)).ok()?,
            ))
        }
        // Create a new entry from the JSON body, then best-effort sync.
        ("POST", "/vault/entries") => {
            let draft: EntryDraft = serde_json::from_slice(body_bytes).ok()?;
            let created = vault_create_entry(&state.local_vault, map_entry_draft_to_local(draft))
                .await
                .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(map_local_entry_detail(created)).ok()?,
            ))
        }
        // Update an entry: PATCH /vault/entries/{id}. The path id overrides
        // whatever id was (or wasn't) present in the body.
        _ if method == "PATCH" && path.starts_with("/vault/entries/") => {
            let entry_id = path.trim_start_matches("/vault/entries/").to_string();
            let mut detail: EntryDetail = serde_json::from_slice(body_bytes).ok()?;
            detail.id = entry_id;
            let updated = vault_update_entry(&state.local_vault, map_entry_detail_to_local(detail))
                .await
                .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(map_local_entry_detail(updated)).ok()?,
            ))
        }
        // Soft-delete an entry: POST /vault/entries/{id}/delete.
        _ if method == "POST"
            && path.starts_with("/vault/entries/")
            && path.ends_with("/delete") =>
        {
            let entry_id = path
                .trim_start_matches("/vault/entries/")
                .trim_end_matches("/delete")
                .trim_end_matches('/');
            vault_delete_entry(&state.local_vault, entry_id)
                .await
                .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::json!({ "ok": true }),
            ))
        }
        // Restore a soft-deleted entry: POST /vault/entries/{id}/restore.
        _ if method == "POST"
            && path.starts_with("/vault/entries/")
            && path.ends_with("/restore") =>
        {
            let entry_id = path
                .trim_start_matches("/vault/entries/")
                .trim_end_matches("/restore")
                .trim_end_matches('/');
            vault_restore_entry(&state.local_vault, entry_id)
                .await
                .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::json!({ "ok": true }),
            ))
        }
        // Add a secret to an entry: POST /vault/entries/{id}/secrets.
        // Returns the full updated entry detail.
        _ if method == "POST"
            && path.starts_with("/vault/entries/")
            && path.ends_with("/secrets") =>
        {
            let entry_id = path
                .trim_start_matches("/vault/entries/")
                .trim_end_matches("/secrets")
                .trim_end_matches('/');
            let secret: SecretDraft = serde_json::from_slice(body_bytes).ok()?;
            let updated = vault_create_secret(
                &state.local_vault,
                entry_id,
                map_secret_draft_to_local(secret),
            )
            .await
            .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(map_local_entry_detail(updated)).ok()?,
            ))
        }
        // Reveal a secret's plaintext value: GET /vault/secrets/{id}/value.
        // The composite secret id is split into (entry_id, secret_name).
        _ if method == "GET" && path.starts_with("/vault/secrets/") && path.ends_with("/value") => {
            let secret_id = path
                .trim_start_matches("/vault/secrets/")
                .trim_end_matches("/value")
                .trim_end_matches('/')
                .to_string();
            let (entry_id, secret_name) = split_secret_ref_for_ui(&secret_id).ok()?;
            let value = vault_reveal_secret_value(&state.local_vault, &entry_id, &secret_name)
                .await
                .ok()?;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(map_local_secret_value(value)).ok()?,
            ))
        }
        // Version history for a secret: GET /vault/secrets/{id}/history.
        _ if method == "GET"
            && path.starts_with("/vault/secrets/")
            && path.ends_with("/history") =>
        {
            let secret_id = path
                .trim_start_matches("/vault/secrets/")
                .trim_end_matches("/history")
                .trim_end_matches('/')
                .to_string();
            let (entry_id, secret_name) = split_secret_ref_for_ui(&secret_id).ok()?;
            let history = vault_secret_history(&state.local_vault, &entry_id, &secret_name)
                .await
                .ok()?;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(
                    history
                        .into_iter()
                        .map(map_local_history_item)
                        .collect::<Vec<_>>(),
                )
                .ok()?,
            ))
        }
        // Update a secret: PATCH /vault/secrets/{id}. Path id wins over the
        // body's id, as with entries.
        _ if method == "PATCH" && path.starts_with("/vault/secrets/") => {
            let secret_id = path.trim_start_matches("/vault/secrets/").to_string();
            let mut update: SecretUpdateDraft = serde_json::from_slice(body_bytes).ok()?;
            update.id = secret_id;
            let updated =
                vault_update_secret(&state.local_vault, map_secret_update_to_local(update))
                    .await
                    .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(map_local_entry_detail(updated)).ok()?,
            ))
        }
        // Delete a secret: POST /vault/secrets/{id}/delete.
        _ if method == "POST"
            && path.starts_with("/vault/secrets/")
            && path.ends_with("/delete") =>
        {
            let secret_id = path
                .trim_start_matches("/vault/secrets/")
                .trim_end_matches("/delete")
                .trim_end_matches('/');
            vault_delete_secret(&state.local_vault, secret_id)
                .await
                .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::json!({ "ok": true }),
            ))
        }
        // Roll a secret back to a prior version:
        // POST /vault/secrets/{id}/rollback with optional {"history_id": n}.
        _ if method == "POST"
            && path.starts_with("/vault/secrets/")
            && path.ends_with("/rollback") =>
        {
            let secret_id = path
                .trim_start_matches("/vault/secrets/")
                .trim_end_matches("/rollback")
                .trim_end_matches('/')
                .to_string();
            let payload: serde_json::Value = serde_json::from_slice(body_bytes).ok()?;
            let updated = vault_rollback_secret(
                &state.local_vault,
                &secret_id,
                // Missing / non-integer history_id becomes None — presumably
                // "roll back to the previous version"; confirm in local_vault.
                payload.get("history_id").and_then(|value| value.as_i64()),
            )
            .await
            .ok()?;
            let _ = sync_local_vault(state).await;
            Some(make_json(
                AxumStatusCode::OK,
                serde_json::to_value(map_local_entry_detail(updated)).ok()?,
            ))
        }
        // No route matched; the caller converts this into a 404.
        _ => None,
    }
}
|
||||||
|
|
||||||
|
/// Bind and run the local HTTP relay that fronts the vault.
///
/// All `/vault/...` routes funnel into the shared `desktop_session_api`
/// handler (which re-dispatches on method + path itself); `/healthz` is a
/// lightweight auth-aware liveness probe. Runs until the server errors;
/// bind and serve failures are returned with context.
pub async fn start_desktop_session_server(state: DesktopState) -> AnyResult<()> {
    let app = Router::new()
        .route("/healthz", get(desktop_session_health))
        .route("/vault/status", get(desktop_session_api))
        // `any(...)` because these paths accept multiple methods
        // (GET list / POST create; GET detail / PATCH update).
        .route("/vault/entries", any(desktop_session_api))
        .route("/vault/entries/{id}", any(desktop_session_api))
        .route("/vault/entries/{id}/delete", post(desktop_session_api))
        .route("/vault/entries/{id}/restore", post(desktop_session_api))
        .route("/vault/entries/{id}/secrets", post(desktop_session_api))
        .route("/vault/secrets/{id}", any(desktop_session_api))
        .route("/vault/secrets/{id}/value", get(desktop_session_api))
        .route("/vault/secrets/{id}/history", get(desktop_session_api))
        .route("/vault/secrets/{id}/delete", post(desktop_session_api))
        .route("/vault/secrets/{id}/rollback", post(desktop_session_api))
        .with_state(state.clone());
    let listener = tokio::net::TcpListener::bind(&state.session_bind)
        .await
        .with_context(|| {
            format!(
                "failed to bind desktop session relay {}",
                state.session_bind
            )
        })?;
    axum::serve(listener, app)
        .await
        .context("desktop session relay server error")
}
|
||||||
31
apps/desktop/src-tauri/tauri.conf.json
Normal file
31
apps/desktop/src-tauri/tauri.conf.json
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.tauri.app/config/2",
|
||||||
|
"productName": "Secrets",
|
||||||
|
"version": "3.0.0",
|
||||||
|
"identifier": "dev.refining.secrets",
|
||||||
|
"build": {
|
||||||
|
"beforeDevCommand": "",
|
||||||
|
"beforeBuildCommand": "",
|
||||||
|
"frontendDist": "../dist"
|
||||||
|
},
|
||||||
|
"app": {
|
||||||
|
"windows": [
|
||||||
|
{
|
||||||
|
"title": "Secrets",
|
||||||
|
"width": 420,
|
||||||
|
"height": 400,
|
||||||
|
"minWidth": 420,
|
||||||
|
"minHeight": 400,
|
||||||
|
"resizable": true,
|
||||||
|
"titleBarStyle": "overlay",
|
||||||
|
"hiddenTitle": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"security": {
|
||||||
|
"csp": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"bundle": {
|
||||||
|
"active": false
|
||||||
|
}
|
||||||
|
}
|
||||||
18
crates/application/Cargo.toml
Normal file
18
crates/application/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-application"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "secrets_application"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
chrono.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
|
sqlx.workspace = true
|
||||||
|
uuid.workspace = true
|
||||||
|
|
||||||
|
secrets-domain = { path = "../domain" }
|
||||||
9
crates/application/src/conflict.rs
Normal file
9
crates/application/src/conflict.rs
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
use secrets_domain::VaultObjectEnvelope;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
/// A client change rejected because its `base_revision` no longer matches
/// the server's current revision for the targeted object.
#[derive(Debug, Clone)]
pub struct RevisionConflict {
    // Client-assigned id of the rejected change.
    pub change_id: Uuid,
    // Vault object the change targeted.
    pub object_id: Uuid,
    // Server's current copy of the object, if any, so the client can
    // re-base or merge.
    pub server_object: Option<VaultObjectEnvelope>,
}
|
||||||
3
crates/application/src/lib.rs
Normal file
3
crates/application/src/lib.rs
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
pub mod conflict;
|
||||||
|
pub mod sync;
|
||||||
|
pub mod vault_store;
|
||||||
252
crates/application/src/sync.rs
Normal file
252
crates/application/src/sync.rs
Normal file
@@ -0,0 +1,252 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use sqlx::PgPool;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use secrets_domain::{
|
||||||
|
SyncAcceptedChange, SyncConflict, SyncPullRequest, SyncPullResponse, SyncPushRequest,
|
||||||
|
SyncPushResponse, VaultObjectChange, VaultObjectEnvelope,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::vault_store::{
|
||||||
|
get_object, list_objects_since, list_tombstones_since, max_server_revision,
|
||||||
|
};
|
||||||
|
|
||||||
|
fn detect_conflict(
|
||||||
|
change: &VaultObjectChange,
|
||||||
|
existing: Option<&VaultObjectEnvelope>,
|
||||||
|
) -> Option<SyncConflict> {
|
||||||
|
match (change.base_revision, existing) {
|
||||||
|
(Some(base_revision), Some(server_object)) if server_object.revision != base_revision => {
|
||||||
|
Some(SyncConflict {
|
||||||
|
change_id: change.change_id,
|
||||||
|
object_id: change.object_id,
|
||||||
|
reason: "revision_conflict".to_string(),
|
||||||
|
server_object: Some(server_object.clone()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
_ if !matches!(change.operation.as_str(), "upsert" | "delete") => Some(SyncConflict {
|
||||||
|
change_id: change.change_id,
|
||||||
|
object_id: change.object_id,
|
||||||
|
reason: "unsupported_operation".to_string(),
|
||||||
|
server_object: existing.cloned(),
|
||||||
|
}),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn sync_pull(
|
||||||
|
pool: &PgPool,
|
||||||
|
user_id: Uuid,
|
||||||
|
request: SyncPullRequest,
|
||||||
|
) -> Result<SyncPullResponse> {
|
||||||
|
let cursor = request.cursor.unwrap_or(0).max(0);
|
||||||
|
let limit = request.limit.unwrap_or(200).clamp(1, 500);
|
||||||
|
let objects = list_objects_since(pool, user_id, cursor, limit).await?;
|
||||||
|
let tombstones = if request.include_deleted {
|
||||||
|
list_tombstones_since(pool, user_id, cursor, limit).await?
|
||||||
|
} else {
|
||||||
|
Vec::new()
|
||||||
|
};
|
||||||
|
let server_revision = max_server_revision(pool, user_id).await?;
|
||||||
|
let next_cursor = objects
|
||||||
|
.last()
|
||||||
|
.map(|object| object.revision)
|
||||||
|
.unwrap_or(cursor);
|
||||||
|
|
||||||
|
Ok(SyncPullResponse {
|
||||||
|
server_revision,
|
||||||
|
next_cursor,
|
||||||
|
has_more: (objects.len() as i64) >= limit,
|
||||||
|
objects,
|
||||||
|
tombstones,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Apply a batch of client changes, returning per-change accept/conflict
/// results plus the server's high-water revision.
///
/// NOTE(review): each change runs as separate statements outside a
/// transaction, and `next_revision` is derived from a prior read — two
/// concurrent pushes for the same object could race. Confirm whether the
/// loop body should run inside `pool.begin()`.
pub async fn sync_push(
    pool: &PgPool,
    user_id: Uuid,
    request: SyncPushRequest,
) -> Result<SyncPushResponse> {
    let mut accepted = Vec::new();
    let mut conflicts = Vec::new();

    for change in request.changes {
        // Conflict-check against the server's current copy of the object.
        let existing = get_object(pool, user_id, change.object_id).await?;
        if let Some(conflict) = detect_conflict(&change, existing.as_ref()) {
            conflicts.push(conflict);
            continue;
        }

        // Revisions are per-object and monotonically increasing; brand-new
        // objects start at 1.
        let next_revision = existing
            .as_ref()
            .map(|object| object.revision + 1)
            .unwrap_or(1);
        let next_cipher_version = change.cipher_version.unwrap_or(1);
        let next_ciphertext = change.ciphertext.clone().unwrap_or_default();
        let next_content_hash = change.content_hash.clone().unwrap_or_default();
        // Deletions are soft: the row stays, stamped with a deletion time
        // (used only for the history insert below).
        let next_deleted_at = if change.operation == "delete" {
            Some(chrono::Utc::now())
        } else {
            None
        };

        match change.operation.as_str() {
            "upsert" => {
                // Insert-or-update the live row; an upsert always clears any
                // previous soft-delete marker.
                sqlx::query(
                    r#"
                    INSERT INTO vault_objects (
                        object_id, user_id, object_kind, revision, cipher_version, ciphertext, content_hash, deleted_at, updated_at, created_by_device
                    )
                    VALUES ($1, $2, $3, $4, $5, $6, $7, NULL, NOW(), NULL)
                    ON CONFLICT (object_id)
                    DO UPDATE SET
                        revision = EXCLUDED.revision,
                        cipher_version = EXCLUDED.cipher_version,
                        ciphertext = EXCLUDED.ciphertext,
                        content_hash = EXCLUDED.content_hash,
                        deleted_at = NULL,
                        updated_at = NOW()
                    "#,
                )
                .bind(change.object_id)
                .bind(user_id)
                .bind(change.object_kind.as_str())
                .bind(next_revision)
                .bind(next_cipher_version)
                .bind(next_ciphertext.clone())
                .bind(next_content_hash.clone())
                .execute(pool)
                .await?;
            }
            "delete" => {
                // Soft-delete: bump the revision so delta pulls pick up the
                // tombstone.
                sqlx::query(
                    r#"
                    UPDATE vault_objects
                    SET revision = $1, deleted_at = NOW(), updated_at = NOW()
                    WHERE object_id = $2
                    AND user_id = $3
                    "#,
                )
                .bind(next_revision)
                .bind(change.object_id)
                .bind(user_id)
                .execute(pool)
                .await?;
            }
            _ => unreachable!("unsupported operations are filtered by detect_conflict"),
        }

        // Append the new state to the immutable per-object revision history.
        sqlx::query(
            r#"
            INSERT INTO vault_object_revisions (
                object_id, user_id, revision, cipher_version, ciphertext, content_hash, deleted_at, created_at
            )
            VALUES ($1, $2, $3, $4, $5, $6, $7, NOW())
            "#,
        )
        .bind(change.object_id)
        .bind(user_id)
        .bind(next_revision)
        .bind(next_cipher_version)
        .bind(next_ciphertext)
        .bind(next_content_hash)
        .bind(next_deleted_at)
        .execute(pool)
        .await?;

        accepted.push(SyncAcceptedChange {
            change_id: change.change_id,
            object_id: change.object_id,
            revision: next_revision,
        });
    }

    let server_revision = max_server_revision(pool, user_id).await?;
    Ok(SyncPushResponse {
        server_revision,
        accepted,
        conflicts,
    })
}
|
||||||
|
|
||||||
|
pub async fn fetch_object(
|
||||||
|
pool: &PgPool,
|
||||||
|
user_id: Uuid,
|
||||||
|
object_id: Uuid,
|
||||||
|
) -> Result<Option<VaultObjectEnvelope>> {
|
||||||
|
get_object(pool, user_id, object_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use secrets_domain::{VaultObjectChange, VaultObjectKind};
    use uuid::Uuid;

    // Client-side change fixture; `Uuid::max()` keeps its object id aligned
    // with `sample_object` below.
    fn sample_change(operation: &str, base_revision: Option<i64>) -> VaultObjectChange {
        VaultObjectChange {
            change_id: Uuid::nil(),
            object_id: Uuid::max(),
            object_kind: VaultObjectKind::Cipher,
            operation: operation.to_string(),
            base_revision,
            cipher_version: Some(1),
            ciphertext: Some(vec![1, 2, 3]),
            content_hash: Some("sha256:test".to_string()),
        }
    }

    // Server-side envelope fixture at the given revision.
    fn sample_object(revision: i64) -> VaultObjectEnvelope {
        VaultObjectEnvelope {
            object_id: Uuid::max(),
            object_kind: VaultObjectKind::Cipher,
            revision,
            cipher_version: 1,
            ciphertext: vec![9, 9, 9],
            content_hash: "sha256:server".to_string(),
            deleted_at: None,
            updated_at: Utc::now(),
        }
    }

    #[test]
    fn conflict_when_base_revision_is_stale() {
        // Client built on revision 3 but the server is at 5.
        let mut change = sample_change("upsert", Some(3));
        let server = sample_object(5);
        change.object_id = server.object_id;

        let conflict = detect_conflict(&change, Some(&server)).expect("expected conflict");

        assert_eq!(conflict.reason, "revision_conflict");
        assert_eq!(conflict.object_id, server.object_id);
        // The server's copy is echoed back so the client can re-base.
        assert_eq!(
            conflict
                .server_object
                .as_ref()
                .map(|object| object.revision),
            Some(5)
        );
    }

    #[test]
    fn no_conflict_when_revision_matches() {
        // Client base revision equals the server revision: change is clean.
        let mut change = sample_change("upsert", Some(5));
        let server = sample_object(5);
        change.object_id = server.object_id;

        let conflict = detect_conflict(&change, Some(&server));

        assert!(conflict.is_none());
    }

    #[test]
    fn unsupported_operation_is_conflict() {
        // "merge" is not a recognized operation, even for a new object.
        let change = sample_change("merge", None);

        let conflict = detect_conflict(&change, None).expect("expected unsupported operation");

        assert_eq!(conflict.reason, "unsupported_operation");
        assert!(conflict.server_object.is_none());
    }
}
|
||||||
147
crates/application/src/vault_store.rs
Normal file
147
crates/application/src/vault_store.rs
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
use anyhow::{Context, Result};
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use sqlx::PgPool;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use secrets_domain::{VaultObjectEnvelope, VaultObjectKind, VaultTombstone};
|
||||||
|
|
||||||
|
/// Row shape shared by the `vault_objects` SELECTs in this module.
#[derive(Debug, sqlx::FromRow)]
struct VaultObjectRow {
    object_id: Uuid,
    // Selected as `object_kind AS _object_kind`; currently unused — see the
    // note on the From impl below.
    _object_kind: String,
    revision: i64,
    cipher_version: i32,
    ciphertext: Vec<u8>,
    content_hash: String,
    deleted_at: Option<DateTime<Utc>>,
    updated_at: DateTime<Utc>,
}

impl From<VaultObjectRow> for VaultObjectEnvelope {
    fn from(row: VaultObjectRow) -> Self {
        Self {
            object_id: row.object_id,
            // NOTE(review): the stored `_object_kind` string is ignored and
            // every row maps to `VaultObjectKind::Cipher`. Fine while Cipher
            // is the only kind — confirm before introducing more kinds.
            object_kind: VaultObjectKind::Cipher,
            revision: row.revision,
            cipher_version: row.cipher_version,
            ciphertext: row.ciphertext,
            content_hash: row.content_hash,
            deleted_at: row.deleted_at,
            updated_at: row.updated_at,
        }
    }
}
|
||||||
|
|
||||||
|
/// List a user's vault objects with revision strictly greater than `cursor`,
/// oldest revision first, up to `limit` rows. Soft-deleted rows are included
/// (no `deleted_at` filter).
pub async fn list_objects_since(
    pool: &PgPool,
    user_id: Uuid,
    cursor: i64,
    limit: i64,
) -> Result<Vec<VaultObjectEnvelope>> {
    let rows = sqlx::query_as::<_, VaultObjectRow>(
        r#"
        SELECT
            object_id,
            object_kind AS _object_kind,
            revision,
            cipher_version,
            ciphertext,
            content_hash,
            deleted_at,
            updated_at
        FROM vault_objects
        WHERE user_id = $1
        AND revision > $2
        ORDER BY revision ASC
        LIMIT $3
        "#,
    )
    .bind(user_id)
    .bind(cursor)
    // Guard against non-positive limits reaching the query.
    .bind(limit.max(1))
    .fetch_all(pool)
    .await
    .context("failed to list vault objects")?;

    Ok(rows.into_iter().map(Into::into).collect())
}
|
||||||
|
|
||||||
|
/// Fetch one vault object by id, scoped to `user_id`; `None` when absent.
/// Soft-deleted rows are still returned (callers check `deleted_at`).
pub async fn get_object(
    pool: &PgPool,
    user_id: Uuid,
    object_id: Uuid,
) -> Result<Option<VaultObjectEnvelope>> {
    let row = sqlx::query_as::<_, VaultObjectRow>(
        r#"
        SELECT
            object_id,
            object_kind AS _object_kind,
            revision,
            cipher_version,
            ciphertext,
            content_hash,
            deleted_at,
            updated_at
        FROM vault_objects
        WHERE user_id = $1
        AND object_id = $2
        "#,
    )
    .bind(user_id)
    .bind(object_id)
    .fetch_optional(pool)
    .await
    .context("failed to load vault object")?;

    Ok(row.map(Into::into))
}
|
||||||
|
|
||||||
|
/// List tombstones (id, revision, deletion time) for the user's soft-deleted
/// objects with revision strictly greater than `cursor`, oldest first.
pub async fn list_tombstones_since(
    pool: &PgPool,
    user_id: Uuid,
    cursor: i64,
    limit: i64,
) -> Result<Vec<VaultTombstone>> {
    // `deleted_at` is non-null for every selected row (filtered below), so
    // the tuple can use a non-optional DateTime.
    let rows = sqlx::query_as::<_, (Uuid, i64, DateTime<Utc>)>(
        r#"
        SELECT object_id, revision, deleted_at
        FROM vault_objects
        WHERE user_id = $1
        AND revision > $2
        AND deleted_at IS NOT NULL
        ORDER BY revision ASC
        LIMIT $3
        "#,
    )
    .bind(user_id)
    .bind(cursor)
    // Guard against non-positive limits reaching the query.
    .bind(limit.max(1))
    .fetch_all(pool)
    .await
    .context("failed to list tombstones")?;

    Ok(rows
        .into_iter()
        .map(|(object_id, revision, deleted_at)| VaultTombstone {
            object_id,
            revision,
            deleted_at,
        })
        .collect())
}
|
||||||
|
|
||||||
|
/// Highest revision among the user's vault objects; 0 when the user has no
/// rows (SQL `MAX` over an empty set is NULL, hence the `Option<i64>` scalar).
pub async fn max_server_revision(pool: &PgPool, user_id: Uuid) -> Result<i64> {
    let revision = sqlx::query_scalar::<_, Option<i64>>(
        r#"
        SELECT MAX(revision)
        FROM vault_objects
        WHERE user_id = $1
        "#,
    )
    .bind(user_id)
    .fetch_one(pool)
    .await
    .context("failed to load max server revision")?;

    Ok(revision.unwrap_or(0))
}
|
||||||
13
crates/client-integrations/Cargo.toml
Normal file
13
crates/client-integrations/Cargo.toml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-client-integrations"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "secrets_client_integrations"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
162
crates/client-integrations/src/lib.rs
Normal file
162
crates/client-integrations/src/lib.rs
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
use anyhow::{Context, Result};
|
||||||
|
use serde_json::{Map, Value};
|
||||||
|
use std::{
|
||||||
|
fs,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// An MCP-capable client (editor/agent) whose JSON config file this crate
/// can locate and edit.
pub trait ClientAdapter {
    /// Stable identifier for the client (e.g. "cursor").
    fn client_name(&self) -> &'static str;
    /// Path of the client's MCP JSON config file.
    fn config_path(&self) -> PathBuf;
}
|
||||||
|
|
||||||
|
/// Adapter for the Cursor editor (`~/.cursor/mcp.json`).
pub struct CursorAdapter;

impl ClientAdapter for CursorAdapter {
    fn client_name(&self) -> &'static str {
        "cursor"
    }

    fn config_path(&self) -> PathBuf {
        default_home().join(".cursor").join("mcp.json")
    }
}
|
||||||
|
|
||||||
|
/// Adapter for Claude Code (`~/.claude/mcp.json`).
pub struct ClaudeCodeAdapter;

impl ClientAdapter for ClaudeCodeAdapter {
    fn client_name(&self) -> &'static str {
        "claude-code"
    }

    fn config_path(&self) -> PathBuf {
        default_home().join(".claude").join("mcp.json")
    }
}
|
||||||
|
|
||||||
|
/// Best-effort home directory: `$HOME` (Unix), then `%USERPROFILE%`
/// (Windows), finally the current directory as a last resort.
fn default_home() -> PathBuf {
    let home = std::env::var_os("HOME").or_else(|| std::env::var_os("USERPROFILE"));
    match home {
        Some(dir) => PathBuf::from(dir),
        None => PathBuf::from("."),
    }
}
|
||||||
|
|
||||||
|
pub fn has_managed_server(adapter: &dyn ClientAdapter, server_name: &str) -> Result<bool> {
|
||||||
|
let path = adapter.config_path();
|
||||||
|
let root = read_config_or_default(&path)?;
|
||||||
|
Ok(root
|
||||||
|
.get("mcpServers")
|
||||||
|
.and_then(Value::as_object)
|
||||||
|
.is_some_and(|servers| servers.contains_key(server_name)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn upsert_managed_server(
|
||||||
|
adapter: &dyn ClientAdapter,
|
||||||
|
server_name: &str,
|
||||||
|
server_config: Value,
|
||||||
|
) -> Result<()> {
|
||||||
|
let path = adapter.config_path();
|
||||||
|
let mut root = read_config_or_default(&path)?;
|
||||||
|
let root_object = ensure_object(&mut root);
|
||||||
|
let mcp_servers = root_object
|
||||||
|
.entry("mcpServers".to_string())
|
||||||
|
.or_insert_with(|| Value::Object(Map::new()));
|
||||||
|
let servers_object = ensure_object(mcp_servers);
|
||||||
|
servers_object.insert(server_name.to_string(), server_config);
|
||||||
|
write_config_atomically(&path, &root)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_config_or_default(path: &Path) -> Result<Value> {
|
||||||
|
if !path.exists() {
|
||||||
|
return Ok(Value::Object(Map::new()));
|
||||||
|
}
|
||||||
|
let raw =
|
||||||
|
fs::read_to_string(path).with_context(|| format!("failed to read {}", path.display()))?;
|
||||||
|
serde_json::from_str(&raw).with_context(|| format!("failed to parse {}", path.display()))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Write `value` as pretty-printed JSON to `path` via a sibling temp file
/// plus rename, creating parent directories as needed.
///
/// NOTE(review): the temp file is not fsynced before the rename, so this
/// protects against partial writes but not power loss — confirm that is
/// acceptable for this config.
fn write_config_atomically(path: &Path, value: &Value) -> Result<()> {
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display()))?;
    }
    // e.g. "mcp.json" -> "mcp.json.tmp"; same directory, so the rename below
    // stays on one filesystem.
    let tmp_path = path.with_extension("json.tmp");
    let body = serde_json::to_string_pretty(value).context("failed to serialize mcp config")?;
    fs::write(&tmp_path, body)
        .with_context(|| format!("failed to write {}", tmp_path.display()))?;
    fs::rename(&tmp_path, path).with_context(|| format!("failed to replace {}", path.display()))?;
    Ok(())
}
|
||||||
|
|
||||||
|
fn ensure_object(value: &mut Value) -> &mut Map<String, Value> {
|
||||||
|
if !value.is_object() {
|
||||||
|
*value = Value::Object(Map::new());
|
||||||
|
}
|
||||||
|
value.as_object_mut().expect("object just ensured")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::{SystemTime, UNIX_EPOCH};

    // Adapter pointing at a caller-chosen path so tests can use a temp dir.
    struct TestAdapter {
        path: PathBuf,
    }

    impl ClientAdapter for TestAdapter {
        fn client_name(&self) -> &'static str {
            "test"
        }

        fn config_path(&self) -> PathBuf {
            self.path.clone()
        }
    }

    #[test]
    fn upsert_preserves_other_servers() {
        // Unique temp dir per run so repeated/parallel runs don't collide.
        let unique = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock")
            .as_nanos();
        let base = std::env::temp_dir().join(format!("secrets-client-integrations-{unique}"));
        let adapter = TestAdapter {
            path: base.join("mcp.json"),
        };
        fs::create_dir_all(adapter.path.parent().expect("parent")).expect("mkdir");
        // Seed with an unrelated server plus a stale "secrets" entry.
        fs::write(
            &adapter.path,
            r#"{"mcpServers":{"postgres":{"command":"npx"},"secrets":{"url":"http://old"}}}"#,
        )
        .expect("seed config");

        upsert_managed_server(
            &adapter,
            "secrets",
            serde_json::json!({
                "url": "http://127.0.0.1:9515/mcp"
            }),
        )
        .expect("upsert config");

        let root: Value =
            serde_json::from_str(&fs::read_to_string(&adapter.path).expect("read back"))
                .expect("parse back");
        let servers = root
            .get("mcpServers")
            .and_then(Value::as_object)
            .expect("mcpServers object");
        // The unrelated server survives; the managed one is replaced.
        assert!(servers.contains_key("postgres"));
        assert_eq!(
            servers
                .get("secrets")
                .and_then(Value::as_object)
                .and_then(|value| value.get("url"))
                .and_then(Value::as_str),
            Some("http://127.0.0.1:9515/mcp")
        );

        // Best-effort cleanup; failure is ignored.
        let _ = fs::remove_dir_all(base);
    }
}
|
||||||
14
crates/crypto/Cargo.toml
Normal file
14
crates/crypto/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-crypto"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "secrets_crypto"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
aes-gcm.workspace = true
|
||||||
|
anyhow.workspace = true
|
||||||
|
hex.workspace = true
|
||||||
|
rand.workspace = true
|
||||||
47
crates/crypto/src/lib.rs
Normal file
47
crates/crypto/src/lib.rs
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
use aes_gcm::aead::{Aead, KeyInit};
|
||||||
|
use aes_gcm::{Aes256Gcm, Nonce};
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use rand::Rng;
|
||||||
|
|
||||||
|
pub const KEY_CHECK_PLAINTEXT: &[u8] = b"secrets-v3-key-check";
|
||||||
|
|
||||||
|
pub fn decode_hex(input: &str) -> Result<Vec<u8>> {
|
||||||
|
hex::decode(input.trim()).context("invalid hex")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Encode raw bytes as a lowercase hex string.
pub fn encode_hex(input: &[u8]) -> String {
    hex::encode(input)
}
|
||||||
|
|
||||||
|
pub fn extract_key_32(input: &str) -> Result<[u8; 32]> {
|
||||||
|
let bytes = decode_hex(input)?;
|
||||||
|
let key: [u8; 32] = bytes
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| anyhow::anyhow!("expected 32-byte key"))?;
|
||||||
|
Ok(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// AES-256-GCM encrypt with a fresh random 96-bit nonce. Output layout is
/// `nonce (12 bytes) || ciphertext+tag`, the format `decrypt` expects.
pub fn encrypt(key: &[u8; 32], plaintext: &[u8]) -> Result<Vec<u8>> {
    let cipher = Aes256Gcm::new_from_slice(key).context("invalid AES-256 key")?;
    // A unique nonce per call is required for GCM security.
    let mut nonce_bytes = [0_u8; 12];
    rand::rng().fill_bytes(&mut nonce_bytes);
    let nonce = Nonce::from_slice(&nonce_bytes);
    // Prefix the nonce so decrypt can recover it from the blob.
    let mut out = nonce_bytes.to_vec();
    out.extend(
        cipher
            .encrypt(nonce, plaintext)
            // Deliberately opaque error; aes-gcm's error carries no detail.
            .map_err(|_| anyhow::anyhow!("encryption failed"))?,
    );
    Ok(out)
}
|
||||||
|
|
||||||
|
/// Reverse of `encrypt`: expects `nonce (12 bytes) || ciphertext+tag` and
/// fails on truncated input or GCM authentication failure.
pub fn decrypt(key: &[u8; 32], ciphertext: &[u8]) -> Result<Vec<u8>> {
    // Must at least contain the 12-byte nonce prefix.
    if ciphertext.len() < 12 {
        anyhow::bail!("ciphertext too short");
    }
    let cipher = Aes256Gcm::new_from_slice(key).context("invalid AES-256 key")?;
    let (nonce, body) = ciphertext.split_at(12);
    cipher
        .decrypt(Nonce::from_slice(nonce), body)
        // Deliberately opaque: don't reveal why authentication failed.
        .map_err(|_| anyhow::anyhow!("decryption failed"))
}
|
||||||
26
crates/desktop-daemon/Cargo.toml
Normal file
26
crates/desktop-daemon/Cargo.toml
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-desktop-daemon"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "secrets_desktop_daemon"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "secrets-desktop-daemon"
|
||||||
|
path = "src/main.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
axum.workspace = true
|
||||||
|
dotenvy.workspace = true
|
||||||
|
reqwest = { workspace = true, features = ["stream"] }
|
||||||
|
rmcp.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
|
tokio.workspace = true
|
||||||
|
tracing.workspace = true
|
||||||
|
tracing-subscriber.workspace = true
|
||||||
|
|
||||||
|
secrets-device-auth = { path = "../device-auth" }
|
||||||
23
crates/desktop-daemon/src/config.rs
Normal file
23
crates/desktop-daemon/src/config.rs
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
|
||||||
|
/// Runtime configuration for the desktop daemon.
#[derive(Debug, Clone)]
pub struct DaemonConfig {
    // Socket address string the daemon binds to, e.g. "127.0.0.1:9515".
    pub bind: String,
}
|
||||||
|
|
||||||
|
pub fn load_config() -> Result<DaemonConfig> {
|
||||||
|
let bind =
|
||||||
|
std::env::var("SECRETS_DAEMON_BIND").unwrap_or_else(|_| "127.0.0.1:9515".to_string());
|
||||||
|
if bind.trim().is_empty() {
|
||||||
|
anyhow::bail!("SECRETS_DAEMON_BIND must not be empty");
|
||||||
|
}
|
||||||
|
Ok(DaemonConfig { bind })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load_persisted_device_token() -> Result<Option<String>> {
|
||||||
|
let token = std::env::var("SECRETS_DEVICE_LOGIN_TOKEN")
|
||||||
|
.ok()
|
||||||
|
.map(|value| value.trim().to_string())
|
||||||
|
.filter(|value| !value.is_empty());
|
||||||
|
Ok(token)
|
||||||
|
}
|
||||||
139
crates/desktop-daemon/src/exec.rs
Normal file
139
crates/desktop-daemon/src/exec.rs
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use anyhow::{Context, Result, anyhow};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::{Map, Value};
|
||||||
|
use tokio::process::Command;
|
||||||
|
|
||||||
|
use crate::target::{ExecutionTarget, ResolvedTarget};
|
||||||
|
|
||||||
|
// Cap on captured stdout/stderr, counted in characters (64 Ki).
const MAX_OUTPUT_CHARS: usize = 64 * 1024;

/// Deserialized input for a target-exec request.
#[derive(Clone, Debug, Deserialize)]
pub struct TargetExecInput {
    // Reference to the execution target to use; None presumably selects a
    // default — resolution happens in the `target` module, confirm there.
    pub target_ref: Option<String>,
    // Shell command line, run via `/bin/sh -lc` in execute_command.
    pub command: String,
    // Timeout in seconds; execute_command clamps the effective value to
    // 1..=86400.
    pub timeout_secs: Option<u64>,
    // Optional working directory; an empty string is treated as unset.
    pub working_dir: Option<String>,
    // Extra environment variables; TARGET_* keys are rejected and null
    // values are skipped (see apply_env_overrides).
    pub env_overrides: Option<Map<String, Value>>,
}
|
||||||
|
|
||||||
|
/// Serialized outcome of one command execution.
#[derive(Clone, Debug, Serialize)]
pub struct ExecResult {
    // The target the command actually ran against.
    pub resolved_target: ResolvedTarget,
    // Env var keys resolved from the target (keys only, never values).
    pub resolved_env_keys: Vec<String>,
    // The command line exactly as submitted.
    pub command: String,
    // None when the process timed out or was terminated by a signal.
    pub exit_code: Option<i32>,
    pub stdout: String,
    pub stderr: String,
    pub timed_out: bool,
    pub duration_ms: u128,
    // True when the corresponding stream was clipped to MAX_OUTPUT_CHARS.
    pub stdout_truncated: bool,
    pub stderr_truncated: bool,
}
|
||||||
|
|
||||||
|
fn truncate_output(text: String) -> (String, bool) {
|
||||||
|
if text.chars().count() <= MAX_OUTPUT_CHARS {
|
||||||
|
return (text, false);
|
||||||
|
}
|
||||||
|
let truncated = text.chars().take(MAX_OUTPUT_CHARS).collect::<String>();
|
||||||
|
(truncated, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn stringify_env_override(value: &Value) -> Option<String> {
|
||||||
|
match value {
|
||||||
|
Value::Null => None,
|
||||||
|
Value::String(s) => Some(s.clone()),
|
||||||
|
Value::Bool(v) => Some(v.to_string()),
|
||||||
|
Value::Number(v) => Some(v.to_string()),
|
||||||
|
other => serde_json::to_string(other).ok(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn apply_env_overrides(
|
||||||
|
env: &mut BTreeMap<String, String>,
|
||||||
|
overrides: Option<&Map<String, Value>>,
|
||||||
|
) -> Result<()> {
|
||||||
|
let Some(overrides) = overrides else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
for (key, value) in overrides {
|
||||||
|
if key.is_empty() || key.contains('=') {
|
||||||
|
return Err(anyhow!("invalid env override key: {key}"));
|
||||||
|
}
|
||||||
|
if key.starts_with("TARGET_") {
|
||||||
|
return Err(anyhow!(
|
||||||
|
"env override `{key}` cannot override reserved TARGET_* variables"
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if let Some(value) = stringify_env_override(value) {
|
||||||
|
env.insert(key.clone(), value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run `input.command` under `/bin/sh -lc` with the target's environment,
/// capturing stdout/stderr (truncated to MAX_OUTPUT_CHARS) and enforcing a
/// timeout.
///
/// NOTE(review): `-l` makes this a login shell, which sources the user's
/// shell profile before the command — confirm that is intended.
pub async fn execute_command(
    input: &TargetExecInput,
    target: &ExecutionTarget,
    timeout_secs: u64,
) -> Result<ExecResult> {
    // Start from the target's env, then layer on validated caller overrides.
    let mut env = target.env.clone();
    apply_env_overrides(&mut env, input.env_overrides.as_ref())?;

    let started = std::time::Instant::now();
    let mut command = Command::new("/bin/sh");
    command
        .arg("-lc")
        .arg(&input.command)
        // Kill the child if this future is dropped (e.g. after a timeout).
        .kill_on_drop(true)
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped());

    // An empty working_dir means "inherit the daemon's cwd".
    if let Some(dir) = input.working_dir.as_ref().filter(|dir| !dir.is_empty()) {
        command.current_dir(dir);
    }
    for (key, value) in &env {
        command.env(key, value);
    }

    let child = command
        .spawn()
        .with_context(|| format!("failed to spawn command: {}", input.command))?;

    // Effective timeout clamped to 1 second .. 24 hours.
    let timed = tokio::time::timeout(
        Duration::from_secs(timeout_secs.clamp(1, 86400)),
        child.wait_with_output(),
    )
    .await;

    let (exit_code, stdout, stderr, timed_out) = match timed {
        Ok(output) => {
            let output = output.context("failed waiting for command output")?;
            (
                output.status.code(),
                String::from_utf8_lossy(&output.stdout).to_string(),
                String::from_utf8_lossy(&output.stderr).to_string(),
                false,
            )
        }
        // Timeout: no exit code; kill_on_drop reaps the child process.
        Err(_) => (None, String::new(), "command timed out".to_string(), true),
    };

    let (stdout, stdout_truncated) = truncate_output(stdout);
    let (stderr, stderr_truncated) = truncate_output(stderr);

    Ok(ExecResult {
        resolved_target: target.resolved.clone(),
        resolved_env_keys: target.resolved_env_keys(),
        command: input.command.clone(),
        exit_code,
        stdout,
        stderr,
        timed_out,
        duration_ms: started.elapsed().as_millis(),
        stdout_truncated,
        stderr_truncated,
    })
}
|
||||||
642
crates/desktop-daemon/src/lib.rs
Normal file
642
crates/desktop-daemon/src/lib.rs
Normal file
@@ -0,0 +1,642 @@
|
|||||||
|
pub mod config;
|
||||||
|
pub mod exec;
|
||||||
|
pub mod target;
|
||||||
|
pub mod vault_client;
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use anyhow::{Context, Result, anyhow};
|
||||||
|
use axum::{
|
||||||
|
Router,
|
||||||
|
body::Body,
|
||||||
|
extract::State,
|
||||||
|
http::{StatusCode, header},
|
||||||
|
response::Response,
|
||||||
|
routing::{any, get},
|
||||||
|
};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use serde_json::{Value, json};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
exec::{TargetExecInput, execute_command},
|
||||||
|
target::{TargetSnapshot, build_execution_target},
|
||||||
|
vault_client::{
|
||||||
|
EntryDetail, EntrySummary, SecretHistoryItem, SecretValueField, authorized_get,
|
||||||
|
authorized_patch, authorized_post, entry_detail_payload, fetch_entry_detail,
|
||||||
|
fetch_revealed_entry_secrets,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Shared axum state for the daemon's HTTP surface.
#[derive(Clone)]
pub struct AppState {
    // Base URL of the desktop session relay — presumably set at startup;
    // confirm against the constructor outside this view.
    session_base: String,
    // Reused reqwest client (connection pooling across requests).
    client: reqwest::Client,
}
|
||||||
|
|
||||||
|
/// Minimal JSON-RPC 2.0 request envelope for the MCP endpoint.
#[derive(Deserialize)]
struct JsonRpcRequest {
    // Defaults to Value::Null when the client omits the id.
    #[serde(default)]
    id: Value,
    method: String,
    // Method-specific parameters; defaults to Value::Null.
    #[serde(default)]
    params: Value,
}
|
||||||
|
|
||||||
|
fn json_response(status: StatusCode, value: Value) -> Response {
|
||||||
|
Response::builder()
|
||||||
|
.status(status)
|
||||||
|
.header(header::CONTENT_TYPE, "application/json; charset=utf-8")
|
||||||
|
.body(Body::from(value.to_string()))
|
||||||
|
.expect("build response")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn jsonrpc_result_response(id: Value, result: Value) -> Response {
|
||||||
|
json_response(
|
||||||
|
StatusCode::OK,
|
||||||
|
json!({
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": id,
|
||||||
|
"result": result,
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn tool_success_response(id: Value, value: Value) -> Response {
|
||||||
|
let pretty = serde_json::to_string_pretty(&value).unwrap_or_else(|_| value.to_string());
|
||||||
|
jsonrpc_result_response(
|
||||||
|
id,
|
||||||
|
json!({
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": pretty
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": false
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn tool_error_response(id: Value, message: impl Into<String>) -> Response {
|
||||||
|
jsonrpc_result_response(
|
||||||
|
id,
|
||||||
|
json!({
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": message.into()
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": true
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Answer the MCP `initialize` handshake: advertises the protocol version,
/// the tools capability, server info, and usage instructions, and issues a
/// fresh `mcp-session-id` header derived from the current wall-clock nanos.
fn initialize_response(id: Value) -> Response {
    // Nanosecond timestamp is unique enough for a per-process session id;
    // falls back to 0 if the system clock is before the Unix epoch.
    let session_id = format!(
        "desktop-daemon-{}",
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|duration| duration.as_nanos())
            .unwrap_or(0)
    );
    let payload = json!({
        "jsonrpc": "2.0",
        "id": id,
        "result": {
            "protocolVersion": "2025-06-18",
            "capabilities": {
                "tools": {}
            },
            "serverInfo": {
                "name": "secrets-desktop-daemon",
                "version": env!("CARGO_PKG_VERSION"),
                "title": "Secrets Desktop Daemon"
            },
            "instructions": "Preferred tools: secrets_entry_find, secrets_entry_get, secrets_entry_add, secrets_entry_update, secrets_entry_delete, secrets_entry_restore, secrets_secret_add, secrets_secret_update, secrets_secret_delete, secrets_secret_history, secrets_secret_rollback, and target_exec. All data is resolved from the desktop app's unlocked local vault session."
        }
    });
    Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, "application/json; charset=utf-8")
        // Clients echo this header back on subsequent requests.
        .header("mcp-session-id", session_id)
        .body(Body::from(payload.to_string()))
        .expect("build response")
}
|
||||||
|
|
||||||
|
/// Static tool catalog returned by `tools/list`.
///
/// Each element is an MCP tool descriptor: name, human description, and a
/// JSON-Schema `inputSchema` describing the arguments `call_tool` accepts
/// for that tool. Keep this list in sync with the match arms in `call_tool`.
fn tool_definitions() -> Vec<Value> {
    vec![
        // Entry search; all filters optional.
        json!({
            "name": "secrets_entry_find",
            "description": "Find entries from the user's secrets vault.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "query": { "type": ["string", "null"] },
                    "folder": { "type": ["string", "null"] },
                    "type": { "type": ["string", "null"] }
                }
            }
        }),
        // Fetch a single entry (including revealed secrets) by id.
        json!({
            "name": "secrets_entry_get",
            "description": "Get one entry from the unlocked local vault by entry id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" }
                },
                "required": ["id"]
            }
        }),
        // Create an entry, optionally seeding it with secrets.
        json!({
            "name": "secrets_entry_add",
            "description": "Create a new entry and optionally include initial secrets.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "folder": { "type": "string" },
                    "name": { "type": "string" },
                    "type": { "type": ["string", "null"] },
                    "metadata": { "type": ["object", "null"] },
                    "secrets": {
                        "type": ["array", "null"],
                        "items": {
                            "type": "object",
                            "properties": {
                                "name": { "type": "string" },
                                "secret_type": { "type": ["string", "null"] },
                                "value": { "type": "string" }
                            },
                            "required": ["name", "value"]
                        }
                    }
                },
                "required": ["folder", "name"]
            }
        }),
        // Partial entry update; null fields are left unchanged server-side.
        json!({
            "name": "secrets_entry_update",
            "description": "Update an existing entry by id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" },
                    "folder": { "type": ["string", "null"] },
                    "name": { "type": ["string", "null"] },
                    "type": { "type": ["string", "null"] },
                    "metadata": { "type": ["object", "null"] }
                },
                "required": ["id"]
            }
        }),
        // Soft delete (recycle bin), reversible via secrets_entry_restore.
        json!({
            "name": "secrets_entry_delete",
            "description": "Move an entry into recycle bin by id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" }
                },
                "required": ["id"]
            }
        }),
        json!({
            "name": "secrets_entry_restore",
            "description": "Restore a deleted entry from recycle bin by id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" }
                },
                "required": ["id"]
            }
        }),
        // Secret CRUD under an existing entry.
        json!({
            "name": "secrets_secret_add",
            "description": "Create one secret under an existing entry.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "entry_id": { "type": "string" },
                    "name": { "type": "string" },
                    "secret_type": { "type": ["string", "null"] },
                    "value": { "type": "string" }
                },
                "required": ["entry_id", "name", "value"]
            }
        }),
        json!({
            "name": "secrets_secret_update",
            "description": "Update one secret by id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" },
                    "name": { "type": ["string", "null"] },
                    "secret_type": { "type": ["string", "null"] },
                    "value": { "type": ["string", "null"] }
                },
                "required": ["id"]
            }
        }),
        json!({
            "name": "secrets_secret_delete",
            "description": "Delete one secret by id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" }
                },
                "required": ["id"]
            }
        }),
        // Versioning: list snapshots, then roll back by version or history id.
        json!({
            "name": "secrets_secret_history",
            "description": "List history snapshots for one secret by id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" }
                },
                "required": ["id"]
            }
        }),
        json!({
            "name": "secrets_secret_rollback",
            "description": "Rollback one secret by id to a previous version or history id.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "id": { "type": "string" },
                    "version": { "type": ["integer", "null"] },
                    "history_id": { "type": ["integer", "null"] }
                },
                "required": ["id"]
            }
        }),
        // Run a local command with TARGET_* env resolved from one entry.
        // NOTE(review): `target_ref` is listed as required yet typed
        // nullable; `call_tool` rejects null — confirm the schema is meant
        // to allow null here.
        json!({
            "name": "target_exec",
            "description": "Execute a local shell command with resolved TARGET_* environment variables from one entry.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "target_ref": { "type": ["string", "null"] },
                    "command": { "type": "string" },
                    "timeout_secs": { "type": ["integer", "null"] },
                    "working_dir": { "type": ["string", "null"] },
                    "env_overrides": { "type": ["object", "null"] }
                },
                "required": ["target_ref", "command"]
            }
        }),
    ]
}
|
||||||
|
|
||||||
|
fn entry_detail_to_snapshot(detail: &EntryDetail) -> TargetSnapshot {
|
||||||
|
let metadata = detail
|
||||||
|
.metadata
|
||||||
|
.iter()
|
||||||
|
.map(|field| (field.label.clone(), Value::String(field.value.clone())))
|
||||||
|
.collect();
|
||||||
|
let secret_fields = detail
|
||||||
|
.secrets
|
||||||
|
.iter()
|
||||||
|
.map(|secret| crate::target::SecretFieldRef {
|
||||||
|
name: secret.name.clone(),
|
||||||
|
secret_type: Some(secret.secret_type.clone()),
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
TargetSnapshot {
|
||||||
|
id: detail.id.clone(),
|
||||||
|
folder: detail.folder.clone(),
|
||||||
|
name: detail.name.clone(),
|
||||||
|
entry_type: Some(detail.cipher_type.clone()),
|
||||||
|
metadata,
|
||||||
|
secret_fields,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn revealed_secrets_to_env(secrets: &[SecretValueField]) -> HashMap<String, Value> {
|
||||||
|
secrets
|
||||||
|
.iter()
|
||||||
|
.map(|secret| (secret.name.clone(), Value::String(secret.value.clone())))
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Dispatch one MCP tool invocation to the desktop vault session API.
///
/// `name` selects the tool (must match an entry in `tool_definitions`);
/// `arguments` is the raw JSON argument object. Returns the tool's JSON
/// result, or an error that the caller renders as an `isError` tool
/// response. Each arm validates its required string arguments before
/// issuing the HTTP call.
async fn call_tool(state: &AppState, name: &str, arguments: Value) -> Result<Value> {
    match name {
        // Search entries; optional folder/query/type filters are forwarded
        // as query parameters. Recycle-bin items are always excluded here.
        "secrets_entry_find" => {
            let folder = arguments
                .get("folder")
                .and_then(Value::as_str)
                .map(ToOwned::to_owned);
            let query = arguments
                .get("query")
                .and_then(Value::as_str)
                .map(ToOwned::to_owned);
            let entry_type = arguments
                .get("type")
                .and_then(Value::as_str)
                .map(ToOwned::to_owned);
            let mut params = Vec::new();
            if let Some(folder) = folder {
                params.push(("folder", folder));
            }
            if let Some(query) = query {
                params.push(("query", query));
            }
            if let Some(entry_type) = entry_type {
                params.push(("entry_type", entry_type));
            }
            params.push(("deleted_only", "false".to_string()));
            let entries = authorized_get(state, "/vault/entries", &params)
                .await?
                .json::<Vec<EntrySummary>>()
                .await
                .context("failed to decode entries list")?;
            // Re-shape summaries so the tool output uses `name`/`type`
            // instead of the session API's `title`/`subtitle` fields.
            Ok(json!({
                "entries": entries.into_iter().map(|entry| {
                    json!({
                        "id": entry.id,
                        "folder": entry.folder,
                        "name": entry.name,
                        "type": entry.cipher_type
                    })
                }).collect::<Vec<_>>()
            }))
        }
        // Fetch one entry plus its revealed (plaintext) secret values.
        "secrets_entry_get" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let detail = fetch_entry_detail(state, id).await?;
            let secrets = fetch_revealed_entry_secrets(state, id).await?;
            Ok(entry_detail_payload(&detail, Some(&secrets)))
        }
        // Create a new entry; `type` defaults to "entry", metadata to {}.
        "secrets_entry_add" => {
            let folder = arguments
                .get("folder")
                .and_then(Value::as_str)
                .context("folder is required")?;
            let name = arguments
                .get("name")
                .and_then(Value::as_str)
                .context("name is required")?;
            let entry_type = arguments
                .get("type")
                .and_then(Value::as_str)
                .unwrap_or("entry");
            let metadata = arguments
                .get("metadata")
                .cloned()
                .unwrap_or_else(|| json!({}));
            let res = authorized_post(
                state,
                "/vault/entries",
                &json!({
                    "folder": folder,
                    "name": name,
                    "entry_type": entry_type,
                    "metadata": metadata,
                    "secrets": arguments.get("secrets").cloned().unwrap_or(Value::Null)
                }),
            )
            .await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode create result")?)
        }
        // Patch entry fields; omitted arguments are sent as null. Note the
        // tool's `name` argument maps to the session API's `title` field.
        "secrets_entry_update" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let body = json!({
                "folder": arguments.get("folder").cloned().unwrap_or(Value::Null),
                "entry_type": arguments.get("type").cloned().unwrap_or(Value::Null),
                "title": arguments.get("name").cloned().unwrap_or(Value::Null),
                "metadata": arguments.get("metadata").cloned().unwrap_or(Value::Null)
            });
            let res = authorized_patch(state, &format!("/vault/entries/{id}"), &body).await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode update result")?)
        }
        // Soft-delete: moves the entry to the recycle bin.
        "secrets_entry_delete" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let res =
                authorized_post(state, &format!("/vault/entries/{id}/delete"), &json!({})).await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode delete result")?)
        }
        // Undo a soft-delete.
        "secrets_entry_restore" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let res =
                authorized_post(state, &format!("/vault/entries/{id}/restore"), &json!({})).await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode restore result")?)
        }
        // Add one secret under an existing entry.
        "secrets_secret_add" => {
            let entry_id = arguments
                .get("entry_id")
                .and_then(Value::as_str)
                .context("entry_id is required")?;
            let name = arguments
                .get("name")
                .and_then(Value::as_str)
                .context("name is required")?;
            let value = arguments
                .get("value")
                .and_then(Value::as_str)
                .context("value is required")?;
            let res = authorized_post(
                state,
                &format!("/vault/entries/{entry_id}/secrets"),
                &json!({
                    "name": name,
                    "secret_type": arguments.get("secret_type").cloned().unwrap_or(Value::Null),
                    "value": value
                }),
            )
            .await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode secret create result")?)
        }
        // Patch a secret; omitted fields are sent as null.
        "secrets_secret_update" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let res = authorized_patch(
                state,
                &format!("/vault/secrets/{id}"),
                &json!({
                    "name": arguments.get("name").cloned().unwrap_or(Value::Null),
                    "secret_type": arguments.get("secret_type").cloned().unwrap_or(Value::Null),
                    "value": arguments.get("value").cloned().unwrap_or(Value::Null)
                }),
            )
            .await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode secret update result")?)
        }
        "secrets_secret_delete" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let res =
                authorized_post(state, &format!("/vault/secrets/{id}/delete"), &json!({})).await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode secret delete result")?)
        }
        // List version history for one secret.
        "secrets_secret_history" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let history = authorized_get(state, &format!("/vault/secrets/{id}/history"), &[])
                .await?
                .json::<Vec<SecretHistoryItem>>()
                .await
                .context("failed to decode secret history")?;
            Ok(json!({
                "history": history.into_iter().map(|item| {
                    json!({
                        "history_id": item.history_id,
                        "secret_id": item.secret_id,
                        "name": item.name,
                        "type": item.secret_type,
                        "masked_value": item.masked_value,
                        "value": item.value,
                        "version": item.version,
                        "action": item.action,
                        "created_at": item.created_at
                    })
                }).collect::<Vec<_>>()
            }))
        }
        // Roll a secret back; either `version` or `history_id` selects the
        // snapshot (both forwarded, nulls for the one not given).
        "secrets_secret_rollback" => {
            let id = arguments
                .get("id")
                .and_then(Value::as_str)
                .context("id is required")?;
            let res = authorized_post(
                state,
                &format!("/vault/secrets/{id}/rollback"),
                &json!({
                    "version": arguments.get("version").cloned().unwrap_or(Value::Null),
                    "history_id": arguments.get("history_id").cloned().unwrap_or(Value::Null)
                }),
            )
            .await?;
            Ok(res
                .json::<Value>()
                .await
                .context("failed to decode secret rollback result")?)
        }
        // Resolve one entry into TARGET_* env vars and run a local command
        // with them. Default timeout is 30 seconds when not specified.
        "target_exec" => {
            let input: TargetExecInput =
                serde_json::from_value(arguments).context("invalid target_exec arguments")?;
            let target_ref = input
                .target_ref
                .as_ref()
                .context("target_ref is required")?;
            let detail = fetch_entry_detail(state, target_ref).await?;
            let secrets = fetch_revealed_entry_secrets(state, target_ref).await?;
            let execution_target = build_execution_target(
                &entry_detail_to_snapshot(&detail),
                &revealed_secrets_to_env(&secrets),
            )?;
            let result =
                execute_command(&input, &execution_target, input.timeout_secs.unwrap_or(30))
                    .await?;
            Ok(serde_json::to_value(result).context("failed to encode exec result")?)
        }
        other => Err(anyhow!("unsupported tool: {other}")),
    }
}
|
||||||
|
|
||||||
|
/// Axum handler for the `/mcp` route: parses a JSON-RPC 2.0 request and
/// routes `initialize`, `tools/list`, and `tools/call` to their handlers.
///
/// Malformed JSON yields HTTP 400 with a JSON-RPC -32600 error; unknown
/// methods yield HTTP 200 with a JSON-RPC -32601 error (per JSON-RPC,
/// protocol errors still travel in a 200 body once the envelope parses).
pub async fn handle_mcp(State(state): State<AppState>, body: String) -> Response {
    let request: JsonRpcRequest = match serde_json::from_str(&body) {
        Ok(request) => request,
        Err(err) => {
            return json_response(
                StatusCode::BAD_REQUEST,
                json!({
                    "jsonrpc": "2.0",
                    "id": null,
                    "error": {
                        "code": -32600,
                        "message": format!("invalid request: {err}")
                    }
                }),
            );
        }
    };

    match request.method.as_str() {
        "initialize" => initialize_response(request.id),
        "tools/list" => jsonrpc_result_response(request.id, json!({ "tools": tool_definitions() })),
        "tools/call" => {
            // Missing tool name falls through to call_tool's
            // "unsupported tool" error as an empty string.
            let name = request
                .params
                .get("name")
                .and_then(Value::as_str)
                .unwrap_or_default();
            let arguments = request
                .params
                .get("arguments")
                .cloned()
                .unwrap_or_else(|| json!({}));
            // Tool failures are reported as isError tool results, not as
            // JSON-RPC protocol errors.
            match call_tool(&state, name, arguments).await {
                Ok(value) => tool_success_response(request.id, value),
                Err(err) => tool_error_response(request.id, err.to_string()),
            }
        }
        other => json_response(
            StatusCode::OK,
            json!({
                "jsonrpc": "2.0",
                "id": request.id,
                "error": {
                    "code": -32601,
                    "message": format!("method `{other}` not supported by secrets-desktop-daemon")
                }
            }),
        ),
    }
}
|
||||||
|
|
||||||
|
pub async fn build_router() -> Result<Router> {
|
||||||
|
let session_base = std::env::var("SECRETS_DESKTOP_SESSION_URL")
|
||||||
|
.unwrap_or_else(|_| "http://127.0.0.1:9520".to_string());
|
||||||
|
let state = AppState {
|
||||||
|
session_base,
|
||||||
|
client: reqwest::Client::new(),
|
||||||
|
};
|
||||||
|
Ok(Router::new()
|
||||||
|
.route("/healthz", get(|| async { "ok" }))
|
||||||
|
.route("/mcp", any(handle_mcp))
|
||||||
|
.with_state(state))
|
||||||
|
}
|
||||||
26
crates/desktop-daemon/src/main.rs
Normal file
26
crates/desktop-daemon/src/main.rs
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
use anyhow::{Context, Result};
|
||||||
|
use tracing_subscriber::EnvFilter;
|
||||||
|
|
||||||
|
/// Daemon entry point: loads configuration, initializes tracing, and
/// serves the MCP router over HTTP until the process is stopped.
#[tokio::main]
async fn main() -> Result<()> {
    // Best-effort .env load; a missing file is not an error.
    let _ = dotenvy::dotenv();

    tracing_subscriber::fmt()
        .with_env_filter(
            // RUST_LOG (the default env source) wins when set; otherwise
            // default to info-level logging for this crate only.
            EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| "secrets_desktop_daemon=info".into()),
        )
        .init();

    let config = secrets_desktop_daemon::config::load_config()?;
    let app = secrets_desktop_daemon::build_router().await?;
    let listener = tokio::net::TcpListener::bind(&config.bind)
        .await
        .with_context(|| format!("failed to bind {}", config.bind))?;

    tracing::info!(bind = %config.bind, "secrets-desktop-daemon listening");
    axum::serve(listener, app)
        .await
        .context("daemon server error")?;
    Ok(())
}
|
||||||
332
crates/desktop-daemon/src/target.rs
Normal file
332
crates/desktop-daemon/src/target.rs
Normal file
@@ -0,0 +1,332 @@
|
|||||||
|
use std::collections::{BTreeMap, HashMap};
|
||||||
|
|
||||||
|
use anyhow::{Result, anyhow};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::{Map, Value};
|
||||||
|
|
||||||
|
/// Name and declared type of one secret field on an entry; the secret's
/// value is carried separately.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SecretFieldRef {
    pub name: String,
    // Serialized as `type`; `None` when the secret has no declared type.
    #[serde(rename = "type")]
    pub secret_type: Option<String>,
}
|
||||||
|
|
||||||
|
/// A decrypted view of one vault entry used as the source for TARGET_*
/// environment variables in `build_execution_target`.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct TargetSnapshot {
    pub id: String,
    pub folder: String,
    pub name: String,
    // Serialized as `type`; `None` when the entry has no declared type.
    #[serde(rename = "type")]
    pub entry_type: Option<String>,
    // Arbitrary key/value metadata attached to the entry; missing in the
    // wire format defaults to an empty map.
    #[serde(default)]
    pub metadata: Map<String, Value>,
    // Names/types of the entry's secrets (values are supplied separately).
    #[serde(default)]
    pub secret_fields: Vec<SecretFieldRef>,
}
|
||||||
|
|
||||||
|
/// Identity of the entry an execution ran against, echoed back in exec
/// results (no secret material).
#[derive(Clone, Debug, Serialize)]
pub struct ResolvedTarget {
    pub id: String,
    pub folder: String,
    pub name: String,
    // Serialized as `type`.
    #[serde(rename = "type")]
    pub entry_type: Option<String>,
}
|
||||||
|
|
||||||
|
/// Fully resolved execution context: the target's identity plus the
/// TARGET_* environment variables to inject into the spawned command.
/// Deliberately not `Serialize` — `env` contains plaintext secrets.
#[derive(Clone, Debug)]
pub struct ExecutionTarget {
    pub resolved: ResolvedTarget,
    // BTreeMap keeps env keys in deterministic sorted order.
    pub env: BTreeMap<String, String>,
}
|
||||||
|
|
||||||
|
impl ExecutionTarget {
|
||||||
|
pub fn resolved_env_keys(&self) -> Vec<String> {
|
||||||
|
self.env.keys().cloned().collect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn stringify_value(value: &Value) -> Option<String> {
|
||||||
|
match value {
|
||||||
|
Value::Null => None,
|
||||||
|
Value::String(s) => Some(s.clone()),
|
||||||
|
Value::Bool(v) => Some(v.to_string()),
|
||||||
|
Value::Number(v) => Some(v.to_string()),
|
||||||
|
other => serde_json::to_string(other).ok(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Normalize an arbitrary metadata/secret name into an env-var-safe key:
/// ASCII alphanumerics are upper-cased, every other character becomes an
/// underscore, runs of underscores collapse to one, and leading/trailing
/// underscores are stripped. Returns an empty string when nothing usable
/// remains (callers skip empty keys).
fn sanitize_env_key(key: &str) -> String {
    let mut out = String::with_capacity(key.len());
    for ch in key.chars() {
        if ch.is_ascii_alphanumeric() {
            out.push(ch.to_ascii_uppercase());
        } else if !out.ends_with('_') {
            // Collapse separator runs in this single pass instead of the
            // previous repeated `replace("__", "_")` loop, which rescanned
            // and reallocated the whole string per pass (quadratic in the
            // worst case).
            out.push('_');
        }
    }
    // At most one leading underscore can survive the pass above; trim it
    // along with any trailing one.
    out.trim_matches('_').to_string()
}
|
||||||
|
|
||||||
|
/// Insert `key` into `env` only when it is not already present and
/// `value` is a non-empty string; existing entries always win.
fn set_if_missing(env: &mut BTreeMap<String, String>, key: &str, value: Option<String>) {
    let Some(value) = value else { return };
    if value.is_empty() {
        return;
    }
    if !env.contains_key(key) {
        env.insert(key.to_string(), value);
    }
}
|
||||||
|
|
||||||
|
fn metadata_alias(metadata: &Map<String, Value>, keys: &[&str]) -> Option<String> {
|
||||||
|
keys.iter()
|
||||||
|
.find_map(|key| metadata.get(*key))
|
||||||
|
.and_then(stringify_value)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn secret_alias(
|
||||||
|
secrets: &HashMap<String, Value>,
|
||||||
|
secret_types: &HashMap<&str, Option<&str>>,
|
||||||
|
name_match: impl Fn(&str) -> bool,
|
||||||
|
type_match: impl Fn(Option<&str>) -> bool,
|
||||||
|
) -> Option<String> {
|
||||||
|
secrets.iter().find_map(|(name, value)| {
|
||||||
|
let normalized = sanitize_env_key(name);
|
||||||
|
let ty = secret_types.get(name.as_str()).copied().flatten();
|
||||||
|
if name_match(&normalized) || type_match(ty) {
|
||||||
|
stringify_value(value)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve a snapshot plus its revealed secret values into the TARGET_*
/// environment for command execution.
///
/// Populates, in order: identity vars (`TARGET_ENTRY_ID` / `TARGET_NAME` /
/// `TARGET_FOLDER` / `TARGET_TYPE`), `TARGET_META_*` from metadata,
/// `TARGET_SECRET_*` from secret values, and finally convenience aliases
/// (`TARGET_HOST`, `TARGET_PORT`, `TARGET_USER`, `TARGET_BASE_URL`,
/// `TARGET_API_KEY`, `TARGET_TOKEN`, `TARGET_SSH_KEY`) that are only set
/// when not already present. Returns an error when the snapshot id is
/// blank.
pub fn build_execution_target(
    snapshot: &TargetSnapshot,
    secrets: &HashMap<String, Value>,
) -> Result<ExecutionTarget> {
    if snapshot.id.trim().is_empty() {
        return Err(anyhow!("target snapshot missing id"));
    }

    let mut env = BTreeMap::new();
    env.insert("TARGET_ENTRY_ID".to_string(), snapshot.id.clone());
    env.insert("TARGET_NAME".to_string(), snapshot.name.clone());
    env.insert("TARGET_FOLDER".to_string(), snapshot.folder.clone());
    // TARGET_TYPE is omitted (not set empty) when the entry has no type.
    if let Some(entry_type) = snapshot.entry_type.as_ref().filter(|v| !v.is_empty()) {
        env.insert("TARGET_TYPE".to_string(), entry_type.clone());
    }

    // Export each metadata field as TARGET_META_<SANITIZED_KEY>; null
    // values and keys that sanitize to nothing are skipped.
    for (key, value) in &snapshot.metadata {
        if let Some(value) = stringify_value(value) {
            let name = sanitize_env_key(key);
            if !name.is_empty() {
                env.insert(format!("TARGET_META_{name}"), value);
            }
        }
    }

    // Map each secret name to its declared type for alias matching below.
    let secret_type_map: HashMap<&str, Option<&str>> = snapshot
        .secret_fields
        .iter()
        .map(|field| (field.name.as_str(), field.secret_type.as_deref()))
        .collect();

    // Export each revealed secret as TARGET_SECRET_<SANITIZED_KEY>.
    for (key, value) in secrets {
        if let Some(value) = stringify_value(value) {
            let name = sanitize_env_key(key);
            if !name.is_empty() {
                env.insert(format!("TARGET_SECRET_{name}"), value);
            }
        }
    }

    // Convenience aliases: first matching metadata key wins, and an
    // already-present env var is never overwritten.
    set_if_missing(
        &mut env,
        "TARGET_HOST",
        metadata_alias(
            &snapshot.metadata,
            &["public_ip", "ipv4", "private_ip", "host", "hostname"],
        ),
    );
    set_if_missing(
        &mut env,
        "TARGET_PORT",
        metadata_alias(&snapshot.metadata, &["ssh_port", "port"]),
    );
    set_if_missing(
        &mut env,
        "TARGET_USER",
        metadata_alias(&snapshot.metadata, &["username", "ssh_user", "user"]),
    );
    set_if_missing(
        &mut env,
        "TARGET_BASE_URL",
        metadata_alias(&snapshot.metadata, &["base_url", "url", "endpoint"]),
    );
    // Secret-derived aliases match on sanitized name (and, for SSH keys,
    // on the declared secret type as well).
    set_if_missing(
        &mut env,
        "TARGET_API_KEY",
        secret_alias(
            secrets,
            &secret_type_map,
            |name| matches!(name, "API_KEY" | "APIKEY" | "ACCESS_KEY" | "ACCESS_KEY_ID"),
            |_| false,
        ),
    );
    set_if_missing(
        &mut env,
        "TARGET_TOKEN",
        secret_alias(
            secrets,
            &secret_type_map,
            |name| name.contains("TOKEN"),
            |_| false,
        ),
    );
    set_if_missing(
        &mut env,
        "TARGET_SSH_KEY",
        secret_alias(
            secrets,
            &secret_type_map,
            |name| name.contains("SSH") || name.ends_with("PEM"),
            |ty| ty.is_some_and(|v| v.eq_ignore_ascii_case("ssh-key")),
        ),
    );

    Ok(ExecutionTarget {
        resolved: ResolvedTarget {
            id: snapshot.id.clone(),
            folder: snapshot.folder.clone(),
            name: snapshot.name.clone(),
            entry_type: snapshot.entry_type.clone(),
        },
        env,
    })
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Representative snapshot: host/port/user/base_url metadata plus three
    // secret fields (two plain-text, one ssh-key typed).
    fn build_snapshot() -> TargetSnapshot {
        let mut metadata = Map::new();
        metadata.insert(
            "host".to_string(),
            Value::String("git.example.com".to_string()),
        );
        metadata.insert("port".to_string(), Value::String("22".to_string()));
        metadata.insert("username".to_string(), Value::String("deploy".to_string()));
        metadata.insert(
            "base_url".to_string(),
            Value::String("https://api.example.com".to_string()),
        );
        TargetSnapshot {
            id: "entry-1".to_string(),
            folder: "infra".to_string(),
            name: "production".to_string(),
            entry_type: Some("ssh_key".to_string()),
            metadata,
            secret_fields: vec![
                SecretFieldRef {
                    name: "api_key".to_string(),
                    secret_type: Some("text".to_string()),
                },
                SecretFieldRef {
                    name: "token".to_string(),
                    secret_type: Some("text".to_string()),
                },
                SecretFieldRef {
                    name: "ssh_key".to_string(),
                    secret_type: Some("ssh-key".to_string()),
                },
            ],
        }
    }

    // Identity vars, metadata aliases, and secret aliases should all be
    // populated for a fully specified snapshot.
    #[test]
    fn derives_standard_target_env_keys() {
        let snapshot = build_snapshot();
        let secrets = HashMap::from([
            ("api_key".to_string(), Value::String("ak-123".to_string())),
            ("token".to_string(), Value::String("tok-456".to_string())),
            (
                "ssh_key".to_string(),
                Value::String("-----BEGIN KEY-----".to_string()),
            ),
        ]);

        let target = build_execution_target(&snapshot, &secrets).expect("build execution target");

        assert_eq!(
            target.env.get("TARGET_ENTRY_ID").map(String::as_str),
            Some("entry-1")
        );
        assert_eq!(
            target.env.get("TARGET_NAME").map(String::as_str),
            Some("production")
        );
        assert_eq!(
            target.env.get("TARGET_FOLDER").map(String::as_str),
            Some("infra")
        );
        assert_eq!(
            target.env.get("TARGET_TYPE").map(String::as_str),
            Some("ssh_key")
        );
        assert_eq!(
            target.env.get("TARGET_HOST").map(String::as_str),
            Some("git.example.com")
        );
        assert_eq!(
            target.env.get("TARGET_PORT").map(String::as_str),
            Some("22")
        );
        assert_eq!(
            target.env.get("TARGET_USER").map(String::as_str),
            Some("deploy")
        );
        assert_eq!(
            target.env.get("TARGET_BASE_URL").map(String::as_str),
            Some("https://api.example.com")
        );
        assert_eq!(
            target.env.get("TARGET_API_KEY").map(String::as_str),
            Some("ak-123")
        );
        assert_eq!(
            target.env.get("TARGET_TOKEN").map(String::as_str),
            Some("tok-456")
        );
        assert_eq!(
            target.env.get("TARGET_SSH_KEY").map(String::as_str),
            Some("-----BEGIN KEY-----")
        );
    }

    // Keys containing spaces/dashes must be sanitized into upper-case
    // underscore form for both META and SECRET exports.
    #[test]
    fn exports_sanitized_meta_and_secret_keys() {
        let mut snapshot = build_snapshot();
        snapshot.metadata.insert(
            "private-ip".to_string(),
            Value::String("10.0.0.8".to_string()),
        );
        let secrets = HashMap::from([(
            "access key id".to_string(),
            Value::String("access-1".to_string()),
        )]);

        let target = build_execution_target(&snapshot, &secrets).expect("build execution target");

        assert_eq!(
            target.env.get("TARGET_META_PRIVATE_IP").map(String::as_str),
            Some("10.0.0.8")
        );
        assert_eq!(
            target
                .env
                .get("TARGET_SECRET_ACCESS_KEY_ID")
                .map(String::as_str),
            Some("access-1")
        );
    }
}
|
||||||
168
crates/desktop-daemon/src/vault_client.rs
Normal file
168
crates/desktop-daemon/src/vault_client.rs
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use serde_json::{Value, json};
|
||||||
|
|
||||||
|
use crate::AppState;
|
||||||
|
|
||||||
|
/// Lightweight listing row for a vault entry as returned by the local
/// vault's list endpoint.
#[derive(Debug, Deserialize)]
pub struct EntrySummary {
    pub id: String,
    pub folder: String,
    // Wire field is "title".
    #[serde(rename = "title")]
    pub name: String,
    // Wire field is "subtitle" — NOTE(review): detail responses carry the
    // type under "entry_type" instead; confirm both endpoints really use
    // different field names for the same concept.
    #[serde(rename = "subtitle")]
    pub cipher_type: String,
}
|
||||||
|
|
||||||
|
/// Full detail record for a single vault entry, including its metadata
/// fields and (masked) secrets.
#[derive(Debug, Deserialize)]
pub struct EntryDetail {
    pub id: String,
    // Wire field is "title".
    #[serde(rename = "title")]
    pub name: String,
    pub folder: String,
    // Wire field is "entry_type".
    #[serde(rename = "entry_type")]
    pub cipher_type: String,
    /// Non-secret key/value fields attached to the entry.
    pub metadata: Vec<DetailField>,
    /// Secrets with masked values; plaintext requires a reveal call.
    pub secrets: Vec<SecretField>,
}
|
||||||
|
|
||||||
|
/// A single non-secret metadata field on an entry (label/value pair).
#[derive(Debug, Deserialize)]
pub struct DetailField {
    pub label: String,
    pub value: String,
}
|
||||||
|
|
||||||
|
/// A secret attached to an entry, with its value masked. The plaintext is
/// obtained separately via the per-secret value endpoint.
#[derive(Debug, Deserialize)]
pub struct SecretField {
    pub id: String,
    pub name: String,
    pub secret_type: String,
    /// Redacted preview of the value, safe to display.
    pub masked_value: String,
    /// Monotonic version of this secret's value.
    pub version: i64,
}
|
||||||
|
|
||||||
|
/// A revealed (plaintext) secret value as returned by
/// `/vault/secrets/{id}/value`.
#[derive(Debug, Deserialize)]
pub struct SecretValueField {
    pub id: String,
    pub name: String,
    /// Plaintext secret value — handle with care; do not log.
    pub value: String,
}
|
||||||
|
|
||||||
|
/// One historical revision of a secret's value, including both the masked
/// preview and the plaintext of that revision.
#[derive(Debug, Deserialize)]
pub struct SecretHistoryItem {
    pub history_id: i64,
    pub secret_id: String,
    pub name: String,
    pub secret_type: String,
    pub masked_value: String,
    /// Plaintext of this revision — do not log.
    pub value: String,
    pub version: i64,
    /// What produced this revision (e.g. create/update) — NOTE(review):
    /// exact action vocabulary is defined by the vault service; confirm.
    pub action: String,
    pub created_at: String,
}
|
||||||
|
|
||||||
|
pub async fn authorized_get(
|
||||||
|
state: &AppState,
|
||||||
|
path: &str,
|
||||||
|
query: &[(&str, String)],
|
||||||
|
) -> Result<reqwest::Response> {
|
||||||
|
state
|
||||||
|
.client
|
||||||
|
.get(format!("{}{}", state.session_base, path))
|
||||||
|
.query(query)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("desktop local vault unavailable: {path}"))?
|
||||||
|
.error_for_status()
|
||||||
|
.with_context(|| format!("desktop local vault requires sign-in and unlock: {path}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn authorized_patch(
|
||||||
|
state: &AppState,
|
||||||
|
path: &str,
|
||||||
|
body: &Value,
|
||||||
|
) -> Result<reqwest::Response> {
|
||||||
|
state
|
||||||
|
.client
|
||||||
|
.patch(format!("{}{}", state.session_base, path))
|
||||||
|
.json(body)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("desktop local vault unavailable: {path}"))?
|
||||||
|
.error_for_status()
|
||||||
|
.with_context(|| format!("desktop local vault requires sign-in and unlock: {path}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn authorized_post(
|
||||||
|
state: &AppState,
|
||||||
|
path: &str,
|
||||||
|
body: &Value,
|
||||||
|
) -> Result<reqwest::Response> {
|
||||||
|
state
|
||||||
|
.client
|
||||||
|
.post(format!("{}{}", state.session_base, path))
|
||||||
|
.json(body)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("desktop local vault unavailable: {path}"))?
|
||||||
|
.error_for_status()
|
||||||
|
.with_context(|| format!("desktop local vault requires sign-in and unlock: {path}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch the full (masked) detail record for one vault entry.
///
/// # Errors
/// Propagates transport/authorization errors from [`authorized_get`] and
/// fails if the response body does not decode as [`EntryDetail`].
pub async fn fetch_entry_detail(state: &AppState, entry_id: &str) -> Result<EntryDetail> {
    authorized_get(state, &format!("/vault/entries/{entry_id}"), &[])
        .await?
        .json::<EntryDetail>()
        .await
        .context("failed to decode entry detail")
}
|
||||||
|
|
||||||
|
pub async fn fetch_revealed_entry_secrets(
|
||||||
|
state: &AppState,
|
||||||
|
entry_id: &str,
|
||||||
|
) -> Result<Vec<SecretValueField>> {
|
||||||
|
let detail = fetch_entry_detail(state, entry_id).await?;
|
||||||
|
let mut secrets = Vec::new();
|
||||||
|
for secret in detail.secrets {
|
||||||
|
let item = authorized_get(state, &format!("/vault/secrets/{}/value", secret.id), &[])
|
||||||
|
.await?
|
||||||
|
.json::<SecretValueField>()
|
||||||
|
.await
|
||||||
|
.context("failed to decode revealed secret value")?;
|
||||||
|
secrets.push(item);
|
||||||
|
}
|
||||||
|
Ok(secrets)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Render an entry detail as the JSON payload exposed over the daemon API.
///
/// `revealed` optionally carries plaintext secret values; when present they
/// are matched to the entry's secrets by id and emitted under `"value"`.
/// Secrets without a revealed counterpart get `"value": null` and only the
/// masked preview is exposed.
pub fn entry_detail_payload(detail: &EntryDetail, revealed: Option<&[SecretValueField]>) -> Value {
    // Index revealed values by secret id so the join below is O(1) per
    // secret; an absent `revealed` behaves like an empty slice.
    let revealed_by_id: HashMap<&str, &SecretValueField> = revealed
        .unwrap_or(&[])
        .iter()
        .map(|secret| (secret.id.as_str(), secret))
        .collect();
    json!({
        "id": detail.id,
        "folder": detail.folder,
        "name": detail.name,
        "type": detail.cipher_type,
        "metadata": detail.metadata.iter().map(|field| {
            json!({
                "label": field.label,
                "value": field.value
            })
        }).collect::<Vec<_>>(),
        "secrets": detail.secrets.iter().map(|secret| {
            // None serializes to JSON null when this secret was not revealed.
            let revealed = revealed_by_id.get(secret.id.as_str());
            json!({
                "id": secret.id,
                "name": secret.name,
                "type": secret.secret_type,
                "masked_value": secret.masked_value,
                "value": revealed.map(|item| item.value.clone()),
                "version": secret.version
            })
        }).collect::<Vec<_>>()
    })
}
|
||||||
16
crates/device-auth/Cargo.toml
Normal file
16
crates/device-auth/Cargo.toml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-device-auth"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "secrets_device_auth"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
hex.workspace = true
|
||||||
|
rand.workspace = true
|
||||||
|
sha2.workspace = true
|
||||||
|
url.workspace = true
|
||||||
|
uuid.workspace = true
|
||||||
27
crates/device-auth/src/lib.rs
Normal file
27
crates/device-auth/src/lib.rs
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
use anyhow::{Context, Result};
use rand::{Rng, RngCore};
use sha2::{Digest, Sha256};
use url::Url;
|
||||||
|
|
||||||
|
pub fn loopback_redirect_uri(port: u16) -> Result<Url> {
|
||||||
|
Url::parse(&format!("http://127.0.0.1:{port}/oauth/callback"))
|
||||||
|
.context("failed to build loopback redirect URI")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_device_fingerprint() -> String {
|
||||||
|
let mut bytes = [0_u8; 16];
|
||||||
|
rand::rng().fill(&mut bytes);
|
||||||
|
hex::encode(bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_device_login_token() -> String {
|
||||||
|
let mut bytes = [0_u8; 32];
|
||||||
|
rand::rng().fill_bytes(&mut bytes);
|
||||||
|
hex::encode(bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn hash_device_login_token(token: &str) -> String {
|
||||||
|
let mut hasher = Sha256::new();
|
||||||
|
hasher.update(token.as_bytes());
|
||||||
|
hex::encode(hasher.finalize())
|
||||||
|
}
|
||||||
16
crates/domain/Cargo.toml
Normal file
16
crates/domain/Cargo.toml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-domain"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "secrets_domain"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
argon2 = "0.5.3"
|
||||||
|
chrono.workspace = true
|
||||||
|
serde.workspace = true
|
||||||
|
serde_json.workspace = true
|
||||||
|
thiserror.workspace = true
|
||||||
|
uuid.workspace = true
|
||||||
68
crates/domain/src/auth.rs
Normal file
68
crates/domain/src/auth.rs
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
/// A registered account.
///
/// The `key_*` fields hold client-side key-derivation material; they are
/// `None` until the user has initialized vault encryption — presumably
/// consumed by the KDF configuration in `kdf.rs` (TODO confirm).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct User {
    pub id: Uuid,
    /// Optional — some identity providers may not supply an email.
    pub email: Option<String>,
    pub name: String,
    pub avatar_url: Option<String>,
    /// Salt fed to the KDF when deriving the vault key.
    pub key_salt: Option<Vec<u8>>,
    /// Verification value used to check a derived key without storing it.
    pub key_check: Option<Vec<u8>>,
    /// KDF parameters as free-form JSON.
    pub key_params: Option<Value>,
    /// Incremented when key material is rotated (starts at 0 in schema).
    pub key_version: i64,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
|
||||||
|
|
||||||
|
/// A client device enrolled under a user account.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Device {
    pub id: Uuid,
    pub user_id: Uuid,
    /// Human-readable name shown in device lists.
    pub display_name: String,
    pub platform: String,
    pub client_version: String,
    /// Random per-device identifier (hex) generated at enrollment.
    pub device_fingerprint: String,
    pub created_at: DateTime<Utc>,
    pub last_seen_at: DateTime<Utc>,
}
|
||||||
|
|
||||||
|
/// Long-lived login credential for a device. Only the token's hash is
/// stored; the plaintext token stays on the client.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeviceLoginToken {
    pub id: Uuid,
    pub device_id: Uuid,
    /// Hex-encoded SHA-256 of the plaintext token.
    pub token_hash: String,
    pub created_at: DateTime<Utc>,
    pub last_seen_at: DateTime<Utc>,
}
|
||||||
|
|
||||||
|
/// How a client authenticated; serialized as snake_case strings
/// ("google_oauth" / "device_token") for storage and transport.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum LoginMethod {
    GoogleOauth,
    DeviceToken,
}
|
||||||
|
|
||||||
|
/// Outcome of a login attempt; serialized as "success" / "failed".
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum LoginResult {
    Success,
    Failed,
}
|
||||||
|
|
||||||
|
/// Audit-log entry for a single client login attempt.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientLoginEvent {
    /// Database-generated sequence id.
    pub id: i64,
    pub user_id: Uuid,
    pub device_id: Uuid,
    /// Device name/platform/version snapshotted at login time so the audit
    /// trail survives later device edits.
    pub device_name: String,
    pub platform: String,
    pub client_version: String,
    /// Direct peer address, if captured.
    pub ip_addr: Option<String>,
    /// Client IP extracted from proxy headers, when proxy trust is enabled.
    pub forwarded_ip: Option<String>,
    pub login_method: LoginMethod,
    pub login_result: LoginResult,
    pub created_at: DateTime<Utc>,
}
|
||||||
138
crates/domain/src/cipher.rs
Normal file
138
crates/domain/src/cipher.rs
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
/// Category of a vault item; serialized as snake_case strings that match
/// `CipherType::as_str` / `CipherType::parse`.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum CipherType {
    Login,
    ApiKey,
    SecureNote,
    SshKey,
    Identity,
    Card,
}
|
||||||
|
|
||||||
|
impl CipherType {
    /// Stable wire/database string for this type (matches the serde
    /// snake_case representation).
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Login => "login",
            Self::ApiKey => "api_key",
            Self::SecureNote => "secure_note",
            Self::SshKey => "ssh_key",
            Self::Identity => "identity",
            Self::Card => "card",
        }
    }

    /// Parse a wire string back into a `CipherType`.
    ///
    /// NOTE(review): unknown inputs silently fall back to `SecureNote`
    /// instead of erroring — presumably a lenient default for forward
    /// compatibility; confirm this is intended before relying on it.
    pub fn parse(input: &str) -> Self {
        match input {
            "login" => Self::Login,
            "api_key" => Self::ApiKey,
            "secure_note" => Self::SecureNote,
            "ssh_key" => Self::SshKey,
            "identity" => Self::Identity,
            "card" => Self::Card,
            _ => Self::SecureNote,
        }
    }
}
|
||||||
|
|
||||||
|
/// User-defined extra field on a vault item.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct CustomField {
    pub name: String,
    /// Arbitrary JSON value, not limited to strings.
    pub value: Value,
    /// When true the value should be treated/masked like a secret.
    /// Defaults to false when absent in the serialized form.
    #[serde(default)]
    pub sensitive: bool,
}
|
||||||
|
|
||||||
|
/// Type-specific payload for a `login` item. All fields are optional /
/// default so partially-filled items deserialize cleanly.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct LoginPayload {
    #[serde(default)]
    pub username: Option<String>,
    /// Associated site URIs (e.g. for autofill matching).
    #[serde(default)]
    pub uris: Vec<String>,
    #[serde(default)]
    pub password: Option<String>,
    /// TOTP seed/otpauth string, if configured.
    #[serde(default)]
    pub totp: Option<String>,
}
|
||||||
|
|
||||||
|
/// Type-specific payload for an `api_key` item.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct ApiKeyPayload {
    #[serde(default)]
    pub client_id: Option<String>,
    /// The API secret itself.
    #[serde(default)]
    pub secret: Option<String>,
    #[serde(default)]
    pub base_url: Option<String>,
    #[serde(default)]
    pub host: Option<String>,
}
|
||||||
|
|
||||||
|
/// Type-specific payload for a `secure_note` item: free-form text only.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct SecureNotePayload {
    #[serde(default)]
    pub text: Option<String>,
}
|
||||||
|
|
||||||
|
/// Type-specific payload for an `ssh_key` item.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct SshKeyPayload {
    #[serde(default)]
    pub username: Option<String>,
    #[serde(default)]
    pub host: Option<String>,
    #[serde(default)]
    pub port: Option<u16>,
    /// PEM/OpenSSH private key material.
    #[serde(default)]
    pub private_key: Option<String>,
    /// Passphrase protecting `private_key`, if any.
    #[serde(default)]
    pub passphrase: Option<String>,
}
|
||||||
|
|
||||||
|
/// Tagged union of per-type item payloads. Serialized with an internal
/// `"kind"` tag in snake_case (e.g. `{"kind":"login", ...}`).
///
/// NOTE(review): `CipherType` also has `Identity` and `Card` variants that
/// have no payload here — confirm whether those are intentionally
/// unsupported at this layer.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "kind", rename_all = "snake_case")]
pub enum ItemPayload {
    Login(LoginPayload),
    ApiKey(ApiKeyPayload),
    SecureNote(SecureNotePayload),
    SshKey(SshKeyPayload),
}
|
||||||
|
|
||||||
|
impl Default for ItemPayload {
    /// An empty secure note — the most neutral payload kind, mirroring
    /// `CipherType::parse`'s fallback to `SecureNote`.
    fn default() -> Self {
        Self::SecureNote(SecureNotePayload::default())
    }
}
|
||||||
|
|
||||||
|
/// Decrypted, client-side view of a vault item (contrast with [`Cipher`],
/// which carries only ciphertext).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct CipherView {
    pub id: Uuid,
    pub cipher_type: CipherType,
    pub name: String,
    pub folder: String,
    #[serde(default)]
    pub notes: Option<String>,
    #[serde(default)]
    pub custom_fields: Vec<CustomField>,
    /// Set when the item is soft-deleted (trash).
    #[serde(default)]
    pub deleted_at: Option<DateTime<Utc>>,
    /// Timestamp of the last modification of this item.
    pub revision_date: DateTime<Utc>,
    /// Type-specific decrypted content.
    pub payload: ItemPayload,
}
|
||||||
|
|
||||||
|
/// Server-side encrypted record of a vault item. The server stores only
/// `ciphertext`; the decrypted form is [`CipherView`] on the client.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Cipher {
    pub id: Uuid,
    pub user_id: Uuid,
    /// Storage kind discriminator (see `VaultObjectKind::as_str`).
    pub object_kind: String,
    pub cipher_type: CipherType,
    /// Monotonic sync revision assigned by the server.
    pub revision: i64,
    /// Version of the encryption format used for `ciphertext`.
    pub cipher_version: i32,
    pub ciphertext: Vec<u8>,
    /// Hash of the plaintext content — presumably used for change/conflict
    /// detection (TODO confirm against the sync implementation).
    pub content_hash: String,
    /// Set when the item is soft-deleted.
    pub deleted_at: Option<DateTime<Utc>>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
|
||||||
15
crates/domain/src/error.rs
Normal file
15
crates/domain/src/error.rs
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
/// Error taxonomy shared across the domain layer; display strings come
/// from the `#[error]` attributes via `thiserror`.
#[derive(Debug, Error)]
pub enum DomainError {
    #[error("resource not found")]
    NotFound,
    #[error("resource already exists")]
    Conflict,
    /// Carries a human-readable description of what failed validation.
    #[error("validation failed: {0}")]
    Validation(String),
    #[error("authentication failed")]
    AuthenticationFailed,
    #[error("decryption failed")]
    DecryptionFailed,
}
|
||||||
37
crates/domain/src/kdf.rs
Normal file
37
crates/domain/src/kdf.rs
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
use argon2::{Algorithm, Argon2, Params, Version};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::DomainError;
|
||||||
|
|
||||||
|
/// Supported key-derivation algorithms; serialized as "argon2id".
/// Single-variant enum kept for forward compatibility with future KDFs.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum KdfType {
    Argon2id,
}
|
||||||
|
|
||||||
|
/// Parameters for deriving the vault key (see `build_argon2`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct KdfConfig {
    pub kdf_type: KdfType,
    /// Memory cost in KiB.
    pub memory_kib: u32,
    /// Time cost (number of passes).
    pub iterations: u32,
    /// Number of lanes/threads.
    pub parallelism: u32,
}
|
||||||
|
|
||||||
|
impl Default for KdfConfig {
    /// Argon2id with 64 MiB memory, 3 iterations, parallelism 4 —
    /// in line with commonly recommended interactive-login parameters.
    fn default() -> Self {
        Self {
            kdf_type: KdfType::Argon2id,
            memory_kib: 64 * 1024,
            iterations: 3,
            parallelism: 4,
        }
    }
}
|
||||||
|
|
||||||
|
impl KdfConfig {
    /// Build an Argon2id hasher (v0x13) from these parameters with a fixed
    /// 32-byte output length.
    ///
    /// # Errors
    /// Returns `DomainError::Validation` when the parameters fall outside
    /// argon2's accepted ranges.
    pub fn build_argon2(&self) -> Result<Argon2<'static>, DomainError> {
        let params = Params::new(self.memory_kib, self.iterations, self.parallelism, Some(32))
            .map_err(|err| DomainError::Validation(err.to_string()))?;
        Ok(Argon2::new(Algorithm::Argon2id, Version::V0x13, params))
    }
}
|
||||||
19
crates/domain/src/lib.rs
Normal file
19
crates/domain/src/lib.rs
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
pub mod auth;
|
||||||
|
pub mod cipher;
|
||||||
|
pub mod error;
|
||||||
|
pub mod kdf;
|
||||||
|
pub mod sync;
|
||||||
|
pub mod vault_object;
|
||||||
|
|
||||||
|
pub use auth::{ClientLoginEvent, Device, DeviceLoginToken, LoginMethod, LoginResult, User};
|
||||||
|
pub use cipher::{
|
||||||
|
ApiKeyPayload, Cipher, CipherType, CipherView, CustomField, ItemPayload, LoginPayload,
|
||||||
|
SecureNotePayload, SshKeyPayload,
|
||||||
|
};
|
||||||
|
pub use error::DomainError;
|
||||||
|
pub use kdf::{KdfConfig, KdfType};
|
||||||
|
pub use sync::{
|
||||||
|
SyncAcceptedChange, SyncConflict, SyncPullRequest, SyncPullResponse, SyncPushRequest,
|
||||||
|
SyncPushResponse,
|
||||||
|
};
|
||||||
|
pub use vault_object::{VaultObjectChange, VaultObjectEnvelope, VaultObjectKind, VaultTombstone};
|
||||||
47
crates/domain/src/sync.rs
Normal file
47
crates/domain/src/sync.rs
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::vault_object::{VaultObjectChange, VaultObjectEnvelope, VaultTombstone};
|
||||||
|
|
||||||
|
/// Client request to pull vault changes since a cursor.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SyncPullRequest {
    /// Resume point from a previous pull; None starts from the beginning.
    pub cursor: Option<i64>,
    /// Maximum number of objects to return; None lets the server decide.
    pub limit: Option<i64>,
    /// Also return tombstoned/deleted objects. Defaults to false.
    #[serde(default)]
    pub include_deleted: bool,
}
|
||||||
|
|
||||||
|
/// One page of pulled changes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SyncPullResponse {
    /// Latest revision known to the server at response time.
    pub server_revision: i64,
    /// Cursor to pass to the next pull.
    pub next_cursor: i64,
    /// True when more pages remain beyond `next_cursor`.
    pub has_more: bool,
    pub objects: Vec<VaultObjectEnvelope>,
    /// Deletions in the pulled range.
    pub tombstones: Vec<VaultTombstone>,
}
|
||||||
|
|
||||||
|
/// Client request to push a batch of local changes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SyncPushRequest {
    pub changes: Vec<VaultObjectChange>,
}
|
||||||
|
|
||||||
|
/// Acknowledgement that one pushed change was applied, with the revision
/// the server assigned to it.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SyncAcceptedChange {
    pub change_id: uuid::Uuid,
    pub object_id: uuid::Uuid,
    pub revision: i64,
}
|
||||||
|
|
||||||
|
/// A pushed change the server rejected, with the server's current copy of
/// the object (when it still exists) so the client can resolve.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SyncConflict {
    pub change_id: uuid::Uuid,
    pub object_id: uuid::Uuid,
    /// Human-readable rejection reason.
    pub reason: String,
    pub server_object: Option<VaultObjectEnvelope>,
}
|
||||||
|
|
||||||
|
/// Result of a push: which changes were applied and which conflicted.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SyncPushResponse {
    /// Latest revision after applying the accepted changes.
    pub server_revision: i64,
    pub accepted: Vec<SyncAcceptedChange>,
    pub conflicts: Vec<SyncConflict>,
}
|
||||||
48
crates/domain/src/vault_object.rs
Normal file
48
crates/domain/src/vault_object.rs
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
/// Kind discriminator for synced vault objects; serialized as "cipher".
/// Single-variant enum kept for forward compatibility with future kinds.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum VaultObjectKind {
    Cipher,
}
|
||||||
|
|
||||||
|
impl VaultObjectKind {
    /// Stable wire/database string (matches the serde snake_case form).
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Cipher => "cipher",
        }
    }
}
|
||||||
|
|
||||||
|
/// Server-side representation of one synced object as transported by the
/// sync API: opaque ciphertext plus revision/housekeeping metadata.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct VaultObjectEnvelope {
    pub object_id: Uuid,
    pub object_kind: VaultObjectKind,
    /// Server-assigned monotonic revision.
    pub revision: i64,
    /// Version of the encryption format used for `ciphertext`.
    pub cipher_version: i32,
    pub ciphertext: Vec<u8>,
    /// Hash of the content — presumably for change/conflict detection
    /// (TODO confirm against the sync implementation).
    pub content_hash: String,
    /// Set when the object is soft-deleted.
    pub deleted_at: Option<DateTime<Utc>>,
    pub updated_at: DateTime<Utc>,
}
|
||||||
|
|
||||||
|
/// One client-originated change submitted in a push. Payload fields are
/// optional because deletes carry no new ciphertext.
///
/// NOTE(review): `operation` is a free-form string — confirm the accepted
/// vocabulary (e.g. create/update/delete) server-side; an enum would make
/// invalid operations unrepresentable.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct VaultObjectChange {
    /// Idempotency id for this change, generated by the client.
    pub change_id: Uuid,
    pub object_id: Uuid,
    pub object_kind: VaultObjectKind,
    pub operation: String,
    /// Revision the client based this change on; None presumably means a
    /// brand-new object (TODO confirm).
    pub base_revision: Option<i64>,
    pub cipher_version: Option<i32>,
    pub ciphertext: Option<Vec<u8>>,
    pub content_hash: Option<String>,
}
|
||||||
|
|
||||||
|
/// Marker for a deleted object so clients can remove their local copy.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct VaultTombstone {
    pub object_id: Uuid,
    /// Revision at which the deletion was recorded.
    pub revision: i64,
    pub deleted_at: DateTime<Utc>,
}
|
||||||
15
crates/infrastructure-db/Cargo.toml
Normal file
15
crates/infrastructure-db/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
[package]
|
||||||
|
name = "secrets-infrastructure-db"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition.workspace = true
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "secrets_infrastructure_db"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow.workspace = true
|
||||||
|
dotenvy.workspace = true
|
||||||
|
sqlx.workspace = true
|
||||||
|
tracing.workspace = true
|
||||||
|
uuid.workspace = true
|
||||||
29
crates/infrastructure-db/src/lib.rs
Normal file
29
crates/infrastructure-db/src/lib.rs
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
mod migrate;
|
||||||
|
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use sqlx::PgPool;
|
||||||
|
use sqlx::postgres::{PgConnectOptions, PgPoolOptions};
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
pub use migrate::migrate_current_schema;
|
||||||
|
|
||||||
|
/// Read the PostgreSQL connection string from `SECRETS_DATABASE_URL`.
///
/// # Errors
/// Fails when the variable is unset (or not valid Unicode), since every
/// current service requires it.
pub fn load_database_url() -> Result<String> {
    std::env::var("SECRETS_DATABASE_URL")
        .context("SECRETS_DATABASE_URL is required for current services")
}
|
||||||
|
|
||||||
|
pub async fn create_pool(database_url: &str) -> Result<PgPool> {
|
||||||
|
let options =
|
||||||
|
PgConnectOptions::from_str(database_url).context("failed to parse SECRETS_DATABASE_URL")?;
|
||||||
|
let pool = PgPoolOptions::new()
|
||||||
|
.max_connections(
|
||||||
|
std::env::var("SECRETS_DATABASE_POOL_SIZE")
|
||||||
|
.ok()
|
||||||
|
.and_then(|v| v.parse::<u32>().ok())
|
||||||
|
.unwrap_or(10),
|
||||||
|
)
|
||||||
|
.connect_with(options)
|
||||||
|
.await
|
||||||
|
.context("failed to connect to PostgreSQL")?;
|
||||||
|
Ok(pool)
|
||||||
|
}
|
||||||
130
crates/infrastructure-db/src/migrate.rs
Normal file
130
crates/infrastructure-db/src/migrate.rs
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use sqlx::PgPool;
|
||||||
|
|
||||||
|
/// Apply the current schema idempotently: every statement uses
/// `IF NOT EXISTS`, so re-running on an already-migrated database is a
/// no-op. The whole script is sent as one batch via `raw_sql`.
///
/// NOTE(review): `uuidv7()` as a column default requires a PostgreSQL
/// version/extension that provides it — confirm the target server does.
/// NOTE(review): `desktop_login_sessions` stores both `device_token` and
/// `device_token_hash`; confirm the plaintext token column is intentional
/// and short-lived (sessions expire via `expires_at`).
///
/// # Errors
/// Propagates any SQL execution error from the database.
pub async fn migrate_current_schema(pool: &PgPool) -> Result<()> {
    sqlx::raw_sql(
        r#"
        CREATE TABLE IF NOT EXISTS users (
            id UUID PRIMARY KEY DEFAULT uuidv7(),
            email VARCHAR(256),
            name VARCHAR(256) NOT NULL DEFAULT '',
            avatar_url TEXT,
            key_salt BYTEA,
            key_check BYTEA,
            key_params JSONB,
            key_version BIGINT NOT NULL DEFAULT 0,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );

        CREATE TABLE IF NOT EXISTS oauth_accounts (
            id UUID PRIMARY KEY DEFAULT uuidv7(),
            user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
            provider VARCHAR(32) NOT NULL,
            provider_id VARCHAR(256) NOT NULL,
            email VARCHAR(256),
            name VARCHAR(256),
            avatar_url TEXT,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            UNIQUE(provider, provider_id),
            UNIQUE(user_id, provider)
        );

        CREATE TABLE IF NOT EXISTS devices (
            id UUID PRIMARY KEY DEFAULT uuidv7(),
            user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
            display_name VARCHAR(256) NOT NULL,
            platform VARCHAR(64) NOT NULL,
            client_version VARCHAR(64) NOT NULL,
            device_fingerprint TEXT NOT NULL,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            last_seen_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );
        CREATE INDEX IF NOT EXISTS idx_devices_user_id ON devices(user_id);

        CREATE TABLE IF NOT EXISTS device_login_tokens (
            id UUID PRIMARY KEY DEFAULT uuidv7(),
            device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
            token_hash TEXT NOT NULL UNIQUE,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            last_seen_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );
        CREATE INDEX IF NOT EXISTS idx_device_login_tokens_device_id ON device_login_tokens(device_id);

        CREATE TABLE IF NOT EXISTS auth_events (
            id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
            device_id UUID NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
            device_name VARCHAR(256) NOT NULL,
            platform VARCHAR(64) NOT NULL,
            client_version VARCHAR(64) NOT NULL,
            ip_addr TEXT,
            forwarded_ip TEXT,
            login_method VARCHAR(32) NOT NULL,
            login_result VARCHAR(32) NOT NULL,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );
        CREATE INDEX IF NOT EXISTS idx_auth_events_user_id_created_at
            ON auth_events(user_id, created_at DESC);
        CREATE INDEX IF NOT EXISTS idx_auth_events_device_id_created_at
            ON auth_events(device_id, created_at DESC);

        CREATE TABLE IF NOT EXISTS desktop_login_sessions (
            session_id TEXT PRIMARY KEY,
            oauth_state TEXT NOT NULL UNIQUE,
            pkce_verifier TEXT NOT NULL,
            device_name VARCHAR(256) NOT NULL,
            platform VARCHAR(64) NOT NULL,
            client_version VARCHAR(64) NOT NULL,
            device_fingerprint TEXT NOT NULL,
            status VARCHAR(32) NOT NULL DEFAULT 'pending',
            error_message TEXT,
            user_id UUID REFERENCES users(id) ON DELETE SET NULL,
            device_id UUID REFERENCES devices(id) ON DELETE SET NULL,
            device_token TEXT,
            device_token_hash TEXT,
            expires_at TIMESTAMPTZ NOT NULL,
            consumed_at TIMESTAMPTZ,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );
        CREATE INDEX IF NOT EXISTS idx_desktop_login_sessions_status_expires
            ON desktop_login_sessions(status, expires_at);

        CREATE TABLE IF NOT EXISTS vault_objects (
            object_id UUID PRIMARY KEY,
            user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
            object_kind VARCHAR(32) NOT NULL,
            revision BIGINT NOT NULL,
            cipher_version INTEGER NOT NULL DEFAULT 1,
            ciphertext BYTEA NOT NULL DEFAULT '\x',
            content_hash TEXT NOT NULL DEFAULT '',
            deleted_at TIMESTAMPTZ,
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            created_by_device UUID REFERENCES devices(id) ON DELETE SET NULL
        );
        CREATE INDEX IF NOT EXISTS idx_vault_objects_user_revision
            ON vault_objects(user_id, revision ASC);
        CREATE INDEX IF NOT EXISTS idx_vault_objects_user_deleted
            ON vault_objects(user_id, deleted_at);

        CREATE TABLE IF NOT EXISTS vault_object_revisions (
            object_id UUID NOT NULL,
            user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
            revision BIGINT NOT NULL,
            cipher_version INTEGER NOT NULL DEFAULT 1,
            ciphertext BYTEA NOT NULL DEFAULT '\x',
            content_hash TEXT NOT NULL DEFAULT '',
            deleted_at TIMESTAMPTZ,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            PRIMARY KEY (object_id, revision)
        );
        CREATE INDEX IF NOT EXISTS idx_vault_object_revisions_user_revision
            ON vault_object_revisions(user_id, revision ASC);
        "#,
    )
    .execute(pool)
    .await?;

    Ok(())
}
|
||||||
57
deploy/.env.example
Normal file
57
deploy/.env.example
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# Secrets v3 环境变量配置
|
||||||
|
# 复制此文件为 .env 并填写真实值
|
||||||
|
|
||||||
|
# ─── 数据库 ───────────────────────────────────────────────────────────
|
||||||
|
# v3 API 与桌面端都复用这套数据库
|
||||||
|
SECRETS_DATABASE_URL=postgres://postgres:PASSWORD@db.refining.ltd:5432/secrets-v3
|
||||||
|
# 强烈建议生产使用 verify-full(至少 verify-ca)
|
||||||
|
SECRETS_DATABASE_SSL_MODE=verify-full
|
||||||
|
# 私有 CA 或自建链路时填写 CA 根证书路径;使用公共受信 CA 可留空
|
||||||
|
# SECRETS_DATABASE_SSL_ROOT_CERT=/etc/secrets/pg-ca.crt
|
||||||
|
# 当设为 prod/production 时,服务会拒绝弱 TLS 模式(prefer/disable/allow/require)
|
||||||
|
SECRETS_ENV=production
|
||||||
|
|
||||||
|
# ─── 服务地址 ─────────────────────────────────────────────────────────
|
||||||
|
SECRETS_API_BIND=127.0.0.1:9415
|
||||||
|
SECRETS_DAEMON_BIND=127.0.0.1:9515
|
||||||
|
SECRETS_API_BASE=http://127.0.0.1:9415
|
||||||
|
SECRETS_DAEMON_URL=http://127.0.0.1:9515/mcp
|
||||||
|
|
||||||
|
# ─── Google OAuth(服务端托管)──────────────────────────────────────────
|
||||||
|
# 官网 DMG 正式分发时,Google OAuth 凭据只配置在 API 服务端
|
||||||
|
SECRETS_PUBLIC_BASE_URL=http://127.0.0.1:9415
|
||||||
|
GOOGLE_OAUTH_CLIENT_ID=your-google-oauth-client-id.apps.googleusercontent.com
|
||||||
|
GOOGLE_OAUTH_CLIENT_SECRET=your-google-oauth-client-secret
|
||||||
|
GOOGLE_OAUTH_REDIRECT_URI=http://127.0.0.1:9415/auth/google/callback
|
||||||
|
# 可选:如不配置则使用 Google 默认公开端点
|
||||||
|
# GOOGLE_OAUTH_AUTH_URI=https://accounts.google.com/o/oauth2/v2/auth
|
||||||
|
# GOOGLE_OAUTH_TOKEN_URI=https://oauth2.googleapis.com/token
|
||||||
|
# 若仍无法换 token(仅提供端口代理、无系统代理):可取消注释并改为本机代理地址
|
||||||
|
# HTTPS_PROXY=http://127.0.0.1:7890
|
||||||
|
# NO_PROXY=localhost,127.0.0.1
|
||||||
|
|
||||||
|
# ─── 日志(可选)──────────────────────────────────────────────────────
|
||||||
|
# RUST_LOG=secrets_api=debug,secrets_desktop_daemon=debug
|
||||||
|
|
||||||
|
# ─── 数据库连接池(可选)──────────────────────────────────────────────
|
||||||
|
# 最大连接数,默认 10
|
||||||
|
# SECRETS_DATABASE_POOL_SIZE=10
|
||||||
|
# 获取连接超时秒数,默认 5
|
||||||
|
# SECRETS_DATABASE_ACQUIRE_TIMEOUT=5
|
||||||
|
|
||||||
|
# ─── 限流(可选)──────────────────────────────────────────────────────
|
||||||
|
# 全局限流速率(req/s),默认 100
|
||||||
|
# RATE_LIMIT_GLOBAL_PER_SECOND=100
|
||||||
|
# 全局限流突发量,默认 200
|
||||||
|
# RATE_LIMIT_GLOBAL_BURST=200
|
||||||
|
# 单 IP 限流速率(req/s),默认 20
|
||||||
|
# RATE_LIMIT_IP_PER_SECOND=20
|
||||||
|
# 单 IP 限流突发量,默认 40
|
||||||
|
# RATE_LIMIT_IP_BURST=40
|
||||||
|
|
||||||
|
# ─── 代理信任(可选)─────────────────────────────────────────────────
|
||||||
|
# 设为 1/true/yes 时从 X-Forwarded-For / X-Real-IP 提取客户端 IP
|
||||||
|
# 仅在反代环境下启用,否则客户端可伪造 IP 绕过限流
|
||||||
|
# TRUST_PROXY=1
|
||||||
|
|
||||||
|
# 桌面端会在 ~/.secrets-v3/desktop 下持久化 device token 与 device fingerprint
|
||||||
92
deploy/postgres-tls-hardening.md
Normal file
92
deploy/postgres-tls-hardening.md
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
# PostgreSQL TLS Hardening Runbook
|
||||||
|
|
||||||
|
This runbook applies to:
|
||||||
|
|
||||||
|
- PostgreSQL server: `47.117.131.22` (`db.refining.ltd`)
|
||||||
|
- `secrets-mcp` app server: `47.238.146.244` (`secrets.refining.app`)
|
||||||
|
|
||||||
|
## 1) Issue certificate for `db.refining.ltd` (Let's Encrypt + Cloudflare DNS-01)
|
||||||
|
|
||||||
|
Install `acme.sh` on the PostgreSQL server and use a Cloudflare API token with DNS edit permission for the target zone.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl https://get.acme.sh | sh -s email=ops@refining.ltd
|
||||||
|
export CF_Token="your_cloudflare_dns_token"
|
||||||
|
export CF_Zone_ID="your_zone_id"
|
||||||
|
~/.acme.sh/acme.sh --issue --dns dns_cf -d db.refining.ltd --keylength ec-256
|
||||||
|
```
|
||||||
|
|
||||||
|
Install cert/key into a PostgreSQL-readable path:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo mkdir -p /etc/postgresql/tls
|
||||||
|
sudo ~/.acme.sh/acme.sh --install-cert -d db.refining.ltd --ecc \
|
||||||
|
--fullchain-file /etc/postgresql/tls/fullchain.pem \
|
||||||
|
--key-file /etc/postgresql/tls/privkey.pem \
|
||||||
|
--reloadcmd "systemctl reload postgresql || systemctl restart postgresql"
|
||||||
|
sudo chown -R postgres:postgres /etc/postgresql/tls
|
||||||
|
sudo chmod 600 /etc/postgresql/tls/privkey.pem
|
||||||
|
sudo chmod 644 /etc/postgresql/tls/fullchain.pem
|
||||||
|
```
|
||||||
|
|
||||||
|
## 2) Configure PostgreSQL TLS and access rules
|
||||||
|
|
||||||
|
In `postgresql.conf`:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
ssl = on
|
||||||
|
ssl_cert_file = '/etc/postgresql/tls/fullchain.pem'
|
||||||
|
ssl_key_file = '/etc/postgresql/tls/privkey.pem'
|
||||||
|
```
|
||||||
|
|
||||||
|
In `pg_hba.conf`, allow app traffic via TLS only (example):
|
||||||
|
|
||||||
|
```conf
|
||||||
|
hostssl secrets-mcp postgres 47.238.146.244/32 scram-sha-256
|
||||||
|
```
|
||||||
|
|
||||||
|
Keep a safe admin path (`local` socket or restricted source CIDR) before removing old plaintext `host` rules.
|
||||||
|
|
||||||
|
Reload PostgreSQL:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo systemctl reload postgresql
|
||||||
|
```
|
||||||
|
|
||||||
|
## 3) Verify server-side TLS
|
||||||
|
|
||||||
|
```bash
|
||||||
|
openssl s_client -starttls postgres -connect db.refining.ltd:5432 -servername db.refining.ltd
|
||||||
|
```
|
||||||
|
|
||||||
|
The handshake should succeed and the certificate should match `db.refining.ltd`.
|
||||||
|
|
||||||
|
## 4) Update `secrets-mcp` app server env
|
||||||
|
|
||||||
|
Use environment values like:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SECRETS_DATABASE_URL=postgres://postgres:***@db.refining.ltd:5432/secrets-mcp
|
||||||
|
SECRETS_DATABASE_SSL_MODE=verify-full
|
||||||
|
SECRETS_ENV=production
|
||||||
|
```
|
||||||
|
|
||||||
|
If you use private CA instead of public CA, also set:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SECRETS_DATABASE_SSL_ROOT_CERT=/etc/secrets/pg-ca.crt
|
||||||
|
```
|
||||||
|
|
||||||
|
Restart `secrets-mcp` after updating env.
|
||||||
|
|
||||||
|
## 5) Verify from app server
|
||||||
|
|
||||||
|
Run positive and negative checks:
|
||||||
|
|
||||||
|
- Positive: app starts, migrations pass, dashboard + MCP API work.
|
||||||
|
- Negative:
|
||||||
|
- wrong hostname -> connection fails
|
||||||
|
- wrong CA file -> connection fails
|
||||||
|
- disable TLS on DB -> connection fails
|
||||||
|
|
||||||
|
This ensures no silent downgrade to weak TLS in production.
|
||||||
27
deploy/secrets-mcp.service
Normal file
27
deploy/secrets-mcp.service
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Secrets API Server
|
||||||
|
After=network.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
User=secrets
|
||||||
|
Group=secrets
|
||||||
|
WorkingDirectory=/opt/secrets
|
||||||
|
EnvironmentFile=/opt/secrets/.env
|
||||||
|
ExecStart=/opt/secrets/secrets-api
|
||||||
|
Restart=always
|
||||||
|
RestartSec=5
|
||||||
|
StandardOutput=journal
|
||||||
|
StandardError=journal
|
||||||
|
SyslogIdentifier=secrets-api
|
||||||
|
|
||||||
|
# 安全加固
|
||||||
|
NoNewPrivileges=yes
|
||||||
|
ProtectSystem=strict
|
||||||
|
ProtectHome=yes
|
||||||
|
ReadWritePaths=/opt/secrets
|
||||||
|
PrivateTmp=yes
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
3
rust-toolchain.toml
Normal file
3
rust-toolchain.toml
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
[toolchain]
|
||||||
|
channel = "1.94.0"
|
||||||
|
components = ["rustfmt", "clippy"]
|
||||||
@@ -5,19 +5,7 @@ set -euo pipefail
|
|||||||
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
cd "$repo_root"
|
cd "$repo_root"
|
||||||
|
|
||||||
version="$(grep -m1 '^version' Cargo.toml | sed 's/.*"\(.*\)".*/\1/')"
|
echo "==> 开始执行检查"
|
||||||
tag="secrets-${version}"
|
|
||||||
|
|
||||||
echo "==> 当前版本: ${version}"
|
|
||||||
echo "==> 检查是否已存在 tag: ${tag}"
|
|
||||||
|
|
||||||
if git rev-parse "refs/tags/${tag}" >/dev/null 2>&1; then
|
|
||||||
echo "错误: 已存在 tag ${tag}"
|
|
||||||
echo "请先 bump Cargo.toml 中的 version,再执行 cargo build 同步 Cargo.lock。"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "==> 未发现重复 tag,开始执行检查"
|
|
||||||
cargo fmt -- --check
|
cargo fmt -- --check
|
||||||
cargo clippy --locked -- -D warnings
|
cargo clippy --locked -- -D warnings
|
||||||
cargo test --locked
|
cargo test --locked
|
||||||
|
|||||||
1
scripts/repair-secrets.template.csv
Normal file
1
scripts/repair-secrets.template.csv
Normal file
@@ -0,0 +1 @@
|
|||||||
|
entry_id,secret_name,secret_value
|
||||||
|
383
scripts/repair_secrets_from_csv.py
Normal file
383
scripts/repair_secrets_from_csv.py
Normal file
@@ -0,0 +1,383 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Batch re-encrypt secret fields from a CSV file.
|
||||||
|
|
||||||
|
CSV format:
|
||||||
|
entry_id,secret_name,secret_value
|
||||||
|
019d...,api_key,sk-xxxx
|
||||||
|
019d...,password,hunter2
|
||||||
|
|
||||||
|
The script groups rows by entry_id, then calls `secrets_entry_update` with `secrets_obj`
|
||||||
|
so the server re-encrypts the provided plaintext values with the current key.
|
||||||
|
|
||||||
|
Warnings:
|
||||||
|
- Keep the CSV outside version control whenever possible.
|
||||||
|
- Delete the filled CSV after the repair is complete.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import csv
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
import urllib.error
|
||||||
|
import urllib.request
|
||||||
|
from collections import OrderedDict
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
|
||||||
|
DEFAULT_USER_AGENT = "Cursor/3.0.12 (darwin arm64)"
|
||||||
|
REQUIRED_COLUMNS = {"entry_id", "secret_name", "secret_value"}
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args() -> argparse.Namespace:
    """Define and evaluate the command-line interface for the repair script."""
    # Inlined copy of the module-level DEFAULT_USER_AGENT constant so the
    # parser definition is self-contained.
    default_user_agent = "Cursor/3.0.12 (darwin arm64)"

    cli = argparse.ArgumentParser(
        description="Repair secret ciphertexts by re-submitting plaintext via secrets_entry_update."
    )
    cli.add_argument(
        "--csv",
        required=True,
        help="Path to CSV file with columns: entry_id,secret_name,secret_value",
    )
    cli.add_argument(
        "--mcp-json",
        default=str(Path.home() / ".cursor" / "mcp.json"),
        help="Path to mcp.json used to resolve URL and headers",
    )
    cli.add_argument(
        "--server",
        default="secrets",
        help="MCP server name inside mcp.json (default: secrets)",
    )
    # Explicit overrides; when omitted the values come from mcp.json.
    cli.add_argument("--url", help="Override MCP URL")
    cli.add_argument("--auth", help="Override Authorization header value")
    cli.add_argument("--encryption-key", help="Override X-Encryption-Key header value")
    cli.add_argument(
        "--user-agent",
        default=default_user_agent,
        help=f"User-Agent header (default: {default_user_agent})",
    )
    cli.add_argument(
        "--dry-run",
        action="store_true",
        help="Parse and print grouped updates without sending requests",
    )
    return cli.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def load_mcp_config(path: str, server_name: str) -> dict[str, Any]:
    """Return the configuration dict for one server from an mcp.json file.

    Raises KeyError when `server_name` has no entry under "mcpServers".
    """
    config = json.loads(Path(path).read_text(encoding="utf-8"))
    try:
        return config.get("mcpServers", {})[server_name]
    except KeyError:
        raise KeyError(f"Server '{server_name}' not found in {path}") from None
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_connection_settings(args: argparse.Namespace) -> tuple[str, str, str]:
    """Resolve (url, auth, encryption_key), preferring CLI overrides.

    Fix: mcp.json is now consulted only when at least one value is missing
    from the CLI overrides. Previously the config file was loaded
    unconditionally, so a run that supplied --url/--auth/--encryption-key
    still failed if mcp.json was absent or malformed.

    Raises ValueError when any of the three values remains unresolved.
    """
    url = args.url
    auth = args.auth
    encryption_key = args.encryption_key

    if not (url and auth and encryption_key):
        server = load_mcp_config(args.mcp_json, args.server)
        headers = server.get("headers", {})
        url = url or server.get("url")
        auth = auth or headers.get("Authorization")
        encryption_key = encryption_key or headers.get("X-Encryption-Key")

    if not url:
        raise ValueError("Missing MCP URL. Pass --url or configure it in mcp.json.")
    if not auth:
        raise ValueError(
            "Missing Authorization header. Pass --auth or configure it in mcp.json."
        )
    if not encryption_key:
        raise ValueError(
            "Missing X-Encryption-Key. Pass --encryption-key or configure it in mcp.json."
        )

    return url, auth, encryption_key
|
||||||
|
|
||||||
|
|
||||||
|
def load_updates(csv_path: str) -> OrderedDict[str, OrderedDict[str, str]]:
    """Read the repair CSV and group secret values by entry_id.

    Returns an insertion-ordered mapping of
    entry_id -> {secret_name: secret_value}. Fully blank rows are skipped;
    missing required fields or duplicate (entry_id, secret_name) pairs raise
    ValueError naming the offending CSV line.
    """
    # Inlined copy of the module-level REQUIRED_COLUMNS constant.
    required_columns = {"entry_id", "secret_name", "secret_value"}
    grouped: OrderedDict[str, OrderedDict[str, str]] = OrderedDict()

    with Path(csv_path).open("r", encoding="utf-8-sig", newline="") as fh:
        reader = csv.DictReader(fh)
        missing = required_columns - set(reader.fieldnames or [])
        if missing:
            raise ValueError(
                "CSV missing required columns: " + ", ".join(sorted(missing))
            )

        # Data starts on physical line 2 (line 1 is the header).
        for line_no, row in enumerate(reader, start=2):
            entry_id = (row.get("entry_id") or "").strip()
            secret_name = (row.get("secret_name") or "").strip()
            secret_value = row.get("secret_value") or ""

            # Tolerate completely blank rows (e.g. trailing separators).
            if not (entry_id or secret_name or secret_value):
                continue
            if not entry_id:
                raise ValueError(f"Line {line_no}: entry_id is required")
            if not secret_name:
                raise ValueError(f"Line {line_no}: secret_name is required")

            secrets_for_entry = grouped.setdefault(entry_id, OrderedDict())
            if secret_name in secrets_for_entry:
                raise ValueError(
                    f"Line {line_no}: duplicate secret_name '{secret_name}' for entry_id '{entry_id}'"
                )
            secrets_for_entry[secret_name] = secret_value

    if not grouped:
        raise ValueError("CSV contains no updates")

    return grouped
|
||||||
|
|
||||||
|
|
||||||
|
def post_json(
    url: str,
    payload: dict[str, Any],
    auth: str,
    encryption_key: str,
    user_agent: str,
    session_id: str | None = None,
) -> tuple[int, str | None, str]:
    """POST a JSON payload to the MCP endpoint.

    Returns (status_code, session_id, response_body). HTTP error responses
    are not raised; their status and body are returned so callers can decide
    how to react. The returned session id is the server-provided
    `mcp-session-id` header when present, otherwise the one passed in.
    """
    request_headers = {
        "Content-Type": "application/json",
        "Accept": "application/json, text/event-stream",
        "Authorization": auth,
        "X-Encryption-Key": encryption_key,
        "User-Agent": user_agent,
    }
    if session_id:
        request_headers["mcp-session-id"] = session_id

    request = urllib.request.Request(
        url,
        data=json.dumps(payload).encode("utf-8"),
        headers=request_headers,
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=30) as response:
            body = response.read().decode("utf-8")
            negotiated = response.headers.get("mcp-session-id") or session_id
            return response.status, negotiated, body
    except urllib.error.HTTPError as exc:
        # Surface the error body instead of raising; the caller inspects it.
        return exc.code, session_id, exc.read().decode("utf-8", errors="replace")
|
||||||
|
|
||||||
|
|
||||||
|
def parse_sse_json(body: str) -> list[dict[str, Any]]:
    """Extract JSON object payloads from an SSE response body.

    Fix: the SSE wire format permits both `data: {...}` and `data:{...}`
    (the space after the colon is optional); the original only matched the
    spaced form and silently dropped unspaced frames. Non-object data lines
    are still ignored, preserving the previous behavior.
    """
    items: list[dict[str, Any]] = []
    for line in body.splitlines():
        if not line.startswith("data:"):
            continue
        payload = line[len("data:"):].lstrip()
        if payload.startswith("{"):
            items.append(json.loads(payload))
    return items
|
||||||
|
|
||||||
|
|
||||||
|
def initialize_session(
    url: str, auth: str, encryption_key: str, user_agent: str
) -> str:
    """Perform the MCP handshake and return the negotiated session id.

    Sends the JSON-RPC `initialize` request, then the
    `notifications/initialized` notification. Raises RuntimeError when
    either step fails or no session id is returned.
    """
    init_request = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "initialize",
        "params": {
            "protocolVersion": "2025-06-18",
            "capabilities": {},
            "clientInfo": {"name": "repair-script", "version": "1.0"},
        },
    }
    status, session_id, body = post_json(
        url, init_request, auth, encryption_key, user_agent
    )
    if status != 200 or not session_id:
        raise RuntimeError(f"initialize failed: status={status}, body={body[:500]}")

    notification = {
        "jsonrpc": "2.0",
        "method": "notifications/initialized",
        "params": {},
    }
    status, _, body = post_json(
        url, notification, auth, encryption_key, user_agent, session_id
    )
    # Notifications may legitimately be acknowledged with 202 Accepted.
    if status not in (200, 202):
        raise RuntimeError(
            f"notifications/initialized failed: status={status}, body={body[:500]}"
        )
    return session_id
|
||||||
|
|
||||||
|
|
||||||
|
def load_entry_index(
    url: str, auth: str, encryption_key: str, user_agent: str, session_id: str
) -> dict[str, tuple[str, str]]:
    """Fetch all entries via `secrets_entry_find` and index them by id.

    Returns {entry_id: (name, folder)}; entries without an id or name are
    skipped. Raises RuntimeError on transport failures, tool-level JSON-RPC
    errors, or an empty result.
    """
    find_request = {
        "jsonrpc": "2.0",
        "id": 999_001,
        "method": "tools/call",
        "params": {
            "name": "secrets_entry_find",
            "arguments": {
                "limit": 1000,
            },
        },
    }
    status, _, body = post_json(
        url, find_request, auth, encryption_key, user_agent, session_id
    )
    events = parse_sse_json(body)
    # The final SSE frame carries the JSON-RPC response; keep a raw excerpt
    # for diagnostics when no frames parsed at all.
    last = events[-1] if events else {"raw": body[:1000]}
    if status != 200:
        raise RuntimeError(
            f"secrets_entry_find failed: status={status}, body={body[:500]}"
        )
    if "error" in last:
        raise RuntimeError(f"secrets_entry_find returned error: {last}")

    content = last.get("result", {}).get("content", [])
    if not content:
        raise RuntimeError("secrets_entry_find returned no content")
    payload = json.loads(content[0]["text"])

    return {
        entry["id"]: (entry["name"], entry.get("folder", ""))
        for entry in payload.get("entries", [])
        if entry.get("id") and entry.get("name") is not None
    }
|
||||||
|
|
||||||
|
|
||||||
|
def call_secrets_entry_update(
    url: str,
    auth: str,
    encryption_key: str,
    user_agent: str,
    session_id: str,
    request_id: int,
    entry_id: str,
    entry_name: str,
    entry_folder: str,
    secrets_obj: dict[str, str],
) -> dict[str, Any]:
    """Invoke `secrets_entry_update` for one entry, re-sending plaintext secrets.

    The server re-encrypts the provided values with the current key. Returns
    the final JSON-RPC message parsed from the SSE stream (or a raw excerpt
    when nothing parsed); raises RuntimeError on a non-200 HTTP status.
    """
    arguments = {
        "id": entry_id,
        "name": entry_name,
        "folder": entry_folder,
        "secrets_obj": secrets_obj,
        # Pass the key as an argument too, so repair can still work
        # even when a client/proxy mishandles custom headers.
        "encryption_key": encryption_key,
    }
    update_request = {
        "jsonrpc": "2.0",
        "id": request_id,
        "method": "tools/call",
        "params": {"name": "secrets_entry_update", "arguments": arguments},
    }
    status, _, body = post_json(
        url, update_request, auth, encryption_key, user_agent, session_id
    )
    events = parse_sse_json(body)
    if status != 200:
        raise RuntimeError(
            f"secrets_entry_update failed for {entry_id}: status={status}, body={body[:500]}"
        )
    return events[-1] if events else {"raw": body[:1000]}
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """Entry point: load the CSV, then re-submit each entry's secrets.

    Exit codes: 0 = all updates succeeded (or dry run), 1 = setup failure
    (bad config/CSV or handshake error), 2 = at least one update failed.
    """
    args = parse_args()

    # Resolve connection info and parse the CSV up front so configuration
    # mistakes fail before any network traffic is sent.
    try:
        url, auth, encryption_key = resolve_connection_settings(args)
        updates = load_updates(args.csv)
    except Exception as exc:
        print(f"ERROR: {exc}", file=sys.stderr)
        return 1

    print(f"Loaded {len(updates)} entries from {args.csv}")

    # Dry run: show what would be sent, grouped per entry, then stop.
    if args.dry_run:
        for entry_id, secrets_obj in updates.items():
            print(
                json.dumps(
                    {"id": entry_id, "secrets_obj": secrets_obj},
                    ensure_ascii=False,
                    indent=2,
                )
            )
        return 0

    # Handshake with the MCP server, then fetch the id -> (name, folder)
    # index needed to echo back each entry's current metadata on update.
    try:
        session_id = initialize_session(url, auth, encryption_key, args.user_agent)
        entry_index = load_entry_index(
            url, auth, encryption_key, args.user_agent, session_id
        )
    except Exception as exc:
        print(f"ERROR: {exc}", file=sys.stderr)
        return 1

    success = 0
    failures = 0
    # request_id starts at 2 because JSON-RPC id 1 was used by `initialize`.
    for request_id, (entry_id, secrets_obj) in enumerate(updates.items(), start=2):
        try:
            if entry_id not in entry_index:
                raise RuntimeError(
                    f"entry id not found in secrets_entry_find results: {entry_id}"
                )
            entry_name, entry_folder = entry_index[entry_id]
            result = call_secrets_entry_update(
                url,
                auth,
                encryption_key,
                args.user_agent,
                session_id,
                request_id,
                entry_id,
                entry_name,
                entry_folder,
                secrets_obj,
            )
            # Tool-level failures arrive as JSON-RPC error objects rather
            # than HTTP errors; report them but keep processing the batch.
            if "error" in result:
                failures += 1
                print(
                    json.dumps(
                        {"id": entry_id, "status": "error", "result": result},
                        ensure_ascii=False,
                    ),
                    file=sys.stderr,
                )
            else:
                success += 1
                print(
                    json.dumps(
                        {"id": entry_id, "status": "ok", "result": result},
                        ensure_ascii=False,
                    )
                )
        except Exception as exc:
            # One bad entry must not abort the remaining updates.
            failures += 1
            print(f"{entry_id}: ERROR: {exc}", file=sys.stderr)

    print(f"Done. success={success} failure={failures}")
    return 0 if failures == 0 else 2
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
raise SystemExit(main())
|
||||||
@@ -1,20 +1,17 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
#
|
#
|
||||||
# 为 refining/secrets 仓库配置 Gitea Actions 所需的 Secrets 和 Variables
|
# 为 refining/secrets 仓库配置 v3 CI 所需的 Variables
|
||||||
# 参考: .gitea/workflows/secrets.yml
|
# 参考: .gitea/workflows/secrets.yml
|
||||||
#
|
#
|
||||||
# 所需配置:
|
# 所需配置:
|
||||||
# - secrets.RELEASE_TOKEN (必选) Release 上传用,值为 Gitea PAT
|
|
||||||
# - vars.WEBHOOK_URL (可选) 飞书通知
|
# - vars.WEBHOOK_URL (可选) 飞书通知
|
||||||
#
|
#
|
||||||
# 注意:
|
# 注意:
|
||||||
# - Gitea 不允许 secret/variable 名以 GITEA_ 或 GITHUB_ 开头,故使用 RELEASE_TOKEN
|
# - Variables 的 value 字段为原始字符串,不要 base64。
|
||||||
# - Secret/Variable 的 data/value 字段需传入原始值,不要使用 base64 编码
|
|
||||||
#
|
#
|
||||||
# 用法:
|
# 用法:
|
||||||
# 1. 从 ~/.config/gitea/config.env 读取 GITEA_URL, GITEA_TOKEN, GITEA_WEBHOOK_URL
|
# 1. 从 ~/.config/gitea/config.env 读取 GITEA_URL, GITEA_TOKEN, GITEA_WEBHOOK_URL
|
||||||
# 2. 或通过环境变量覆盖: GITEA_TOKEN(作为 RELEASE_TOKEN 的值), WEBHOOK_URL
|
# 2. 或通过环境变量覆盖: GITEA_TOKEN, WEBHOOK_URL
|
||||||
# 3. 或使用 secrets CLI 获取: 需 DATABASE_URL,从 refining/service gitea 读取
|
|
||||||
#
|
#
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
@@ -23,26 +20,38 @@ OWNER="refining"
|
|||||||
REPO="secrets"
|
REPO="secrets"
|
||||||
|
|
||||||
# 解析参数
|
# 解析参数
|
||||||
USE_SECRETS_CLI=false
|
|
||||||
while [[ $# -gt 0 ]]; do
|
while [[ $# -gt 0 ]]; do
|
||||||
case $1 in
|
case $1 in
|
||||||
--from-secrets) USE_SECRETS_CLI=true; shift ;;
|
--from-secrets)
|
||||||
|
echo "❌ --from-secrets 尚未实现,请使用 ~/.config/gitea/config.env 或环境变量" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
-h|--help)
|
-h|--help)
|
||||||
echo "用法: $0 [--from-secrets]"
|
echo "用法: $0"
|
||||||
echo ""
|
echo ""
|
||||||
echo " --from-secrets 从 secrets CLI (refining/service gitea) 获取 token 和 webhook_url"
|
echo "从 ~/.config/gitea/config.env 读取,或由环境变量覆盖。"
|
||||||
echo " 否则从 ~/.config/gitea/config.env 读取"
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "环境变量覆盖:"
|
echo "环境变量:"
|
||||||
echo " GITEA_URL Gitea 实例地址"
|
echo " GITEA_URL Gitea 实例根地址(可误带尾部 /api/v1,脚本会规范化后拼接)"
|
||||||
echo " GITEA_TOKEN 用于 Release 上传的 PAT (创建 RELEASE_TOKEN secret)"
|
echo " GITEA_TOKEN Gitea PAT"
|
||||||
echo " WEBHOOK_URL 飞书 Webhook URL (创建 variable,可选)"
|
echo " WEBHOOK_URL 或 GITEA_WEBHOOK_URL → vars.WEBHOOK_URL(可选)"
|
||||||
exit 0
|
exit 0
|
||||||
;;
|
;;
|
||||||
*) shift ;;
|
*)
|
||||||
|
echo "❌ 未知参数: $1" >&2
|
||||||
|
echo " 使用 $0 --help 查看用法" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
|
|
||||||
|
for cmd in curl jq; do
|
||||||
|
if ! command -v "$cmd" &>/dev/null; then
|
||||||
|
echo "❌ 未找到命令: $cmd(本脚本依赖 curl 与 jq)" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
# 加载配置
|
# 加载配置
|
||||||
load_config() {
|
load_config() {
|
||||||
local config="$HOME/.config/gitea/config.env"
|
local config="$HOME/.config/gitea/config.env"
|
||||||
@@ -52,26 +61,6 @@ load_config() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# 从 secrets CLI 获取 gitea 凭据
|
|
||||||
fetch_from_secrets() {
|
|
||||||
if ! command -v secrets &>/dev/null; then
|
|
||||||
echo "❌ secrets CLI 未找到,请先构建: cargo build --release" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
# 输出 JSON 格式便于解析;需要 --show-secrets
|
|
||||||
# secrets 当前无 JSON 输出,用简单解析
|
|
||||||
local out
|
|
||||||
out=$(secrets search -n refining --kind service -q gitea --show-secrets 2>/dev/null || true)
|
|
||||||
if [[ -z "$out" ]]; then
|
|
||||||
echo "❌ 未找到 refining/service gitea 记录" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
# 简化:从 metadata 和 secrets 中提取,实际格式需根据 search 输出调整
|
|
||||||
# 此处仅作占位,实际解析较复杂;建议用户优先用 config.env
|
|
||||||
echo "⚠️ --from-secrets 暂不支持自动解析,请使用 config.env 或环境变量" >&2
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
load_config
|
load_config
|
||||||
|
|
||||||
# 优先使用环境变量
|
# 优先使用环境变量
|
||||||
@@ -86,21 +75,19 @@ if [[ -z "$GITEA_URL" ]]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# 去掉 URL 尾部斜杠
|
# 规范为实例根 URL:去尾部斜杠,并去掉重复的 .../api/v1 后缀(避免拼成 .../api/v1/api/v1)
|
||||||
GITEA_URL="${GITEA_URL%/}"
|
GITEA_URL="${GITEA_URL%/}"
|
||||||
# 确保使用 /api/v1 基础路径(若用户只写了根 URL)
|
while [[ "$GITEA_URL" == */api/v1 ]]; do
|
||||||
[[ "$GITEA_URL" != *"/api/v1"* ]] || true
|
GITEA_URL="${GITEA_URL%/api/v1}"
|
||||||
|
GITEA_URL="${GITEA_URL%/}"
|
||||||
|
done
|
||||||
|
|
||||||
API_BASE="${GITEA_URL}/api/v1"
|
API_BASE="${GITEA_URL}/api/v1"
|
||||||
|
|
||||||
# 获取 GITEA_TOKEN(作为 workflow 中 secrets.RELEASE_TOKEN 的值)
|
|
||||||
if [[ -z "$GITEA_TOKEN" ]]; then
|
if [[ -z "$GITEA_TOKEN" ]]; then
|
||||||
if $USE_SECRETS_CLI; then
|
|
||||||
fetch_from_secrets || exit 1
|
|
||||||
fi
|
|
||||||
echo "❌ GITEA_TOKEN 未配置"
|
echo "❌ GITEA_TOKEN 未配置"
|
||||||
echo " 在 ~/.config/gitea/config.env 中设置,或 export GITEA_TOKEN=xxx" >&2
|
echo " 在 ~/.config/gitea/config.env 中设置,或 export GITEA_TOKEN=xxx" >&2
|
||||||
echo " Token 需具备 repo 写权限(创建 Release、上传附件)" >&2
|
echo " Token 需具备 repo 写权限" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -109,32 +96,9 @@ echo "配置 Gitea Actions: $OWNER/$REPO"
|
|||||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
# 1. 创建 Secret: RELEASE_TOKEN
|
echo "1. 创建/更新 Variable: WEBHOOK_URL(可选)"
|
||||||
# 注意: Gitea Actions API 的 data 字段需传入原始值,不要使用 base64 编码
|
|
||||||
echo "1. 创建 Secret: RELEASE_TOKEN"
|
|
||||||
secret_payload=$(jq -n --arg t "$GITEA_TOKEN" '{data: $t}')
|
|
||||||
resp=$(curl -s -w "\n%{http_code}" -X PUT \
|
|
||||||
-H "Authorization: token $GITEA_TOKEN" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-d "$secret_payload" \
|
|
||||||
"${API_BASE}/repos/${OWNER}/${REPO}/actions/secrets/RELEASE_TOKEN")
|
|
||||||
http_code=$(echo "$resp" | tail -n1)
|
|
||||||
body=$(echo "$resp" | sed '$d')
|
|
||||||
|
|
||||||
if [[ "$http_code" == "200" || "$http_code" == "201" || "$http_code" == "204" ]]; then
|
|
||||||
echo " ✓ RELEASE_TOKEN 已创建/更新"
|
|
||||||
else
|
|
||||||
echo " ❌ 失败 (HTTP $http_code)" >&2
|
|
||||||
echo "$body" | jq -r '.message // .' 2>/dev/null || echo "$body" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# 2. 创建/更新 Variable: WEBHOOK_URL(可选)
|
|
||||||
# 注意: Secret 和 Variable 均使用原始值,不要 base64 编码
|
|
||||||
WEBHOOK_VALUE="${WEBHOOK_URL:-$GITEA_WEBHOOK_URL}"
|
WEBHOOK_VALUE="${WEBHOOK_URL:-$GITEA_WEBHOOK_URL}"
|
||||||
if [[ -n "$WEBHOOK_VALUE" ]]; then
|
if [[ -n "$WEBHOOK_VALUE" ]]; then
|
||||||
echo ""
|
|
||||||
echo "2. 创建/更新 Variable: WEBHOOK_URL"
|
|
||||||
var_payload=$(jq -n --arg v "$WEBHOOK_VALUE" '{value: $v}')
|
var_payload=$(jq -n --arg v "$WEBHOOK_VALUE" '{value: $v}')
|
||||||
resp=$(curl -s -w "\n%{http_code}" -X POST \
|
resp=$(curl -s -w "\n%{http_code}" -X POST \
|
||||||
-H "Authorization: token $GITEA_TOKEN" \
|
-H "Authorization: token $GITEA_TOKEN" \
|
||||||
@@ -163,8 +127,7 @@ if [[ -n "$WEBHOOK_VALUE" ]]; then
|
|||||||
echo " ⚠ 失败 (HTTP $http_code),飞书通知将不可用" >&2
|
echo " ⚠ 失败 (HTTP $http_code),飞书通知将不可用" >&2
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
echo ""
|
echo " 跳过 WEBHOOK_URL(未配置 GITEA_WEBHOOK_URL 或 WEBHOOK_URL)"
|
||||||
echo "2. 跳过 WEBHOOK_URL(未配置 GITEA_WEBHOOK_URL 或 WEBHOOK_URL)"
|
|
||||||
echo " 飞书通知将不可用;如需可后续在仓库 Settings → Variables 中添加"
|
echo " 飞书通知将不可用;如需可后续在仓库 Settings → Variables 中添加"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -174,8 +137,7 @@ echo "✓ 配置完成"
|
|||||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Workflow 将使用:"
|
echo "Workflow 将使用:"
|
||||||
echo " - secrets.RELEASE_TOKEN 创建 Release 并上传二进制"
|
echo " - vars.WEBHOOK_URL 发送飞书通知(如已配置)"
|
||||||
echo " - vars.WEBHOOK_URL 发送飞书通知(如已配置)"
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "推送代码触发构建:"
|
echo "推送代码触发构建:"
|
||||||
echo " git push origin main"
|
echo " git push origin main"
|
||||||
|
|||||||
32
src/audit.rs
32
src/audit.rs
@@ -1,32 +0,0 @@
|
|||||||
use serde_json::Value;
|
|
||||||
use sqlx::{Postgres, Transaction};
|
|
||||||
|
|
||||||
/// Write an audit entry within an existing transaction.
|
|
||||||
/// Write an audit entry within an existing transaction.
///
/// Best-effort: insert failures are logged at `warn` level and swallowed so
/// that auditing can never abort the caller's transaction. The acting user
/// is taken from the `USER` environment variable (empty string when unset).
pub async fn log_tx(
    tx: &mut Transaction<'_, Postgres>,
    action: &str,
    namespace: &str,
    kind: &str,
    name: &str,
    detail: Value,
) {
    let actor = std::env::var("USER").unwrap_or_default();
    let result: Result<_, sqlx::Error> = sqlx::query(
        "INSERT INTO audit_log (action, namespace, kind, name, detail, actor) \
         VALUES ($1, $2, $3, $4, $5, $6)",
    )
    .bind(action)
    .bind(namespace)
    .bind(kind)
    .bind(name)
    .bind(&detail)
    .bind(&actor)
    .execute(&mut **tx)
    .await;

    // Deliberately non-fatal: a lost audit row is preferable to failing the
    // business operation being audited.
    if let Err(e) = result {
        tracing::warn!(error = %e, "failed to write audit log");
    } else {
        tracing::debug!(action, namespace, kind, name, actor, "audit logged");
    }
}
|
|
||||||
@@ -1,366 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use serde_json::{Map, Value, json};
|
|
||||||
use sqlx::PgPool;
|
|
||||||
use std::fs;
|
|
||||||
|
|
||||||
use crate::crypto;
|
|
||||||
use crate::db;
|
|
||||||
use crate::output::OutputMode;
|
|
||||||
|
|
||||||
/// Parse secret / metadata entries into a nested key path and JSON value.
/// - `key=value` → stores the literal string `value`
/// - `key:=<json>` → parses `<json>` as a typed JSON value
/// - `key=@file` → reads the file content as a string
/// - `a:b=value` → writes nested fields: `{ "a": { "b": "value" } }`
/// - `a:b@./file.txt` → shorthand for nested file reads without manual JSON escaping
///
/// # Errors
/// Fails when the entry matches none of the supported forms, when the JSON
/// payload of `key:=<json>` is malformed, when a referenced file cannot be
/// read, or when the key path has an empty segment.
pub(crate) fn parse_kv(entry: &str) -> Result<(Vec<String>, Value)> {
    // Typed JSON form: key:=<json>
    // NOTE: checked before '=' because "key:=..." also contains '=' and
    // would otherwise be captured by the plain-string branch below.
    if let Some((key, json_str)) = entry.split_once(":=") {
        let val: Value = serde_json::from_str(json_str).map_err(|e| {
            anyhow::anyhow!(
                "Invalid JSON value for key '{}': {} (use key=value for plain strings)",
                key,
                e
            )
        })?;
        return Ok((parse_key_path(key)?, val));
    }

    // Plain string form: key=value or key=@file
    if let Some((key, raw_val)) = entry.split_once('=') {
        // '@'-prefixed values are read from disk; everything else is literal.
        let value = if let Some(path) = raw_val.strip_prefix('@') {
            fs::read_to_string(path)
                .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?
        } else {
            raw_val.to_string()
        };

        return Ok((parse_key_path(key)?, Value::String(value)));
    }

    // Shorthand file form: nested:key@file (no '=' present at all)
    if let Some((key, path)) = entry.split_once('@') {
        let value = fs::read_to_string(path)
            .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?;
        return Ok((parse_key_path(key)?, Value::String(value)));
    }

    anyhow::bail!(
        "Invalid format '{}'. Expected: key=value, key=@file, nested:key@file, or key:=<json>",
        entry
    )
}
|
|
||||||
|
|
||||||
pub(crate) fn build_json(entries: &[String]) -> Result<Value> {
|
|
||||||
let mut map = Map::new();
|
|
||||||
for entry in entries {
|
|
||||||
let (path, value) = parse_kv(entry)?;
|
|
||||||
insert_path(&mut map, &path, value)?;
|
|
||||||
}
|
|
||||||
Ok(Value::Object(map))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Render a key path as a single string, separating segments with ':'.
pub(crate) fn key_path_to_string(path: &[String]) -> String {
    let mut rendered = String::new();
    for (i, segment) in path.iter().enumerate() {
        if i > 0 {
            rendered.push(':');
        }
        rendered.push_str(segment);
    }
    rendered
}
|
|
||||||
|
|
||||||
pub(crate) fn collect_key_paths(entries: &[String]) -> Result<Vec<String>> {
|
|
||||||
entries
|
|
||||||
.iter()
|
|
||||||
.map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path)))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn collect_field_paths(entries: &[String]) -> Result<Vec<String>> {
|
|
||||||
entries
|
|
||||||
.iter()
|
|
||||||
.map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path)))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn parse_key_path(key: &str) -> Result<Vec<String>> {
|
|
||||||
let path: Vec<String> = key
|
|
||||||
.split(':')
|
|
||||||
.map(str::trim)
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
if path.is_empty() || path.iter().any(|part| part.is_empty()) {
|
|
||||||
anyhow::bail!(
|
|
||||||
"Invalid key path '{}'. Use non-empty segments like 'credentials:content'.",
|
|
||||||
key
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn insert_path(
|
|
||||||
map: &mut Map<String, Value>,
|
|
||||||
path: &[String],
|
|
||||||
value: Value,
|
|
||||||
) -> Result<()> {
|
|
||||||
if path.is_empty() {
|
|
||||||
anyhow::bail!("Key path cannot be empty");
|
|
||||||
}
|
|
||||||
|
|
||||||
if path.len() == 1 {
|
|
||||||
map.insert(path[0].clone(), value);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let head = path[0].clone();
|
|
||||||
let tail = &path[1..];
|
|
||||||
|
|
||||||
match map.entry(head.clone()) {
|
|
||||||
serde_json::map::Entry::Vacant(entry) => {
|
|
||||||
let mut child = Map::new();
|
|
||||||
insert_path(&mut child, tail, value)?;
|
|
||||||
entry.insert(Value::Object(child));
|
|
||||||
}
|
|
||||||
serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() {
|
|
||||||
Value::Object(child) => insert_path(child, tail, value)?,
|
|
||||||
_ => {
|
|
||||||
anyhow::bail!(
|
|
||||||
"Cannot set nested key '{}' because '{}' is already a non-object value",
|
|
||||||
key_path_to_string(path),
|
|
||||||
head
|
|
||||||
);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Result<bool> {
|
|
||||||
if path.is_empty() {
|
|
||||||
anyhow::bail!("Key path cannot be empty");
|
|
||||||
}
|
|
||||||
|
|
||||||
if path.len() == 1 {
|
|
||||||
return Ok(map.remove(&path[0]).is_some());
|
|
||||||
}
|
|
||||||
|
|
||||||
let Some(value) = map.get_mut(&path[0]) else {
|
|
||||||
return Ok(false);
|
|
||||||
};
|
|
||||||
|
|
||||||
let Value::Object(child) = value else {
|
|
||||||
return Ok(false);
|
|
||||||
};
|
|
||||||
|
|
||||||
let removed = remove_path(child, &path[1..])?;
|
|
||||||
if child.is_empty() {
|
|
||||||
map.remove(&path[0]);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(removed)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Borrowed argument bundle for the `add` command.
pub struct AddArgs<'a> {
    // namespace / kind / name form the record's unique key (see the
    // ON CONFLICT clause in `run`).
    pub namespace: &'a str,
    pub kind: &'a str,
    pub name: &'a str,
    // Free-form tags stored alongside the record.
    pub tags: &'a [String],
    // `key=value`-style entries stored as plaintext JSON metadata.
    pub meta_entries: &'a [String],
    // `key=value`-style entries that are encrypted before storage.
    pub secret_entries: &'a [String],
    // How the result is printed (text / JSON / compact JSON).
    pub output: OutputMode,
}
|
|
||||||
|
|
||||||
/// Upsert a secret record: encrypt the secret entries, snapshot any
/// existing row into history, then insert-or-update in one transaction.
pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    // Build plaintext metadata and the secret payload; only the secret
    // payload is encrypted before it touches the database.
    let metadata = build_json(args.meta_entries)?;
    let secret_json = build_json(args.secret_entries)?;
    let encrypted_bytes = crypto::encrypt_json(master_key, &secret_json)?;

    tracing::debug!(args.namespace, args.kind, args.name, "upserting record");

    // Key paths only (no values) — safe to put in audit logs and output.
    let meta_keys = collect_key_paths(args.meta_entries)?;
    let secret_keys = collect_key_paths(args.secret_entries)?;

    let mut tx = pool.begin().await?;

    // Snapshot existing row into history before overwriting (if it exists).
    #[derive(sqlx::FromRow)]
    struct ExistingRow {
        id: uuid::Uuid,
        version: i64,
        tags: Vec<String>,
        metadata: serde_json::Value,
        encrypted: Vec<u8>,
    }
    let existing: Option<ExistingRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted FROM secrets \
         WHERE namespace = $1 AND kind = $2 AND name = $3",
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .fetch_optional(&mut *tx)
    .await?;

    // Snapshot failure is deliberately non-fatal: the upsert proceeds,
    // we only lose the ability to roll back to the previous version.
    if let Some(ex) = existing
        && let Err(e) = db::snapshot_history(
            &mut tx,
            db::SnapshotParams {
                secret_id: ex.id,
                namespace: args.namespace,
                kind: args.kind,
                name: args.name,
                version: ex.version,
                action: "add",
                tags: &ex.tags,
                metadata: &ex.metadata,
                encrypted: &ex.encrypted,
            },
        )
        .await
    {
        tracing::warn!(error = %e, "failed to snapshot history before upsert");
    }

    // New rows start at version 1; conflicting rows bump their version.
    sqlx::query(
        r#"
        INSERT INTO secrets (namespace, kind, name, tags, metadata, encrypted, version, updated_at)
        VALUES ($1, $2, $3, $4, $5, $6, 1, NOW())
        ON CONFLICT (namespace, kind, name)
        DO UPDATE SET
            tags = EXCLUDED.tags,
            metadata = EXCLUDED.metadata,
            encrypted = EXCLUDED.encrypted,
            version = secrets.version + 1,
            updated_at = NOW()
        "#,
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .bind(args.tags)
    .bind(&metadata)
    .bind(&encrypted_bytes)
    .execute(&mut *tx)
    .await?;

    // Audit log records key names only — never the secret values.
    crate::audit::log_tx(
        &mut tx,
        "add",
        args.namespace,
        args.kind,
        args.name,
        json!({
            "tags": args.tags,
            "meta_keys": meta_keys,
            "secret_keys": secret_keys,
        }),
    )
    .await;

    tx.commit().await?;

    let result_json = json!({
        "action": "added",
        "namespace": args.namespace,
        "kind": args.kind,
        "name": args.name,
        "tags": args.tags,
        "meta_keys": meta_keys,
        "secret_keys": secret_keys,
    });

    match args.output {
        OutputMode::Json => {
            println!("{}", serde_json::to_string_pretty(&result_json)?);
        }
        OutputMode::JsonCompact => {
            println!("{}", serde_json::to_string(&result_json)?);
        }
        _ => {
            println!("Added: [{}/{}] {}", args.namespace, args.kind, args.name);
            if !args.tags.is_empty() {
                println!("  tags: {}", args.tags.join(", "));
            }
            if !args.meta_entries.is_empty() {
                println!("  metadata: {}", meta_keys.join(", "));
            }
            if !args.secret_entries.is_empty() {
                println!("  secrets: {}", secret_keys.join(", "));
            }
        }
    }

    Ok(())
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::{build_json, key_path_to_string, parse_kv, remove_path};
    use serde_json::Value;
    use std::fs;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};

    // Unique temp-file path per call; the nanosecond timestamp keeps
    // parallel test runs from colliding on the same file name.
    fn temp_file_path(name: &str) -> PathBuf {
        let nanos = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock should be after unix epoch")
            .as_nanos();
        std::env::temp_dir().join(format!("secrets-{name}-{nanos}.txt"))
    }

    // `nested:key@file` shorthand should read the file's full contents
    // as the value at the nested key path.
    #[test]
    fn parse_nested_file_shorthand() {
        let path = temp_file_path("ssh-key");
        fs::write(&path, "line1\nline2\n").expect("should write temp file");

        let entry = format!("credentials:content@{}", path.display());
        let (path_parts, value) = parse_kv(&entry).expect("should parse nested file shorthand");

        assert_eq!(key_path_to_string(&path_parts), "credentials:content");
        assert_eq!(value, serde_json::Value::String("line1\nline2\n".into()));

        fs::remove_file(path).expect("should remove temp file");
    }

    // Mixing plain `key=value`, nested keys, and `:=` raw-JSON entries
    // should produce one merged nested object.
    #[test]
    fn build_nested_json_from_mixed_entries() {
        let payload = vec![
            "credentials:type=ssh".to_string(),
            "credentials:enabled:=true".to_string(),
            "username=root".to_string(),
        ];

        let value = build_json(&payload).expect("should build nested json");

        assert_eq!(
            value,
            serde_json::json!({
                "credentials": {
                    "type": "ssh",
                    "enabled": true
                },
                "username": "root"
            })
        );
    }

    // Removing the last key inside a nested object should also remove
    // the now-empty parent object.
    #[test]
    fn remove_nested_path_prunes_empty_parents() {
        let mut value = serde_json::json!({
            "credentials": {
                "content": "pem-data"
            },
            "username": "root"
        });

        let map = match &mut value {
            Value::Object(map) => map,
            _ => panic!("expected object"),
        };

        let removed = remove_path(map, &["credentials".to_string(), "content".to_string()])
            .expect("should remove nested field");

        assert!(removed);
        assert_eq!(value, serde_json::json!({ "username": "root" }));
    }
}
|
|
||||||
@@ -1,55 +0,0 @@
|
|||||||
use crate::config::{self, Config, config_path};
|
|
||||||
use anyhow::Result;
|
|
||||||
|
|
||||||
/// Dispatch the `config` subcommand: set/show/locate the database URL.
pub async fn run(action: crate::ConfigAction) -> Result<()> {
    match action {
        crate::ConfigAction::SetDb { url } => {
            // Verify connection before writing config
            let pool = crate::db::create_pool(&url)
                .await
                .map_err(|e| anyhow::anyhow!("Database connection failed: {}", e))?;
            // Pool only exists to prove connectivity; release it immediately.
            drop(pool);
            println!("Database connection successful.");

            let cfg = Config {
                database_url: Some(url.clone()),
            };
            config::save_config(&cfg)?;
            println!("Database URL saved to: {}", config_path().display());
            // Echo the URL with the password masked, never in clear text.
            println!("  {}", mask_password(&url));
        }
        crate::ConfigAction::Show => {
            let cfg = config::load_config()?;
            match cfg.database_url {
                Some(url) => {
                    println!("database_url = {}", mask_password(&url));
                    println!("config file: {}", config_path().display());
                }
                None => {
                    println!("Database URL not configured.");
                    println!("Run: secrets config set-db <DATABASE_URL>");
                }
            }
        }
        crate::ConfigAction::Path => {
            println!("{}", config_path().display());
        }
    }
    Ok(())
}
|
|
||||||
|
|
||||||
/// Mask the password in a postgres://user:password@host/db URL.
///
/// Returns the URL unchanged when there is no scheme, no `@`, or no
/// `user:password` pair in the authority section.
fn mask_password(url: &str) -> String {
    let Some(scheme_end) = url.find("://") else {
        return url.to_string();
    };
    let host_start = scheme_end + 3;

    // Guard against `@` appearing before the scheme separator: slicing
    // `url[host_start..at_pos]` with `at_pos < host_start` would panic.
    match url.rfind('@') {
        Some(at_pos) if at_pos >= host_start => {
            let credentials = &url[host_start..at_pos];
            if let Some(colon_pos) = credentials.find(':') {
                let user = &credentials[..colon_pos];
                return format!("{}{}:***{}", &url[..host_start], user, &url[at_pos..]);
            }
            url.to_string()
        }
        _ => url.to_string(),
    }
}
|
|
||||||
@@ -1,107 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use serde_json::{Value, json};
|
|
||||||
use sqlx::{FromRow, PgPool};
|
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
use crate::db;
|
|
||||||
use crate::output::OutputMode;
|
|
||||||
|
|
||||||
/// Live `secrets` row selected (with FOR UPDATE) before deletion so it
/// can be snapshotted into history first.
#[derive(FromRow)]
struct DeleteRow {
    id: Uuid,
    version: i64,
    tags: Vec<String>,
    metadata: Value,
    // Ciphertext blob exactly as stored; copied verbatim into history.
    encrypted: Vec<u8>,
}
|
|
||||||
|
|
||||||
/// Delete a record by (namespace, kind, name), snapshotting it into
/// history first so it can be restored via rollback.
pub async fn run(
    pool: &PgPool,
    namespace: &str,
    kind: &str,
    name: &str,
    output: OutputMode,
) -> Result<()> {
    tracing::debug!(namespace, kind, name, "deleting record");

    let mut tx = pool.begin().await?;

    // Lock the row (FOR UPDATE) so the snapshot and the delete see the
    // same version even under concurrent writers.
    let row: Option<DeleteRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted FROM secrets \
         WHERE namespace = $1 AND kind = $2 AND name = $3 \
         FOR UPDATE",
    )
    .bind(namespace)
    .bind(kind)
    .bind(name)
    .fetch_optional(&mut *tx)
    .await?;

    // Missing row: report "not found" in the requested format and exit
    // successfully (delete is idempotent from the caller's view).
    let Some(row) = row else {
        tx.rollback().await?;
        tracing::warn!(namespace, kind, name, "record not found for deletion");
        match output {
            OutputMode::Json => println!(
                "{}",
                serde_json::to_string_pretty(
                    &json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name})
                )?
            ),
            OutputMode::JsonCompact => println!(
                "{}",
                serde_json::to_string(
                    &json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name})
                )?
            ),
            _ => println!("Not found: [{}/{}] {}", namespace, kind, name),
        }
        return Ok(());
    };

    // Snapshot before physical delete so the row can be restored via rollback.
    // Snapshot failure is non-fatal: the delete still proceeds.
    if let Err(e) = db::snapshot_history(
        &mut tx,
        db::SnapshotParams {
            secret_id: row.id,
            namespace,
            kind,
            name,
            version: row.version,
            action: "delete",
            tags: &row.tags,
            metadata: &row.metadata,
            encrypted: &row.encrypted,
        },
    )
    .await
    {
        tracing::warn!(error = %e, "failed to snapshot history before delete");
    }

    sqlx::query("DELETE FROM secrets WHERE id = $1")
        .bind(row.id)
        .execute(&mut *tx)
        .await?;

    crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await;

    tx.commit().await?;

    match output {
        OutputMode::Json => println!(
            "{}",
            serde_json::to_string_pretty(
                &json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name})
            )?
        ),
        OutputMode::JsonCompact => println!(
            "{}",
            serde_json::to_string(
                &json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name})
            )?
        ),
        _ => println!("Deleted: [{}/{}] {}", namespace, kind, name),
    }

    Ok(())
}
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
use anyhow::{Context, Result};
|
|
||||||
use rand::RngExt;
|
|
||||||
use sqlx::PgPool;
|
|
||||||
|
|
||||||
use crate::{crypto, db};
|
|
||||||
|
|
||||||
const MIN_MASTER_PASSWORD_LEN: usize = 8;
|
|
||||||
|
|
||||||
/// Interactive `init`: derive the master key from a password, store it in
/// the OS keychain, and verify it with an encrypt/decrypt round trip.
pub async fn run(pool: &PgPool) -> Result<()> {
    println!("Initializing secrets master key...");
    println!();

    // Read password (no echo)
    let password = rpassword::prompt_password(format!(
        "Enter master password (at least {} characters): ",
        MIN_MASTER_PASSWORD_LEN
    ))
    .context("failed to read password")?;
    // Count chars, not bytes, so multi-byte characters count as one.
    if password.chars().count() < MIN_MASTER_PASSWORD_LEN {
        anyhow::bail!(
            "Master password must be at least {} characters.",
            MIN_MASTER_PASSWORD_LEN
        );
    }
    let confirm = rpassword::prompt_password("Confirm master password: ")
        .context("failed to read password confirmation")?;
    if password != confirm {
        anyhow::bail!("Passwords do not match.");
    }

    // Get or create Argon2id salt. The salt lives in the database so a
    // second device running `init` with the same password derives the
    // same master key.
    let salt = match db::load_argon2_salt(pool).await? {
        Some(existing) => {
            println!("Found existing salt in database (not the first device).");
            existing
        }
        None => {
            println!("Generating new Argon2id salt and storing in database...");
            let mut salt = vec![0u8; 16];
            rand::rng().fill(&mut salt[..]);
            db::store_argon2_salt(pool, &salt).await?;
            salt
        }
    };

    // Derive master key
    print!("Deriving master key (Argon2id, this takes a moment)... ");
    let master_key = crypto::derive_master_key(&password, &salt)?;
    println!("done.");

    // Store in OS Keychain
    crypto::store_master_key(&master_key)?;

    // Self-test: encrypt and decrypt a canary value
    let canary = b"secrets-cli-canary";
    let enc = crypto::encrypt(&master_key, canary)?;
    let dec = crypto::decrypt(&master_key, &enc)?;
    if dec != canary {
        anyhow::bail!("Self-test failed: encryption roundtrip mismatch");
    }

    println!();
    println!("Master key stored in OS Keychain.");
    println!("You can now use `secrets add` / `secrets search` commands.");
    println!();
    println!("IMPORTANT: Remember your master password — it is not stored anywhere.");
    println!("           On a new device, run `secrets init` with the same password.");

    Ok(())
}
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
pub mod add;
|
|
||||||
pub mod config;
|
|
||||||
pub mod delete;
|
|
||||||
pub mod init;
|
|
||||||
pub mod rollback;
|
|
||||||
pub mod run;
|
|
||||||
pub mod search;
|
|
||||||
pub mod update;
|
|
||||||
pub mod upgrade;
|
|
||||||
@@ -1,239 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use serde_json::{Value, json};
|
|
||||||
use sqlx::{FromRow, PgPool};
|
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
use crate::output::{OutputMode, format_local_time};
|
|
||||||
|
|
||||||
/// One snapshot row from `secrets_history`, as selected by the rollback
/// queries.
#[derive(FromRow)]
struct HistoryRow {
    // Id of the live `secrets` row this snapshot belongs to.
    secret_id: Uuid,
    version: i64,
    // The action ("add" / "delete" / ...) that produced this snapshot.
    action: String,
    tags: Vec<String>,
    metadata: Value,
    // Ciphertext blob exactly as stored; probed for validity, restored verbatim.
    encrypted: Vec<u8>,
}
|
|
||||||
|
|
||||||
/// Borrowed argument bundle for the `rollback` command.
pub struct RollbackArgs<'a> {
    // namespace / kind / name identify the record to restore.
    pub namespace: &'a str,
    pub kind: &'a str,
    pub name: &'a str,
    /// Target version to restore. None → restore the most recent history entry.
    pub to_version: Option<i64>,
    // How the result is printed (text / JSON / compact JSON).
    pub output: OutputMode,
}
|
|
||||||
|
|
||||||
/// Restore a record from `secrets_history`, snapshotting the current live
/// row first so the rollback itself can be rolled back.
pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    // Pick the snapshot: a specific version if requested, otherwise the
    // most recent history entry for this record.
    let snap: Option<HistoryRow> = if let Some(ver) = args.to_version {
        sqlx::query_as(
            "SELECT secret_id, version, action, tags, metadata, encrypted \
             FROM secrets_history \
             WHERE namespace = $1 AND kind = $2 AND name = $3 AND version = $4 \
             ORDER BY id DESC LIMIT 1",
        )
        .bind(args.namespace)
        .bind(args.kind)
        .bind(args.name)
        .bind(ver)
        .fetch_optional(pool)
        .await?
    } else {
        sqlx::query_as(
            "SELECT secret_id, version, action, tags, metadata, encrypted \
             FROM secrets_history \
             WHERE namespace = $1 AND kind = $2 AND name = $3 \
             ORDER BY id DESC LIMIT 1",
        )
        .bind(args.namespace)
        .bind(args.kind)
        .bind(args.name)
        .fetch_optional(pool)
        .await?
    };

    let snap = snap.ok_or_else(|| {
        anyhow::anyhow!(
            "No history found for [{}/{}] {}{}.",
            args.namespace,
            args.kind,
            args.name,
            args.to_version
                .map(|v| format!(" at version {}", v))
                .unwrap_or_default()
        )
    })?;

    // Validate encrypted blob is non-trivial (re-encrypt guard).
    if !snap.encrypted.is_empty() {
        // Probe decrypt to ensure the blob is valid before restoring.
        crate::crypto::decrypt_json(master_key, &snap.encrypted)?;
    }

    let mut tx = pool.begin().await?;

    // Snapshot current live row (if it exists) before overwriting.
    #[derive(sqlx::FromRow)]
    struct LiveRow {
        id: Uuid,
        version: i64,
        tags: Vec<String>,
        metadata: Value,
        encrypted: Vec<u8>,
    }
    let live: Option<LiveRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted FROM secrets \
         WHERE namespace = $1 AND kind = $2 AND name = $3 FOR UPDATE",
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .fetch_optional(&mut *tx)
    .await?;

    // Snapshot failure is non-fatal: the rollback still proceeds.
    if let Some(lr) = live
        && let Err(e) = crate::db::snapshot_history(
            &mut tx,
            crate::db::SnapshotParams {
                secret_id: lr.id,
                namespace: args.namespace,
                kind: args.kind,
                name: args.name,
                version: lr.version,
                action: "rollback",
                tags: &lr.tags,
                metadata: &lr.metadata,
                encrypted: &lr.encrypted,
            },
        )
        .await
    {
        tracing::warn!(error = %e, "failed to snapshot current row before rollback");
    }

    // Re-insert the snapshot; a still-existing live row gets its content
    // replaced and its version bumped rather than reset.
    sqlx::query(
        "INSERT INTO secrets (id, namespace, kind, name, tags, metadata, encrypted, version, updated_at) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW()) \
         ON CONFLICT (namespace, kind, name) DO UPDATE SET \
         tags = EXCLUDED.tags, \
         metadata = EXCLUDED.metadata, \
         encrypted = EXCLUDED.encrypted, \
         version = secrets.version + 1, \
         updated_at = NOW()",
    )
    .bind(snap.secret_id)
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .bind(&snap.tags)
    .bind(&snap.metadata)
    .bind(&snap.encrypted)
    .bind(snap.version)
    .execute(&mut *tx)
    .await?;

    crate::audit::log_tx(
        &mut tx,
        "rollback",
        args.namespace,
        args.kind,
        args.name,
        json!({
            "restored_version": snap.version,
            "original_action": snap.action,
        }),
    )
    .await;

    tx.commit().await?;

    let result_json = json!({
        "action": "rolled_back",
        "namespace": args.namespace,
        "kind": args.kind,
        "name": args.name,
        "restored_version": snap.version,
    });

    match args.output {
        OutputMode::Json => println!("{}", serde_json::to_string_pretty(&result_json)?),
        OutputMode::JsonCompact => println!("{}", serde_json::to_string(&result_json)?),
        _ => println!(
            "Rolled back: [{}/{}] {} → version {}",
            args.namespace, args.kind, args.name, snap.version
        ),
    }

    Ok(())
}
|
|
||||||
|
|
||||||
/// List history entries for a record.
///
/// Prints up to `limit` of the most recent `secrets_history` entries for
/// (namespace, kind, name), either as JSON or as an aligned text table.
pub async fn list_history(
    pool: &PgPool,
    namespace: &str,
    kind: &str,
    name: &str,
    limit: u32,
    output: OutputMode,
) -> Result<()> {
    // Only the summary columns — never the encrypted payload.
    #[derive(FromRow)]
    struct HistorySummary {
        version: i64,
        action: String,
        actor: String,
        created_at: chrono::DateTime<chrono::Utc>,
    }

    let rows: Vec<HistorySummary> = sqlx::query_as(
        "SELECT version, action, actor, created_at FROM secrets_history \
         WHERE namespace = $1 AND kind = $2 AND name = $3 \
         ORDER BY id DESC LIMIT $4",
    )
    .bind(namespace)
    .bind(kind)
    .bind(name)
    .bind(limit as i64)
    .fetch_all(pool)
    .await?;

    match output {
        OutputMode::Json | OutputMode::JsonCompact => {
            let arr: Vec<Value> = rows
                .iter()
                .map(|r| {
                    json!({
                        "version": r.version,
                        "action": r.action,
                        "actor": r.actor,
                        // JSON output uses UTC in a fixed ISO-8601 shape.
                        "created_at": r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
                    })
                })
                .collect();
            let out = if output == OutputMode::Json {
                serde_json::to_string_pretty(&arr)?
            } else {
                serde_json::to_string(&arr)?
            };
            println!("{}", out);
        }
        _ => {
            if rows.is_empty() {
                println!("No history found for [{}/{}] {}.", namespace, kind, name);
                return Ok(());
            }
            println!("History for [{}/{}] {}:", namespace, kind, name);
            for r in &rows {
                // Text output uses local time for human readability.
                println!(
                    "  v{:<4} {:8} {} {}",
                    r.version,
                    r.action,
                    r.actor,
                    format_local_time(r.created_at)
                );
            }
            println!("  (use `secrets rollback --to-version <N>` to restore)");
        }
    }

    Ok(())
}
|
|
||||||
@@ -1,143 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use serde_json::Value;
|
|
||||||
use sqlx::PgPool;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
use crate::commands::search::build_injected_env_map;
|
|
||||||
use crate::output::OutputMode;
|
|
||||||
|
|
||||||
/// Borrowed argument bundle for the `inject` command.
pub struct InjectArgs<'a> {
    // Filters — at least one must be set (enforced in `collect_env_map`).
    pub namespace: Option<&'a str>,
    pub kind: Option<&'a str>,
    pub name: Option<&'a str>,
    pub tags: &'a [String],
    /// Prefix to prepend to every variable name. Empty string means no prefix.
    pub prefix: &'a str,
    // How the env map is printed (text KEY=VALUE / JSON / compact JSON).
    pub output: OutputMode,
}
|
|
||||||
|
|
||||||
/// Borrowed argument bundle for the `run` command.
pub struct RunArgs<'a> {
    // Filters — at least one must be set (enforced in `collect_env_map`).
    pub namespace: Option<&'a str>,
    pub kind: Option<&'a str>,
    pub name: Option<&'a str>,
    pub tags: &'a [String],
    // Prefix prepended to every injected variable name.
    pub prefix: &'a str,
    /// The command and its arguments to execute with injected secrets.
    pub command: &'a [String],
}
|
|
||||||
|
|
||||||
/// Fetch secrets matching the filter and build a flat env map.
|
|
||||||
/// Metadata and secret fields are merged; naming: `<PREFIX_><NAME>_<KEY>` (uppercased).
|
|
||||||
pub async fn collect_env_map(
|
|
||||||
pool: &PgPool,
|
|
||||||
namespace: Option<&str>,
|
|
||||||
kind: Option<&str>,
|
|
||||||
name: Option<&str>,
|
|
||||||
tags: &[String],
|
|
||||||
prefix: &str,
|
|
||||||
master_key: &[u8; 32],
|
|
||||||
) -> Result<HashMap<String, String>> {
|
|
||||||
if namespace.is_none() && kind.is_none() && name.is_none() && tags.is_empty() {
|
|
||||||
anyhow::bail!(
|
|
||||||
"At least one filter (--namespace, --kind, --name, or --tag) is required for inject/run"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let rows = crate::commands::search::fetch_rows(pool, namespace, kind, name, tags, None).await?;
|
|
||||||
if rows.is_empty() {
|
|
||||||
anyhow::bail!("No records matched the given filters.");
|
|
||||||
}
|
|
||||||
let mut map = HashMap::new();
|
|
||||||
for row in &rows {
|
|
||||||
let row_map = build_injected_env_map(row, prefix, master_key)?;
|
|
||||||
for (k, v) in row_map {
|
|
||||||
map.insert(k, v);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(map)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `inject` command: print env vars to stdout (suitable for `eval $(...)` or export).
|
|
||||||
pub async fn run_inject(pool: &PgPool, args: InjectArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
|
|
||||||
let env_map = collect_env_map(
|
|
||||||
pool,
|
|
||||||
args.namespace,
|
|
||||||
args.kind,
|
|
||||||
args.name,
|
|
||||||
args.tags,
|
|
||||||
args.prefix,
|
|
||||||
master_key,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
match args.output {
|
|
||||||
OutputMode::Json => {
|
|
||||||
let obj: serde_json::Map<String, Value> = env_map
|
|
||||||
.into_iter()
|
|
||||||
.map(|(k, v)| (k, Value::String(v)))
|
|
||||||
.collect();
|
|
||||||
println!("{}", serde_json::to_string_pretty(&Value::Object(obj))?);
|
|
||||||
}
|
|
||||||
OutputMode::JsonCompact => {
|
|
||||||
let obj: serde_json::Map<String, Value> = env_map
|
|
||||||
.into_iter()
|
|
||||||
.map(|(k, v)| (k, Value::String(v)))
|
|
||||||
.collect();
|
|
||||||
println!("{}", serde_json::to_string(&Value::Object(obj))?);
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
// Shell-safe KEY=VALUE output, one per line.
|
|
||||||
let mut pairs: Vec<(String, String)> = env_map.into_iter().collect();
|
|
||||||
pairs.sort_by(|a, b| a.0.cmp(&b.0));
|
|
||||||
for (k, v) in pairs {
|
|
||||||
println!("{}={}", k, shell_quote(&v));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `run` command: inject secrets into a child process environment and execute.
pub async fn run_exec(pool: &PgPool, args: RunArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    if args.command.is_empty() {
        anyhow::bail!(
            "No command specified. Usage: secrets run [filter flags] -- <command> [args]"
        );
    }

    let env_map = collect_env_map(
        pool,
        args.namespace,
        args.kind,
        args.name,
        args.tags,
        args.prefix,
        master_key,
    )
    .await?;

    // Log only the variable count and command name — never the values.
    tracing::debug!(
        vars = env_map.len(),
        cmd = args.command[0].as_str(),
        "injecting secrets into child process"
    );

    let status = std::process::Command::new(&args.command[0])
        .args(&args.command[1..])
        .envs(&env_map)
        .status()
        .map_err(|e| anyhow::anyhow!("Failed to execute '{}': {}", args.command[0], e))?;

    // Propagate the child's exit code to our own process so shell
    // pipelines see the real result (signal-terminated → generic 1).
    if !status.success() {
        let code = status.code().unwrap_or(1);
        std::process::exit(code);
    }

    Ok(())
}
|
|
||||||
|
|
||||||
/// Quote a value for safe shell output. Wraps the value in single quotes,
/// escaping any single quotes within the value.
fn shell_quote(s: &str) -> String {
    let mut quoted = String::with_capacity(s.len() + 2);
    quoted.push('\'');
    for ch in s.chars() {
        if ch == '\'' {
            // Close the quote, emit an escaped quote, then reopen.
            quoted.push_str("'\\''");
        } else {
            quoted.push(ch);
        }
    }
    quoted.push('\'');
    quoted
}
|
|
||||||
@@ -1,472 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use serde_json::{Value, json};
|
|
||||||
use sqlx::PgPool;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
use crate::crypto;
|
|
||||||
use crate::models::Secret;
|
|
||||||
use crate::output::{OutputMode, format_local_time};
|
|
||||||
|
|
||||||
/// Borrowed argument bundle for the `search` command.
pub struct SearchArgs<'a> {
    // Optional filters; all may be None for an unfiltered listing.
    pub namespace: Option<&'a str>,
    pub kind: Option<&'a str>,
    pub name: Option<&'a str>,
    pub tags: &'a [String],
    // Free-text query string.
    pub query: Option<&'a str>,
    // Rejected by validate_safe_search_args — search never reveals secrets.
    pub show_secrets: bool,
    // `-f/--field` selectors; only metadata.* fields are permitted.
    pub fields: &'a [String],
    // When true, print a condensed per-record view.
    pub summary: bool,
    // Pagination and ordering.
    pub limit: u32,
    pub offset: u32,
    pub sort: &'a str,
    pub output: OutputMode,
}
|
|
||||||
|
|
||||||
/// Search records and print them (never revealing secret values).
pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> {
    // Reject any request that could leak secret material up front.
    validate_safe_search_args(args.show_secrets, args.fields)?;

    let rows = fetch_rows_paged(
        pool,
        PagedFetchArgs {
            namespace: args.namespace,
            kind: args.kind,
            name: args.name,
            tags: args.tags,
            query: args.query,
            sort: args.sort,
            limit: args.limit,
            offset: args.offset,
        },
    )
    .await?;

    // -f/--field: extract specific field values directly
    if !args.fields.is_empty() {
        return print_fields(&rows, args.fields);
    }

    match args.output {
        OutputMode::Json | OutputMode::JsonCompact => {
            let arr: Vec<Value> = rows.iter().map(|r| to_json(r, args.summary)).collect();
            let out = if args.output == OutputMode::Json {
                serde_json::to_string_pretty(&arr)?
            } else {
                serde_json::to_string(&arr)?
            };
            println!("{}", out);
        }
        OutputMode::Text => {
            if rows.is_empty() {
                println!("No records found.");
                return Ok(());
            }
            for row in &rows {
                print_text(row, args.summary)?;
            }
            println!("{} record(s) found.", rows.len());
            // A full page suggests more rows exist; hint at pagination.
            if rows.len() == args.limit as usize {
                println!(
                    "  (showing up to {}; use --offset {} to see more)",
                    args.limit,
                    args.offset + args.limit
                );
            }
        }
    }

    Ok(())
}
|
|
||||||
|
|
||||||
fn validate_safe_search_args(show_secrets: bool, fields: &[String]) -> Result<()> {
|
|
||||||
if show_secrets {
|
|
||||||
anyhow::bail!(
|
|
||||||
"`search` no longer reveals secrets. Use `secrets inject` or `secrets run` instead."
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(field) = fields.iter().find(|field| is_secret_field(field)) {
|
|
||||||
anyhow::bail!(
|
|
||||||
"Field '{}' is sensitive. `search -f` only supports metadata.* fields; use `secrets inject` or `secrets run` for secrets.",
|
|
||||||
field
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// True when a `section.key` field path names one of the secret-bearing
/// sections that `search` must never print. Paths without a dot are not
/// secret paths.
fn is_secret_field(field: &str) -> bool {
    field
        .split_once('.')
        .is_some_and(|(section, _)| matches!(section, "secret" | "secrets" | "encrypted"))
}
|
|
||||||
|
|
||||||
/// Fetch rows with simple equality/tag filters (no pagination). Used by inject/run.
///
/// Delegates to [`fetch_rows_paged`] with name ordering and a fixed page of
/// 200 rows starting at offset 0 — callers are expected to filter down to a
/// small result set.
pub async fn fetch_rows(
    pool: &PgPool,
    namespace: Option<&str>,
    kind: Option<&str>,
    name: Option<&str>,
    tags: &[String],
    query: Option<&str>,
) -> Result<Vec<Secret>> {
    fetch_rows_paged(
        pool,
        PagedFetchArgs {
            namespace,
            kind,
            name,
            tags,
            query,
            sort: "name",
            limit: 200,
            offset: 0,
        },
    )
    .await
}
|
|
||||||
|
|
||||||
/// Arguments for the internal paged fetch. Grouped to avoid too-many-arguments lint.
struct PagedFetchArgs<'a> {
    namespace: Option<&'a str>, // exact match on `namespace`
    kind: Option<&'a str>,      // exact match on `kind`
    name: Option<&'a str>,      // exact match on `name`
    tags: &'a [String],         // row must contain all of these tags (`@>`)
    query: Option<&'a str>,     // substring search across several columns
    sort: &'a str,              // "updated" | "created" | anything → name order
    limit: u32,                 // page size
    offset: u32,                // page start
}
|
|
||||||
|
|
||||||
/// Execute the search query with dynamically-built WHERE/ORDER/LIMIT clauses.
///
/// The SQL text only ever contains `$n` placeholders generated from a running
/// counter (`idx`); every user-supplied value is bound, never interpolated,
/// so the dynamic string building is injection-safe.
async fn fetch_rows_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result<Vec<Secret>> {
    let mut conditions: Vec<String> = Vec::new();
    // Next positional placeholder number. Must advance in lock-step with the
    // bind calls at the bottom of this function.
    let mut idx: i32 = 1;

    if a.namespace.is_some() {
        conditions.push(format!("namespace = ${}", idx));
        idx += 1;
    }
    if a.kind.is_some() {
        conditions.push(format!("kind = ${}", idx));
        idx += 1;
    }
    if a.name.is_some() {
        conditions.push(format!("name = ${}", idx));
        idx += 1;
    }
    if !a.tags.is_empty() {
        // One placeholder per tag; `@>` requires the row to contain them all.
        let placeholders: Vec<String> = a
            .tags
            .iter()
            .map(|_| {
                let p = format!("${}", idx);
                idx += 1;
                p
            })
            .collect();
        conditions.push(format!("tags @> ARRAY[{}]", placeholders.join(", ")));
    }
    if a.query.is_some() {
        // A single bound pattern is reused for every searched column.
        conditions.push(format!(
            "(name ILIKE ${i} ESCAPE '\\' OR namespace ILIKE ${i} ESCAPE '\\' OR kind ILIKE ${i} ESCAPE '\\' OR metadata::text ILIKE ${i} ESCAPE '\\' OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))",
            i = idx
        ));
        idx += 1;
    }

    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };

    // Sort keys come from a fixed whitelist — never user text — so direct
    // interpolation into ORDER BY is safe.
    let order = match a.sort {
        "updated" => "updated_at DESC",
        "created" => "created_at DESC",
        _ => "namespace, kind, name",
    };

    let sql = format!(
        "SELECT * FROM secrets {} ORDER BY {} LIMIT ${} OFFSET ${}",
        where_clause,
        order,
        idx,
        idx + 1
    );

    tracing::debug!(sql, "executing search query");

    // Bind values in exactly the order their placeholders were allocated above.
    let mut q = sqlx::query_as::<_, Secret>(&sql);
    if let Some(v) = a.namespace {
        q = q.bind(v);
    }
    if let Some(v) = a.kind {
        q = q.bind(v);
    }
    if let Some(v) = a.name {
        q = q.bind(v);
    }
    for v in a.tags {
        q = q.bind(v.as_str());
    }
    if let Some(v) = a.query {
        // Escape LIKE metacharacters so the user's text matches literally.
        q = q.bind(format!(
            "%{}%",
            v.replace('\\', "\\\\")
                .replace('%', "\\%")
                .replace('_', "\\_")
        ));
    }
    q = q.bind(a.limit as i64).bind(a.offset as i64);

    let rows = q.fetch_all(pool).await?;
    Ok(rows)
}
|
|
||||||
|
|
||||||
fn env_prefix(row: &Secret, prefix: &str) -> String {
|
|
||||||
let name_part = row.name.to_uppercase().replace(['-', '.', ' '], "_");
|
|
||||||
if prefix.is_empty() {
|
|
||||||
name_part
|
|
||||||
} else {
|
|
||||||
format!(
|
|
||||||
"{}_{}",
|
|
||||||
prefix.to_uppercase().replace(['-', '.', ' '], "_"),
|
|
||||||
name_part
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build a flat `KEY=VALUE` map from metadata only.
|
|
||||||
/// Variable names: `<PREFIX><NAME>_<FIELD>` (all uppercased, hyphens/dots → underscores).
|
|
||||||
/// If `prefix` is empty, the name segment alone is used as the prefix.
|
|
||||||
pub fn build_metadata_env_map(row: &Secret, prefix: &str) -> HashMap<String, String> {
|
|
||||||
let effective_prefix = env_prefix(row, prefix);
|
|
||||||
|
|
||||||
let mut map = HashMap::new();
|
|
||||||
|
|
||||||
if let Some(meta) = row.metadata.as_object() {
|
|
||||||
for (k, v) in meta {
|
|
||||||
let key = format!(
|
|
||||||
"{}_{}",
|
|
||||||
effective_prefix,
|
|
||||||
k.to_uppercase().replace(['-', '.'], "_")
|
|
||||||
);
|
|
||||||
map.insert(key, json_value_to_env_string(v));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
map
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build a flat `KEY=VALUE` map from metadata and decrypted secrets.
|
|
||||||
pub fn build_injected_env_map(
|
|
||||||
row: &Secret,
|
|
||||||
prefix: &str,
|
|
||||||
master_key: &[u8; 32],
|
|
||||||
) -> Result<HashMap<String, String>> {
|
|
||||||
let effective_prefix = env_prefix(row, prefix);
|
|
||||||
let mut map = build_metadata_env_map(row, prefix);
|
|
||||||
|
|
||||||
if !row.encrypted.is_empty() {
|
|
||||||
let decrypted = crypto::decrypt_json(master_key, &row.encrypted)?;
|
|
||||||
if let Some(enc) = decrypted.as_object() {
|
|
||||||
for (k, v) in enc {
|
|
||||||
let key = format!(
|
|
||||||
"{}_{}",
|
|
||||||
effective_prefix,
|
|
||||||
k.to_uppercase().replace(['-', '.'], "_")
|
|
||||||
);
|
|
||||||
map.insert(key, json_value_to_env_string(v));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(map)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Convert a JSON value to its string representation suitable for env vars.
|
|
||||||
fn json_value_to_env_string(v: &Value) -> String {
|
|
||||||
match v {
|
|
||||||
Value::String(s) => s.clone(),
|
|
||||||
Value::Null => String::new(),
|
|
||||||
other => other.to_string(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Serialize a record for JSON output.
///
/// `summary` yields a trimmed object (namespace/kind/name/tags/desc/updated_at);
/// the full form adds id, metadata, and version, but only a
/// `{"_encrypted": true}` marker in place of secret material — decrypted
/// values are never emitted here.
fn to_json(row: &Secret, summary: bool) -> Value {
    if summary {
        // Prefer `desc`, fall back to `url`, else the empty string.
        let desc = row
            .metadata
            .get("desc")
            .or_else(|| row.metadata.get("url"))
            .and_then(|v| v.as_str())
            .unwrap_or("")
            .to_string();
        return json!({
            "namespace": row.namespace,
            "kind": row.kind,
            "name": row.name,
            "tags": row.tags,
            "desc": desc,
            "updated_at": row.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        });
    }

    // Signal whether secrets exist without ever exposing them.
    let secrets_val = if row.encrypted.is_empty() {
        Value::Object(Default::default())
    } else {
        json!({"_encrypted": true})
    };

    json!({
        "id": row.id,
        "namespace": row.namespace,
        "kind": row.kind,
        "name": row.name,
        "tags": row.tags,
        "metadata": row.metadata,
        "secrets": secrets_val,
        "version": row.version,
        "created_at": row.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        "updated_at": row.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
    })
}
|
|
||||||
|
|
||||||
/// Print one record in the human-readable text format.
///
/// Summary mode shows tags/desc/updated; full mode shows id, tags, pretty
/// metadata, a redaction notice when secrets exist, and the creation time.
/// Secret values are never printed in either mode.
fn print_text(row: &Secret, summary: bool) -> Result<()> {
    println!("[{}/{}] {}", row.namespace, row.kind, row.name);
    if summary {
        // Prefer `desc`, fall back to `url`, else a placeholder dash.
        let desc = row
            .metadata
            .get("desc")
            .or_else(|| row.metadata.get("url"))
            .and_then(|v| v.as_str())
            .unwrap_or("-");
        if !row.tags.is_empty() {
            println!(" tags: [{}]", row.tags.join(", "));
        }
        println!(" desc: {}", desc);
        println!(" updated: {}", format_local_time(row.updated_at));
    } else {
        println!(" id: {}", row.id);
        if !row.tags.is_empty() {
            println!(" tags: [{}]", row.tags.join(", "));
        }
        if row.metadata.as_object().is_some_and(|m| !m.is_empty()) {
            println!(
                " metadata: {}",
                serde_json::to_string_pretty(&row.metadata)?
            );
        }
        if !row.encrypted.is_empty() {
            println!(" secrets: [encrypted] (use `secrets inject` or `secrets run`)");
        }
        println!(" created: {}", format_local_time(row.created_at));
    }
    // Blank separator line between records.
    println!();
    Ok(())
}
|
|
||||||
|
|
||||||
/// Extract one or more field paths like `metadata.url`.
|
|
||||||
fn print_fields(rows: &[Secret], fields: &[String]) -> Result<()> {
|
|
||||||
for row in rows {
|
|
||||||
for field in fields {
|
|
||||||
let val = extract_field(row, field)?;
|
|
||||||
println!("{}", val);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn extract_field(row: &Secret, field: &str) -> Result<String> {
|
|
||||||
let (section, key) = field
|
|
||||||
.split_once('.')
|
|
||||||
.ok_or_else(|| anyhow::anyhow!("Invalid field path '{}'. Use metadata.<key>.", field))?;
|
|
||||||
|
|
||||||
let obj = match section {
|
|
||||||
"metadata" | "meta" => &row.metadata,
|
|
||||||
other => anyhow::bail!("Unknown field section '{}'. Use 'metadata'.", other),
|
|
||||||
};
|
|
||||||
|
|
||||||
obj.get(key)
|
|
||||||
.and_then(|v| {
|
|
||||||
v.as_str()
|
|
||||||
.map(|s| s.to_string())
|
|
||||||
.or_else(|| Some(v.to_string()))
|
|
||||||
})
|
|
||||||
.ok_or_else(|| {
|
|
||||||
anyhow::anyhow!(
|
|
||||||
"Field '{}' not found in record [{}/{}/{}]",
|
|
||||||
field,
|
|
||||||
row.namespace,
|
|
||||||
row.kind,
|
|
||||||
row.name
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unit tests for the safe-search guards and the env-map builders.
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use serde_json::json;
    use uuid::Uuid;

    /// Build a record with both plaintext metadata and an encrypted `token`
    /// secret, using a fixed all-0x42 test key.
    fn sample_secret() -> Secret {
        let key = [0x42u8; 32];
        let encrypted = crypto::encrypt_json(&key, &json!({"token": "abc123"})).unwrap();

        Secret {
            id: Uuid::nil(),
            namespace: "refining".to_string(),
            kind: "service".to_string(),
            name: "gitea.main".to_string(),
            tags: vec!["prod".to_string()],
            metadata: json!({"url": "https://gitea.refining.dev", "enabled": true}),
            encrypted,
            version: 1,
            created_at: Utc::now(),
            updated_at: Utc::now(),
        }
    }

    #[test]
    fn rejects_show_secrets_flag() {
        let err = validate_safe_search_args(true, &[]).unwrap_err();
        assert!(err.to_string().contains("no longer reveals secrets"));
    }

    #[test]
    fn rejects_secret_field_extraction() {
        let fields = vec!["secret.token".to_string()];
        let err = validate_safe_search_args(false, &fields).unwrap_err();
        assert!(err.to_string().contains("sensitive"));
    }

    #[test]
    fn metadata_env_map_excludes_secret_values() {
        let row = sample_secret();
        let map = build_metadata_env_map(&row, "");

        assert_eq!(
            map.get("GITEA_MAIN_URL").map(String::as_str),
            Some("https://gitea.refining.dev")
        );
        assert_eq!(
            map.get("GITEA_MAIN_ENABLED").map(String::as_str),
            Some("true")
        );
        // The encrypted payload must never leak into the metadata-only map.
        assert!(!map.contains_key("GITEA_MAIN_TOKEN"));
    }

    #[test]
    fn injected_env_map_includes_secret_values() {
        let row = sample_secret();
        let key = [0x42u8; 32];
        let map = build_injected_env_map(&row, "", &key).unwrap();

        assert_eq!(
            map.get("GITEA_MAIN_TOKEN").map(String::as_str),
            Some("abc123")
        );
    }
}
|
|
||||||
@@ -1,223 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use serde_json::{Map, Value, json};
|
|
||||||
use sqlx::{FromRow, PgPool};
|
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
use super::add::{
|
|
||||||
collect_field_paths, collect_key_paths, insert_path, parse_key_path, parse_kv, remove_path,
|
|
||||||
};
|
|
||||||
use crate::crypto;
|
|
||||||
use crate::db;
|
|
||||||
use crate::output::OutputMode;
|
|
||||||
|
|
||||||
/// Minimal projection of a `secrets` row, loaded (and row-locked) for update.
#[derive(FromRow)]
struct UpdateRow {
    id: Uuid,
    version: i64,       // optimistic-concurrency token; checked by the CAS UPDATE
    tags: Vec<String>,
    metadata: Value,
    encrypted: Vec<u8>, // encrypted secrets blob; empty means no secrets stored
}
|
|
||||||
|
|
||||||
/// Parsed CLI arguments for the `update` subcommand.
/// The (namespace, kind, name) triple identifies the record; the remaining
/// slices describe additive/subtractive changes to tags, metadata, and secrets.
pub struct UpdateArgs<'a> {
    pub namespace: &'a str,
    pub kind: &'a str,
    pub name: &'a str,
    /// Tags to add (deduplicated against existing tags).
    pub add_tags: &'a [String],
    /// Tags to remove (removal wins over addition of the same tag).
    pub remove_tags: &'a [String],
    /// `key=value` metadata entries to set (dotted paths supported).
    pub meta_entries: &'a [String],
    /// Metadata key paths to delete.
    pub remove_meta: &'a [String],
    /// `key=value` secret entries to set (stored encrypted).
    pub secret_entries: &'a [String],
    /// Secret key paths to delete.
    pub remove_secrets: &'a [String],
    /// Output format for the result summary.
    pub output: OutputMode,
}
|
|
||||||
|
|
||||||
/// Apply a partial update to an existing record inside one transaction:
/// row-lock the target, snapshot it to history, merge tag/metadata/secret
/// changes, re-encrypt, and commit with a compare-and-swap on `version`.
pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> Result<()> {
    let mut tx = pool.begin().await?;

    // FOR UPDATE serializes concurrent updates of the same row behind us.
    let row: Option<UpdateRow> = sqlx::query_as(
        "SELECT id, version, tags, metadata, encrypted \
        FROM secrets \
        WHERE namespace = $1 AND kind = $2 AND name = $3 \
        FOR UPDATE",
    )
    .bind(args.namespace)
    .bind(args.kind)
    .bind(args.name)
    .fetch_optional(&mut *tx)
    .await?;

    let row = row.ok_or_else(|| {
        anyhow::anyhow!(
            "Not found: [{}/{}] {}. Use `add` to create it first.",
            args.namespace,
            args.kind,
            args.name
        )
    })?;

    // Snapshot current state before modifying.
    // Best-effort: a history failure is logged but does not abort the update.
    if let Err(e) = db::snapshot_history(
        &mut tx,
        db::SnapshotParams {
            secret_id: row.id,
            namespace: args.namespace,
            kind: args.kind,
            name: args.name,
            version: row.version,
            action: "update",
            tags: &row.tags,
            metadata: &row.metadata,
            encrypted: &row.encrypted,
        },
    )
    .await
    {
        tracing::warn!(error = %e, "failed to snapshot history before update");
    }

    // Merge tags: additions first (deduplicated), then removals win.
    let mut tags: Vec<String> = row.tags;
    for t in args.add_tags {
        if !tags.contains(t) {
            tags.push(t.clone());
        }
    }
    tags.retain(|t| !args.remove_tags.contains(t));

    // Merge metadata
    let mut meta_map: Map<String, Value> = match row.metadata {
        Value::Object(m) => m,
        _ => Map::new(),
    };
    for entry in args.meta_entries {
        let (path, value) = parse_kv(entry)?;
        insert_path(&mut meta_map, &path, value)?;
    }
    for key in args.remove_meta {
        let path = parse_key_path(key)?;
        remove_path(&mut meta_map, &path)?;
    }
    let metadata = Value::Object(meta_map);

    // Decrypt existing encrypted blob, merge changes, re-encrypt
    let existing_json = if row.encrypted.is_empty() {
        Value::Object(Map::new())
    } else {
        crypto::decrypt_json(master_key, &row.encrypted)?
    };
    let mut enc_map: Map<String, Value> = match existing_json {
        Value::Object(m) => m,
        _ => Map::new(),
    };
    for entry in args.secret_entries {
        let (path, value) = parse_kv(entry)?;
        insert_path(&mut enc_map, &path, value)?;
    }
    for key in args.remove_secrets {
        let path = parse_key_path(key)?;
        remove_path(&mut enc_map, &path)?;
    }
    let secret_json = Value::Object(enc_map);
    let encrypted_bytes = crypto::encrypt_json(master_key, &secret_json)?;

    tracing::debug!(
        namespace = args.namespace,
        kind = args.kind,
        name = args.name,
        "updating record"
    );

    // CAS: update only if version hasn't changed (FOR UPDATE lock ensures this).
    let result = sqlx::query(
        "UPDATE secrets \
        SET tags = $1, metadata = $2, encrypted = $3, version = version + 1, updated_at = NOW() \
        WHERE id = $4 AND version = $5",
    )
    .bind(&tags)
    .bind(&metadata)
    .bind(&encrypted_bytes)
    .bind(row.id)
    .bind(row.version)
    .execute(&mut *tx)
    .await?;

    if result.rows_affected() == 0 {
        tx.rollback().await?;
        anyhow::bail!(
            "Concurrent modification detected for [{}/{}] {}. Please retry.",
            args.namespace,
            args.kind,
            args.name
        );
    }

    // Only key paths (never values) are recorded for audit and echo output.
    let meta_keys = collect_key_paths(args.meta_entries)?;
    let remove_meta_keys = collect_field_paths(args.remove_meta)?;
    let secret_keys = collect_key_paths(args.secret_entries)?;
    let remove_secret_keys = collect_field_paths(args.remove_secrets)?;

    crate::audit::log_tx(
        &mut tx,
        "update",
        args.namespace,
        args.kind,
        args.name,
        json!({
            "add_tags": args.add_tags,
            "remove_tags": args.remove_tags,
            "meta_keys": meta_keys,
            "remove_meta": remove_meta_keys,
            "secret_keys": secret_keys,
            "remove_secrets": remove_secret_keys,
        }),
    )
    .await;

    tx.commit().await?;

    let result_json = json!({
        "action": "updated",
        "namespace": args.namespace,
        "kind": args.kind,
        "name": args.name,
        "add_tags": args.add_tags,
        "remove_tags": args.remove_tags,
        "meta_keys": meta_keys,
        "remove_meta": remove_meta_keys,
        "secret_keys": secret_keys,
        "remove_secrets": remove_secret_keys,
    });

    match args.output {
        OutputMode::Json => {
            println!("{}", serde_json::to_string_pretty(&result_json)?);
        }
        OutputMode::JsonCompact => {
            println!("{}", serde_json::to_string(&result_json)?);
        }
        _ => {
            println!("Updated: [{}/{}] {}", args.namespace, args.kind, args.name);
            if !args.add_tags.is_empty() {
                println!(" +tags: {}", args.add_tags.join(", "));
            }
            if !args.remove_tags.is_empty() {
                println!(" -tags: {}", args.remove_tags.join(", "));
            }
            if !args.meta_entries.is_empty() {
                println!(" +metadata: {}", meta_keys.join(", "));
            }
            if !args.remove_meta.is_empty() {
                println!(" -metadata: {}", remove_meta_keys.join(", "));
            }
            if !args.secret_entries.is_empty() {
                println!(" +secrets: {}", secret_keys.join(", "));
            }
            if !args.remove_secrets.is_empty() {
                println!(" -secrets: {}", remove_secret_keys.join(", "));
            }
        }
    }

    Ok(())
}
|
|
||||||
@@ -1,394 +0,0 @@
|
|||||||
use anyhow::{Context, Result, bail};
|
|
||||||
use flate2::read::GzDecoder;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use sha2::{Digest, Sha256};
|
|
||||||
use std::io::{Cursor, Read, Write};
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
/// Gitea "latest release" endpoint polled for upgrades.
const GITEA_API: &str = "https://gitea.refining.dev/api/v1/repos/refining/secrets/releases/latest";

/// Version compiled into this binary (taken from Cargo.toml at build time).
const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION");
|
|
||||||
|
|
||||||
/// Subset of the Gitea release JSON this command consumes.
#[derive(Debug, Deserialize)]
struct Release {
    tag_name: String,   // e.g. "secrets-0.7.0" (see parse_tag_version)
    assets: Vec<Asset>, // downloadable files attached to the release
}
|
|
||||||
|
|
||||||
/// One downloadable file attached to a Gitea release.
#[derive(Debug, Deserialize)]
struct Asset {
    name: String,
    browser_download_url: String,
}
|
|
||||||
|
|
||||||
fn available_assets(assets: &[Asset]) -> String {
|
|
||||||
assets
|
|
||||||
.iter()
|
|
||||||
.map(|a| a.name.as_str())
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.join(", ")
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compose the expected asset file name for a tag/platform pair.
/// Release tags already carry a `secrets-` prefix, so the result is
/// `secrets-secrets-<ver>-<suffix>` — the naming the existing unit tests pin.
fn release_asset_name(tag_name: &str, suffix: &str) -> String {
    ["secrets-", tag_name, "-", suffix].concat()
}
|
|
||||||
|
|
||||||
fn find_asset_by_name<'a>(assets: &'a [Asset], name: &str) -> Result<&'a Asset> {
|
|
||||||
assets.iter().find(|a| a.name == name).with_context(|| {
|
|
||||||
format!(
|
|
||||||
"no matching release asset found: {name}\navailable: {}",
|
|
||||||
available_assets(assets)
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Detect the asset suffix for the current platform/arch at compile time.
///
/// Exactly one of the `cfg` blocks below is compiled in for a supported
/// target; on any other target only the trailing `bail!` is compiled, so the
/// function always fails there with a descriptive error.
fn platform_asset_suffix() -> Result<&'static str> {
    #[cfg(all(target_os = "linux", target_arch = "x86_64"))]
    {
        Ok("x86_64-linux-musl.tar.gz")
    }

    #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
    {
        Ok("aarch64-macos.tar.gz")
    }

    #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
    {
        Ok("x86_64-macos.tar.gz")
    }

    #[cfg(all(target_os = "windows", target_arch = "x86_64"))]
    {
        Ok("x86_64-windows.zip")
    }

    // Negation of every supported-target predicate above — keep in sync.
    #[cfg(not(any(
        all(target_os = "linux", target_arch = "x86_64"),
        all(target_os = "macos", target_arch = "aarch64"),
        all(target_os = "macos", target_arch = "x86_64"),
        all(target_os = "windows", target_arch = "x86_64"),
    )))]
    bail!(
        "Unsupported platform: {}/{}",
        std::env::consts::OS,
        std::env::consts::ARCH
    )
}
|
|
||||||
|
|
||||||
/// Strip the "secrets-" prefix from the tag and parse as semver.
|
|
||||||
fn parse_tag_version(tag: &str) -> Result<semver::Version> {
|
|
||||||
let ver_str = tag
|
|
||||||
.strip_prefix("secrets-")
|
|
||||||
.with_context(|| format!("unexpected tag format: {tag}"))?;
|
|
||||||
semver::Version::parse(ver_str)
|
|
||||||
.with_context(|| format!("failed to parse version from tag: {tag}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sha256_hex(bytes: &[u8]) -> String {
|
|
||||||
let digest = Sha256::digest(bytes);
|
|
||||||
format!("{digest:x}")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn verify_checksum(asset_name: &str, archive: &[u8], checksum_contents: &str) -> Result<String> {
|
|
||||||
let expected_checksum = parse_checksum_file(checksum_contents)?;
|
|
||||||
let actual_checksum = sha256_hex(archive);
|
|
||||||
|
|
||||||
if actual_checksum != expected_checksum {
|
|
||||||
bail!(
|
|
||||||
"checksum verification failed for {}: expected {}, got {}",
|
|
||||||
asset_name,
|
|
||||||
expected_checksum,
|
|
||||||
actual_checksum
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(actual_checksum)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_checksum_file(contents: &str) -> Result<String> {
|
|
||||||
let checksum = contents
|
|
||||||
.split_whitespace()
|
|
||||||
.next()
|
|
||||||
.context("checksum file is empty")?
|
|
||||||
.trim()
|
|
||||||
.to_ascii_lowercase();
|
|
||||||
|
|
||||||
if checksum.len() != 64 || !checksum.bytes().all(|b| b.is_ascii_hexdigit()) {
|
|
||||||
bail!("invalid SHA-256 checksum format")
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(checksum)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// GET `url` and return the response body bytes, labelling every failure
/// stage with the caller-supplied `context` string.
async fn download_bytes(client: &reqwest::Client, url: &str, context: &str) -> Result<Vec<u8>> {
    let response = client
        .get(url)
        .send()
        .await
        .with_context(|| format!("{context}: request failed"))?;
    let response = response
        .error_for_status()
        .with_context(|| format!("{context}: server returned an error"))?;
    let body = response
        .bytes()
        .await
        .with_context(|| format!("{context}: failed to read response body"))?;
    Ok(body.to_vec())
}
|
|
||||||
|
|
||||||
/// Extract the binary from a tar.gz archive (first file whose name == "secrets").
fn extract_from_targz(bytes: &[u8]) -> Result<Vec<u8>> {
    let gz = GzDecoder::new(Cursor::new(bytes));
    let mut archive = tar::Archive::new(gz);
    for entry in archive.entries().context("failed to read tar entries")? {
        let mut entry = entry.context("bad tar entry")?;
        // Own the path so `entry` stays mutably borrowable for reading below.
        let path = entry.path().context("bad tar entry path")?.into_owned();
        let fname = path
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or_default();
        if fname == "secrets" || fname == "secrets.exe" {
            let mut buf = Vec::new();
            entry.read_to_end(&mut buf).context("read tar entry")?;
            return Ok(buf);
        }
    }
    bail!("binary not found inside tar.gz archive")
}
|
|
||||||
|
|
||||||
/// Extract the binary from a zip archive (first file whose name matches).
/// Only compiled on Windows, the only target that ships zip assets.
#[cfg(target_os = "windows")]
fn extract_from_zip(bytes: &[u8]) -> Result<Vec<u8>> {
    let reader = Cursor::new(bytes);
    let mut archive = zip::ZipArchive::new(reader).context("failed to open zip archive")?;
    for i in 0..archive.len() {
        let mut file = archive.by_index(i).context("bad zip entry")?;
        // Clone the name so `file` stays mutably borrowable for reading.
        let fname = file.name().to_owned();
        if fname.ends_with("secrets.exe") || fname.ends_with("secrets") {
            let mut buf = Vec::new();
            file.read_to_end(&mut buf).context("read zip entry")?;
            return Ok(buf);
        }
    }
    bail!("binary not found inside zip archive")
}
|
|
||||||
|
|
||||||
/// Check Gitea for a newer release; unless `check_only`, download the
/// platform asset, verify its SHA-256 against the published checksum,
/// extract the binary, and replace the running executable in place.
pub async fn run(check_only: bool) -> Result<()> {
    let current = semver::Version::parse(CURRENT_VERSION).context("invalid current version")?;

    println!("Current version: v{current}");
    println!("Checking for updates...");

    let client = reqwest::Client::builder()
        .user_agent(format!("secrets-cli/{CURRENT_VERSION}"))
        .connect_timeout(Duration::from_secs(10))
        .timeout(Duration::from_secs(120))
        .build()
        .context("failed to build HTTP client")?;

    let release: Release = client
        .get(GITEA_API)
        .send()
        .await
        .context("failed to fetch release info from Gitea")?
        .error_for_status()
        .context("Gitea API returned an error")?
        .json()
        .await
        .context("failed to parse release JSON")?;

    let latest = parse_tag_version(&release.tag_name)?;

    if latest <= current {
        println!("Already up to date (v{current})");
        return Ok(());
    }

    println!("New version available: v{latest}");

    if check_only {
        println!("Run `secrets upgrade` to update.");
        return Ok(());
    }

    let suffix = platform_asset_suffix()?;
    let asset_name = release_asset_name(&release.tag_name, suffix);
    let asset = find_asset_by_name(&release.assets, &asset_name)?;
    // The checksum is published as a sibling `<asset>.sha256` release asset.
    let checksum_name = format!("{}.sha256", asset.name);
    let checksum_asset = find_asset_by_name(&release.assets, &checksum_name)?;

    println!("Downloading {}...", asset.name);

    let archive = download_bytes(&client, &asset.browser_download_url, "archive download").await?;
    let checksum_contents = download_bytes(
        &client,
        &checksum_asset.browser_download_url,
        "checksum download",
    )
    .await?;
    let actual_checksum = verify_checksum(
        &asset.name,
        &archive,
        std::str::from_utf8(&checksum_contents).context("checksum file is not valid UTF-8")?,
    )?;

    println!("Verified SHA-256: {actual_checksum}");

    println!("Extracting...");

    let binary = if suffix.ends_with(".tar.gz") {
        extract_from_targz(&archive)?
    } else {
        // zip is only produced for the Windows target.
        #[cfg(target_os = "windows")]
        {
            extract_from_zip(&archive)?
        }
        #[cfg(not(target_os = "windows"))]
        bail!("zip extraction is only supported on Windows")
    };

    // Write to a temporary file, set executable permission, then atomically replace.
    let mut tmp = tempfile::NamedTempFile::new().context("failed to create temp file")?;
    tmp.write_all(&binary)
        .context("failed to write temp binary")?;

    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let perms = std::fs::Permissions::from_mode(0o755);
        std::fs::set_permissions(tmp.path(), perms).context("failed to chmod temp binary")?;
    }

    self_replace::self_replace(tmp.path()).context("failed to replace current binary")?;

    println!("Updated: v{current} → v{latest}");
    Ok(())
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;
    use flate2::Compression;
    use flate2::write::GzEncoder;
    use tar::Builder;

    #[test]
    fn parse_tag_version_accepts_release_tag() {
        let version = parse_tag_version("secrets-0.6.1").expect("version should parse");
        assert_eq!(version, semver::Version::new(0, 6, 1));
    }

    #[test]
    fn parse_tag_version_rejects_invalid_tag() {
        // Tags must use the "secrets-X.Y.Z" scheme; a bare "vX.Y.Z" is rejected.
        let err = parse_tag_version("v0.6.1").expect_err("tag should be rejected");
        assert!(err.to_string().contains("unexpected tag format"));
    }

    #[test]
    fn parse_checksum_file_accepts_sha256sum_format() {
        let checksum = parse_checksum_file(
            "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef secrets.tar.gz",
        )
        .expect("checksum should parse");
        assert_eq!(
            checksum,
            "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
        );
    }

    #[test]
    fn parse_checksum_file_rejects_invalid_checksum() {
        let err = parse_checksum_file("not-a-sha256").expect_err("checksum should be rejected");
        assert!(err.to_string().contains("invalid SHA-256 checksum format"));
    }

    #[test]
    fn release_asset_name_matches_release_tag() {
        // Asset names embed the full tag, producing the doubled "secrets-secrets-…" prefix.
        assert_eq!(
            release_asset_name("secrets-0.7.0", "x86_64-linux-musl.tar.gz"),
            "secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz"
        );
    }

    #[test]
    fn find_asset_by_name_rejects_stale_platform_match() {
        // Neither asset matches the requested name exactly: one is an older
        // version for the right platform, one is the right version for a
        // different platform. Lookup must fail rather than fall back.
        let assets = vec![
            Asset {
                name: "secrets-secrets-0.6.9-x86_64-linux-musl.tar.gz".into(),
                browser_download_url: "https://example.invalid/old".into(),
            },
            Asset {
                name: "secrets-secrets-0.7.0-aarch64-macos.tar.gz".into(),
                browser_download_url: "https://example.invalid/other".into(),
            },
        ];

        let err = find_asset_by_name(&assets, "secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz")
            .expect_err("stale asset should not match");

        assert!(err.to_string().contains("no matching release asset found"));
    }

    #[test]
    fn sha256_hex_matches_known_value() {
        // Known SHA-256 digest of "abc" (FIPS 180-2 test vector).
        assert_eq!(
            sha256_hex(b"abc"),
            "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
        );
    }

    #[test]
    fn verify_checksum_rejects_mismatch() {
        let err = verify_checksum(
            "secrets.tar.gz",
            b"abc",
            "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef secrets.tar.gz",
        )
        .expect_err("checksum mismatch should fail");

        assert!(err.to_string().contains("checksum verification failed"));
    }

    #[test]
    fn extract_from_targz_reads_binary() {
        let payload = b"fake-secrets-binary";
        let archive = make_test_targz("secrets", payload);
        let extracted = extract_from_targz(&archive).expect("binary should extract");
        assert_eq!(extracted, payload);
    }

    // Build an in-memory .tar.gz containing a single file `name` with `payload`.
    fn make_test_targz(name: &str, payload: &[u8]) -> Vec<u8> {
        let encoder = GzEncoder::new(Vec::new(), Compression::default());
        let mut builder = Builder::new(encoder);

        let mut header = tar::Header::new_gnu();
        header.set_mode(0o755);
        header.set_size(payload.len() as u64);
        header.set_cksum();
        builder
            .append_data(&mut header, name, payload)
            .expect("append tar entry");

        let encoder = builder.into_inner().expect("finish tar builder");
        encoder.finish().expect("finish gzip")
    }

    #[cfg(target_os = "windows")]
    #[test]
    fn extract_from_zip_reads_binary() {
        use zip::write::SimpleFileOptions;

        // Build an in-memory zip with a single "secrets.exe" entry.
        let cursor = Cursor::new(Vec::<u8>::new());
        let mut writer = zip::ZipWriter::new(cursor);
        writer
            .start_file("secrets.exe", SimpleFileOptions::default())
            .expect("start zip file");
        writer
            .write_all(b"fake-secrets-binary")
            .expect("write zip payload");
        let bytes = writer.finish().expect("finish zip").into_inner();

        let extracted = extract_from_zip(&bytes).expect("binary should extract");
        assert_eq!(extracted, b"fake-secrets-binary");
    }
}
|
|
||||||
@@ -1,73 +0,0 @@
|
|||||||
use anyhow::{Context, Result};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::fs;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
|
|
||||||
/// Per-device persisted configuration, stored as TOML at `config_path()`.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct Config {
    /// Postgres connection string; `None` until `secrets config set-db` is run.
    pub database_url: Option<String>,
}
|
|
||||||
|
|
||||||
pub fn config_dir() -> PathBuf {
|
|
||||||
dirs::config_dir()
|
|
||||||
.or_else(|| dirs::home_dir().map(|h| h.join(".config")))
|
|
||||||
.unwrap_or_else(|| PathBuf::from(".config"))
|
|
||||||
.join("secrets")
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn config_path() -> PathBuf {
|
|
||||||
config_dir().join("config.toml")
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn load_config() -> Result<Config> {
|
|
||||||
let path = config_path();
|
|
||||||
if !path.exists() {
|
|
||||||
return Ok(Config::default());
|
|
||||||
}
|
|
||||||
let content = fs::read_to_string(&path)
|
|
||||||
.with_context(|| format!("failed to read config file: {}", path.display()))?;
|
|
||||||
let config: Config = toml::from_str(&content)
|
|
||||||
.with_context(|| format!("failed to parse config file: {}", path.display()))?;
|
|
||||||
Ok(config)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn save_config(config: &Config) -> Result<()> {
|
|
||||||
let dir = config_dir();
|
|
||||||
fs::create_dir_all(&dir)
|
|
||||||
.with_context(|| format!("failed to create config dir: {}", dir.display()))?;
|
|
||||||
|
|
||||||
let path = config_path();
|
|
||||||
let content = toml::to_string_pretty(config).context("failed to serialize config")?;
|
|
||||||
fs::write(&path, &content)
|
|
||||||
.with_context(|| format!("failed to write config file: {}", path.display()))?;
|
|
||||||
|
|
||||||
// Set file permissions to 0600 (owner read/write only)
|
|
||||||
#[cfg(unix)]
|
|
||||||
{
|
|
||||||
use std::os::unix::fs::PermissionsExt;
|
|
||||||
let perms = fs::Permissions::from_mode(0o600);
|
|
||||||
fs::set_permissions(&path, perms)
|
|
||||||
.with_context(|| format!("failed to set file permissions: {}", path.display()))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Resolve database URL by priority:
|
|
||||||
/// 1. --db-url CLI flag (if non-empty)
|
|
||||||
/// 2. database_url in ~/.config/secrets/config.toml
|
|
||||||
/// 3. Error with setup instructions
|
|
||||||
pub fn resolve_db_url(cli_db_url: &str) -> Result<String> {
|
|
||||||
if !cli_db_url.is_empty() {
|
|
||||||
return Ok(cli_db_url.to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
let config = load_config()?;
|
|
||||||
if let Some(url) = config.database_url
|
|
||||||
&& !url.is_empty()
|
|
||||||
{
|
|
||||||
return Ok(url);
|
|
||||||
}
|
|
||||||
|
|
||||||
anyhow::bail!("Database not configured. Run:\n\n secrets config set-db <DATABASE_URL>\n")
|
|
||||||
}
|
|
||||||
183
src/crypto.rs
183
src/crypto.rs
@@ -1,183 +0,0 @@
|
|||||||
use aes_gcm::{
|
|
||||||
Aes256Gcm, Key, Nonce,
|
|
||||||
aead::{Aead, AeadCore, KeyInit, OsRng},
|
|
||||||
};
|
|
||||||
use anyhow::{Context, Result, bail};
|
|
||||||
use argon2::{Argon2, Params, Version};
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
// Service/user pair identifying the master-key entry in the OS keychain.
const KEYRING_SERVICE: &str = "secrets-cli";
const KEYRING_USER: &str = "master-key";
// AES-GCM standard 96-bit nonce; prepended to every ciphertext blob.
const NONCE_LEN: usize = 12;
|
|
||||||
|
|
||||||
// ─── Argon2id key derivation ─────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Derive a 32-byte Master Key from a password and salt using Argon2id.
|
|
||||||
/// Parameters: m=65536 KiB (64 MB), t=3, p=4 — OWASP recommended.
|
|
||||||
pub fn derive_master_key(password: &str, salt: &[u8]) -> Result<[u8; 32]> {
|
|
||||||
let params = Params::new(65536, 3, 4, Some(32)).context("invalid Argon2id params")?;
|
|
||||||
let argon2 = Argon2::new(argon2::Algorithm::Argon2id, Version::V0x13, params);
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
argon2
|
|
||||||
.hash_password_into(password.as_bytes(), salt, &mut key)
|
|
||||||
.map_err(|e| anyhow::anyhow!("Argon2id derivation failed: {}", e))?;
|
|
||||||
Ok(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── AES-256-GCM encrypt / decrypt ───────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Encrypt plaintext bytes with AES-256-GCM.
|
|
||||||
/// Returns `nonce (12 B) || ciphertext+tag`.
|
|
||||||
pub fn encrypt(master_key: &[u8; 32], plaintext: &[u8]) -> Result<Vec<u8>> {
|
|
||||||
let key = Key::<Aes256Gcm>::from_slice(master_key);
|
|
||||||
let cipher = Aes256Gcm::new(key);
|
|
||||||
let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
|
|
||||||
let ciphertext = cipher
|
|
||||||
.encrypt(&nonce, plaintext)
|
|
||||||
.map_err(|e| anyhow::anyhow!("AES-256-GCM encryption failed: {}", e))?;
|
|
||||||
let mut out = Vec::with_capacity(NONCE_LEN + ciphertext.len());
|
|
||||||
out.extend_from_slice(&nonce);
|
|
||||||
out.extend_from_slice(&ciphertext);
|
|
||||||
Ok(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Decrypt `nonce (12 B) || ciphertext+tag` with AES-256-GCM.
|
|
||||||
pub fn decrypt(master_key: &[u8; 32], data: &[u8]) -> Result<Vec<u8>> {
|
|
||||||
if data.len() < NONCE_LEN {
|
|
||||||
bail!(
|
|
||||||
"encrypted data too short ({}B); possibly corrupted",
|
|
||||||
data.len()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let (nonce_bytes, ciphertext) = data.split_at(NONCE_LEN);
|
|
||||||
let key = Key::<Aes256Gcm>::from_slice(master_key);
|
|
||||||
let cipher = Aes256Gcm::new(key);
|
|
||||||
let nonce = Nonce::from_slice(nonce_bytes);
|
|
||||||
cipher
|
|
||||||
.decrypt(nonce, ciphertext)
|
|
||||||
.map_err(|_| anyhow::anyhow!("decryption failed — wrong master key or corrupted data"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── JSON helpers ─────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Serialize a JSON Value and encrypt it. Returns the encrypted blob.
|
|
||||||
pub fn encrypt_json(master_key: &[u8; 32], value: &Value) -> Result<Vec<u8>> {
|
|
||||||
let bytes = serde_json::to_vec(value).context("serialize JSON for encryption")?;
|
|
||||||
encrypt(master_key, &bytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Decrypt an encrypted blob and deserialize it as a JSON Value.
|
|
||||||
pub fn decrypt_json(master_key: &[u8; 32], data: &[u8]) -> Result<Value> {
|
|
||||||
let bytes = decrypt(master_key, data)?;
|
|
||||||
serde_json::from_slice(&bytes).context("deserialize decrypted JSON")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── OS Keychain ──────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/// Load the Master Key from the OS Keychain.
|
|
||||||
/// Returns an error with a helpful message if it hasn't been initialized.
|
|
||||||
pub fn load_master_key() -> Result<[u8; 32]> {
|
|
||||||
let entry =
|
|
||||||
keyring::Entry::new(KEYRING_SERVICE, KEYRING_USER).context("create keychain entry")?;
|
|
||||||
let hex = entry.get_password().map_err(|_| {
|
|
||||||
anyhow::anyhow!("Master key not found in keychain. Run `secrets init` first.")
|
|
||||||
})?;
|
|
||||||
let bytes = hex::decode_hex(&hex)?;
|
|
||||||
if bytes.len() != 32 {
|
|
||||||
bail!(
|
|
||||||
"stored master key has unexpected length {}; re-run `secrets init`",
|
|
||||||
bytes.len()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
key.copy_from_slice(&bytes);
|
|
||||||
Ok(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Store the Master Key in the OS Keychain (overwrites any existing value).
|
|
||||||
pub fn store_master_key(key: &[u8; 32]) -> Result<()> {
|
|
||||||
let entry =
|
|
||||||
keyring::Entry::new(KEYRING_SERVICE, KEYRING_USER).context("create keychain entry")?;
|
|
||||||
let hex = hex::encode_hex(key);
|
|
||||||
entry
|
|
||||||
.set_password(&hex)
|
|
||||||
.map_err(|e| anyhow::anyhow!("keychain write failed: {}", e))?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── Minimal hex helpers (avoid extra dep) ────────────────────────────────────
|
|
||||||
|
|
||||||
mod hex {
|
|
||||||
use anyhow::{Result, bail};
|
|
||||||
|
|
||||||
pub fn encode_hex(bytes: &[u8]) -> String {
|
|
||||||
bytes.iter().map(|b| format!("{:02x}", b)).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn decode_hex(s: &str) -> Result<Vec<u8>> {
|
|
||||||
if !s.len().is_multiple_of(2) {
|
|
||||||
bail!("hex string has odd length");
|
|
||||||
}
|
|
||||||
(0..s.len())
|
|
||||||
.step_by(2)
|
|
||||||
.map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(|e| anyhow::anyhow!("{}", e)))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;

    // Whatever encrypt produces, decrypt with the same key must recover.
    #[test]
    fn roundtrip_encrypt_decrypt() {
        let key = [0x42u8; 32];
        let plaintext = b"hello world";
        let enc = encrypt(&key, plaintext).unwrap();
        let dec = decrypt(&key, &enc).unwrap();
        assert_eq!(dec, plaintext);
    }

    // Same key + same plaintext must still yield distinct blobs, proving a
    // fresh random nonce is used per call.
    #[test]
    fn encrypt_produces_different_ciphertexts() {
        let key = [0x42u8; 32];
        let plaintext = b"hello world";
        let enc1 = encrypt(&key, plaintext).unwrap();
        let enc2 = encrypt(&key, plaintext).unwrap();
        // Different nonces → different ciphertexts
        assert_ne!(enc1, enc2);
    }

    // GCM authentication must reject a near-miss key.
    #[test]
    fn wrong_key_fails_decryption() {
        let key1 = [0x42u8; 32];
        let key2 = [0x43u8; 32];
        let enc = encrypt(&key1, b"secret").unwrap();
        assert!(decrypt(&key2, &enc).is_err());
    }

    #[test]
    fn json_roundtrip() {
        let key = [0x42u8; 32];
        let value = serde_json::json!({"token": "abc123", "password": "hunter2"});
        let enc = encrypt_json(&key, &value).unwrap();
        let dec = decrypt_json(&key, &enc).unwrap();
        assert_eq!(dec, value);
    }

    // Same password + same salt must derive the same key (cross-device sync
    // depends on this).
    #[test]
    fn derive_master_key_deterministic() {
        let salt = b"fixed_test_salt_";
        let k1 = derive_master_key("password", salt).unwrap();
        let k2 = derive_master_key("password", salt).unwrap();
        assert_eq!(k1, k2);
    }

    #[test]
    fn derive_master_key_different_passwords() {
        let salt = b"fixed_test_salt_";
        let k1 = derive_master_key("password1", salt).unwrap();
        let k2 = derive_master_key("password2", salt).unwrap();
        assert_ne!(k1, k2);
    }
}
|
|
||||||
177
src/db.rs
177
src/db.rs
@@ -1,177 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use sqlx::PgPool;
|
|
||||||
use sqlx::postgres::PgPoolOptions;
|
|
||||||
|
|
||||||
pub async fn create_pool(database_url: &str) -> Result<PgPool> {
|
|
||||||
tracing::debug!("connecting to database");
|
|
||||||
let pool = PgPoolOptions::new()
|
|
||||||
.max_connections(5)
|
|
||||||
.acquire_timeout(std::time::Duration::from_secs(5))
|
|
||||||
.connect(database_url)
|
|
||||||
.await?;
|
|
||||||
tracing::debug!("database connection established");
|
|
||||||
Ok(pool)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create or upgrade the database schema.
///
/// Idempotent by construction: every statement uses `IF NOT EXISTS` /
/// `ADD COLUMN IF NOT EXISTS` or is wrapped in a `DO` block that swallows
/// errors, so it is safe to run on every startup.
///
/// NOTE(review): the whole script executes as a single `raw_sql` batch; a
/// failure in a later statement can leave earlier ones applied — acceptable
/// only because each statement is individually idempotent.
pub async fn migrate(pool: &PgPool) -> Result<()> {
    tracing::debug!("running migrations");
    sqlx::raw_sql(
        r#"
        CREATE TABLE IF NOT EXISTS secrets (
            id UUID PRIMARY KEY DEFAULT uuidv7(),
            namespace VARCHAR(64) NOT NULL,
            kind VARCHAR(64) NOT NULL,
            name VARCHAR(256) NOT NULL,
            tags TEXT[] NOT NULL DEFAULT '{}',
            metadata JSONB NOT NULL DEFAULT '{}',
            encrypted BYTEA NOT NULL DEFAULT '\x',
            version BIGINT NOT NULL DEFAULT 1,
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
            UNIQUE(namespace, kind, name)
        );

        -- idempotent column add for existing tables
        DO $$ BEGIN
            ALTER TABLE secrets ADD COLUMN IF NOT EXISTS metadata JSONB NOT NULL DEFAULT '{}';
        EXCEPTION WHEN OTHERS THEN NULL;
        END $$;

        DO $$ BEGIN
            ALTER TABLE secrets ADD COLUMN IF NOT EXISTS version BIGINT NOT NULL DEFAULT 1;
        EXCEPTION WHEN OTHERS THEN NULL;
        END $$;

        -- Migrate encrypted column from JSONB to BYTEA if still JSONB type.
        -- After migration, old plaintext rows will have their JSONB data
        -- stored as raw bytes (UTF-8 encoded).
        DO $$ BEGIN
            IF EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = 'secrets'
                AND column_name = 'encrypted'
                AND data_type = 'jsonb'
            ) THEN
                ALTER TABLE secrets RENAME COLUMN encrypted TO encrypted_jsonb_old;
                ALTER TABLE secrets ADD COLUMN encrypted BYTEA NOT NULL DEFAULT '\x';
                -- Copy existing JSONB data as raw UTF-8 bytes so nothing is lost
                UPDATE secrets SET encrypted = convert_to(encrypted_jsonb_old::text, 'UTF8');
                ALTER TABLE secrets DROP COLUMN encrypted_jsonb_old;
            END IF;
        EXCEPTION WHEN OTHERS THEN NULL;
        END $$;

        CREATE INDEX IF NOT EXISTS idx_secrets_namespace ON secrets(namespace);
        CREATE INDEX IF NOT EXISTS idx_secrets_kind ON secrets(kind);
        CREATE INDEX IF NOT EXISTS idx_secrets_tags ON secrets USING GIN(tags);
        CREATE INDEX IF NOT EXISTS idx_secrets_metadata ON secrets USING GIN(metadata jsonb_path_ops);

        -- Key-value config table: stores Argon2id salt (shared across devices)
        CREATE TABLE IF NOT EXISTS kv_config (
            key TEXT PRIMARY KEY,
            value BYTEA NOT NULL
        );

        CREATE TABLE IF NOT EXISTS audit_log (
            id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            action VARCHAR(32) NOT NULL,
            namespace VARCHAR(64) NOT NULL,
            kind VARCHAR(64) NOT NULL,
            name VARCHAR(256) NOT NULL,
            detail JSONB NOT NULL DEFAULT '{}',
            actor VARCHAR(128) NOT NULL DEFAULT '',
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );

        CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC);
        CREATE INDEX IF NOT EXISTS idx_audit_log_ns_kind ON audit_log(namespace, kind);

        -- History table: snapshot of secrets before each write operation.
        -- Supports rollback to any prior version via `secrets rollback`.
        CREATE TABLE IF NOT EXISTS secrets_history (
            id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            secret_id UUID NOT NULL,
            namespace VARCHAR(64) NOT NULL,
            kind VARCHAR(64) NOT NULL,
            name VARCHAR(256) NOT NULL,
            version BIGINT NOT NULL,
            action VARCHAR(16) NOT NULL,
            tags TEXT[] NOT NULL DEFAULT '{}',
            metadata JSONB NOT NULL DEFAULT '{}',
            encrypted BYTEA NOT NULL DEFAULT '\x',
            actor VARCHAR(128) NOT NULL DEFAULT '',
            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        );

        CREATE INDEX IF NOT EXISTS idx_history_secret_id ON secrets_history(secret_id, version DESC);
        CREATE INDEX IF NOT EXISTS idx_history_ns_kind_name ON secrets_history(namespace, kind, name, version DESC);
        "#,
    )
    .execute(pool)
    .await?;
    tracing::debug!("migrations complete");
    Ok(())
}
|
|
||||||
|
|
||||||
/// Snapshot parameters grouped to avoid too-many-arguments lint.
pub struct SnapshotParams<'a> {
    /// `secrets.id` of the row being snapshotted.
    pub secret_id: uuid::Uuid,
    pub namespace: &'a str,
    pub kind: &'a str,
    pub name: &'a str,
    /// Version number the row had *before* the write being recorded.
    pub version: i64,
    /// One of "add", "update", "delete".
    pub action: &'a str,
    pub tags: &'a [String],
    pub metadata: &'a serde_json::Value,
    /// Encrypted blob as stored in the `secrets.encrypted` BYTEA column.
    pub encrypted: &'a [u8],
}
|
|
||||||
|
|
||||||
/// Snapshot a secrets row into `secrets_history` before a write operation.
|
|
||||||
/// `action` is one of "add", "update", "delete".
|
|
||||||
/// Failures are non-fatal (caller should warn).
|
|
||||||
pub async fn snapshot_history(
|
|
||||||
tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
|
||||||
p: SnapshotParams<'_>,
|
|
||||||
) -> Result<()> {
|
|
||||||
let actor = std::env::var("USER").unwrap_or_default();
|
|
||||||
sqlx::query(
|
|
||||||
"INSERT INTO secrets_history \
|
|
||||||
(secret_id, namespace, kind, name, version, action, tags, metadata, encrypted, actor) \
|
|
||||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
|
|
||||||
)
|
|
||||||
.bind(p.secret_id)
|
|
||||||
.bind(p.namespace)
|
|
||||||
.bind(p.kind)
|
|
||||||
.bind(p.name)
|
|
||||||
.bind(p.version)
|
|
||||||
.bind(p.action)
|
|
||||||
.bind(p.tags)
|
|
||||||
.bind(p.metadata)
|
|
||||||
.bind(p.encrypted)
|
|
||||||
.bind(&actor)
|
|
||||||
.execute(&mut **tx)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Load the Argon2id salt from the database.
|
|
||||||
/// Returns None if not yet initialized.
|
|
||||||
pub async fn load_argon2_salt(pool: &PgPool) -> Result<Option<Vec<u8>>> {
|
|
||||||
let row: Option<(Vec<u8>,)> =
|
|
||||||
sqlx::query_as("SELECT value FROM kv_config WHERE key = 'argon2_salt'")
|
|
||||||
.fetch_optional(pool)
|
|
||||||
.await?;
|
|
||||||
Ok(row.map(|(v,)| v))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Store the Argon2id salt in the database (only called once on first device init).
|
|
||||||
pub async fn store_argon2_salt(pool: &PgPool, salt: &[u8]) -> Result<()> {
|
|
||||||
sqlx::query(
|
|
||||||
"INSERT INTO kv_config (key, value) VALUES ('argon2_salt', $1) \
|
|
||||||
ON CONFLICT (key) DO NOTHING",
|
|
||||||
)
|
|
||||||
.bind(salt)
|
|
||||||
.execute(pool)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
693
src/main.rs
693
src/main.rs
@@ -1,693 +0,0 @@
|
|||||||
mod audit;
|
|
||||||
mod commands;
|
|
||||||
mod config;
|
|
||||||
mod crypto;
|
|
||||||
mod db;
|
|
||||||
mod models;
|
|
||||||
mod output;
|
|
||||||
|
|
||||||
use anyhow::Result;
|
|
||||||
use clap::{Parser, Subcommand};
|
|
||||||
use tracing_subscriber::EnvFilter;
|
|
||||||
|
|
||||||
use output::resolve_output_mode;
|
|
||||||
|
|
||||||
// Top-level CLI definition. NOTE: the `///` doc comments on fields below are
// consumed by clap and become user-visible --help text, so they are behavior,
// not documentation — do not edit them casually.
#[derive(Parser)]
#[command(
    name = "secrets",
    version,
    about = "Secrets & config manager backed by PostgreSQL — optimised for AI agents",
    after_help = "QUICK START:
  # 1. Configure database (once per device)
  secrets config set-db \"postgres://postgres:<password>@<host>:<port>/secrets\"

  # 2. Initialize master key (once per device)
  secrets init

  # Discover what namespaces / kinds exist
  secrets search --summary --limit 20

  # Precise lookup (JSON output for easy parsing)
  secrets search -n refining --kind service --name gitea -o json

  # Extract a single metadata field directly
  secrets search -n refining --kind service --name gitea -f metadata.url

  # Pipe-friendly (non-TTY defaults to json-compact automatically)
  secrets search -n refining --kind service | jq '.[].name'

  # Inject secrets into environment variables when you really need them
  secrets inject -n refining --kind service --name gitea"
)]
struct Cli {
    /// Database URL, overrides saved config (one-time override)
    #[arg(long, global = true, default_value = "")]
    db_url: String,

    /// Enable verbose debug output
    #[arg(long, short, global = true)]
    verbose: bool,

    #[command(subcommand)]
    command: Commands,
}
|
|
||||||
|
|
||||||
#[derive(Subcommand)]
|
|
||||||
enum Commands {
|
|
||||||
/// Initialize master key on this device (run once per device).
|
|
||||||
///
|
|
||||||
/// Prompts for a master password, derives a key with Argon2id, and stores
|
|
||||||
/// it in the OS Keychain. Use the same password on every device.
|
|
||||||
///
|
|
||||||
/// NOTE: Run `secrets config set-db <URL>` first if database is not configured.
|
|
||||||
#[command(after_help = "PREREQUISITE:
|
|
||||||
Database must be configured first. Run: secrets config set-db <DATABASE_URL>
|
|
||||||
|
|
||||||
EXAMPLES:
|
|
||||||
# First device: generates a new Argon2id salt and stores master key
|
|
||||||
secrets init
|
|
||||||
|
|
||||||
# Subsequent devices: reuses existing salt from the database
|
|
||||||
secrets init")]
|
|
||||||
Init,
|
|
||||||
|
|
||||||
/// Add or update a record (upsert). Use -m for plaintext metadata, -s for secrets.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Add a server
|
|
||||||
secrets add -n refining --kind server --name my-server \\
|
|
||||||
--tag aliyun --tag shanghai \\
|
|
||||||
-m ip=47.117.131.22 -m desc=\"Aliyun Shanghai ECS\" \\
|
|
||||||
-s username=root -s ssh_key=@./keys/server.pem
|
|
||||||
|
|
||||||
# Add a service credential
|
|
||||||
secrets add -n refining --kind service --name gitea \\
|
|
||||||
--tag gitea \\
|
|
||||||
-m url=https://gitea.refining.dev -m default_org=refining \\
|
|
||||||
-s token=<token>
|
|
||||||
|
|
||||||
# Add typed JSON metadata
|
|
||||||
secrets add -n refining --kind service --name gitea \\
|
|
||||||
-m port:=3000 \\
|
|
||||||
-m enabled:=true \\
|
|
||||||
-m domains:='[\"gitea.refining.dev\",\"git.refining.dev\"]' \\
|
|
||||||
-m tls:='{\"enabled\":true,\"redirect_http\":true}'
|
|
||||||
|
|
||||||
# Add with token read from a file
|
|
||||||
secrets add -n ricnsmart --kind service --name mqtt \\
|
|
||||||
-m host=mqtt.ricnsmart.com -m port=1883 \\
|
|
||||||
-s password=@./mqtt_password.txt
|
|
||||||
|
|
||||||
# Add typed JSON secrets
|
|
||||||
secrets add -n refining --kind service --name deploy-bot \\
|
|
||||||
-s enabled:=true \\
|
|
||||||
-s retry_count:=3 \\
|
|
||||||
-s scopes:='[\"repo\",\"workflow\"]' \\
|
|
||||||
-s extra:='{\"region\":\"ap-east-1\",\"verify_tls\":true}'
|
|
||||||
|
|
||||||
# Write a multiline file into a nested secret field
|
|
||||||
secrets add -n refining --kind server --name my-server \\
|
|
||||||
-s credentials:content@./keys/server.pem")]
|
|
||||||
Add {
|
|
||||||
/// Namespace, e.g. refining, ricnsmart
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: String,
|
|
||||||
/// Kind of record: server, service, key, ...
|
|
||||||
#[arg(long)]
|
|
||||||
kind: String,
|
|
||||||
/// Human-readable unique name, e.g. gitea, i-uf63f2uookgs5uxmrdyc
|
|
||||||
#[arg(long)]
|
|
||||||
name: String,
|
|
||||||
/// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong
|
|
||||||
#[arg(long = "tag")]
|
|
||||||
tags: Vec<String>,
|
|
||||||
/// Plaintext metadata: key=value, key:=<json>, key=@file, or nested:path@file
|
|
||||||
#[arg(long = "meta", short = 'm')]
|
|
||||||
meta: Vec<String>,
|
|
||||||
/// Secret entry: key=value, key:=<json>, key=@file, or nested:path@file
|
|
||||||
#[arg(long = "secret", short = 's')]
|
|
||||||
secrets: Vec<String>,
|
|
||||||
/// Output format: text (default on TTY), json, json-compact
|
|
||||||
#[arg(short, long = "output")]
|
|
||||||
output: Option<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Search / read records. This is the primary read command for AI agents.
|
|
||||||
///
|
|
||||||
/// Supports fuzzy search (-q), exact lookup (--name), field extraction (-f),
|
|
||||||
/// summary view (--summary), pagination (--limit / --offset), and structured
|
|
||||||
/// output (-o json / json-compact). When stdout is not a TTY, output
|
|
||||||
/// defaults to json-compact automatically.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Discover all records (summary, safe default limit)
|
|
||||||
secrets search --summary --limit 20
|
|
||||||
|
|
||||||
# Filter by namespace and kind
|
|
||||||
secrets search -n refining --kind service
|
|
||||||
|
|
||||||
# Exact lookup — returns 0 or 1 record
|
|
||||||
secrets search -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# Fuzzy keyword search (matches name, namespace, kind, tags, metadata)
|
|
||||||
secrets search -q mqtt
|
|
||||||
|
|
||||||
# Extract a single metadata field value
|
|
||||||
secrets search -n refining --kind service --name gitea -f metadata.url
|
|
||||||
|
|
||||||
# Multiple fields at once
|
|
||||||
secrets search -n refining --kind service --name gitea \\
|
|
||||||
-f metadata.url -f metadata.default_org
|
|
||||||
|
|
||||||
# Inject decrypted secrets only when needed
|
|
||||||
secrets inject -n refining --kind service --name gitea
|
|
||||||
secrets run -n refining --kind service --name gitea -- printenv
|
|
||||||
|
|
||||||
# Paginate large result sets
|
|
||||||
secrets search -n refining --summary --limit 10 --offset 0
|
|
||||||
secrets search -n refining --summary --limit 10 --offset 10
|
|
||||||
|
|
||||||
# Sort by most recently updated
|
|
||||||
secrets search --sort updated --limit 5 --summary
|
|
||||||
|
|
||||||
# Non-TTY / pipe: output is json-compact by default
|
|
||||||
secrets search -n refining --kind service | jq '.[].name'")]
|
|
||||||
Search {
|
|
||||||
/// Filter by namespace, e.g. refining, ricnsmart
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: Option<String>,
|
|
||||||
/// Filter by kind, e.g. server, service
|
|
||||||
#[arg(long)]
|
|
||||||
kind: Option<String>,
|
|
||||||
/// Exact name filter, e.g. gitea, i-uf63f2uookgs5uxmrdyc
|
|
||||||
#[arg(long)]
|
|
||||||
name: Option<String>,
|
|
||||||
/// Filter by tag, e.g. --tag aliyun (repeatable for AND intersection)
|
|
||||||
#[arg(long)]
|
|
||||||
tag: Vec<String>,
|
|
||||||
/// Fuzzy keyword (matches name, namespace, kind, tags, metadata text)
|
|
||||||
#[arg(short, long)]
|
|
||||||
query: Option<String>,
|
|
||||||
/// Deprecated: search never reveals secrets; use inject/run instead
|
|
||||||
#[arg(long)]
|
|
||||||
show_secrets: bool,
|
|
||||||
/// Extract metadata field value(s) directly: metadata.<key> (repeatable)
|
|
||||||
#[arg(short = 'f', long = "field")]
|
|
||||||
fields: Vec<String>,
|
|
||||||
/// Return lightweight summary only (namespace, kind, name, tags, desc, updated_at)
|
|
||||||
#[arg(long)]
|
|
||||||
summary: bool,
|
|
||||||
/// Maximum number of records to return [default: 50]
|
|
||||||
#[arg(long, default_value = "50")]
|
|
||||||
limit: u32,
|
|
||||||
/// Skip this many records (for pagination)
|
|
||||||
#[arg(long, default_value = "0")]
|
|
||||||
offset: u32,
|
|
||||||
/// Sort order: name (default), updated, created
|
|
||||||
#[arg(long, default_value = "name")]
|
|
||||||
sort: String,
|
|
||||||
/// Output format: text (default on TTY), json, json-compact
|
|
||||||
#[arg(short, long = "output")]
|
|
||||||
output: Option<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Delete a record permanently. Requires exact namespace + kind + name.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Delete a service credential
|
|
||||||
secrets delete -n refining --kind service --name legacy-mqtt
|
|
||||||
|
|
||||||
# Delete a server record
|
|
||||||
secrets delete -n ricnsmart --kind server --name i-old-server-id")]
|
|
||||||
Delete {
|
|
||||||
/// Namespace, e.g. refining
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: String,
|
|
||||||
/// Kind, e.g. server, service
|
|
||||||
#[arg(long)]
|
|
||||||
kind: String,
|
|
||||||
/// Exact name of the record to delete
|
|
||||||
#[arg(long)]
|
|
||||||
name: String,
|
|
||||||
/// Output format: text (default on TTY), json, json-compact
|
|
||||||
#[arg(short, long = "output")]
|
|
||||||
output: Option<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Incrementally update an existing record (merge semantics; record must exist).
|
|
||||||
///
|
|
||||||
/// Only the fields you pass are changed — everything else is preserved.
|
|
||||||
/// Use --add-tag / --remove-tag to modify tags without touching other fields.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Update a single metadata field (all other fields unchanged)
|
|
||||||
secrets update -n refining --kind server --name my-server -m ip=10.0.0.1
|
|
||||||
|
|
||||||
# Rotate a secret token
|
|
||||||
secrets update -n refining --kind service --name gitea -s token=<new-token>
|
|
||||||
|
|
||||||
# Update typed JSON metadata
|
|
||||||
secrets update -n refining --kind service --name gitea \\
|
|
||||||
-m deploy:strategy:='{\"type\":\"rolling\",\"batch\":2}' \\
|
|
||||||
-m runtime:max_open_conns:=20
|
|
||||||
|
|
||||||
# Add a tag and rotate password at the same time
|
|
||||||
secrets update -n refining --kind service --name gitea \\
|
|
||||||
--add-tag production -s token=<new-token>
|
|
||||||
|
|
||||||
# Remove a deprecated metadata field and a stale secret key
|
|
||||||
secrets update -n refining --kind service --name mqtt \\
|
|
||||||
--remove-meta old_port --remove-secret old_password
|
|
||||||
|
|
||||||
# Remove a nested field
|
|
||||||
secrets update -n refining --kind server --name my-server \\
|
|
||||||
--remove-secret credentials:content
|
|
||||||
|
|
||||||
# Remove a tag
|
|
||||||
secrets update -n refining --kind service --name gitea --remove-tag staging
|
|
||||||
|
|
||||||
# Update a nested secret field from a file
|
|
||||||
secrets update -n refining --kind server --name my-server \\
|
|
||||||
-s credentials:content@./keys/server.pem
|
|
||||||
|
|
||||||
# Update nested typed JSON fields
|
|
||||||
secrets update -n refining --kind service --name deploy-bot \\
|
|
||||||
-s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\
|
|
||||||
-s auth:retry:=5")]
|
|
||||||
Update {
|
|
||||||
/// Namespace, e.g. refining, ricnsmart
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: String,
|
|
||||||
/// Kind of record: server, service, key, ...
|
|
||||||
#[arg(long)]
|
|
||||||
kind: String,
|
|
||||||
/// Human-readable unique name
|
|
||||||
#[arg(long)]
|
|
||||||
name: String,
|
|
||||||
/// Add a tag (repeatable; does not affect existing tags)
|
|
||||||
#[arg(long = "add-tag")]
|
|
||||||
add_tags: Vec<String>,
|
|
||||||
/// Remove a tag (repeatable)
|
|
||||||
#[arg(long = "remove-tag")]
|
|
||||||
remove_tags: Vec<String>,
|
|
||||||
/// Set or overwrite a metadata field: key=value, key:=<json>, key=@file, or nested:path@file
|
|
||||||
#[arg(long = "meta", short = 'm')]
|
|
||||||
meta: Vec<String>,
|
|
||||||
/// Delete a metadata field by key or nested path, e.g. old_port or credentials:content
|
|
||||||
#[arg(long = "remove-meta")]
|
|
||||||
remove_meta: Vec<String>,
|
|
||||||
/// Set or overwrite a secret field: key=value, key:=<json>, key=@file, or nested:path@file
|
|
||||||
#[arg(long = "secret", short = 's')]
|
|
||||||
secrets: Vec<String>,
|
|
||||||
/// Delete a secret field by key or nested path, e.g. old_password or credentials:content
|
|
||||||
#[arg(long = "remove-secret")]
|
|
||||||
remove_secrets: Vec<String>,
|
|
||||||
/// Output format: text (default on TTY), json, json-compact
|
|
||||||
#[arg(short, long = "output")]
|
|
||||||
output: Option<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Manage CLI configuration (database connection, etc.)
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Configure the database URL (run once per device; persisted to config file)
|
|
||||||
secrets config set-db \"postgres://postgres:<password>@<host>:<port>/secrets\"
|
|
||||||
|
|
||||||
# Show current config (password is masked)
|
|
||||||
secrets config show
|
|
||||||
|
|
||||||
# Print path to the config file
|
|
||||||
secrets config path")]
|
|
||||||
Config {
|
|
||||||
#[command(subcommand)]
|
|
||||||
action: ConfigAction,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Show the change history for a record.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Show last 20 versions for a service record
|
|
||||||
secrets history -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# Show last 5 versions
|
|
||||||
secrets history -n refining --kind service --name gitea --limit 5")]
|
|
||||||
History {
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: String,
|
|
||||||
#[arg(long)]
|
|
||||||
kind: String,
|
|
||||||
#[arg(long)]
|
|
||||||
name: String,
|
|
||||||
/// Number of history entries to show [default: 20]
|
|
||||||
#[arg(long, default_value = "20")]
|
|
||||||
limit: u32,
|
|
||||||
/// Output format: text (default on TTY), json, json-compact
|
|
||||||
#[arg(short, long = "output")]
|
|
||||||
output: Option<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Roll back a record to a previous version.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Roll back to the most recent snapshot (undo last change)
|
|
||||||
secrets rollback -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# Roll back to a specific version number
|
|
||||||
secrets rollback -n refining --kind service --name gitea --to-version 3")]
|
|
||||||
Rollback {
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: String,
|
|
||||||
#[arg(long)]
|
|
||||||
kind: String,
|
|
||||||
#[arg(long)]
|
|
||||||
name: String,
|
|
||||||
/// Target version to restore. Omit to restore the most recent snapshot.
|
|
||||||
#[arg(long)]
|
|
||||||
to_version: Option<i64>,
|
|
||||||
/// Output format: text (default on TTY), json, json-compact
|
|
||||||
#[arg(short, long = "output")]
|
|
||||||
output: Option<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Print secrets as environment variables (stdout only, nothing persisted).
|
|
||||||
///
|
|
||||||
/// Outputs KEY=VALUE pairs for all matched records. Safe to pipe or eval.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Print env vars for a single service
|
|
||||||
secrets inject -n refining --kind service --name gitea
|
|
||||||
|
|
||||||
# With a custom prefix
|
|
||||||
secrets inject -n refining --kind service --name gitea --prefix GITEA
|
|
||||||
|
|
||||||
# JSON output (all vars as a JSON object)
|
|
||||||
secrets inject -n refining --kind service --name gitea -o json
|
|
||||||
|
|
||||||
# Eval into current shell (use with caution)
|
|
||||||
eval $(secrets inject -n refining --kind service --name gitea)")]
|
|
||||||
Inject {
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: Option<String>,
|
|
||||||
#[arg(long)]
|
|
||||||
kind: Option<String>,
|
|
||||||
#[arg(long)]
|
|
||||||
name: Option<String>,
|
|
||||||
#[arg(long)]
|
|
||||||
tag: Vec<String>,
|
|
||||||
/// Prefix to prepend to every variable name (uppercased automatically)
|
|
||||||
#[arg(long, default_value = "")]
|
|
||||||
prefix: String,
|
|
||||||
/// Output format: text/KEY=VALUE (default), json, json-compact
|
|
||||||
#[arg(short, long = "output")]
|
|
||||||
output: Option<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Run a command with secrets injected as environment variables.
|
|
||||||
///
|
|
||||||
/// Secrets are available only to the child process; the current shell
|
|
||||||
/// environment is not modified. The process exit code is propagated.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Run a script with a single service's secrets injected
|
|
||||||
secrets run -n refining --kind service --name gitea -- ./deploy.sh
|
|
||||||
|
|
||||||
# Run with a tag filter (all matched records merged)
|
|
||||||
secrets run --tag production -- env | grep GITEA
|
|
||||||
|
|
||||||
# With prefix
|
|
||||||
secrets run -n refining --kind service --name gitea --prefix GITEA -- printenv")]
|
|
||||||
Run {
|
|
||||||
#[arg(short, long)]
|
|
||||||
namespace: Option<String>,
|
|
||||||
#[arg(long)]
|
|
||||||
kind: Option<String>,
|
|
||||||
#[arg(long)]
|
|
||||||
name: Option<String>,
|
|
||||||
#[arg(long)]
|
|
||||||
tag: Vec<String>,
|
|
||||||
/// Prefix to prepend to every variable name (uppercased automatically)
|
|
||||||
#[arg(long, default_value = "")]
|
|
||||||
prefix: String,
|
|
||||||
/// Command and arguments to execute with injected environment
|
|
||||||
#[arg(last = true, required = true)]
|
|
||||||
command: Vec<String>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Check for a newer version and update the binary in-place.
|
|
||||||
///
|
|
||||||
/// Downloads the latest release from Gitea and replaces the current binary.
|
|
||||||
/// No database connection or master key required.
|
|
||||||
#[command(after_help = "EXAMPLES:
|
|
||||||
# Check for updates only (no download)
|
|
||||||
secrets upgrade --check
|
|
||||||
|
|
||||||
# Download and install the latest version
|
|
||||||
secrets upgrade")]
|
|
||||||
Upgrade {
|
|
||||||
/// Only check if a newer version is available; do not download
|
|
||||||
#[arg(long)]
|
|
||||||
check: bool,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Subcommand)]
|
|
||||||
enum ConfigAction {
|
|
||||||
/// Save database URL to config file (~/.config/secrets/config.toml)
|
|
||||||
SetDb {
|
|
||||||
/// PostgreSQL connection string, e.g. postgres://user:pass@<host>:<port>/dbname
|
|
||||||
url: String,
|
|
||||||
},
|
|
||||||
/// Show current configuration (password masked)
|
|
||||||
Show,
|
|
||||||
/// Print path to the config file
|
|
||||||
Path,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() -> Result<()> {
|
|
||||||
let cli = Cli::parse();
|
|
||||||
|
|
||||||
let filter = if cli.verbose {
|
|
||||||
EnvFilter::new("secrets=debug")
|
|
||||||
} else {
|
|
||||||
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("secrets=warn"))
|
|
||||||
};
|
|
||||||
tracing_subscriber::fmt()
|
|
||||||
.with_env_filter(filter)
|
|
||||||
.with_target(false)
|
|
||||||
.init();
|
|
||||||
|
|
||||||
// config subcommand needs no database or master key
|
|
||||||
if let Commands::Config { action } = cli.command {
|
|
||||||
return commands::config::run(action).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
// upgrade needs no database or master key either
|
|
||||||
if let Commands::Upgrade { check } = cli.command {
|
|
||||||
return commands::upgrade::run(check).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
let db_url = config::resolve_db_url(&cli.db_url)?;
|
|
||||||
let pool = db::create_pool(&db_url).await?;
|
|
||||||
db::migrate(&pool).await?;
|
|
||||||
|
|
||||||
// init needs a pool but sets up the master key — handle before loading it
|
|
||||||
if let Commands::Init = cli.command {
|
|
||||||
return commands::init::run(&pool).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
// All remaining commands require the master key from the OS Keychain,
|
|
||||||
// except delete which operates on plaintext metadata only.
|
|
||||||
|
|
||||||
match cli.command {
|
|
||||||
Commands::Init | Commands::Config { .. } | Commands::Upgrade { .. } => unreachable!(),
|
|
||||||
|
|
||||||
Commands::Add {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
tags,
|
|
||||||
meta,
|
|
||||||
secrets,
|
|
||||||
output,
|
|
||||||
} => {
|
|
||||||
let master_key = crypto::load_master_key()?;
|
|
||||||
let _span =
|
|
||||||
tracing::info_span!("cmd", command = "add", %namespace, %kind, %name).entered();
|
|
||||||
let out = resolve_output_mode(output.as_deref())?;
|
|
||||||
commands::add::run(
|
|
||||||
&pool,
|
|
||||||
commands::add::AddArgs {
|
|
||||||
namespace: &namespace,
|
|
||||||
kind: &kind,
|
|
||||||
name: &name,
|
|
||||||
tags: &tags,
|
|
||||||
meta_entries: &meta,
|
|
||||||
secret_entries: &secrets,
|
|
||||||
output: out,
|
|
||||||
},
|
|
||||||
&master_key,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Search {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
tag,
|
|
||||||
query,
|
|
||||||
show_secrets,
|
|
||||||
fields,
|
|
||||||
summary,
|
|
||||||
limit,
|
|
||||||
offset,
|
|
||||||
sort,
|
|
||||||
output,
|
|
||||||
} => {
|
|
||||||
let _span = tracing::info_span!("cmd", command = "search").entered();
|
|
||||||
let out = resolve_output_mode(output.as_deref())?;
|
|
||||||
commands::search::run(
|
|
||||||
&pool,
|
|
||||||
commands::search::SearchArgs {
|
|
||||||
namespace: namespace.as_deref(),
|
|
||||||
kind: kind.as_deref(),
|
|
||||||
name: name.as_deref(),
|
|
||||||
tags: &tag,
|
|
||||||
query: query.as_deref(),
|
|
||||||
show_secrets,
|
|
||||||
fields: &fields,
|
|
||||||
summary,
|
|
||||||
limit,
|
|
||||||
offset,
|
|
||||||
sort: &sort,
|
|
||||||
output: out,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Delete {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
output,
|
|
||||||
} => {
|
|
||||||
let _span =
|
|
||||||
tracing::info_span!("cmd", command = "delete", %namespace, %kind, %name).entered();
|
|
||||||
let out = resolve_output_mode(output.as_deref())?;
|
|
||||||
commands::delete::run(&pool, &namespace, &kind, &name, out).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Update {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
add_tags,
|
|
||||||
remove_tags,
|
|
||||||
meta,
|
|
||||||
remove_meta,
|
|
||||||
secrets,
|
|
||||||
remove_secrets,
|
|
||||||
output,
|
|
||||||
} => {
|
|
||||||
let master_key = crypto::load_master_key()?;
|
|
||||||
let _span =
|
|
||||||
tracing::info_span!("cmd", command = "update", %namespace, %kind, %name).entered();
|
|
||||||
let out = resolve_output_mode(output.as_deref())?;
|
|
||||||
commands::update::run(
|
|
||||||
&pool,
|
|
||||||
commands::update::UpdateArgs {
|
|
||||||
namespace: &namespace,
|
|
||||||
kind: &kind,
|
|
||||||
name: &name,
|
|
||||||
add_tags: &add_tags,
|
|
||||||
remove_tags: &remove_tags,
|
|
||||||
meta_entries: &meta,
|
|
||||||
remove_meta: &remove_meta,
|
|
||||||
secret_entries: &secrets,
|
|
||||||
remove_secrets: &remove_secrets,
|
|
||||||
output: out,
|
|
||||||
},
|
|
||||||
&master_key,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::History {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
limit,
|
|
||||||
output,
|
|
||||||
} => {
|
|
||||||
let out = resolve_output_mode(output.as_deref())?;
|
|
||||||
commands::rollback::list_history(&pool, &namespace, &kind, &name, limit, out).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Rollback {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
to_version,
|
|
||||||
output,
|
|
||||||
} => {
|
|
||||||
let master_key = crypto::load_master_key()?;
|
|
||||||
let out = resolve_output_mode(output.as_deref())?;
|
|
||||||
commands::rollback::run(
|
|
||||||
&pool,
|
|
||||||
commands::rollback::RollbackArgs {
|
|
||||||
namespace: &namespace,
|
|
||||||
kind: &kind,
|
|
||||||
name: &name,
|
|
||||||
to_version,
|
|
||||||
output: out,
|
|
||||||
},
|
|
||||||
&master_key,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Inject {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
tag,
|
|
||||||
prefix,
|
|
||||||
output,
|
|
||||||
} => {
|
|
||||||
let master_key = crypto::load_master_key()?;
|
|
||||||
let out = resolve_output_mode(output.as_deref())?;
|
|
||||||
commands::run::run_inject(
|
|
||||||
&pool,
|
|
||||||
commands::run::InjectArgs {
|
|
||||||
namespace: namespace.as_deref(),
|
|
||||||
kind: kind.as_deref(),
|
|
||||||
name: name.as_deref(),
|
|
||||||
tags: &tag,
|
|
||||||
prefix: &prefix,
|
|
||||||
output: out,
|
|
||||||
},
|
|
||||||
&master_key,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Run {
|
|
||||||
namespace,
|
|
||||||
kind,
|
|
||||||
name,
|
|
||||||
tag,
|
|
||||||
prefix,
|
|
||||||
command,
|
|
||||||
} => {
|
|
||||||
let master_key = crypto::load_master_key()?;
|
|
||||||
commands::run::run_exec(
|
|
||||||
&pool,
|
|
||||||
commands::run::RunArgs {
|
|
||||||
namespace: namespace.as_deref(),
|
|
||||||
kind: kind.as_deref(),
|
|
||||||
name: name.as_deref(),
|
|
||||||
tags: &tag,
|
|
||||||
prefix: &prefix,
|
|
||||||
command: &command,
|
|
||||||
},
|
|
||||||
&master_key,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
use chrono::{DateTime, Utc};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
|
||||||
pub struct Secret {
|
|
||||||
pub id: Uuid,
|
|
||||||
pub namespace: String,
|
|
||||||
pub kind: String,
|
|
||||||
pub name: String,
|
|
||||||
pub tags: Vec<String>,
|
|
||||||
pub metadata: Value,
|
|
||||||
/// AES-256-GCM ciphertext: nonce(12B) || ciphertext+tag
|
|
||||||
/// Decrypt with crypto::decrypt_json() before use.
|
|
||||||
pub encrypted: Vec<u8>,
|
|
||||||
pub version: i64,
|
|
||||||
pub created_at: DateTime<Utc>,
|
|
||||||
pub updated_at: DateTime<Utc>,
|
|
||||||
}
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
use chrono::{DateTime, Local, Utc};
|
|
||||||
use std::io::IsTerminal;
|
|
||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
/// Output format for all commands.
|
|
||||||
#[derive(Debug, Clone, Default, PartialEq)]
|
|
||||||
pub enum OutputMode {
|
|
||||||
/// Human-readable text (default when stdout is a TTY)
|
|
||||||
#[default]
|
|
||||||
Text,
|
|
||||||
/// Pretty-printed JSON
|
|
||||||
Json,
|
|
||||||
/// Single-line JSON (default when stdout is NOT a TTY, e.g. piped to jq)
|
|
||||||
JsonCompact,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromStr for OutputMode {
|
|
||||||
type Err = anyhow::Error;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
match s {
|
|
||||||
"text" => Ok(Self::Text),
|
|
||||||
"json" => Ok(Self::Json),
|
|
||||||
"json-compact" => Ok(Self::JsonCompact),
|
|
||||||
other => Err(anyhow::anyhow!(
|
|
||||||
"Unknown output format '{}'. Valid: text, json, json-compact",
|
|
||||||
other
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Resolve the effective output mode.
|
|
||||||
/// - Explicit value from `--output` takes priority.
|
|
||||||
/// - TTY → text; non-TTY (piped/redirected) → json-compact.
|
|
||||||
pub fn resolve_output_mode(explicit: Option<&str>) -> anyhow::Result<OutputMode> {
|
|
||||||
if let Some(s) = explicit {
|
|
||||||
return s.parse();
|
|
||||||
}
|
|
||||||
if std::io::stdout().is_terminal() {
|
|
||||||
Ok(OutputMode::Text)
|
|
||||||
} else {
|
|
||||||
Ok(OutputMode::JsonCompact)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Format a UTC timestamp for local human-readable output.
|
|
||||||
pub fn format_local_time(dt: DateTime<Utc>) -> String {
|
|
||||||
dt.with_timezone(&Local)
|
|
||||||
.format("%Y-%m-%d %H:%M:%S %:z")
|
|
||||||
.to_string()
|
|
||||||
}
|
|
||||||
Reference in New Issue
Block a user