From 49fb7430a8d9a9bbce4f5c2f05526fc64ba00072 Mon Sep 17 00:00:00 2001 From: voson Date: Fri, 20 Mar 2026 17:36:00 +0800 Subject: [PATCH] refactor: workspace secrets-core + secrets-mcp MCP SaaS - Split library (db/crypto/service) and MCP/Web/OAuth binary - Add deploy examples and CI/docs updates Made-with: Cursor --- .gitea/workflows/secrets.yml | 243 ++--- .gitignore | 4 +- .vscode/tasks.json | 166 +-- AGENTS.md | 702 ++----------- Cargo.lock | 956 ++++++++++-------- Cargo.toml | 55 +- README.md | 401 ++------ crates/secrets-core/Cargo.toml | 26 + {src => crates/secrets-core/src}/audit.rs | 0 crates/secrets-core/src/config.rs | 20 + {src => crates/secrets-core/src}/crypto.rs | 136 +-- {src => crates/secrets-core/src}/db.rs | 93 +- crates/secrets-core/src/lib.rs | 6 + {src => crates/secrets-core/src}/models.rs | 64 +- crates/secrets-core/src/service/add.rs | 383 +++++++ crates/secrets-core/src/service/api_key.rs | 55 + crates/secrets-core/src/service/delete.rs | 268 +++++ crates/secrets-core/src/service/env_map.rs | 124 +++ crates/secrets-core/src/service/export.rs | 139 +++ crates/secrets-core/src/service/get_secret.rs | 79 ++ crates/secrets-core/src/service/history.rs | 63 ++ crates/secrets-core/src/service/import.rs | 123 +++ crates/secrets-core/src/service/mod.rs | 12 + .../secrets-core/src/service}/rollback.rs | 155 ++- crates/secrets-core/src/service/search.rs | 241 +++++ .../secrets-core/src/service}/update.rs | 206 ++-- crates/secrets-core/src/service/user.rs | 213 ++++ crates/secrets-mcp/Cargo.toml | 44 + crates/secrets-mcp/src/auth.rs | 114 +++ crates/secrets-mcp/src/main.rs | 155 +++ crates/secrets-mcp/src/oauth/google.rs | 66 ++ crates/secrets-mcp/src/oauth/mod.rs | 45 + crates/secrets-mcp/src/oauth/wechat.rs | 18 + crates/secrets-mcp/src/tools.rs | 609 +++++++++++ crates/secrets-mcp/src/web.rs | 494 +++++++++ crates/secrets-mcp/templates/dashboard.html | 725 +++++++++++++ crates/secrets-mcp/templates/login.html | 131 +++ deploy/.env.example | 27 + 
deploy/secrets-mcp.service | 27 + scripts/release-check.sh | 8 +- scripts/setup-gitea-actions.sh | 80 +- src/commands/add.rs | 459 --------- src/commands/config.rs | 55 - src/commands/delete.rs | 291 ------ src/commands/export_cmd.rs | 109 -- src/commands/history.rs | 78 -- src/commands/import_cmd.rs | 217 ---- src/commands/init.rs | 70 -- src/commands/mod.rs | 12 - src/commands/run.rs | 248 ----- src/commands/search.rs | 568 ----------- src/commands/upgrade.rs | 411 -------- src/config.rs | 77 -- src/main.rs | 853 ---------------- src/output.rs | 60 -- test-fixtures/example-key.pem | 3 - 56 files changed, 5531 insertions(+), 5456 deletions(-) create mode 100644 crates/secrets-core/Cargo.toml rename {src => crates/secrets-core/src}/audit.rs (100%) create mode 100644 crates/secrets-core/src/config.rs rename {src => crates/secrets-core/src}/crypto.rs (59%) rename {src => crates/secrets-core/src}/db.rs (70%) create mode 100644 crates/secrets-core/src/lib.rs rename {src => crates/secrets-core/src}/models.rs (83%) create mode 100644 crates/secrets-core/src/service/add.rs create mode 100644 crates/secrets-core/src/service/api_key.rs create mode 100644 crates/secrets-core/src/service/delete.rs create mode 100644 crates/secrets-core/src/service/env_map.rs create mode 100644 crates/secrets-core/src/service/export.rs create mode 100644 crates/secrets-core/src/service/get_secret.rs create mode 100644 crates/secrets-core/src/service/history.rs create mode 100644 crates/secrets-core/src/service/import.rs create mode 100644 crates/secrets-core/src/service/mod.rs rename {src/commands => crates/secrets-core/src/service}/rollback.rs (51%) create mode 100644 crates/secrets-core/src/service/search.rs rename {src/commands => crates/secrets-core/src/service}/update.rs (50%) create mode 100644 crates/secrets-core/src/service/user.rs create mode 100644 crates/secrets-mcp/Cargo.toml create mode 100644 crates/secrets-mcp/src/auth.rs create mode 100644 crates/secrets-mcp/src/main.rs create 
mode 100644 crates/secrets-mcp/src/oauth/google.rs create mode 100644 crates/secrets-mcp/src/oauth/mod.rs create mode 100644 crates/secrets-mcp/src/oauth/wechat.rs create mode 100644 crates/secrets-mcp/src/tools.rs create mode 100644 crates/secrets-mcp/src/web.rs create mode 100644 crates/secrets-mcp/templates/dashboard.html create mode 100644 crates/secrets-mcp/templates/login.html create mode 100644 deploy/.env.example create mode 100644 deploy/secrets-mcp.service delete mode 100644 src/commands/add.rs delete mode 100644 src/commands/config.rs delete mode 100644 src/commands/delete.rs delete mode 100644 src/commands/export_cmd.rs delete mode 100644 src/commands/history.rs delete mode 100644 src/commands/import_cmd.rs delete mode 100644 src/commands/init.rs delete mode 100644 src/commands/mod.rs delete mode 100644 src/commands/run.rs delete mode 100644 src/commands/search.rs delete mode 100644 src/commands/upgrade.rs delete mode 100644 src/config.rs delete mode 100644 src/main.rs delete mode 100644 src/output.rs delete mode 100644 test-fixtures/example-key.pem diff --git a/.gitea/workflows/secrets.yml b/.gitea/workflows/secrets.yml index fa58e88..d8e2330 100644 --- a/.gitea/workflows/secrets.yml +++ b/.gitea/workflows/secrets.yml @@ -1,12 +1,15 @@ -name: Secrets CLI - Build & Release +# MCP 分支:仅构建/发布 secrets-mcp(CLI 在 main 分支维护) +name: Secrets MCP — Build & Release on: push: - branches: [main] + branches: [main, feat/mcp] paths: - - 'src/**' + - 'crates/**' - 'Cargo.toml' - 'Cargo.lock' + # systemd / 部署模板变更也应跑构建(产物无变时可快速跳过 check) + - 'deploy/**' concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -16,8 +19,7 @@ permissions: contents: write env: - BINARY_NAME: secrets - SECRETS_UPGRADE_URL: ${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/latest + MCP_BINARY: secrets-mcp CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 CARGO_TERM_COLOR: always @@ -40,9 +42,9 @@ jobs: - name: 解析版本 id: ver run: | - version=$(grep -m1 '^version' Cargo.toml | 
sed 's/.*"\(.*\)".*/\1/') - tag="secrets-${version}" - previous_tag=$(git tag --list 'secrets-*' --sort=-v:refname | awk -v tag="$tag" '$0 != tag { print; exit }') + version=$(grep -m1 '^version' crates/secrets-mcp/Cargo.toml | sed 's/.*"\(.*\)".*/\1/') + tag="secrets-mcp-${version}" + previous_tag=$(git tag --list 'secrets-mcp-*' --sort=-v:refname | awk -v tag="$tag" '$0 != tag { print; exit }') echo "version=${version}" >> "$GITHUB_OUTPUT" echo "tag=${tag}" >> "$GITHUB_OUTPUT" @@ -60,7 +62,7 @@ jobs: if: steps.ver.outputs.tag_exists == 'true' run: | echo "错误: 版本 ${{ steps.ver.outputs.tag }} 已存在,禁止重复发版。" - echo "请先 bump Cargo.toml 中的 version,并执行 cargo build 同步 Cargo.lock。" + echo "请先 bump crates/secrets-mcp/Cargo.toml 中的 version,并执行 cargo build 同步 Cargo.lock。" exit 1 - name: 创建 Tag @@ -112,7 +114,7 @@ jobs: payload=$(jq -n \ --arg tag "$tag" \ - --arg name "${{ env.BINARY_NAME }} ${version}" \ + --arg name "secrets-mcp ${version}" \ --arg body "$body" \ '{tag_name: $tag, name: $name, body: $body, draft: true}') @@ -138,7 +140,7 @@ jobs: check: name: 质量检查 (fmt / clippy / test) runs-on: debian - timeout-minutes: 10 + timeout-minutes: 15 steps: - name: 安装 Rust run: | @@ -168,10 +170,10 @@ jobs: - run: cargo test --locked build-linux: - name: Build (x86_64-unknown-linux-musl) + name: Build Linux (secrets-mcp, musl) needs: [version, check] runs-on: debian - timeout-minutes: 15 + timeout-minutes: 25 steps: - name: 安装依赖 run: | @@ -198,8 +200,10 @@ jobs: restore-keys: | cargo-x86_64-unknown-linux-musl- - - run: cargo build --release --locked --target x86_64-unknown-linux-musl - - run: strip target/x86_64-unknown-linux-musl/release/${{ env.BINARY_NAME }} + - name: 构建 secrets-mcp (musl) + run: | + cargo build --release --locked --target x86_64-unknown-linux-musl -p secrets-mcp + strip target/x86_64-unknown-linux-musl/release/${{ env.MCP_BINARY }} - name: 上传 Release 产物 if: needs.version.outputs.release_id != '' @@ -208,16 +212,15 @@ jobs: run: | [ -z "$RELEASE_TOKEN" ] && 
exit 0 tag="${{ needs.version.outputs.tag }}" - bin="target/x86_64-unknown-linux-musl/release/${{ env.BINARY_NAME }}" - archive="${{ env.BINARY_NAME }}-${tag}-x86_64-linux-musl.tar.gz" + bin="target/x86_64-unknown-linux-musl/release/${{ env.MCP_BINARY }}" + archive="${{ env.MCP_BINARY }}-${tag}-x86_64-linux-musl.tar.gz" tar -czf "$archive" -C "$(dirname "$bin")" "$(basename "$bin")" sha256sum "$archive" > "${archive}.sha256" + release_url="${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets" curl -fsS -H "Authorization: token $RELEASE_TOKEN" \ - -F "attachment=@${archive}" \ - "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets" + -F "attachment=@${archive}" "$release_url" curl -fsS -H "Authorization: token $RELEASE_TOKEN" \ - -F "attachment=@${archive}.sha256" \ - "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets" + -F "attachment=@${archive}.sha256" "$release_url" - name: 飞书通知 if: always() @@ -231,7 +234,7 @@ jobs: url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}" result="${{ job.status }}" if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi - msg="secrets linux 构建${icon} + msg="secrets-mcp linux 构建${icon} 版本:${tag} 提交:${commit} 作者:${{ github.actor }} @@ -239,23 +242,29 @@ jobs: payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}') curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL" - build-macos: - name: Build (macOS aarch64 + x86_64) - needs: [version, check] - runs-on: darwin-arm64 - timeout-minutes: 15 + deploy-mcp: + name: 部署 secrets-mcp + needs: [version, build-linux] + # 部署目标由仓库 Actions 配置:vars.DEPLOY_HOST / vars.DEPLOY_USER;私钥 secrets.DEPLOY_SSH_KEY(PEM 原文,勿 base64) + # (可用 scripts/setup-gitea-actions.sh 或 Gitea API 写入,勿写进本文件) + # Google OAuth / 
SERVER_MASTER_KEY / SECRETS_DATABASE_URL 等勿写入 CI,请在 ECS 上 + # /opt/secrets-mcp/.env 配置(见 deploy/.env.example)。 + # 若仓库 main 仍为纯 CLI、仅 feat/mcp 含本 workflow,请去掉条件里的 main,避免误部署。 + if: needs.build-linux.result == 'success' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/feat/mcp') + runs-on: debian + timeout-minutes: 10 steps: - - name: 安装依赖 + - uses: actions/checkout@v4 + + - name: 安装 Rust run: | if ! command -v cargo >/dev/null 2>&1; then curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable + echo "$HOME/.cargo/bin" >> "$GITHUB_PATH" fi source "$HOME/.cargo/env" 2>/dev/null || true - rustup target add aarch64-apple-darwin - rustup target add x86_64-apple-darwin - echo "$HOME/.cargo/bin" >> "$GITHUB_PATH" - - - uses: actions/checkout@v4 + sudo apt-get update -qq && sudo apt-get install -y -qq pkg-config musl-tools + rustup target add x86_64-unknown-linux-musl - name: 缓存 Cargo uses: actions/cache@v4 @@ -265,45 +274,43 @@ jobs: ~/.cargo/registry/cache ~/.cargo/git/db target - key: cargo-macos-${{ hashFiles('Cargo.lock') }} + key: cargo-x86_64-unknown-linux-musl-${{ hashFiles('Cargo.lock') }} restore-keys: | - cargo-macos- + cargo-x86_64-unknown-linux-musl- - - run: cargo build --release --locked --target aarch64-apple-darwin - - run: cargo build --release --locked --target x86_64-apple-darwin - - run: strip -x target/aarch64-apple-darwin/release/${{ env.BINARY_NAME }} - - run: strip -x target/x86_64-apple-darwin/release/${{ env.BINARY_NAME }} - - - name: 上传 Release 产物 - if: needs.version.outputs.release_id != '' - env: - RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }} + - name: 构建 secrets-mcp run: | - [ -z "$RELEASE_TOKEN" ] && exit 0 - tag="${{ needs.version.outputs.tag }}" - release_id="${{ needs.version.outputs.release_id }}" + cargo build --release --locked --target x86_64-unknown-linux-musl -p secrets-mcp + strip target/x86_64-unknown-linux-musl/release/${{ env.MCP_BINARY }} - 
arm_bin="target/aarch64-apple-darwin/release/${{ env.BINARY_NAME }}" - arm_archive="${{ env.BINARY_NAME }}-${tag}-aarch64-macos.tar.gz" - tar -czf "$arm_archive" -C "$(dirname "$arm_bin")" "$(basename "$arm_bin")" - shasum -a 256 "$arm_archive" > "${arm_archive}.sha256" - curl -fsS -H "Authorization: token $RELEASE_TOKEN" \ - -F "attachment=@${arm_archive}" \ - "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets" - curl -fsS -H "Authorization: token $RELEASE_TOKEN" \ - -F "attachment=@${arm_archive}.sha256" \ - "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets" + - name: 部署到阿里云 ECS + env: + DEPLOY_HOST: ${{ vars.DEPLOY_HOST }} + DEPLOY_USER: ${{ vars.DEPLOY_USER }} + DEPLOY_SSH_KEY: ${{ secrets.DEPLOY_SSH_KEY }} + run: | + if [ -z "$DEPLOY_HOST" ] || [ -z "$DEPLOY_USER" ] || [ -z "$DEPLOY_SSH_KEY" ]; then + echo "部署跳过:请在仓库 Actions 中配置 vars.DEPLOY_HOST、vars.DEPLOY_USER 与 secrets.DEPLOY_SSH_KEY" + exit 0 + fi - intel_bin="target/x86_64-apple-darwin/release/${{ env.BINARY_NAME }}" - intel_archive="${{ env.BINARY_NAME }}-${tag}-x86_64-macos.tar.gz" - tar -czf "$intel_archive" -C "$(dirname "$intel_bin")" "$(basename "$intel_bin")" - shasum -a 256 "$intel_archive" > "${intel_archive}.sha256" - curl -fsS -H "Authorization: token $RELEASE_TOKEN" \ - -F "attachment=@${intel_archive}" \ - "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets" - curl -fsS -H "Authorization: token $RELEASE_TOKEN" \ - -F "attachment=@${intel_archive}.sha256" \ - "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${release_id}/assets" + echo "$DEPLOY_SSH_KEY" > /tmp/deploy_key + chmod 600 /tmp/deploy_key + + SCP="scp -i /tmp/deploy_key -o StrictHostKeyChecking=no" + + $SCP target/x86_64-unknown-linux-musl/release/${{ env.MCP_BINARY }} \ + "${DEPLOY_USER}@${DEPLOY_HOST}:/tmp/secrets-mcp.new" + + ssh -i /tmp/deploy_key -o StrictHostKeyChecking=no 
"${DEPLOY_USER}@${DEPLOY_HOST}" " + sudo mv /tmp/secrets-mcp.new /opt/secrets-mcp/secrets-mcp + sudo chmod +x /opt/secrets-mcp/secrets-mcp + sudo systemctl restart secrets-mcp + sleep 2 + sudo systemctl is-active secrets-mcp && echo '服务启动成功' || (sudo journalctl -u secrets-mcp -n 20 && exit 1) + " + + rm -f /tmp/deploy_key - name: 飞书通知 if: always() @@ -311,102 +318,29 @@ jobs: WEBHOOK_URL: ${{ vars.WEBHOOK_URL }} run: | [ -z "$WEBHOOK_URL" ] && exit 0 + command -v jq >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq jq) tag="${{ needs.version.outputs.tag }}" commit=$(git log -1 --pretty=format:"%s" 2>/dev/null || echo "N/A") url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}" result="${{ job.status }}" if [ "$result" = "success" ]; then icon="✅"; else icon="❌"; fi - msg="secrets macOS 双架构构建${icon} + msg="secrets-mcp 部署${icon} 版本:${tag} - 目标:aarch64-apple-darwin, x86_64-apple-darwin 提交:${commit} 作者:${{ github.actor }} 详情:${url}" - payload=$(python3 -c "import json,sys; print(json.dumps({'msg_type':'text','content':{'text':sys.argv[1]}}))" "$msg") + payload=$(jq -n --arg text "$msg" '{msg_type: "text", content: {text: $text}}') curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$WEBHOOK_URL" - build-windows: - name: Build (x86_64-pc-windows-msvc) - needs: [version, check] - runs-on: windows - timeout-minutes: 15 - steps: - - name: 安装依赖 - shell: pwsh - run: | - $cargoBin = Join-Path $env:USERPROFILE ".cargo\bin" - if (-not (Get-Command cargo -ErrorAction SilentlyContinue)) { - Invoke-WebRequest -Uri "https://win.rustup.rs/x86_64" -OutFile rustup-init.exe - .\rustup-init.exe -y --default-toolchain stable - Remove-Item rustup-init.exe - } - $env:Path = "$cargoBin;$env:Path" - Add-Content -Path $env:GITHUB_PATH -Value $cargoBin - rustup target add x86_64-pc-windows-msvc - - - uses: actions/checkout@v4 - - - name: 缓存 Cargo - uses: actions/cache@v4 - with: - path: | - ~/.cargo/registry/index 
- ~/.cargo/registry/cache - ~/.cargo/git/db - target - key: cargo-x86_64-pc-windows-msvc-${{ hashFiles('Cargo.lock') }} - restore-keys: | - cargo-x86_64-pc-windows-msvc- - - - name: 构建 - shell: pwsh - run: cargo build --release --locked --target x86_64-pc-windows-msvc - - - name: 上传 Release 产物 - if: needs.version.outputs.release_id != '' - shell: pwsh - env: - RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }} - run: | - if (-not $env:RELEASE_TOKEN) { exit 0 } - $tag = "${{ needs.version.outputs.tag }}" - $bin = "target\x86_64-pc-windows-msvc\release\${{ env.BINARY_NAME }}.exe" - $archive = "${{ env.BINARY_NAME }}-${tag}-x86_64-windows.zip" - Compress-Archive -Path $bin -DestinationPath $archive -Force - $hash = (Get-FileHash -Algorithm SHA256 $archive).Hash.ToLower() - Set-Content -Path "${archive}.sha256" -Value "$hash $archive" -NoNewline - $url = "${{ github.server_url }}/api/v1/repos/${{ github.repository }}/releases/${{ needs.version.outputs.release_id }}/assets" - Invoke-RestMethod -Uri $url -Method Post ` - -Headers @{ "Authorization" = "token $env:RELEASE_TOKEN" } ` - -Form @{ attachment = Get-Item $archive } - Invoke-RestMethod -Uri $url -Method Post ` - -Headers @{ "Authorization" = "token $env:RELEASE_TOKEN" } ` - -Form @{ attachment = Get-Item "${archive}.sha256" } - - - name: 飞书通知 - if: always() - shell: pwsh - env: - WEBHOOK_URL: ${{ vars.WEBHOOK_URL }} - run: | - if (-not $env:WEBHOOK_URL) { exit 0 } - $tag = "${{ needs.version.outputs.tag }}" - $commit = (git log -1 --pretty=format:"%s" 2>$null) ?? 
"N/A" - $url = "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}" - $result = "${{ job.status }}" - $icon = if ($result -eq "success") { "✅" } else { "❌" } - $msg = "secrets windows 构建${icon}`n版本:${tag}`n提交:${commit}`n作者:${{ github.actor }}`n详情:${url}" - $payload = @{ msg_type = "text"; content = @{ text = $msg } } | ConvertTo-Json - Invoke-RestMethod -Uri $env:WEBHOOK_URL -Method Post ` - -ContentType "application/json" -Body $payload - publish-release: name: 发布草稿 Release - needs: [version, build-linux, build-macos, build-windows] + needs: [version, build-linux] if: always() && needs.version.outputs.release_id != '' runs-on: debian timeout-minutes: 5 steps: + - uses: actions/checkout@v4 + - name: 发布草稿 env: RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }} @@ -414,11 +348,8 @@ jobs: [ -z "$RELEASE_TOKEN" ] && exit 0 linux_r="${{ needs.build-linux.result }}" - macos_r="${{ needs.build-macos.result }}" - windows_r="${{ needs.build-windows.result }}" - if [ "$linux_r" != "success" ] || [ "$macos_r" != "success" ] || [ "$windows_r" != "success" ]; then - echo "存在未成功的构建任务,保留草稿 Release" - echo "linux=${linux_r} macos=${macos_r} windows=${windows_r}" + if [ "$linux_r" != "success" ]; then + echo "linux 构建未成功,保留草稿 Release" exit 0 fi @@ -451,15 +382,13 @@ jobs: url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_number }}" linux_r="${{ needs.build-linux.result }}" - macos_r="${{ needs.build-macos.result }}" - windows_r="${{ needs.build-windows.result }}" publish_r="${{ job.status }}" icon() { case "$1" in success) echo "✅";; skipped) echo "⏭";; *) echo "❌";; esac; } - if [ "$linux_r" = "success" ] && [ "$macos_r" = "success" ] && [ "$windows_r" = "success" ] && [ "$publish_r" = "success" ]; then + if [ "$linux_r" = "success" ] && [ "$publish_r" = "success" ]; then status="发布成功 ✅" - elif [ "$linux_r" != "success" ] || [ "$macos_r" != "success" ] || [ "$windows_r" != "success" ]; then + elif [ "$linux_r" != 
"success" ]; then status="构建失败 ❌" else status="发布失败 ❌" @@ -471,9 +400,9 @@ jobs: version_line="🔄 重复构建 ${tag}" fi - msg="secrets ${status} + msg="secrets-mcp ${status} ${version_line} - linux $(icon "$linux_r") | macOS $(icon "$macos_r") | windows $(icon "$windows_r") | Release $(icon "$publish_r") + linux $(icon "$linux_r") | Release $(icon "$publish_r") 提交:${commit} 作者:${{ github.actor }} 详情:${url}" diff --git a/.gitignore b/.gitignore index d1fc889..08ca2ea 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ /target .env .DS_Store -.cursor/ \ No newline at end of file +.cursor/ +# Google OAuth 下载的 JSON 凭据文件 +client_secret_*.apps.googleusercontent.com.json diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 70f226a..ae77f9e 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -4,146 +4,48 @@ { "label": "build", "type": "shell", - "command": "cargo build", + "command": "cargo build --workspace --locked", "group": { "kind": "build", "isDefault": true } }, { - "label": "cli: version", + "label": "mcp: build", "type": "shell", - "command": "./target/debug/secrets -V", + "command": "cargo build --locked -p secrets-mcp", + "group": "build" + }, + { + "label": "mcp: run", + "type": "shell", + "command": "cargo run --locked -p secrets-mcp", + "dependsOn": "mcp: build", + "options": { + "envFile": "${workspaceFolder}/.env" + } + }, + { + "label": "test: workspace", + "type": "shell", + "command": "cargo test --workspace --locked", + "dependsOn": "build", + "group": { "kind": "test", "isDefault": true } + }, + { + "label": "fmt: check", + "type": "shell", + "command": "cargo fmt -- --check", + "problemMatcher": [] + }, + { + "label": "clippy: workspace", + "type": "shell", + "command": "cargo clippy --workspace --locked -- -D warnings", "dependsOn": "build" }, { - "label": "cli: help", + "label": "ci: release-check", "type": "shell", - "command": "./target/debug/secrets --help", - "dependsOn": "build" - }, - { - "label": "cli: help add", - "type": 
"shell", - "command": "./target/debug/secrets help add", - "dependsOn": "build" - }, - { - "label": "cli: help config", - "type": "shell", - "command": "./target/debug/secrets help config", - "dependsOn": "build" - }, - { - "label": "cli: config path", - "type": "shell", - "command": "./target/debug/secrets config path", - "dependsOn": "build" - }, - { - "label": "cli: config show", - "type": "shell", - "command": "./target/debug/secrets config show", - "dependsOn": "build" - }, - { - "label": "test: search all", - "type": "shell", - "command": "./target/debug/secrets search", - "dependsOn": "build" - }, - { - "label": "test: search all (verbose)", - "type": "shell", - "command": "./target/debug/secrets --verbose search", - "dependsOn": "build" - }, - { - "label": "test: search by namespace (refining)", - "type": "shell", - "command": "./target/debug/secrets search -n refining", - "dependsOn": "build" - }, - { - "label": "test: search by namespace (ricnsmart)", - "type": "shell", - "command": "./target/debug/secrets search -n ricnsmart", - "dependsOn": "build" - }, - { - "label": "test: search servers", - "type": "shell", - "command": "./target/debug/secrets search --kind server", - "dependsOn": "build" - }, - { - "label": "test: search services", - "type": "shell", - "command": "./target/debug/secrets search --kind service", - "dependsOn": "build" - }, - { - "label": "test: search keys", - "type": "shell", - "command": "./target/debug/secrets search --kind key", - "dependsOn": "build" - }, - { - "label": "test: search by tag (aliyun)", - "type": "shell", - "command": "./target/debug/secrets search --tag aliyun", - "dependsOn": "build" - }, - { - "label": "test: search by tag (hongkong)", - "type": "shell", - "command": "./target/debug/secrets search --tag hongkong", - "dependsOn": "build" - }, - { - "label": "test: search keyword (gitea)", - "type": "shell", - "command": "./target/debug/secrets search -q gitea", - "dependsOn": "build" - }, - { - "label": "test: 
run service secrets", - "type": "shell", - "command": "./target/debug/secrets run -n refining --kind service --name gitea -- printenv", - "dependsOn": "build" - }, - { - "label": "test: combined search (ricnsmart + server + shanghai)", - "type": "shell", - "command": "./target/debug/secrets search -n ricnsmart --kind server --tag shanghai", - "dependsOn": "build" - }, - { - "label": "test: add + delete roundtrip", - "type": "shell", - "command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name roundtrip-test --tag test -m foo=bar -s password=secret123 && echo '--- search metadata ---' && ./target/debug/secrets search -n test && echo '--- run secrets ---' && ./target/debug/secrets run -n test --kind demo --name roundtrip-test -- printenv && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name roundtrip-test && echo '--- verify deleted ---' && ./target/debug/secrets search -n test", - "dependsOn": "build" - }, - { - "label": "test: add + delete roundtrip (verbose)", - "type": "shell", - "command": "echo '--- add (verbose) ---' && ./target/debug/secrets --verbose add -n test --kind demo --name roundtrip-verbose --tag test -m foo=bar -s password=secret123 && echo '--- delete (verbose) ---' && ./target/debug/secrets --verbose delete -n test --kind demo --name roundtrip-verbose", - "dependsOn": "build" - }, - { - "label": "test: update roundtrip", - "type": "shell", - "command": "echo '--- add ---' && ./target/debug/secrets add -n test --kind demo --name update-test --tag v1 -m env=staging && echo '--- update ---' && ./target/debug/secrets update -n test --kind demo --name update-test --add-tag v2 --remove-tag v1 -m env=production && echo '--- verify ---' && ./target/debug/secrets search -n test --kind demo && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind demo --name update-test", - "dependsOn": "build" - }, - { - "label": "test: audit log", - "type": "shell", - "command": "echo '--- add 
---' && ./target/debug/secrets add -n test --kind demo --name audit-test -m foo=bar -s key=val && echo '--- update ---' && ./target/debug/secrets update -n test --kind demo --name audit-test -m foo=baz && echo '--- delete ---' && ./target/debug/secrets delete -n test --kind demo --name audit-test && echo '--- audit log (last 5) ---' && psql $DATABASE_URL -c \"SELECT action, namespace, kind, name, actor, detail, created_at FROM audit_log ORDER BY created_at DESC LIMIT 5;\"", - "dependsOn": "build" - }, - { - "label": "test: add with file secret", - "type": "shell", - "command": "echo '--- add key from file ---' && ./target/debug/secrets add -n test --kind key --name test-key --tag test -s content=@./test-fixtures/example-key.pem && echo '--- verify metadata ---' && ./target/debug/secrets search -n test --kind key && echo '--- verify run ---' && ./target/debug/secrets run -n test --kind key --name test-key -- printenv && echo '--- cleanup ---' && ./target/debug/secrets delete -n test --kind key --name test-key", - "dependsOn": "build" + "command": "./scripts/release-check.sh", + "problemMatcher": [] } ] } diff --git a/AGENTS.md b/AGENTS.md index 99c1969..7899d0c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,68 +1,49 @@ -# Secrets CLI — AGENTS.md +# Secrets MCP — AGENTS.md -## 提交 / 发版硬规则(优先于下文其他说明) +本仓库为 **MCP SaaS**:`secrets-core`(业务与持久化)+ `secrets-mcp`(Streamable HTTP MCP、Web、OAuth、API Key)。对外入口见 `crates/secrets-mcp`。 -1. 涉及 `src/**`、`Cargo.toml`、`Cargo.lock`、CLI 行为变更的提交,默认视为**需要发版**,除非用户明确说明“本次不发版”。 -2. 发版前必须先检查 `Cargo.toml` 中的 `version`,再检查是否已存在对应 tag:`git tag -l 'secrets-*'`。 -3. 若当前版本对应 tag 已存在,必须先 bump `Cargo.toml` 的 `version`,再执行 `cargo build` 同步 `Cargo.lock`,然后才能提交。 -4. 
提交前优先运行 `./scripts/release-check.sh`;该脚本会检查重复版本并执行 `cargo fmt -- --check && cargo clippy --locked -- -D warnings && cargo test --locked`。 +## 提交 / 发版硬规则(优先于下文) -跨设备密钥与配置管理 CLI 工具,将服务器信息、服务凭据等存储到 PostgreSQL 18,供 AI 工具读取上下文。每个加密字段单独行存储(`secrets` 子表),字段名、类型、长度以明文保存,主密钥由 Argon2id 从主密码派生并存入平台安全存储(macOS Keychain / Windows Credential Manager / Linux keyutils)。 +1. 涉及 `crates/**`、根目录 `Cargo.toml`/`Cargo.lock`、`secrets-mcp` 行为变更的提交,默认视为**需要发版**,除非明确说明「本次不发版」。 +2. 发版前检查 `crates/secrets-mcp/Cargo.toml` 的 `version`,再查 tag:`git tag -l 'secrets-mcp-*'`。 +3. 若当前版本对应 tag 已存在,须先 bump `version`,再 `cargo build` 同步 `Cargo.lock` 后提交。 +4. 提交前优先运行 `./scripts/release-check.sh`(版本/tag + `fmt` + `clippy --locked` + `test --locked`)。 ## 项目结构 ``` secrets/ - src/ - main.rs # CLI 入口,clap 命令定义,auto-migrate,--verbose 全局参数 - output.rs # OutputMode 枚举(默认 json,-o text 供人类使用) - config.rs # 配置读写:~/.config/secrets/config.toml(database_url) - db.rs # PgPool 创建 + 建表/索引(DROP+CREATE,含所有表) - crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串 - models.rs # Entry + SecretField 结构体(sqlx::FromRow + serde) - audit.rs # 审计写入:log_tx(事务内) - commands/ - init.rs # init 命令:主密钥初始化(每台设备一次) - add.rs # add 命令:upsert entries + 逐字段写入 secrets,含历史快照 - config.rs # config 命令:set-db / show / path(持久化 database_url) - search.rs # search 命令:多条件查询,展示 secrets 字段 schema(无需 master_key) - delete.rs # delete 命令:事务化,CASCADE 删除 secrets,含历史快照 - update.rs # update 命令:增量更新,secrets 行级 UPSERT/DELETE,CAS 并发保护 - rollback.rs # rollback 命令:按 entry_version 恢复 entry + secrets - history.rs # history 命令:查看 entry 变更历史列表 - run.rs # run 命令:仅 secrets 逐字段解密 + key_ref 引用解析(不含 metadata) - upgrade.rs # upgrade 命令:检查、校验摘要并下载最新版本,自动替换二进制 - export_cmd.rs # export 命令:批量导出记录,支持 JSON/TOML/YAML,含解密明文 - import_cmd.rs # import 命令:批量导入记录,冲突检测,dry-run,重新加密写入 + Cargo.toml + crates/ + secrets-core/ # db / crypto / models / audit / service + secrets-mcp/ # rmcp tools、axum、OAuth、Dashboard scripts/ - release-check.sh # 发版前检查版本号/tag 是否重复,并执行 fmt/clippy/test - 
setup-gitea-actions.sh # 配置 Gitea Actions 变量与 Secrets - .gitea/workflows/ - secrets.yml # CI:fmt + clippy + musl 构建 + Release 上传 + 飞书通知 - .vscode/tasks.json # 本地测试任务(build / config / search / add+delete / update / audit 等) + release-check.sh + setup-gitea-actions.sh + .gitea/workflows/secrets.yml + .vscode/tasks.json ``` ## 数据库 -- **Host**: `:` -- **Database**: `secrets` -- **连接串**: `postgres://postgres:@:/secrets` -- **表**: `entries`(主表)+ `secrets`(加密字段子表)+ `entries_history` + `secrets_history` + `audit_log` + `kv_config`,首次连接自动建表(auto-migrate) +- **建议库名**:`secrets-mcp`(专用实例,与历史库名区分)。 +- **连接**:环境变量 **`SECRETS_DATABASE_URL`**(本分支无本地配置文件路径)。 +- **表**:`entries`(含 `user_id`)、`secrets`、`entries_history`、`secrets_history`、`audit_log`、`users`、`oauth_accounts`、`api_keys`,首次连接 **auto-migrate**。 -### 表结构 +### 表结构(摘录) ```sql entries ( - id UUID PRIMARY KEY DEFAULT uuidv7(), -- PG18 时间有序 UUID - namespace VARCHAR(64) NOT NULL, -- 一级隔离: "refining" | "ricnsmart" - kind VARCHAR(64) NOT NULL, -- 类型: "server" | "service" | "key"(可扩展) - name VARCHAR(256) NOT NULL, -- 人类可读标识 - tags TEXT[] NOT NULL DEFAULT '{}', -- 灵活标签: ["aliyun","hongkong"] - metadata JSONB NOT NULL DEFAULT '{}', -- 明文描述: ip, desc, domains, location... 
- version BIGINT NOT NULL DEFAULT 1, -- 乐观锁版本号,每次写操作自增 + id UUID PRIMARY KEY DEFAULT uuidv7(), + user_id UUID, -- 多租户:NULL=遗留行;非空=归属用户 + namespace VARCHAR(64) NOT NULL, + kind VARCHAR(64) NOT NULL, + name VARCHAR(256) NOT NULL, + tags TEXT[] NOT NULL DEFAULT '{}', + metadata JSONB NOT NULL DEFAULT '{}', + version BIGINT NOT NULL DEFAULT 1, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - UNIQUE(namespace, kind, name) + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ) ``` @@ -70,8 +51,8 @@ entries ( secrets ( id UUID PRIMARY KEY DEFAULT uuidv7(), entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, - field_name VARCHAR(256) NOT NULL, -- 明文字段名: "username", "token", "ssh_key" - encrypted BYTEA NOT NULL DEFAULT '\x', -- 仅加密值本身:nonce(12B)||ciphertext+tag + field_name VARCHAR(256) NOT NULL, + encrypted BYTEA NOT NULL DEFAULT '\x', version BIGINT NOT NULL DEFAULT 1, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), @@ -79,588 +60,109 @@ secrets ( ) ``` +### users / oauth_accounts / api_keys + ```sql -kv_config ( - key TEXT PRIMARY KEY, -- 如 'argon2_salt' - value BYTEA NOT NULL -- Argon2id salt,首台设备 init 时生成 +users ( + id UUID PRIMARY KEY DEFAULT uuidv7(), + email VARCHAR(256), + name VARCHAR(256) NOT NULL DEFAULT '', + avatar_url TEXT, + key_salt BYTEA, -- PBKDF2 salt(32B),首次设置密码短语时写入 + key_check BYTEA, -- 派生密钥加密已知常量,用于验证密码短语 + key_params JSONB, -- 算法参数,如 {"alg":"pbkdf2-sha256","iterations":600000} + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +) + +oauth_accounts ( + id UUID PRIMARY KEY DEFAULT uuidv7(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + provider VARCHAR(32) NOT NULL, + provider_id VARCHAR(256) NOT NULL, + ... 
+ UNIQUE(provider, provider_id) +) + +api_keys ( + id UUID PRIMARY KEY DEFAULT uuidv7(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + name VARCHAR(256) NOT NULL, + key_hash VARCHAR(64) NOT NULL UNIQUE, + key_prefix VARCHAR(12) NOT NULL, + last_used_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ) ``` -### audit_log 表结构 +### audit_log / history -```sql -audit_log ( - id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - action VARCHAR(32) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' - namespace VARCHAR(64) NOT NULL, - kind VARCHAR(64) NOT NULL, - name VARCHAR(256) NOT NULL, - detail JSONB NOT NULL DEFAULT '{}', -- 变更摘要(tags/meta keys/secret keys,不含 value) - actor VARCHAR(128) NOT NULL DEFAULT '', -- 操作者($USER 环境变量) - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -) -``` +与迁移脚本一致:`audit_log`、`entries_history`、`secrets_history` 用于审计与时间旅行恢复;字段定义见 `crates/secrets-core/src/db.rs` 内 `migrate` SQL。 -### entries_history 表结构 +### 字段职责 -```sql -entries_history ( - id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - entry_id UUID NOT NULL, - namespace VARCHAR(64) NOT NULL, - kind VARCHAR(64) NOT NULL, - name VARCHAR(256) NOT NULL, - version BIGINT NOT NULL, -- 被快照时的版本号 - action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' - tags TEXT[] NOT NULL DEFAULT '{}', - metadata JSONB NOT NULL DEFAULT '{}', - actor VARCHAR(128) NOT NULL DEFAULT '', - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -) -``` - -### secrets_history 表结构 - -```sql -secrets_history ( - id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, - entry_id UUID NOT NULL, - secret_id UUID NOT NULL, -- 对应 secrets.id - entry_version BIGINT NOT NULL, -- 关联 entries_history 的版本号 - field_name VARCHAR(256) NOT NULL, - encrypted BYTEA NOT NULL DEFAULT '\x', - action VARCHAR(16) NOT NULL, -- 'add' | 'update' | 'delete' | 'rollback' - actor VARCHAR(128) NOT NULL DEFAULT '', - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -) -``` - -### 字段职责划分 - -| 字段 | 存什么 | 示例 | 
-|------|--------|------| -| `namespace` | 项目/团队隔离 | `refining`, `ricnsmart` | +| 字段 | 含义 | 示例 | +|------|------|------| +| `namespace` | 隔离空间 | `refining` | | `kind` | 记录类型 | `server`, `service`, `key` | -| `name` | 唯一标识名 | `i-example0abcd1234efgh`, `gitea` | -| `tags` | 多维分类标签 | `["aliyun","hongkong","ricn"]` | -| `metadata` | 明文非敏感信息 | `{"ip":"192.0.2.1","desc":"Grafana","key_ref":"my-shared-key"}` | -| `secrets.field_name` | 加密字段名(明文) | `"username"`, `"token"`, `"ssh_key"` | -| `secrets.encrypted` | 仅加密值本身 | AES-256-GCM 密文 | +| `name` | 标识名 | `gitea`, `i-example0…` | +| `tags` | 标签 | `["aliyun","prod"]` | +| `metadata` | 明文描述 | `ip`、`url`、`key_ref` | +| `secrets.field_name` | 加密字段名(明文) | `token`, `ssh_key` | +| `secrets.encrypted` | 密文 | AES-GCM | -### PEM 共享机制(key_ref) +### PEM 共享(`key_ref`) -同一 PEM 被多台服务器共享时,将 PEM 存为独立的 `kind=key` 记录,服务器通过 `metadata.key_ref` 引用: - -```bash -# 1. 存共享 PEM -secrets add -n refining --kind key --name my-shared-key \ - --tag aliyun --tag hongkong \ - -s content=@./keys/my-shared-key.pem - -# 2. 服务器通过 metadata.key_ref 引用(run 时自动合并 key 的 secrets) -secrets add -n refining --kind server --name i-example0xyz789 \ - -m ip=192.0.2.1 -m key_ref=my-shared-key \ - -s username=ecs-user - -# 3. 
轮换只需更新 key 记录,所有引用服务器自动生效 -secrets update -n refining --kind key --name my-shared-key \ - -s content=@./keys/new-key.pem -``` - -## 数据库配置 - -首次使用需显式配置数据库连接,设置一次后在该设备上持久生效: - -```bash -secrets config set-db "postgres://postgres:@:/secrets" -secrets config show # 查看当前配置(密码脱敏) -secrets config path # 打印配置文件路径 -``` - -`set-db` 会先验证连接可用,成功后才写入配置文件;连接失败时提示 "Database connection failed" 且不修改配置。 - -配置文件:`~/.config/secrets/config.toml`,权限 0600。`--db-url` 参数可一次性覆盖。 - -## 主密钥与加密 - -首次使用(每台设备各执行一次): - -```bash -secrets config set-db "postgres://postgres:@:/secrets" -secrets init # 提示输入主密码,Argon2id 派生主密钥后存入 OS 钥匙串 -``` - -主密码不存储;salt 存于 `kv_config`,首台设备生成后共享,确保同一主密码在所有设备派生出相同主密钥。 - -主密钥存储后端:macOS Keychain、Windows Credential Manager、Linux keyutils(会话级,重启后需再次 `secrets init`)。 - -**从旧版(明文 JSONB)升级**:升级后执行 `secrets init` 即可(明文记录需手动重新 add 或通过 update 更新)。 - -## CLI 命令 - -### AI 使用主路径 - -**读取一律用 `search`,写入用 `add` / `update`,避免反复查帮助。** - -输出格式规则: -- 默认始终输出 `json`(pretty-printed),无论 TTY 还是管道 -- 显式 `-o json-compact` → 单行 JSON(管道处理时更紧凑) -- 显式 `-o text` → 人类可读文本格式 - ---- - -### init — 主密钥初始化(每台设备一次) - -```bash -# 首次设备:生成 Argon2id salt 并存库,派生主密钥后存 OS 钥匙串 -secrets init - -# 后续设备:复用已有 salt,派生主密钥后存钥匙串(主密码需与首台相同) -secrets init -``` - -### search — 发现与读取 - -```bash -# 参数说明(带典型值) -# -n / --namespace refining | ricnsmart -# --kind server | service -# --name gitea | i-example0abcd1234efgh | mqtt -# --tag aliyun | hongkong | production -# -q / --query mqtt | grafana | gitea (模糊匹配 name/namespace/kind/tags/metadata) -# secrets schema search 默认展示 secrets 字段名、类型与长度(无需 master_key) -# -f / --field metadata.ip | metadata.url | metadata.default_org -# --summary 不带值的 flag,仅返回摘要(name/tags/desc/updated_at) -# --limit 20 | 50(默认 50) -# --offset 0 | 10 | 20(分页偏移) -# --sort name(默认)| updated | created -# -o / --output text | json | json-compact - -# 发现概览(起步推荐) -secrets search --summary --limit 20 -secrets search -n refining --summary --limit 20 -secrets search --sort updated --limit 10 --summary - -# 精确定位单条记录 
-secrets search -n refining --kind service --name gitea -secrets search -n refining --kind server --name i-example0abcd1234efgh - -# 精确定位并获取完整内容(secrets 保持加密占位) -secrets search -n refining --kind service --name gitea -o json - -# 直接提取 metadata 字段值(最短路径) -secrets search -n refining --kind service --name gitea -f metadata.url -secrets search -n refining --kind service --name gitea \ - -f metadata.url -f metadata.default_org - -# 需要 secrets 时,改用 run -secrets run -n refining --kind service --name gitea -- printenv - -# 模糊关键词搜索 -secrets search -q mqtt -secrets search -q grafana -secrets search -q 192.0.2 - -# 按条件过滤 -secrets search -n refining --kind service -secrets search -n ricnsmart --kind server -secrets search --tag hongkong -secrets search --tag aliyun --summary - -# 分页 -secrets search -n refining --summary --limit 10 --offset 0 -secrets search -n refining --summary --limit 10 --offset 10 - -# 管道 / AI 调用(默认 json,直接可解析) -secrets search -n refining --kind service | jq '.[].name' -``` - ---- - -### add — 新增或全量覆盖(upsert) - -```bash -# 参数说明(带典型值) -# -n / --namespace refining | ricnsmart -# --kind server | service -# --name gitea | i-example0abcd1234efgh -# --tag aliyun | hongkong(可重复) -# -m / --meta ip=10.0.0.1 | desc="ECS" | url=https://... 
| tls:cert@./cert.pem(可重复) -# -s / --secret token= | ssh_key=@./key.pem | password=secret123 | credentials:content@./key.pem(可重复) - -# 添加服务器 -secrets add -n refining --kind server --name i-example0abcd1234efgh \ - --tag aliyun --tag shanghai \ - -m ip=10.0.0.1 -m desc="Aliyun Shanghai ECS" \ - -s username=root -s ssh_key=@./keys/deploy-key.pem - -# 添加服务凭据 -secrets add -n refining --kind service --name gitea \ - --tag gitea \ - -m url=https://code.example.com -m default_org=refining -m username=voson \ - -s token= -s runner_token= - -# 从文件读取 token -secrets add -n ricnsmart --kind service --name mqtt \ - -m host=mqtt.example.com -m port=1883 \ - -s password=@./mqtt_password.txt - -# 多行文件直接写入嵌套 secret 字段 -secrets add -n refining --kind server --name i-example0abcd1234efgh \ - -s credentials:content@./keys/deploy-key.pem - -# 使用类型化值(key:=)存储非字符串类型 -secrets add -n refining --kind service --name prometheus \ - -m scrape_interval:=15 \ - -m enabled:=true \ - -m labels:='["prod","metrics"]' \ - -s api_key=abc123 -``` - ---- - -### update — 增量更新(记录必须已存在) - -只有传入的字段才会变动,其余全部保留。 - -```bash -# 参数说明(带典型值) -# -n / --namespace refining | ricnsmart -# --kind server | service -# --name gitea | i-example0abcd1234efgh -# --add-tag production | backup(不影响已有 tag,可重复) -# --remove-tag staging | deprecated(可重复) -# -m / --meta ip=10.0.0.1 | desc="新描述" | credentials:username=root(新增或覆盖,可重复) -# --remove-meta old_port | legacy_key | credentials:content(删除 metadata 字段,可重复) -# -s / --secret token= | ssh_key=@./new.pem | credentials:content@./new.pem(新增或覆盖,可重复) -# --remove-secret old_password | deprecated_key | credentials:content(删除 secret 字段,可重复) - -# 更新单个 metadata 字段 -secrets update -n refining --kind server --name i-example0abcd1234efgh \ - -m ip=10.0.0.1 - -# 轮换 token -secrets update -n refining --kind service --name gitea \ - -s token= - -# 新增 tag 并轮换 token -secrets update -n refining --kind service --name gitea \ - --add-tag production \ - -s token= - -# 移除废弃字段 -secrets update -n refining 
--kind service --name mqtt \ - --remove-meta old_port --remove-secret old_password - -# 从文件更新嵌套 secret 字段 -secrets update -n refining --kind server --name i-example0abcd1234efgh \ - -s credentials:content@./keys/deploy-key.pem - -# 删除嵌套字段 -secrets update -n refining --kind server --name i-example0abcd1234efgh \ - --remove-secret credentials:content - -# 移除 tag -secrets update -n refining --kind service --name gitea --remove-tag staging -``` - ---- - -### delete — 删除记录(支持单条精确删除与批量删除) - -删除时会自动将 entry 与所有关联 secret 字段快照到历史表,并写入审计日志,可通过 `rollback` 命令恢复。 - -```bash -# 参数说明(带典型值) -# -n / --namespace refining | ricnsmart(必填) -# --kind server | service(指定 --name 时必填;批量时可选) -# --name gitea | i-example0abcd1234efgh(精确匹配;省略则批量删除) -# --dry-run 预览将删除的记录,不实际写入(仅批量模式有效) -# -o / --output text | json | json-compact - -# 精确删除单条记录(--kind 必填) -secrets delete -n refining --kind service --name legacy-mqtt -secrets delete -n ricnsmart --kind server --name i-old-server-id - -# 预览批量删除(不写入数据库) -secrets delete -n refining --dry-run -secrets delete -n ricnsmart --kind server --dry-run - -# 批量删除整个 namespace 的所有记录 -secrets delete -n ricnsmart - -# 批量删除 namespace 下指定 kind 的所有记录 -secrets delete -n ricnsmart --kind server - -# JSON 输出 -secrets delete -n refining --kind service -o json -``` - ---- - -### history — 查看变更历史 - -```bash -# 参数说明 -# -n / --namespace refining | ricnsmart -# --kind server | service -# --name 记录名 -# --limit 返回条数(默认 20) - -# 查看某条记录的历史版本列表 -secrets history -n refining --kind service --name gitea - -# 查最近 5 条 -secrets history -n refining --kind service --name gitea --limit 5 - -# JSON 输出 -secrets history -n refining --kind service --name gitea -o json -``` - ---- - -### rollback — 回滚到指定版本 - -```bash -# 参数说明 -# -n / --namespace refining | ricnsmart -# --kind server | service -# --name 记录名 -# --to-version 目标版本号(省略则恢复最近一次快照) - -# 撤销上次修改(回滚到最近一次快照) -secrets rollback -n refining --kind service --name gitea - -# 回滚到版本 3 -secrets rollback -n refining --kind service --name gitea 
--to-version 3 -``` - ---- - -### run — 向子进程注入 secrets 并执行命令 - -仅注入 secrets 表中的加密字段(解密后),不含 metadata。secrets 仅作用于子进程环境,不修改当前 shell,进程退出码透传。 - -使用 `-s/--secret` 指定只注入哪些字段(最小权限原则);使用 `--dry-run` 预览将注入哪些变量名及来源,不执行命令。 - -```bash -# 参数说明 -# -n / --namespace refining | ricnsmart -# --kind server | service -# --name 记录名 -# --tag 按 tag 过滤(可重复) -# -s / --secret 只注入指定字段名(可重复;省略则注入全部) -# --prefix 变量名前缀 -# --dry-run 预览变量映射,不执行命令 -# -o / --output json(默认)| json-compact | text -# -- 执行的命令及参数(--dry-run 时可省略) - -# 注入全部 secrets 到脚本 -secrets run -n refining --kind service --name gitea -- ./deploy.sh - -# 只注入特定字段(最小化注入范围) -secrets run -n refining --kind service --name aliyun \ - -s access_key_id -s access_key_secret -- aliyun ecs DescribeInstances - -# 按 tag 批量注入(多条记录合并) -secrets run --tag production -- env | grep -i token - -# 预览将注入哪些变量(不执行命令,默认 JSON 输出) -secrets run -n refining --kind service --name gitea --dry-run - -# 配合字段过滤预览 -secrets run -n refining --kind service --name gitea -s token --dry-run - -# text 模式预览(人类阅读) -secrets run -n refining --kind service --name gitea --dry-run -o text -``` - ---- - -### upgrade — 自动更新 CLI 二进制 - -从 Release 服务器下载最新版本,校验对应 `.sha256` 摘要后替换当前二进制,无需数据库连接或主密钥。 - -**配置方式**:`SECRETS_UPGRADE_URL` 必填。优先用**构建时**:`SECRETS_UPGRADE_URL=https://... 
cargo build`,CI 已自动注入。或**运行时**:写在 `.env` 或 `export` 后执行。 - -```bash -# 检查是否有新版本(不下载) -secrets upgrade --check - -# 下载、校验 SHA-256 并安装最新版本 -secrets upgrade -``` - ---- - -### export — 批量导出记录 - -将匹配的记录(含解密后的明文 secrets)导出到文件或 stdout。支持 JSON、TOML、YAML 三种格式,文件格式由扩展名自动推断。使用 `--no-secrets` 时无需主密钥。 - -```bash -# 参数说明 -# -n / --namespace refining | ricnsmart -# --kind server | service -# --name gitea | i-example0abcd1234efgh -# --tag aliyun | production(可重复) -# -q / --query 模糊关键词 -# --file 输出文件路径,格式由扩展名推断(.json / .toml / .yaml / .yml) -# --format json | toml | yaml 显式指定格式(输出到 stdout 时必须指定) -# --no-secrets 不导出 secrets,无需主密钥 - -# 全量导出到 JSON 文件 -secrets export --file backup.json - -# 按 namespace 导出为 TOML -secrets export -n refining --file refining.toml - -# 按 kind 导出为 YAML -secrets export -n refining --kind service --file services.yaml - -# 按 tag 过滤导出 -secrets export --tag production --file prod.json - -# 模糊关键词导出 -secrets export -q mqtt --file mqtt.json - -# 仅导出 schema(不含 secrets,无需主密钥) -secrets export --no-secrets --file schema.json - -# 输出到 stdout(必须指定 --format) -secrets export -n refining --format yaml -secrets export --format json | jq '.' 
-``` - ---- - -### import — 批量导入记录 - -从导出文件读取记录并写入数据库,自动重新加密 secrets。支持 JSON、TOML、YAML 三种格式,文件格式由扩展名自动推断。 - -```bash -# 参数说明 -# 必选,输入文件路径(格式由扩展名推断) -# --force 冲突时覆盖已有记录(默认:报错并停止) -# --dry-run 预览将执行的操作,不写入数据库 -# -o / --output text | json | json-compact - -# 导入 JSON 文件(遇到已存在记录报错) -secrets import backup.json - -# 导入 TOML 文件,冲突时覆盖 -secrets import --force refining.toml - -# 导入 YAML 文件,冲突时覆盖 -secrets import --force services.yaml - -# 预览将执行的操作(不写入) -secrets import --dry-run backup.json - -# JSON 格式输出导入摘要 -secrets import backup.json -o json -``` - ---- - -### config — 配置管理(无需主密钥) - -```bash -# 设置数据库连接(每台设备执行一次,之后永久生效;先验证连接可用再写入) -secrets config set-db "postgres://postgres:@:/secrets" - -# 查看当前配置(密码脱敏) -secrets config show - -# 打印配置文件路径 -secrets config path -# 输出: /Users//.config/secrets/config.toml -``` - ---- - -### 全局参数 - -```bash -# debug 日志(位于子命令之前) -secrets --verbose search -q mqtt -secrets -v add -n refining --kind service --name gitea -m url=xxx -s token=yyy - -# 或通过环境变量精细控制 -RUST_LOG=secrets=trace secrets search - -# 一次性覆盖数据库连接 -secrets --db-url "postgres://..." 
search -n refining -``` +将共享 PEM 存为 `kind=key` 的 entry;其它记录在 `metadata.key_ref` 指向该 key 的 `name`。更新 key 记录后,引用方通过服务层解析合并逻辑即可使用新密钥(实现见 `secrets_core::service`)。 ## 代码规范 -- 错误处理:统一使用 `anyhow::Result`,不用 `unwrap()` -- 异步:全程 `tokio`,数据库操作 `sqlx` async -- SQL:使用 `sqlx::query` / `sqlx::query_as` 绑定参数,禁止字符串拼接(搜索的动态 WHERE 子句除外,需使用参数绑定 `$1/$2`) -- 新增 `kind` 类型时:只需在 `add` 调用时传入,无需改代码 -- 字段命名:CLI 短标志 `-n`=namespace,`-m`=meta,`-s`=secret,`-q`=query,`-v`=verbose,`-f`=field,`-o`=output -- 日志:用户可见输出用 `println!`;调试/运维信息用 `tracing::debug!`/`info!`/`warn!`/`error!` -- 审计:`add`/`update`/`delete` 成功后调用 `audit::log_tx`,写入 `audit_log` 表;失败只 warn 不中断 -- 加密:`encrypted` 列存储 AES-256-GCM 密文;`add`/`update`/`search`/`delete` 需主密钥(`secrets init` 后从 OS 钥匙串加载) -- 输出:读命令通过 `OutputMode` 支持 text/json/json-compact;默认始终 `json`(pretty),`-o text` 供人类阅读;写命令 `add` 同样支持 `-o json` +- 错误:业务层 `anyhow::Result`,避免生产路径 `unwrap()`。 +- 异步:`tokio` + `sqlx` async。 +- SQL:`sqlx::query` / `query_as` 参数绑定;动态 WHERE 仍须用占位符绑定。 +- 日志:运维用 `tracing`;面向用户的 Web 响应走 axum handler。 +- 审计:写操作成功后尽量 `audit::log_tx`;失败可 `warn`,不掩盖主错误。 +- 加密:密钥由用户密码短语通过 **PBKDF2-SHA256(600k 次)** 在客户端派生,服务端只存 `key_salt`/`key_check`/`key_params`,不持有原始密钥。Web 客户端在浏览器本地完成加解密;MCP 客户端通过 `X-Encryption-Key` 请求头传递密钥,服务端临时解密后返回明文。 +- MCP:tools 参数与 JSON Schema(`schemars`)保持同步,鉴权以请求扩展中的用户上下文为准。 -## 提交前检查(必须全部通过) - -每次提交代码前,请在本地依次执行以下检查,**全部通过后再 push**: - -优先使用: +## 提交前检查 ```bash ./scripts/release-check.sh ``` -它等价于先检查版本号 / tag,再执行下面的格式、Lint、测试。 - -### 1. 
版本号(按需) - -若本次改动需要发版,请先确认 `Cargo.toml` 中的 `version` 已提升,避免 CI 打出的 Tag 与已有版本重复。**升级版本后需同时更新 `Cargo.lock`**(运行 `cargo build` 即可自动同步),否则 CI 中 `cargo clippy --locked` 会因 lock 与 manifest 不一致而失败。可通过 git tag 判断: +或手动: ```bash -# 查看当前 Cargo.toml 版本 -grep '^version' Cargo.toml - -# 查看是否已存在该版本对应的 tag(CI 使用格式 secrets-) -git tag -l 'secrets-*' +cargo fmt -- --check +cargo clippy --locked -- -D warnings +cargo test --locked ``` -若当前版本已被 tag(例如已有 `secrets-0.3.0` 且 `Cargo.toml` 仍为 `0.3.0`),则应在 `Cargo.toml` 中 bump 版本号,再执行 `cargo build` 同步 `Cargo.lock`,最后一并提交,以便 CI 自动打新 Tag 并发布 Release。 - -### 2. 格式、Lint、测试 +发版前确认未重复 tag: ```bash -cargo fmt -- --check # 格式检查(不通过则运行 cargo fmt 修复) -cargo clippy -- -D warnings # Lint 检查(消除所有 warning) -cargo test # 单元/集成测试 -``` - -或一次性执行: - -```bash -cargo fmt -- --check && cargo clippy -- -D warnings && cargo test +grep '^version' crates/secrets-mcp/Cargo.toml +git tag -l 'secrets-mcp-*' ``` ## CI/CD -- Gitea Actions(runners: debian / darwin-arm64 / windows) -- 触发:`src/**`、`Cargo.toml`、`Cargo.lock` 变更推送到 main -- 构建目标:`x86_64-unknown-linux-musl`、`aarch64-apple-darwin`、`x86_64-apple-darwin`(由 ARM mac runner 交叉编译)、`x86_64-pc-windows-msvc` -- 新版本自动打 Tag(格式 `secrets-`)并上传二进制与对应 `.sha256` 摘要到 Gitea Release -- Release 仅在 Linux/macOS/Windows 构建全部成功后才会从 draft 发布 -- 通知:飞书 Webhook(`vars.WEBHOOK_URL`) -- 所需 secrets/vars:`RELEASE_TOKEN`(Release 上传,Gitea PAT)、`vars.WEBHOOK_URL`(通知,可选) -- **注意**:Gitea Actions 的 Secret/Variable 创建时,`data`/`value` 字段需传入**原始值**,不要使用 base64 编码 +- **触发**:`main` / `feat/mcp`(以仓库 workflow 为准);路径含 `crates/**`、`deploy/**`、`Cargo.toml`、`Cargo.lock`。 +- **构建**:`x86_64-unknown-linux-musl` → `secrets-mcp`。 +- **Release**:tag `secrets-mcp-`,上传 tar.gz + `.sha256`。 +- **部署**:可选在仓库 Actions 中配置 `vars.DEPLOY_HOST`、`vars.DEPLOY_USER` 与 `secrets.DEPLOY_SSH_KEY`(勿写进 workflow);可用 `scripts/setup-gitea-actions.sh` 调 Gitea API 写入。Actions **secrets 须为原始值**(如 PEM 全文、PAT 明文),**不要**先 base64 再写入,否则工作流内无法识别(例如 SSH 私钥无效)。**勿**在 CI 中保存 `GOOGLE_CLIENT_SECRET`、DB 密码。 
+- **通知**:`vars.WEBHOOK_URL`(可选)。 -## 环境变量 +## 环境变量(secrets-mcp) | 变量 | 说明 | |------|------| -| `RUST_LOG` | 日志级别,如 `secrets=debug`、`secrets=trace`(默认 warn) | -| `USER` | 审计日志 actor 字段来源,Shell 自动设置,通常无需手动配置 | -| `SECRETS_UPGRADE_URL` | upgrade 的 Release API 地址。构建时(cargo build)或运行时(.env/export) | +| `SECRETS_DATABASE_URL` | **必填**。PostgreSQL URL。 | +| `BASE_URL` | 对外基址;OAuth 回调 `${BASE_URL}/auth/google/callback`。 | +| `SECRETS_MCP_BIND` | 监听地址,默认 `0.0.0.0:9315`。 | +| `GOOGLE_CLIENT_ID` / `GOOGLE_CLIENT_SECRET` | 可选;仅运行时配置。 | +| `RUST_LOG` | 如 `secrets_mcp=debug`。 | +| `USER` | 若写入审计 `actor`,由运行环境提供。 | -数据库连接通过 `secrets config set-db` 持久化到 `~/.config/secrets/config.toml`,不支持环境变量。 +> `SERVER_MASTER_KEY` 已不再需要。新架构下密钥由用户密码短语在客户端派生,服务端不持有。 diff --git a/Cargo.lock b/Cargo.lock index 8ecba86..10d5d9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,12 +2,6 @@ # It is not intended for manual editing. version = 4 -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - [[package]] name = "aead" version = "0.5.2" @@ -67,56 +61,6 @@ dependencies = [ "libc", ] -[[package]] -name = "anstream" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" - -[[package]] -name = "anstyle-parse" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" -dependencies = [ - "utf8parse", -] - -[[package]] 
-name = "anstyle-query" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" -dependencies = [ - "windows-sys 0.61.2", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" -dependencies = [ - "anstyle", - "once_cell_polyfill", - "windows-sys 0.61.2", -] - [[package]] name = "anyhow" version = "1.0.102" @@ -124,15 +68,56 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] -name = "argon2" -version = "0.5.3" +name = "askama" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" +checksum = "5d4744ed2eef2645831b441d8f5459689ade2ab27c854488fbab1fbe94fce1a7" dependencies = [ - "base64ct", - "blake2", - "cpufeatures 0.2.17", - "password-hash", + "askama_derive", + "itoa", + "percent-encoding", + "serde", + "serde_json", +] + +[[package]] +name = "askama_derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d661e0f57be36a5c14c48f78d09011e67e0cb618f269cca9f2fd8d15b68c46ac" +dependencies = [ + "askama_parser", + "basic-toml", + "memchr", + "proc-macro2", + "quote", + "rustc-hash", + "serde", + "serde_derive", + "syn", +] + +[[package]] +name = "askama_parser" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf315ce6524c857bb129ff794935cf6d42c82a6cff60526fe2a63593de4d0d4f" +dependencies = [ + "memchr", + "serde", + "serde_derive", + "winnow 0.7.15", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -156,6 +141,81 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "axum" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" +dependencies = [ + "axum-core", + "bytes", + "form_urlencoded", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde_core", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-extra" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9963ff19f40c6102c76756ef0a46004c0d58957d87259fc9208ff8441c12ab96" +dependencies = [ + "axum", + "axum-core", + "bytes", + "futures-util", + "headers", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "serde_core", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "base64" version = "0.22.1" @@ -168,6 +228,15 @@ version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" 
+[[package]] +name = "basic-toml" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba62675e8242a4c4e806d12f11d136e626e6c8361d6b829310732241652a178a" +dependencies = [ + "serde", +] + [[package]] name = "bitflags" version = "2.11.0" @@ -177,15 +246,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -270,52 +330,6 @@ dependencies = [ "inout", ] -[[package]] -name = "clap" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "clap_lex" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" - -[[package]] -name = "colorchoice" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" - [[package]] name = "concurrent-queue" version = "2.5.0" @@ -332,23 +346,14 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] -name = "core-foundation" -version = "0.9.4" +name = "cookie" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" -dependencies = [ - "core-foundation-sys", - "libc", + "percent-encoding", + "time", + "version_check", ] [[package]] @@ -390,15 +395,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - [[package]] name = "crossbeam-queue" version = "0.3.12" @@ -434,6 +430,40 @@ dependencies = [ "cipher", ] +[[package]] +name = "darling" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.23.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "der" version = "0.7.10" @@ -445,6 +475,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "deranged" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" +dependencies = [ + "powerfmt", + "serde_core", +] + [[package]] name = "digest" version = "0.10.7" @@ -457,27 +497,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "dirs" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.61.2", -] - [[package]] name = "displaydoc" version = "0.2.5" @@ -495,6 +514,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "either" version = "1.15.0" @@ -548,34 +573,12 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" -[[package]] -name = "filetime" -version = "0.2.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" -dependencies = [ - "cfg-if", - "libc", - "libredox", -] - [[package]] name = "find-msvc-tools" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" -[[package]] -name = "flate2" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" -dependencies = [ - "crc32fast", - "miniz_oxide", - "zlib-rs", -] - [[package]] name = "flume" version = "0.11.1" @@ -602,6 +605,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "futures" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.32" @@ -646,6 +664,17 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "futures-sink" version = "0.3.32" @@ -664,8 +693,10 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ + "futures-channel", "futures-core", "futures-io", + "futures-macro", "futures-sink", "futures-task", "memchr", @@ -760,6 +791,30 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "headers" 
+version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" +dependencies = [ + "base64", + "bytes", + "headers-core", + "http", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http", +] + [[package]] name = "heck" version = "0.5.0" @@ -838,6 +893,12 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + [[package]] name = "hyper" version = "1.8.1" @@ -851,6 +912,7 @@ dependencies = [ "http", "http-body", "httparse", + "httpdate", "itoa", "pin-project-lite", "pin-utils", @@ -1010,6 +1072,12 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "1.1.0" @@ -1068,12 +1136,6 @@ dependencies = [ "serde", ] -[[package]] -name = "is_terminal_polyfill" -version = "1.70.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" - [[package]] name = "itoa" version = "1.0.17" @@ -1090,21 +1152,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "keyring" -version = "3.6.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc3aff044e5944a8fbaf69eb277d11986064cba30c468730e8b9909fb551c" -dependencies = [ - "byteorder", - "linux-keyutils", - "log", - "security-framework 2.11.1", - "security-framework 3.7.0", - "windows-sys 0.60.2", - "zeroize", -] - [[package]] name = "lazy_static" version = "1.5.0" @@ -1154,16 +1201,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linux-keyutils" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "761e49ec5fd8a5a463f9b84e877c373d888935b71c6be78f3767fe2ae6bed18e" -dependencies = [ - "bitflags", - "libc", -] - [[package]] name = "linux-raw-sys" version = "0.12.1" @@ -1183,6 +1220,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ "scopeguard", + "serde", ] [[package]] @@ -1206,6 +1244,12 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "md-5" version = "0.10.6" @@ -1223,14 +1267,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] -name = "miniz_oxide" -version = "0.8.9" +name = "mime" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", - "simd-adler32", -] +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mio" @@ -1268,6 +1308,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "num-conv" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" + [[package]] name = "num-integer" version = "0.1.46" @@ -1304,24 +1350,12 @@ version = "1.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" -[[package]] -name = "once_cell_polyfill" -version = "1.70.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" - [[package]] name = "opaque-debug" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - [[package]] name = "parking" version = "2.2.1" @@ -1352,15 +1386,10 @@ dependencies = [ ] [[package]] -name = "password-hash" -version = "0.5.0" +name = "pastey" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" -dependencies = [ - "base64ct", - "rand_core 0.6.4", - "subtle", -] +checksum = "b867cad97c0791bbd3aaa6472142568c6c9e8f71937e98379f584cfb0cf35bec" [[package]] name = "pem-rfc7468" @@ -1443,6 +1472,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -1642,14 +1677,23 @@ dependencies = [ ] [[package]] -name = "redox_users" -version = "0.5.2" +name = "ref-cast" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" 
+checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" dependencies = [ - "getrandom 0.2.17", - "libredox", - "thiserror", + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1722,14 +1766,47 @@ dependencies = [ ] [[package]] -name = "rpassword" -version = "7.4.0" +name = "rmcp" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d4c8b64f049c6721ec8ccec37ddfc3d641c4a7fca57e8f2a89de509c73df39" +checksum = "ba6b9d2f0efe2258b23767f1f9e0054cfbcac9c2d6f81a031214143096d7864f" dependencies = [ - "libc", - "rtoolbox", - "windows-sys 0.59.0", + "async-trait", + "base64", + "bytes", + "chrono", + "futures", + "http", + "http-body", + "http-body-util", + "pastey", + "pin-project-lite", + "rand 0.10.0", + "rmcp-macros", + "schemars", + "serde", + "serde_json", + "sse-stream", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tower-service", + "tracing", + "uuid", +] + +[[package]] +name = "rmcp-macros" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab9d95d7ed26ad8306352b0d5f05b593222b272790564589790d210aa15caa9e" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "serde_json", + "syn", ] [[package]] @@ -1752,16 +1829,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rtoolbox" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7cc970b249fbe527d6e02e0a227762c9108b2f49d81094fe357ffc6d14d7f6f" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "rustc-hash" version = "2.1.1" @@ -1828,6 +1895,32 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" +[[package]] +name = "schemars" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" +dependencies = [ + "chrono", + "dyn-clone", + "ref-cast", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d115b50f4aaeea07e79c1912f645c7513d81715d0420f8bc77a18c6260b307f" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -1835,83 +1928,53 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "secrets" -version = "0.9.6" +name = "secrets-core" +version = "0.1.0" dependencies = [ "aes-gcm", "anyhow", - "argon2", "chrono", - "clap", - "dirs", - "dotenvy", - "flate2", - "keyring", "rand 0.10.0", - "reqwest", - "rpassword", - "self-replace", - "semver", "serde", "serde_json", "serde_yaml", "sha2", "sqlx", - "tar", "tempfile", "tokio", "toml", "tracing", - "tracing-subscriber", "uuid", - "zip", ] [[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +name = "secrets-mcp" +version = "0.1.0" dependencies = [ - "bitflags", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" -dependencies = [ - "bitflags", - "core-foundation 0.10.1", - "core-foundation-sys", - "libc", - 
"security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "self-replace" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ec815b5eab420ab893f63393878d89c90fdd94c0bcc44c07abb8ad95552fb7" -dependencies = [ - "fastrand", - "tempfile", - "windows-sys 0.52.0", + "anyhow", + "askama", + "axum", + "axum-extra", + "chrono", + "dotenvy", + "http", + "rand 0.10.0", + "reqwest", + "rmcp", + "schemars", + "secrets-core", + "serde", + "serde_json", + "sha2", + "sqlx", + "tokio", + "tower", + "tower-http", + "tower-sessions", + "tracing", + "tracing-subscriber", + "urlencoding", + "uuid", ] [[package]] @@ -1950,6 +2013,17 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_json" version = "1.0.149" @@ -1963,6 +2037,17 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + [[package]] name = "serde_spanned" version = "1.0.4" @@ -2054,12 +2139,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "simd-adler32" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" - [[package]] name = "slab" version = "0.4.12" @@ -2302,6 +2381,19 @@ dependencies = 
[ "uuid", ] +[[package]] +name = "sse-stream" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb4dc4d33c68ec1f27d386b5610a351922656e1fdf5c05bbaad930cd1519479a" +dependencies = [ + "bytes", + "futures-util", + "http-body", + "http-body-util", + "pin-project-lite", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -2362,17 +2454,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tar" -version = "0.4.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" -dependencies = [ - "filetime", - "libc", - "xattr", -] - [[package]] name = "tempfile" version = "3.27.0" @@ -2415,6 +2496,37 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde_core", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tinystr" version = "0.8.2" @@ -2488,6 +2600,19 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + [[package]] name = "toml" version = "1.0.7+spec-1.1.0" 
@@ -2500,7 +2625,7 @@ dependencies = [ "toml_datetime", "toml_parser", "toml_writer", - "winnow", + "winnow 1.0.0", ] [[package]] @@ -2518,7 +2643,7 @@ version = "1.0.10+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420" dependencies = [ - "winnow", + "winnow 1.0.0", ] [[package]] @@ -2540,6 +2665,23 @@ dependencies = [ "tokio", "tower-layer", "tower-service", + "tracing", +] + +[[package]] +name = "tower-cookies" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "151b5a3e3c45df17466454bb74e9ecedecc955269bdedbf4d150dfa393b55a36" +dependencies = [ + "axum-core", + "cookie", + "futures-util", + "http", + "parking_lot", + "pin-project-lite", + "tower-layer", + "tower-service", ] [[package]] @@ -2572,6 +2714,57 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +[[package]] +name = "tower-sessions" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a05911f23e8fae446005fe9b7b97e66d95b6db589dc1c4d59f6a2d4d4927d3" +dependencies = [ + "async-trait", + "http", + "time", + "tokio", + "tower-cookies", + "tower-layer", + "tower-service", + "tower-sessions-core", + "tower-sessions-memory-store", + "tracing", +] + +[[package]] +name = "tower-sessions-core" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8cce604865576b7751b7a6bc3058f754569a60d689328bb74c52b1d87e355b" +dependencies = [ + "async-trait", + "axum-core", + "base64", + "futures", + "http", + "parking_lot", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror", + "time", + "tokio", + "tracing", +] + +[[package]] +name = "tower-sessions-memory-store" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fb05909f2e1420135a831dd5df9f5596d69196d0a64c3499ca474c4bd3d33242" +dependencies = [ + "async-trait", + "time", + "tokio", + "tower-sessions-core", +] + [[package]] name = "tracing" version = "0.1.44" @@ -2640,12 +2833,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "typed-path" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e28f89b80c87b8fb0cf04ab448d5dd0dd0ade2f8891bae878de66a75a28600e" - [[package]] name = "typenum" version = "1.19.0" @@ -2719,24 +2906,25 @@ dependencies = [ "serde", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8_iter" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - [[package]] name = "uuid" version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37" dependencies = [ + "getrandom 0.4.2", "js-sys", "serde_core", "wasm-bindgen", @@ -3017,15 +3205,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-sys" version = "0.60.2" @@ -3230,6 +3409,15 @@ version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" +[[package]] +name = "winnow" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" +dependencies = [ + "memchr", +] + [[package]] name = "winnow" version = "1.0.0" @@ -3330,16 +3518,6 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" -[[package]] -name = "xattr" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" -dependencies = [ - "libc", - "rustix", -] - [[package]] name = "yoke" version = "0.8.1" @@ -3443,40 +3621,8 @@ dependencies = [ "syn", ] -[[package]] -name = "zip" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b680f2a0cd479b4cff6e1233c483fdead418106eae419dc60200ae9850f6d004" -dependencies = [ - "crc32fast", - "flate2", - "indexmap", - "memchr", - "typed-path", - "zopfli", -] - -[[package]] -name = "zlib-rs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be3d40e40a133f9c916ee3f9f4fa2d9d63435b5fbe1bfc6d9dae0aa0ada1513" - [[package]] name = "zmij" version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" - -[[package]] -name = "zopfli" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05cd8797d63865425ff89b5c4a48804f35ba0ce8d125800027ad6017d2b5249" -dependencies = [ - "bumpalo", - "crc32fast", - "log", - "simd-adler32", -] diff --git a/Cargo.toml b/Cargo.toml index 03ee094..e103bf6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,33 +1,38 @@ 
-[package] -name = "secrets" -version = "0.9.6" +[workspace] +members = [ + "crates/secrets-core", + "crates/secrets-mcp", +] +resolver = "2" + +[workspace.package] edition = "2024" -[dependencies] -aes-gcm = "^0.10.3" -anyhow = "^1.0.102" -argon2 = { version = "^0.5.3", features = ["std"] } -chrono = { version = "^0.4.44", features = ["serde"] } -clap = { version = "^4.6.0", features = ["derive"] } -dirs = "^6.0.0" -dotenvy = "^0.15" -flate2 = "^1.1.9" -keyring = { version = "^3.6.3", features = ["apple-native", "windows-native", "linux-native"] } -rand = "^0.10.0" -reqwest = { version = "^0.12", default-features = false, features = ["rustls-tls", "json"] } -rpassword = "^7.4.0" -self-replace = "^1.5.0" -semver = "^1.0.27" +[workspace.dependencies] +# Async runtime +tokio = { version = "^1.50.0", features = ["rt-multi-thread", "macros", "fs", "io-util", "process", "signal"] } + +# Database +sqlx = { version = "^0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] } + +# Serialization serde = { version = "^1.0.228", features = ["derive"] } serde_json = "^1.0.149" serde_yaml = "^0.9" -sha2 = "^0.10.9" -sqlx = { version = "^0.8.6", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "json", "chrono"] } -tar = "^0.4.44" -tempfile = "^3.19" -tokio = { version = "^1.50.0", features = ["rt-multi-thread", "macros", "fs", "io-util", "process", "signal"] } toml = "^1.0.7" + +# Crypto +aes-gcm = "^0.10.3" +sha2 = "^0.10.9" +rand = "^0.10.0" + +# Utils +anyhow = "^1.0.102" +chrono = { version = "^0.4.44", features = ["serde"] } +uuid = { version = "^1.22.0", features = ["serde"] } tracing = "^0.1" tracing-subscriber = { version = "^0.3", features = ["env-filter"] } -uuid = { version = "^1.22.0", features = ["serde"] } -zip = { version = "^8.2.0", default-features = false, features = ["deflate"] } +dotenvy = "^0.15" + +# HTTP +reqwest = { version = "^0.12", default-features = false, features = ["rustls-tls", "json"] } diff --git 
a/README.md b/README.md index 6d7d2dc..cede465 100644 --- a/README.md +++ b/README.md @@ -1,308 +1,149 @@ -# secrets +# secrets-mcp -跨设备密钥与配置管理 CLI,基于 Rust + PostgreSQL 18。 - -将服务器信息、服务凭据统一存入数据库,供本地工具和 AI 读取上下文。每个敏感字段单独行存储(`secrets` 子表),字段名、类型、长度以明文保存便于 AI 理解,仅值本身使用 AES-256-GCM 加密;主密钥由 Argon2id 从主密码派生并存入系统钥匙串。 +Workspace:**`secrets-core`** + **`secrets-mcp`**(HTTP Streamable MCP + Web)。多租户密钥与元数据存 PostgreSQL;用户通过 **Google OAuth** 登录,**API Key** 鉴权 MCP 请求;秘密数据用**用户密码短语派生的密钥**在客户端加密,服务端不持有原始密钥。 ## 安装 ```bash -cargo build --release -# 或从 Release 页面下载预编译二进制 +cargo build --release -p secrets-mcp +# 产物: target/release/secrets-mcp ``` -已有旧版本时,可执行 `secrets upgrade` 自动下载最新版并替换。该命令会校验 Release 附带的 `.sha256` 摘要后再安装。 +发版产物见 Gitea Release(tag:`secrets-mcp-`,Linux musl 预编译);其它平台本地 `cargo build`。 -## 首次使用(每台设备各执行一次) +## 环境变量与本地运行 + +复制 `deploy/.env.example` 为项目根目录 `.env`(已在 `.gitignore`),或导出同名变量: + +| 变量 | 说明 | +|------|------| +| `SECRETS_DATABASE_URL` | **必填**。PostgreSQL 连接串(建议专用库,如 `secrets-mcp`)。 | +| `BASE_URL` | 对外访问基址;OAuth 回调为 `{BASE_URL}/auth/google/callback`。默认 `http://localhost:9315`。 | +| `SECRETS_MCP_BIND` | 监听地址,默认 `0.0.0.0:9315`。反代时常为 `127.0.0.1:9315`。 | +| `GOOGLE_CLIENT_ID` / `GOOGLE_CLIENT_SECRET` | 可选;不配置则无 Google 登录入口。运行时从环境读取,勿写入 CI、勿打入二进制。 | ```bash -# 1. 配置数据库连接(会先验证连接可用再写入) -secrets config set-db "postgres://postgres:@:/secrets" - -# 2. 
初始化主密钥(提示输入至少 8 位的主密码,派生后存入 OS 钥匙串) -secrets init +cargo run -p secrets-mcp ``` -主密码不会存储,仅用于派生主密钥,且至少需 8 位。同一主密码在所有设备上会得到相同主密钥(salt 存于数据库,首台设备生成后共享)。 +- **Web**:`BASE_URL`(登录、Dashboard、设置密码短语、创建 API Key)。 +- **MCP**:Streamable HTTP 基址 `{BASE_URL}/mcp`,需 `Authorization: Bearer ` + `X-Encryption-Key: ` 请求头。 -**主密钥存储**:macOS → Keychain;Windows → Credential Manager;Linux → keyutils(会话级,重启后需再次 `secrets init`)。 +## 加密架构(混合 E2EE) -**从旧版(明文存储)升级**:升级后首次运行需执行 `secrets init` 即可(明文记录需手动重新 add 或通过 update 更新)。 +### 密钥派生 -## AI Agent 快速指南 +用户在 Web Dashboard 设置**密码短语**,浏览器使用 **Web Crypto API(PBKDF2-SHA256,600k 次迭代)**在本地派生 256-bit AES 密钥。 -这个 CLI 以 AI 使用优先设计。核心路径只有一条:**读取用 `search`,写入用 `add` / `update`**。 +- **Salt(32B)**:首次设置时在浏览器生成,存入服务端 `users.key_salt` +- **key_check**:派生密钥加密已知常量 `"secrets-mcp-key-check"`,存入 `users.key_check`,用于登录时验证密码短语 +- **服务端不存储原始密钥**,只存 salt + key_check -### 第一步:发现有哪些数据 +跨设备同步:新设备登录 → 输入相同密码短语 → 从服务端取 salt → 同样的 PBKDF2 → 得到相同密钥。 -```bash -# 列出所有记录摘要(默认最多 50 条,安全起步) -secrets search --summary --limit 20 +### 写入与读取流程 -# 按 namespace 过滤 -secrets search -n refining --summary --limit 20 +```mermaid +flowchart LR + subgraph Web["Web 浏览器(E2E)"] + P["密码短语"] --> K["PBKDF2 → 256-bit key"] + K --> Enc["AES-256-GCM 加密"] + K --> Dec["AES-256-GCM 解密"] + end -# 按最近更新排序 -secrets search --sort updated --limit 10 --summary + subgraph AI["AI 客户端(MCP)"] + HdrKey["X-Encryption-Key: hex"] + end + + subgraph Server["secrets-mcp 服务端"] + Middleware["请求中临时持有 key\n请求结束即丢弃"] + DB[(PostgreSQL\nsecrets.encrypted = 密文\nentries.metadata = 明文)] + end + + Enc -->|密文| Server + HdrKey -->|key + 请求| Middleware + Middleware <-->|加解密| DB + DB -->|密文| Dec ``` -`--summary` 只返回轻量字段(namespace、kind、name、tags、desc、updated_at),不含完整 metadata 和 secrets。 +### 两种客户端对比 -### 第二步:精确读取单条记录 +| | Web 浏览器 | AI 客户端(MCP) | +|---|---|---| +| 密钥位置 | 仅在浏览器内存 / sessionStorage | MCP 配置 headers 中 | +| 加解密位置 | 客户端(真正 E2E) | 服务端临时(请求级生命周期) | +| 安全边界 | 服务端零知识 | 依赖 TLS + 服务端内存隔离 | -```bash -# 精确定位(namespace + kind + name 
三元组) -secrets search -n refining --kind service --name gitea +### 敏感数据传输 -# 获取完整记录(含 secrets 字段名,无需 master_key) -secrets search -n refining --kind service --name gitea -o json +- **OAuth `client_secret`** 只存服务端环境变量,不发给浏览器 +- **API Key** 创建时原始 key 仅展示一次,库中只存 SHA-256 哈希 +- **X-Encryption-Key** 随 MCP 请求经 TLS 传输,服务端仅在请求处理期间持有(不持久化) +- **生产环境必须走 HTTPS/TLS** -# 直接提取单个 metadata 字段值(最短路径) -secrets search -n refining --kind service --name gitea -f metadata.url +## AI 客户端配置 -# 同时提取多个 metadata 字段 -secrets search -n refining --kind service --name gitea \ - -f metadata.url -f metadata.default_org +在 Web Dashboard 设置密码短语后,解锁页面会按客户端格式生成配置。常见客户端示例如下: -# 需要 secrets 时,改用 run(只注入 token 字段到子进程) -secrets run -n refining --kind service --name gitea -s token -- ./deploy.sh +`Cursor / Claude Desktop` 风格: -# 预览 run 会注入哪些变量(不执行命令) -secrets run -n refining --kind service --name gitea --dry-run +```json +{ + "mcpServers": { + "secrets": { + "url": "https://secrets.example.com/mcp", + "headers": { + "Authorization": "Bearer sk_abc123...", + "X-Encryption-Key": "a1b2c3...(64位hex)" + } + } + } +} ``` -`search` 展示 metadata 与 secrets 的字段名,不展示 secret 值本身;需要 secret 值时用 `run`(仅注入加密字段到子进程,不含 metadata)。用 `-s` 指定只注入特定字段,最小化注入范围。 +`OpenCode` 风格: -### 输出格式 - -| 场景 | 推荐命令 | -|------|----------| -| AI 解析 / 管道处理(默认) | json(pretty-printed) | -| 管道紧凑格式 | `-o json-compact` | -| 注入 secrets 到子进程环境 | `run` | -| 人类查看 | `-o text` | - -默认始终输出 JSON,无论是 TTY 还是管道。`text` 输出中时间按本地时区显示;`json/json-compact` 使用 UTC(RFC3339)。 - -```bash -# 默认 JSON 输出,直接可 jq 解析 -secrets search -n refining --kind service | jq '.[].name' - -# 需要 secrets 时,使用 run(-s 指定只注入特定字段) -secrets run -n refining --kind service --name gitea -s token -- ./deploy.sh - -# 预览 run 会注入哪些变量(不执行命令) -secrets run -n refining --kind service --name gitea --dry-run -``` - -## 完整命令参考 - -```bash -# 查看帮助(包含各子命令 EXAMPLES) -secrets --help -secrets init --help # 主密钥初始化 -secrets search --help -secrets add --help -secrets update --help -secrets delete --help -secrets config 
--help -secrets upgrade --help # 检查并更新 CLI 版本 -secrets export --help # 批量导出(JSON/TOML/YAML) -secrets import --help # 批量导入(JSON/TOML/YAML) - -# ── search ────────────────────────────────────────────────────────────────── -secrets search --summary --limit 20 # 发现概览 -secrets search -n refining --kind service # 按 namespace + kind -secrets search -n refining --kind service --name gitea # 精确查找 -secrets search -q mqtt # 关键词模糊搜索 -secrets search --tag hongkong # 按 tag 过滤 -secrets search -n refining --kind service --name gitea -f metadata.url # 提取 metadata 字段 -secrets search -n refining --kind service --name gitea -o json # 完整记录(含 secrets schema) -secrets search --sort updated --limit 10 --summary # 最近改动 -secrets search -n refining --summary --limit 10 --offset 10 # 翻页 - -# ── add ────────────────────────────────────────────────────────────────────── -secrets add -n refining --kind server --name my-server \ - --tag aliyun --tag shanghai \ - -m ip=10.0.0.1 -m desc="Example ECS" \ - -s username=root -s ssh_key=@./keys/server.pem - -# 多行文件直接写入嵌套 secret 字段 -secrets add -n refining --kind server --name my-server \ - -s credentials:content@./keys/server.pem - -# 使用 typed JSON 写入 secret(布尔、数字、数组、对象) -secrets add -n refining --kind service --name deploy-bot \ - -s enabled:=true \ - -s retry_count:=3 \ - -s scopes:='["repo","workflow"]' \ - -s extra:='{"region":"ap-east-1","verify_tls":true}' - -secrets add -n refining --kind service --name gitea \ - --tag gitea \ - -m url=https://code.example.com -m default_org=myorg \ - -s token= - -# ── update ─────────────────────────────────────────────────────────────────── -secrets update -n refining --kind server --name my-server -m ip=10.0.0.1 -secrets update -n refining --kind service --name gitea --add-tag production -s token= -secrets update -n refining --kind service --name mqtt --remove-meta old_port --remove-secret old_key -secrets update -n refining --kind server --name my-server --remove-secret credentials:content - -# ── delete 
─────────────────────────────────────────────────────────────────── -secrets delete -n refining --kind service --name legacy-mqtt # 精确删除单条(--kind 必填) -secrets delete -n refining --dry-run # 预览批量删除(不写入) -secrets delete -n ricnsmart # 批量删除整个 namespace -secrets delete -n ricnsmart --kind server # 批量删除指定 kind - -# ── init ───────────────────────────────────────────────────────────────────── -secrets init # 主密钥初始化(每台设备一次,主密码至少 8 位,派生后存钥匙串) - -# ── config ─────────────────────────────────────────────────────────────────── -secrets config set-db "postgres://postgres:@:/secrets" # 先验证再写入 -secrets config show # 密码脱敏展示 -secrets config path # 打印配置文件路径 - -# ── upgrade ────────────────────────────────────────────────────────────────── -secrets upgrade --check # 仅检查是否有新版本 -secrets upgrade # 下载、校验 SHA-256 并安装最新版(可通过 SECRETS_UPGRADE_URL 自托管) - -# ── export ──────────────────────────────────────────────────────────────────── -secrets export --file backup.json # 全量导出到 JSON -secrets export -n refining --file refining.toml # 按 namespace 导出为 TOML -secrets export -n refining --kind service --file svc.yaml # 按 kind 导出为 YAML -secrets export --tag production --file prod.json # 按 tag 过滤 -secrets export -q mqtt --file mqtt.json # 模糊搜索导出 -secrets export --no-secrets --file schema.json # 仅导出 schema(无需主密钥) -secrets export -n refining --format yaml # 输出到 stdout,指定格式 - -# ── import ──────────────────────────────────────────────────────────────────── -secrets import backup.json # 导入(冲突时报错) -secrets import --force refining.toml # 冲突时覆盖已有记录 -secrets import --dry-run backup.yaml # 预览将要执行的操作(不写入) - -# ── run ─────────────────────────────────────────────────────────────────────── -secrets run -n refining --kind service --name gitea -- ./deploy.sh # 注入全部 secrets -secrets run -n refining --kind service --name gitea -s token -- ./deploy.sh # 只注入 token 字段 -secrets run -n refining --kind service --name aliyun \ - -s access_key_id -s access_key_secret -- aliyun ecs DescribeInstances # 只注入指定字段 -secrets run 
--tag production -- env # 按 tag 批量注入 -secrets run -n refining --kind service --name gitea --dry-run # 预览变量映射 -secrets run -n refining --kind service --name gitea -s token --dry-run # 过滤后预览 -secrets run -n refining --kind service --name gitea --dry-run -o text # 人类可读预览 - -# ── 调试 ────────────────────────────────────────────────────────────────────── -secrets --verbose search -q mqtt -RUST_LOG=secrets=trace secrets search +```json +{ + "mcp": { + "secrets": { + "type": "remote", + "enabled": true, + "url": "https://secrets.example.com/mcp", + "headers": { + "Authorization": "Bearer sk_abc123...", + "X-Encryption-Key": "a1b2c3...(64位hex)" + } + } + } +} ``` ## 数据模型 -主表 `entries`(namespace、kind、name、tags、metadata)+ 子表 `secrets`(每个加密字段一行,含 field_name、encrypted)。首次连接自动建表;同时创建 `audit_log`、`entries_history`、`secrets_history` 等表。 +主表 **`entries`**(`namespace`、`kind`、`name`、`tags`、`metadata`,多租户时带 `user_id`)+ 子表 **`secrets`**(每行一个加密字段:`field_name`、`encrypted`)。另有 `entries_history`、`secrets_history`、`audit_log`,以及 **`users`**(含 `key_salt`、`key_check`、`key_params`)、**`oauth_accounts`**、**`api_keys`**。首次连库自动迁移建表。 | 位置 | 字段 | 说明 | |------|------|------| | entries | namespace | 一级隔离,如 `refining`、`ricnsmart` | -| entries | kind | 记录类型,如 `server`、`service`、`key`(可自由扩展) | -| entries | name | 人类可读唯一标识 | -| entries | tags | 多维标签,如 `["aliyun","hongkong"]` | -| entries | metadata | 明文描述(ip、desc、domains、key_ref 等) | -| secrets | field_name | 明文,search 可见,AI 可推断 run 会注入哪些变量 | -| secrets | encrypted | 仅加密值本身,AES-256-GCM | +| entries | kind | `server`、`service`、`key` 等(可扩展) | +| entries | name | 人类可读标识 | +| entries | metadata | 明文 JSON(ip、url、`key_ref` 等) | +| secrets | field_name | 明文字段名,便于 schema 展示 | +| secrets | encrypted | AES-GCM 密文(含 nonce) | +| users | key_salt | PBKDF2 salt(32B),首次设置密码短语时写入 | +| users | key_check | 派生密钥加密已知常量,用于验证密码短语 | +| users | key_params | 派生算法参数,如 `{"alg":"pbkdf2-sha256","iterations":600000}` | -`-m` / `--meta` 写入 `metadata`,`-s` / `--secret` 写入 `secrets` 
表的独立行。支持 `key=value`、`key=@file`、`key:=`,也支持 `credentials:content@./key.pem` 这类嵌套字段文件写入;删除时支持 `--remove-secret credentials:content`。加解密使用主密钥(由 `secrets init` 设置)。 +### PEM 共享(`key_ref`) -**PEM 共享**:同一 PEM 被多台服务器共享时,可存为 `kind=key` 记录,服务器通过 `metadata.key_ref` 引用;轮换只需 update 一条 key 记录,所有引用自动生效。详见 [AGENTS.md](AGENTS.md)。 - -### `-m` / `--meta` JSON 语法速查 - -`-m` 和 `-s` 走的是同一套解析规则,只是写入位置不同:`-m` 写到明文 `metadata`,适合端口、开关、标签、描述性配置等非敏感信息。 - -| 目标值 | 写法示例 | 实际存入 | -|------|------|------| -| 普通字符串 | `-m url=https://code.example.com` | `"https://code.example.com"` | -| 文件内容字符串 | `-m notes=@./service-notes.txt` | `"..."` | -| 布尔值 | `-m enabled:=true` | `true` | -| 数字 | `-m port:=3000` | `3000` | -| `null` | `-m deprecated_at:=null` | `null` | -| 数组 | `-m domains:='["code.example.com","git.example.com"]'` | `["code.example.com","git.example.com"]` | -| 对象 | `-m tls:='{"enabled":true,"redirect_http":true}'` | `{"enabled":true,"redirect_http":true}` | -| 嵌套路径 + JSON | `-m deploy:strategy:='{"type":"rolling","batch":2}'` | `{"deploy":{"strategy":{"type":"rolling","batch":2}}}` | - -常见规则: - -- `=` 表示按字符串存储。 -- `:=` 表示按 JSON 解析。 -- shell 中数组和对象建议整体用单引号包住。 -- 嵌套字段继续用冒号分隔:`-m runtime:max_open_conns:=20`。 - -示例:新增一条带 typed metadata 的记录 - -```bash -secrets add -n refining --kind service --name gitea \ - -m url=https://code.example.com \ - -m port:=3000 \ - -m enabled:=true \ - -m domains:='["code.example.com","git.example.com"]' \ - -m tls:='{"enabled":true,"redirect_http":true}' -``` - -示例:更新已有记录中的嵌套 metadata - -```bash -secrets update -n refining --kind service --name gitea \ - -m deploy:strategy:='{"type":"rolling","batch":2}' \ - -m runtime:max_open_conns:=20 -``` - -### `-s` / `--secret` JSON 语法速查 - -当你希望写入的不是普通字符串,而是 `true`、`123`、`null`、数组或对象时,用 `:=`,右侧按 JSON 解析。 - -| 目标值 | 写法示例 | 实际存入 | -|------|------|------| -| 普通字符串 | `-s token=abc123` | `"abc123"` | -| 文件内容字符串 | `-s ssh_key=@./id_ed25519` | `"-----BEGIN ..."` | -| 布尔值 | `-s enabled:=true` | `true` | -| 数字 | `-s retry_count:=3` | 
`3` | -| `null` | `-s deprecated_at:=null` | `null` | -| 数组 | `-s scopes:='["repo","workflow"]'` | `["repo","workflow"]` | -| 对象 | `-s extra:='{"region":"ap-east-1","verify_tls":true}'` | `{"region":"ap-east-1","verify_tls":true}` | -| 嵌套路径 + JSON | `-s auth:policy:='{"mfa":true,"ttl":3600}'` | `{"auth":{"policy":{"mfa":true,"ttl":3600}}}` | - -常见规则: - -- `=` 表示按字符串存储,不做 JSON 解析。 -- `:=` 表示按 JSON 解析,适合布尔、数字、数组、对象、`null`。 -- shell 里对象和数组通常要整体加引号,推荐单引号:`-s flags:='["a","b"]'`。 -- 嵌套字段继续用冒号分隔:`-s credentials:enabled:=true`。 -- 如果你就是想存一个“JSON 字符串字面量”,可以写成 `-s note:='"hello"'`,但大多数字符串场景直接用 `=` 更直观。 - -示例:新增一条同时包含字符串、文件、布尔、数组、对象的记录 - -```bash -secrets add -n refining --kind service --name deploy-bot \ - -s token=abc123 \ - -s ssh_key=@./keys/deploy-bot.pem \ - -s enabled:=true \ - -s scopes:='["repo","workflow"]' \ - -s policy:='{"ttl":3600,"mfa":true}' -``` - -示例:更新已有记录中的嵌套 JSON 字段 - -```bash -secrets update -n refining --kind service --name deploy-bot \ - -s auth:config:='{"issuer":"gitea","rotate":true}' \ - -s auth:retry:=5 -``` +同一 PEM 可被多条 `server` 记录引用:将 PEM 存为 `kind=key` 的 entry,在服务器条目的 `metadata.key_ref` 中写 key 的名称;轮换时只更新 key 对应记录即可。 ## 审计日志 -`add`、`update`、`delete` 操作成功后自动向 `audit_log` 表写入一条记录,包含操作类型、操作对象和变更摘要(不含 secret 值)。操作者取自 `$USER` 环境变量。 +`add`、`update`、`delete` 等写操作写入 **`audit_log`**(操作类型、对象、摘要,不含 secret 明文)。 ```sql --- 查看最近 20 条审计记录 SELECT action, namespace, kind, name, actor, detail, created_at FROM audit_log ORDER BY created_at DESC @@ -312,49 +153,19 @@ LIMIT 20; ## 项目结构 ``` -src/ - main.rs # CLI 入口(clap),含各子命令 after_help 示例 - output.rs # OutputMode 枚举 + TTY 检测 - config.rs # 配置读写(~/.config/secrets/config.toml) - db.rs # 连接池 + auto-migrate(entries + secrets + entries_history + secrets_history + audit_log + kv_config) - crypto.rs # AES-256-GCM 加解密、Argon2id 派生、OS 钥匙串 - models.rs # Entry + SecretField 结构体 - audit.rs # 审计日志写入(audit_log 表) - commands/ - init.rs # 主密钥初始化(首次/新设备) - add.rs # upsert entries + secrets 行,支持 -o json - config.rs # config 
set-db/show/path - search.rs # 多条件查询,展示 secrets schema(-f/-o/--summary/--limit/--offset/--sort) - delete.rs # 删除(CASCADE 删除 secrets) - update.rs # 增量更新(tags/metadata + secrets 行级 UPSERT/DELETE) - rollback.rs # rollback / history:按 entry_version 恢复 - run.rs # run,仅 secrets 逐字段解密 + key_ref 引用解析(不含 metadata) - upgrade.rs # 从 Gitea Release 自更新 - export_cmd.rs # export:批量导出,支持 JSON/TOML/YAML,含解密明文 - import_cmd.rs # import:批量导入,冲突检测,dry-run,重新加密写入 +Cargo.toml +crates/secrets-core/ # db / crypto / models / audit / service +crates/secrets-mcp/ # MCP HTTP、Web、OAuth、API Key scripts/ - setup-gitea-actions.sh # 配置 Gitea Actions 变量与 Secrets +deploy/ # systemd、.env 示例 ``` ## CI/CD(Gitea Actions) -推送 `main` 分支时自动:fmt/clippy/test 检查 → Linux/macOS/Windows 构建 → 上传二进制与 `.sha256` 摘要 → 所有平台成功后发布 Release。 - -**首次使用需配置 Actions 变量和 Secrets:** +见 [`.gitea/workflows/secrets.yml`](.gitea/workflows/secrets.yml)。变更 `crates/**`、`deploy/**`、根目录 `Cargo.toml`/`Cargo.lock` 并推送到配置的分支时:fmt / clippy / test → 构建 `x86_64-unknown-linux-musl` → tag `secrets-mcp-` 与 Release 产物 → 可选 SSH 部署。 ```bash -# 需有 ~/.config/gitea/config.env(GITEA_URL、GITEA_TOKEN、GITEA_WEBHOOK_URL) -./scripts/setup-gitea-actions.sh +./scripts/setup-gitea-actions.sh # 配置 Gitea 变量与 Secrets ``` -- `RELEASE_TOKEN`(Secret):Gitea PAT,用于创建 Release 上传二进制 -- `WEBHOOK_URL`(Variable):飞书通知,可选 -- **注意**:Secret/Variable 的 `data`/`value` 字段需传入原始值,不要 base64 编码 - -当前 Release 预编译产物覆盖: -- Linux `x86_64-unknown-linux-musl` -- macOS Apple Silicon `aarch64-apple-darwin` -- macOS Intel `x86_64-apple-darwin`(由 ARM mac runner 交叉编译) -- Windows `x86_64-pc-windows-msvc` - -详见 [AGENTS.md](AGENTS.md)。 +详见 [AGENTS.md](AGENTS.md)(发版规则、代码规范)。 diff --git a/crates/secrets-core/Cargo.toml b/crates/secrets-core/Cargo.toml new file mode 100644 index 0000000..1772c31 --- /dev/null +++ b/crates/secrets-core/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "secrets-core" +version = "0.1.0" +edition.workspace = true + +[lib] +name = "secrets_core" +path = "src/lib.rs" + 
+[dependencies]
+aes-gcm.workspace = true
+anyhow.workspace = true
+chrono.workspace = true
+rand.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+serde_yaml.workspace = true
+sha2.workspace = true
+sqlx.workspace = true
+toml.workspace = true
+tokio.workspace = true
+tracing.workspace = true
+uuid.workspace = true
+
+[dev-dependencies]
+tempfile = "3"
diff --git a/src/audit.rs b/crates/secrets-core/src/audit.rs
similarity index 100%
rename from src/audit.rs
rename to crates/secrets-core/src/audit.rs
diff --git a/crates/secrets-core/src/config.rs b/crates/secrets-core/src/config.rs
new file mode 100644
index 0000000..966a81e
--- /dev/null
+++ b/crates/secrets-core/src/config.rs
@@ -0,0 +1,20 @@
+use anyhow::Result;
+
+/// Resolve database URL from environment.
+/// Priority: `SECRETS_DATABASE_URL` env var → error.
+pub fn resolve_db_url(override_url: &str) -> Result<String> {
+    if !override_url.is_empty() {
+        return Ok(override_url.to_string());
+    }
+
+    if let Ok(url) = std::env::var("SECRETS_DATABASE_URL")
+        && !url.is_empty()
+    {
+        return Ok(url);
+    }
+
+    anyhow::bail!(
+        "Database not configured. 
Set the SECRETS_DATABASE_URL environment variable.\n\ + Example: SECRETS_DATABASE_URL=postgres://user:pass@host:port/dbname" + ) +} diff --git a/src/crypto.rs b/crates/secrets-core/src/crypto.rs similarity index 59% rename from src/crypto.rs rename to crates/secrets-core/src/crypto.rs index 171a4e4..14b2990 100644 --- a/src/crypto.rs +++ b/crates/secrets-core/src/crypto.rs @@ -3,39 +3,10 @@ use aes_gcm::{ aead::{Aead, AeadCore, KeyInit, OsRng}, }; use anyhow::{Context, Result, bail}; -use argon2::{Argon2, Params, Version}; use serde_json::Value; -const KEYRING_SERVICE: &str = "secrets-cli"; -const KEYRING_USER: &str = "master-key"; const NONCE_LEN: usize = 12; -// Argon2id parameters — OWASP recommended (m=64 MiB, t=3 iterations, p=4 threads, key=32 B) -const ARGON2_M_COST: u32 = 65_536; -const ARGON2_T_COST: u32 = 3; -const ARGON2_P_COST: u32 = 4; -const ARGON2_KEY_LEN: usize = 32; - -// ─── Argon2id key derivation ───────────────────────────────────────────────── - -/// Derive a 32-byte Master Key from a password and salt using Argon2id. -/// Parameters: m=65536 KiB (64 MB), t=3, p=4 — OWASP recommended. -pub fn derive_master_key(password: &str, salt: &[u8]) -> Result<[u8; 32]> { - let params = Params::new( - ARGON2_M_COST, - ARGON2_T_COST, - ARGON2_P_COST, - Some(ARGON2_KEY_LEN), - ) - .context("invalid Argon2id params")?; - let argon2 = Argon2::new(argon2::Algorithm::Argon2id, Version::V0x13, params); - let mut key = [0u8; 32]; - argon2 - .hash_password_into(password.as_bytes(), salt, &mut key) - .map_err(|e| anyhow::anyhow!("Argon2id derivation failed: {}", e))?; - Ok(key) -} - // ─── AES-256-GCM encrypt / decrypt ─────────────────────────────────────────── /// Encrypt plaintext bytes with AES-256-GCM. 
@@ -84,20 +55,43 @@ pub fn decrypt_json(master_key: &[u8; 32], data: &[u8]) -> Result { serde_json::from_slice(&bytes).context("deserialize decrypted JSON") } -// ─── OS Keychain ────────────────────────────────────────────────────────────── +// ─── Per-user key management (DEPRECATED — kept only for migration) ─────────── -/// Load the Master Key from the OS Keychain. -/// Returns an error with a helpful message if it hasn't been initialized. -pub fn load_master_key() -> Result<[u8; 32]> { - let entry = - keyring::Entry::new(KEYRING_SERVICE, KEYRING_USER).context("create keychain entry")?; - let hex = entry.get_password().map_err(|_| { - anyhow::anyhow!("Master key not found in keychain. Run `secrets init` first.") - })?; - let bytes = hex::decode_hex(&hex)?; +/// Generate a new random 32-byte per-user encryption key. +#[allow(dead_code)] +pub fn generate_user_key() -> [u8; 32] { + use aes_gcm::aead::rand_core::RngCore; + let mut key = [0u8; 32]; + OsRng.fill_bytes(&mut key); + key +} + +/// Wrap a per-user key with the server master key using AES-256-GCM. +#[allow(dead_code)] +pub fn wrap_user_key(server_master_key: &[u8; 32], user_key: &[u8; 32]) -> Result> { + encrypt(server_master_key, user_key.as_ref()) +} + +/// Unwrap a per-user key using the server master key. +#[allow(dead_code)] +pub fn unwrap_user_key(server_master_key: &[u8; 32], wrapped: &[u8]) -> Result<[u8; 32]> { + let bytes = decrypt(server_master_key, wrapped)?; + if bytes.len() != 32 { + bail!("unwrapped user key has unexpected length {}", bytes.len()); + } + let mut key = [0u8; 32]; + key.copy_from_slice(&bytes); + Ok(key) +} + +// ─── Client-supplied key extraction ────────────────────────────────────────── + +/// Parse a 64-char hex string (from X-Encryption-Key header) into a 32-byte key. 
+pub fn extract_key_from_hex(hex_str: &str) -> Result<[u8; 32]> { + let bytes = hex::decode_hex(hex_str.trim())?; if bytes.len() != 32 { bail!( - "stored master key has unexpected length {}; re-run `secrets init`", + "X-Encryption-Key must be 64 hex chars (32 bytes), got {} bytes", bytes.len() ); } @@ -106,20 +100,36 @@ pub fn load_master_key() -> Result<[u8; 32]> { Ok(key) } -/// Store the Master Key in the OS Keychain (overwrites any existing value). -pub fn store_master_key(key: &[u8; 32]) -> Result<()> { - let entry = - keyring::Entry::new(KEYRING_SERVICE, KEYRING_USER).context("create keychain entry")?; - let hex = hex::encode_hex(key); - entry - .set_password(&hex) - .map_err(|e| anyhow::anyhow!("keychain write failed: {}", e))?; - Ok(()) +// ─── Server master key ──────────────────────────────────────────────────────── + +/// Load the server master key from `SERVER_MASTER_KEY` environment variable (64 hex chars). +pub fn load_master_key_auto() -> Result<[u8; 32]> { + let hex_str = std::env::var("SERVER_MASTER_KEY").map_err(|_| { + anyhow::anyhow!( + "SERVER_MASTER_KEY is not set. 
\ + Generate one with: openssl rand -hex 32" + ) + })?; + + if hex_str.is_empty() { + bail!("SERVER_MASTER_KEY is set but empty"); + } + + let bytes = hex::decode_hex(hex_str.trim())?; + if bytes.len() != 32 { + bail!( + "SERVER_MASTER_KEY must be 64 hex chars (32 bytes), got {} bytes", + bytes.len() + ); + } + let mut key = [0u8; 32]; + key.copy_from_slice(&bytes); + Ok(key) } -// ─── Minimal hex helpers (avoid extra dep) ──────────────────────────────────── +// ─── Public hex helpers ─────────────────────────────────────────────────────── -mod hex { +pub mod hex { use anyhow::{Result, bail}; pub fn encode_hex(bytes: &[u8]) -> String { @@ -127,6 +137,7 @@ mod hex { } pub fn decode_hex(s: &str) -> Result> { + let s = s.trim(); if !s.len().is_multiple_of(2) { bail!("hex string has odd length"); } @@ -156,7 +167,6 @@ mod tests { let plaintext = b"hello world"; let enc1 = encrypt(&key, plaintext).unwrap(); let enc2 = encrypt(&key, plaintext).unwrap(); - // Different nonces → different ciphertexts assert_ne!(enc1, enc2); } @@ -178,18 +188,20 @@ mod tests { } #[test] - fn derive_master_key_deterministic() { - let salt = b"fixed_test_salt_"; - let k1 = derive_master_key("password", salt).unwrap(); - let k2 = derive_master_key("password", salt).unwrap(); - assert_eq!(k1, k2); + fn user_key_wrap_unwrap_roundtrip() { + let server_key = [0xABu8; 32]; + let user_key = [0xCDu8; 32]; + let wrapped = wrap_user_key(&server_key, &user_key).unwrap(); + let unwrapped = unwrap_user_key(&server_key, &wrapped).unwrap(); + assert_eq!(unwrapped, user_key); } #[test] - fn derive_master_key_different_passwords() { - let salt = b"fixed_test_salt_"; - let k1 = derive_master_key("password1", salt).unwrap(); - let k2 = derive_master_key("password2", salt).unwrap(); - assert_ne!(k1, k2); + fn user_key_wrap_wrong_server_key_fails() { + let server_key1 = [0xABu8; 32]; + let server_key2 = [0xEFu8; 32]; + let user_key = [0xCDu8; 32]; + let wrapped = wrap_user_key(&server_key1, &user_key).unwrap(); 
+ assert!(unwrap_user_key(&server_key2, &wrapped).is_err()); } } diff --git a/src/db.rs b/crates/secrets-core/src/db.rs similarity index 70% rename from src/db.rs rename to crates/secrets-core/src/db.rs index a66f06d..0a4ed93 100644 --- a/src/db.rs +++ b/crates/secrets-core/src/db.rs @@ -8,7 +8,7 @@ use crate::audit::current_actor; pub async fn create_pool(database_url: &str) -> Result { tracing::debug!("connecting to database"); let pool = PgPoolOptions::new() - .max_connections(5) + .max_connections(10) .acquire_timeout(std::time::Duration::from_secs(5)) .connect(database_url) .await?; @@ -20,9 +20,10 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { tracing::debug!("running migrations"); sqlx::raw_sql( r#" - -- ── entries: top-level entities (server, service, key, …) ────────────── + -- ── entries: top-level entities ───────────────────────────────────────── CREATE TABLE IF NOT EXISTS entries ( id UUID PRIMARY KEY DEFAULT uuidv7(), + user_id UUID, namespace VARCHAR(64) NOT NULL, kind VARCHAR(64) NOT NULL, name VARCHAR(256) NOT NULL, @@ -30,16 +31,26 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { metadata JSONB NOT NULL DEFAULT '{}', version BIGINT NOT NULL DEFAULT 1, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - UNIQUE(namespace, kind, name) + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ); + -- Legacy unique constraint without user_id (single-user mode) + CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_legacy + ON entries(namespace, kind, name) + WHERE user_id IS NULL; + + -- Multi-user unique constraint + CREATE UNIQUE INDEX IF NOT EXISTS idx_entries_unique_user + ON entries(user_id, namespace, kind, name) + WHERE user_id IS NOT NULL; + CREATE INDEX IF NOT EXISTS idx_entries_namespace ON entries(namespace); CREATE INDEX IF NOT EXISTS idx_entries_kind ON entries(kind); + CREATE INDEX IF NOT EXISTS idx_entries_user_id ON entries(user_id) WHERE user_id IS NOT NULL; CREATE INDEX IF NOT EXISTS 
idx_entries_tags ON entries USING GIN(tags); CREATE INDEX IF NOT EXISTS idx_entries_metadata ON entries USING GIN(metadata jsonb_path_ops); - -- ── secrets: one row per encrypted field, plaintext schema metadata ──── + -- ── secrets: one row per encrypted field ───────────────────────────────── CREATE TABLE IF NOT EXISTS secrets ( id UUID PRIMARY KEY DEFAULT uuidv7(), entry_id UUID NOT NULL REFERENCES entries(id) ON DELETE CASCADE, @@ -53,13 +64,7 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { CREATE INDEX IF NOT EXISTS idx_secrets_entry_id ON secrets(entry_id); - -- ── kv_config: global key-value store (Argon2id salt, etc.) ──────────── - CREATE TABLE IF NOT EXISTS kv_config ( - key TEXT PRIMARY KEY, - value BYTEA NOT NULL - ); - - -- ── audit_log: append-only operation log ──────────────────────────────── + -- ── audit_log: append-only operation log ───────────────────────────────── CREATE TABLE IF NOT EXISTS audit_log ( id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, action VARCHAR(32) NOT NULL, @@ -74,7 +79,7 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { CREATE INDEX IF NOT EXISTS idx_audit_log_created ON audit_log(created_at DESC); CREATE INDEX IF NOT EXISTS idx_audit_log_ns_kind ON audit_log(namespace, kind); - -- ── entries_history: entry-level snapshot (tags + metadata) ───────────── + -- ── entries_history ─────────────────────────────────────────────────────── CREATE TABLE IF NOT EXISTS entries_history ( id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, entry_id UUID NOT NULL, @@ -94,7 +99,7 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { CREATE INDEX IF NOT EXISTS idx_entries_history_ns_kind_name ON entries_history(namespace, kind, name, version DESC); - -- ── secrets_history: field-level snapshot ─────────────────────────────── + -- ── secrets_history: field-level snapshot ──────────────────────────────── CREATE TABLE IF NOT EXISTS secrets_history ( id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, entry_id UUID NOT NULL, 
@@ -111,6 +116,37 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { ON secrets_history(entry_id, entry_version DESC); CREATE INDEX IF NOT EXISTS idx_secrets_history_secret_id ON secrets_history(secret_id); + + -- ── users ───────────────────────────────────────────────────────────────── + CREATE TABLE IF NOT EXISTS users ( + id UUID PRIMARY KEY DEFAULT uuidv7(), + email VARCHAR(256), + name VARCHAR(256) NOT NULL DEFAULT '', + avatar_url TEXT, + key_salt BYTEA, + key_check BYTEA, + key_params JSONB, + api_key TEXT UNIQUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() + ); + + -- ── oauth_accounts: per-provider identity links ─────────────────────────── + CREATE TABLE IF NOT EXISTS oauth_accounts ( + id UUID PRIMARY KEY DEFAULT uuidv7(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + provider VARCHAR(32) NOT NULL, + provider_id VARCHAR(256) NOT NULL, + email VARCHAR(256), + name VARCHAR(256), + avatar_url TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(provider, provider_id) + ); + + CREATE INDEX IF NOT EXISTS idx_oauth_accounts_user ON oauth_accounts(user_id); + CREATE UNIQUE INDEX IF NOT EXISTS idx_oauth_accounts_user_provider + ON oauth_accounts(user_id, provider); "#, ) .execute(pool) @@ -119,7 +155,7 @@ pub async fn migrate(pool: &PgPool) -> Result<()> { Ok(()) } -// ── Entry-level history snapshot ──────────────────────────────────────────── +// ── Entry-level history snapshot ───────────────────────────────────────────── pub struct EntrySnapshotParams<'a> { pub entry_id: uuid::Uuid, @@ -132,7 +168,6 @@ pub struct EntrySnapshotParams<'a> { pub metadata: &'a Value, } -/// Snapshot an entry row into `entries_history` before a write operation. 
pub async fn snapshot_entry_history( tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, p: EntrySnapshotParams<'_>, @@ -157,7 +192,7 @@ pub async fn snapshot_entry_history( Ok(()) } -// ── Secret field-level history snapshot ───────────────────────────────────── +// ── Secret field-level history snapshot ────────────────────────────────────── pub struct SecretSnapshotParams<'a> { pub entry_id: uuid::Uuid, @@ -168,7 +203,6 @@ pub struct SecretSnapshotParams<'a> { pub action: &'a str, } -/// Snapshot a single secret field into `secrets_history`. pub async fn snapshot_secret_history( tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, p: SecretSnapshotParams<'_>, @@ -191,25 +225,4 @@ pub async fn snapshot_secret_history( Ok(()) } -// ── Argon2 salt helpers ────────────────────────────────────────────────────── - -/// Load the Argon2id salt from the database. -pub async fn load_argon2_salt(pool: &PgPool) -> Result>> { - let row: Option<(Vec,)> = - sqlx::query_as("SELECT value FROM kv_config WHERE key = 'argon2_salt'") - .fetch_optional(pool) - .await?; - Ok(row.map(|(v,)| v)) -} - -/// Store the Argon2id salt in the database (only called once on first device init). 
-pub async fn store_argon2_salt(pool: &PgPool, salt: &[u8]) -> Result<()> { - sqlx::query( - "INSERT INTO kv_config (key, value) VALUES ('argon2_salt', $1) \ - ON CONFLICT (key) DO NOTHING", - ) - .bind(salt) - .execute(pool) - .await?; - Ok(()) -} +// ── DB helpers ──────────────────────────────────────────────────────────────── diff --git a/crates/secrets-core/src/lib.rs b/crates/secrets-core/src/lib.rs new file mode 100644 index 0000000..5c81284 --- /dev/null +++ b/crates/secrets-core/src/lib.rs @@ -0,0 +1,6 @@ +pub mod audit; +pub mod config; +pub mod crypto; +pub mod db; +pub mod models; +pub mod service; diff --git a/src/models.rs b/crates/secrets-core/src/models.rs similarity index 83% rename from src/models.rs rename to crates/secrets-core/src/models.rs index a5c3a2a..9b4e25f 100644 --- a/src/models.rs +++ b/crates/secrets-core/src/models.rs @@ -61,20 +61,10 @@ pub enum ExportFormat { Yaml, } -impl ExportFormat { - /// Infer format from file extension (.json / .toml / .yaml / .yml). - pub fn from_extension(path: &str) -> anyhow::Result { - let ext = path.rsplit('.').next().unwrap_or("").to_lowercase(); - Self::from_str(&ext).map_err(|_| { - anyhow::anyhow!( - "Cannot infer format from extension '.{}'. Use --format json|toml|yaml", - ext - ) - }) - } +impl std::str::FromStr for ExportFormat { + type Err = anyhow::Error; - /// Parse from --format CLI value. - pub fn from_str(s: &str) -> anyhow::Result { + fn from_str(s: &str) -> Result { match s.to_lowercase().as_str() { "json" => Ok(Self::Json), "toml" => Ok(Self::Toml), @@ -82,6 +72,19 @@ impl ExportFormat { other => anyhow::bail!("Unknown format '{}'. Expected: json, toml, or yaml", other), } } +} + +impl ExportFormat { + /// Infer format from file extension (.json / .toml / .yaml / .yml). + pub fn from_extension(path: &str) -> anyhow::Result { + let ext = path.rsplit('.').next().unwrap_or("").to_lowercase(); + ext.parse().map_err(|_| { + anyhow::anyhow!( + "Cannot infer format from extension '.{}'. 
Use --format json|toml|yaml",
+                ext
+            )
+        })
+    }
 
     /// Serialize ExportData to a string in this format.
     pub fn serialize(&self, data: &ExportData) -> anyhow::Result<String> {
@@ -136,6 +139,41 @@ pub struct ExportEntry {
     pub secrets: Option>,
 }
 
+// ── Multi-user models ──────────────────────────────────────────────────────────
+
+/// A registered user (created on first OAuth login).
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct User {
+    pub id: Uuid,
+    pub email: Option<String>,
+    pub name: String,
+    pub avatar_url: Option<String>,
+    /// PBKDF2 salt (32 B). NULL until user sets up passphrase.
+    pub key_salt: Option<Vec<u8>>,
+    /// AES-256-GCM encryption of the known constant "secrets-mcp-key-check".
+    /// Used to verify the passphrase without storing the key itself.
+    pub key_check: Option<Vec<u8>>,
+    /// Key derivation parameters, e.g. {"alg":"pbkdf2-sha256","iterations":600000}.
+    pub key_params: Option<Value>,
+    /// Plaintext API key for MCP Bearer authentication. Auto-created on first login.
+    pub api_key: Option<String>,
+    pub created_at: DateTime<Utc>,
+    pub updated_at: DateTime<Utc>,
+}
+
+/// An OAuth account linked to a user.
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct OauthAccount {
+    pub id: Uuid,
+    pub user_id: Uuid,
+    pub provider: String,
+    pub provider_id: String,
+    pub email: Option<String>,
+    pub name: Option<String>,
+    pub avatar_url: Option<String>,
+    pub created_at: DateTime<Utc>,
+}
+
 // ── TOML ↔ JSON value conversion ──────────────────────────────────────────────
 
 /// Convert a serde_json Value to a toml Value. 
diff --git a/crates/secrets-core/src/service/add.rs b/crates/secrets-core/src/service/add.rs new file mode 100644 index 0000000..6649c3d --- /dev/null +++ b/crates/secrets-core/src/service/add.rs @@ -0,0 +1,383 @@ +use anyhow::Result; +use serde_json::{Map, Value}; +use sqlx::PgPool; +use std::fs; +use uuid::Uuid; + +use crate::crypto; +use crate::db; +use crate::models::EntryRow; + +// ── Key/value parsing helpers ───────────────────────────────────────────────── + +pub fn parse_kv(entry: &str) -> Result<(Vec, Value)> { + if let Some((key, json_str)) = entry.split_once(":=") { + let val: Value = serde_json::from_str(json_str).map_err(|e| { + anyhow::anyhow!( + "Invalid JSON value for key '{}': {} (use key=value for plain strings)", + key, + e + ) + })?; + return Ok((parse_key_path(key)?, val)); + } + + if let Some((key, raw_val)) = entry.split_once('=') { + let value = if let Some(path) = raw_val.strip_prefix('@') { + fs::read_to_string(path) + .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))? + } else { + raw_val.to_string() + }; + return Ok((parse_key_path(key)?, Value::String(value))); + } + + if let Some((key, path)) = entry.split_once('@') { + let value = fs::read_to_string(path) + .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?; + return Ok((parse_key_path(key)?, Value::String(value))); + } + + anyhow::bail!( + "Invalid format '{}'. 
Expected: key=value, key=@file, nested:key@file, or key:=<json>",
+        entry
+    )
+}
+
+pub fn build_json(entries: &[String]) -> Result<Value> {
+    let mut map = Map::new();
+    for entry in entries {
+        let (path, value) = parse_kv(entry)?;
+        insert_path(&mut map, &path, value)?;
+    }
+    Ok(Value::Object(map))
+}
+
+pub fn key_path_to_string(path: &[String]) -> String {
+    path.join(":")
+}
+
+pub fn collect_key_paths(entries: &[String]) -> Result<Vec<String>> {
+    entries
+        .iter()
+        .map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path)))
+        .collect()
+}
+
+pub fn collect_field_paths(entries: &[String]) -> Result<Vec<String>> {
+    entries
+        .iter()
+        .map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path)))
+        .collect()
+}
+
+pub fn parse_key_path(key: &str) -> Result<Vec<String>> {
+    let path: Vec<String> = key
+        .split(':')
+        .map(str::trim)
+        .map(ToOwned::to_owned)
+        .collect();
+
+    if path.is_empty() || path.iter().any(|part| part.is_empty()) {
+        anyhow::bail!(
+            "Invalid key path '{}'. Use non-empty segments like 'credentials:content'.",
+            key
+        );
+    }
+    Ok(path)
+}
+
+pub fn insert_path(map: &mut Map<String, Value>, path: &[String], value: Value) -> Result<()> {
+    if path.is_empty() {
+        anyhow::bail!("Key path cannot be empty");
+    }
+    if path.len() == 1 {
+        map.insert(path[0].clone(), value);
+        return Ok(());
+    }
+    let head = path[0].clone();
+    let tail = &path[1..];
+    match map.entry(head.clone()) {
+        serde_json::map::Entry::Vacant(entry) => {
+            let mut child = Map::new();
+            insert_path(&mut child, tail, value)?;
+            entry.insert(Value::Object(child));
+        }
+        serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() {
+            Value::Object(child) => insert_path(child, tail, value)?,
+            _ => {
+                anyhow::bail!(
+                    "Cannot set nested key '{}' because '{}' is already a non-object value",
+                    key_path_to_string(path),
+                    head
+                );
+            }
+        },
+    }
+    Ok(())
+}
+
+pub fn remove_path(map: &mut Map<String, Value>, path: &[String]) -> Result<bool> {
+    if path.is_empty() {
+        anyhow::bail!("Key path cannot be empty");
+    }
+    if path.len() == 1 { 
+ return Ok(map.remove(&path[0]).is_some()); + } + let Some(value) = map.get_mut(&path[0]) else { + return Ok(false); + }; + let Value::Object(child) = value else { + return Ok(false); + }; + let removed = remove_path(child, &path[1..])?; + if child.is_empty() { + map.remove(&path[0]); + } + Ok(removed) +} + +pub fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> { + match value { + Value::Object(map) => { + let mut out = Vec::new(); + for (k, v) in map { + let full_key = if prefix.is_empty() { + k.clone() + } else { + format!("{}.{}", prefix, k) + }; + out.extend(flatten_json_fields(&full_key, v)); + } + out + } + other => vec![(prefix.to_string(), other.clone())], + } +} + +// ── AddResult ───────────────────────────────────────────────────────────────── + +#[derive(Debug, serde::Serialize)] +pub struct AddResult { + pub namespace: String, + pub kind: String, + pub name: String, + pub tags: Vec, + pub meta_keys: Vec, + pub secret_keys: Vec, +} + +pub struct AddParams<'a> { + pub namespace: &'a str, + pub kind: &'a str, + pub name: &'a str, + pub tags: &'a [String], + pub meta_entries: &'a [String], + pub secret_entries: &'a [String], + /// Optional user_id for multi-user isolation (None = single-user CLI mode) + pub user_id: Option, +} + +pub async fn run(pool: &PgPool, params: AddParams<'_>, master_key: &[u8; 32]) -> Result { + let metadata = build_json(params.meta_entries)?; + let secret_json = build_json(params.secret_entries)?; + let meta_keys = collect_key_paths(params.meta_entries)?; + let secret_keys = collect_key_paths(params.secret_entries)?; + + let mut tx = pool.begin().await?; + + // Fetch existing entry (user-scoped or global depending on user_id) + let existing: Option = if let Some(uid) = params.user_id { + sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ + WHERE user_id = $1 AND namespace = $2 AND kind = $3 AND name = $4", + ) + .bind(uid) + .bind(params.namespace) + .bind(params.kind) + 
.bind(params.name) + .fetch_optional(&mut *tx) + .await? + } else { + sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ + WHERE user_id IS NULL AND namespace = $1 AND kind = $2 AND name = $3", + ) + .bind(params.namespace) + .bind(params.kind) + .bind(params.name) + .fetch_optional(&mut *tx) + .await? + }; + + if let Some(ref ex) = existing + && let Err(e) = db::snapshot_entry_history( + &mut tx, + db::EntrySnapshotParams { + entry_id: ex.id, + namespace: params.namespace, + kind: params.kind, + name: params.name, + version: ex.version, + action: "add", + tags: &ex.tags, + metadata: &ex.metadata, + }, + ) + .await + { + tracing::warn!(error = %e, "failed to snapshot entry history before upsert"); + } + + let entry_id: Uuid = if let Some(uid) = params.user_id { + sqlx::query_scalar( + r#"INSERT INTO entries (user_id, namespace, kind, name, tags, metadata, version, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, 1, NOW()) + ON CONFLICT (user_id, namespace, kind, name) WHERE user_id IS NOT NULL + DO UPDATE SET + tags = EXCLUDED.tags, + metadata = EXCLUDED.metadata, + version = entries.version + 1, + updated_at = NOW() + RETURNING id"#, + ) + .bind(uid) + .bind(params.namespace) + .bind(params.kind) + .bind(params.name) + .bind(params.tags) + .bind(&metadata) + .fetch_one(&mut *tx) + .await? + } else { + sqlx::query_scalar( + r#"INSERT INTO entries (namespace, kind, name, tags, metadata, version, updated_at) + VALUES ($1, $2, $3, $4, $5, 1, NOW()) + ON CONFLICT (namespace, kind, name) WHERE user_id IS NULL + DO UPDATE SET + tags = EXCLUDED.tags, + metadata = EXCLUDED.metadata, + version = entries.version + 1, + updated_at = NOW() + RETURNING id"#, + ) + .bind(params.namespace) + .bind(params.kind) + .bind(params.name) + .bind(params.tags) + .bind(&metadata) + .fetch_one(&mut *tx) + .await? 
+ }; + + let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1") + .bind(entry_id) + .fetch_one(&mut *tx) + .await?; + + if existing.is_some() { + #[derive(sqlx::FromRow)] + struct ExistingField { + id: Uuid, + field_name: String, + encrypted: Vec, + } + let existing_fields: Vec = + sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1") + .bind(entry_id) + .fetch_all(&mut *tx) + .await?; + + for f in &existing_fields { + if let Err(e) = db::snapshot_secret_history( + &mut tx, + db::SecretSnapshotParams { + entry_id, + secret_id: f.id, + entry_version: new_entry_version - 1, + field_name: &f.field_name, + encrypted: &f.encrypted, + action: "add", + }, + ) + .await + { + tracing::warn!(error = %e, "failed to snapshot secret field history"); + } + } + + sqlx::query("DELETE FROM secrets WHERE entry_id = $1") + .bind(entry_id) + .execute(&mut *tx) + .await?; + } + + let flat_fields = flatten_json_fields("", &secret_json); + for (field_name, field_value) in &flat_fields { + let encrypted = crypto::encrypt_json(master_key, field_value)?; + sqlx::query("INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3)") + .bind(entry_id) + .bind(field_name) + .bind(&encrypted) + .execute(&mut *tx) + .await?; + } + + crate::audit::log_tx( + &mut tx, + "add", + params.namespace, + params.kind, + params.name, + serde_json::json!({ + "tags": params.tags, + "meta_keys": meta_keys, + "secret_keys": secret_keys, + }), + ) + .await; + + tx.commit().await?; + + Ok(AddResult { + namespace: params.namespace.to_string(), + kind: params.kind.to_string(), + name: params.name.to_string(), + tags: params.tags.to_vec(), + meta_keys, + secret_keys, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_nested_file_shorthand() { + use std::io::Write; + let mut f = tempfile::NamedTempFile::new().unwrap(); + writeln!(f, "line1\nline2").unwrap(); + let path = f.path().to_str().unwrap().to_string(); + 
let entry = format!("credentials:content@{}", path); + let (path_parts, value) = parse_kv(&entry).unwrap(); + assert_eq!(key_path_to_string(&path_parts), "credentials:content"); + assert!(matches!(value, Value::String(_))); + } + + #[test] + fn flatten_json_fields_nested() { + let v = serde_json::json!({ + "username": "root", + "credentials": { + "type": "ssh", + "content": "pem" + } + }); + let mut fields = flatten_json_fields("", &v); + fields.sort_by(|a, b| a.0.cmp(&b.0)); + assert_eq!(fields[0].0, "credentials.content"); + assert_eq!(fields[1].0, "credentials.type"); + assert_eq!(fields[2].0, "username"); + } +} diff --git a/crates/secrets-core/src/service/api_key.rs b/crates/secrets-core/src/service/api_key.rs new file mode 100644 index 0000000..35e887f --- /dev/null +++ b/crates/secrets-core/src/service/api_key.rs @@ -0,0 +1,55 @@ +use anyhow::Result; +use sqlx::PgPool; +use uuid::Uuid; + +const KEY_PREFIX: &str = "sk_"; + +/// Generate a new API key: `sk_<64 hex chars>` = 67 characters total. +pub fn generate_api_key() -> String { + use rand::RngExt; + let mut bytes = [0u8; 32]; + rand::rng().fill(&mut bytes); + let hex: String = bytes.iter().map(|b| format!("{:02x}", b)).collect(); + format!("{}{}", KEY_PREFIX, hex) +} + +/// Return the user's existing API key, or generate and store a new one if NULL. +pub async fn ensure_api_key(pool: &PgPool, user_id: Uuid) -> Result { + let existing: Option<(Option,)> = + sqlx::query_as("SELECT api_key FROM users WHERE id = $1") + .bind(user_id) + .fetch_optional(pool) + .await?; + + if let Some((Some(key),)) = existing { + return Ok(key); + } + + let new_key = generate_api_key(); + sqlx::query("UPDATE users SET api_key = $1 WHERE id = $2") + .bind(&new_key) + .bind(user_id) + .execute(pool) + .await?; + Ok(new_key) +} + +/// Generate a fresh API key for the user, replacing the old one. 
+pub async fn regenerate_api_key(pool: &PgPool, user_id: Uuid) -> Result { + let new_key = generate_api_key(); + sqlx::query("UPDATE users SET api_key = $1 WHERE id = $2") + .bind(&new_key) + .bind(user_id) + .execute(pool) + .await?; + Ok(new_key) +} + +/// Validate a Bearer token. Returns the `user_id` if the key matches. +pub async fn validate_api_key(pool: &PgPool, raw_key: &str) -> Result> { + let row: Option<(Uuid,)> = sqlx::query_as("SELECT id FROM users WHERE api_key = $1") + .bind(raw_key) + .fetch_optional(pool) + .await?; + Ok(row.map(|(id,)| id)) +} diff --git a/crates/secrets-core/src/service/delete.rs b/crates/secrets-core/src/service/delete.rs new file mode 100644 index 0000000..9aa1d95 --- /dev/null +++ b/crates/secrets-core/src/service/delete.rs @@ -0,0 +1,268 @@ +use anyhow::Result; +use serde_json::json; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::db; +use crate::models::{EntryRow, SecretFieldRow}; + +#[derive(Debug, serde::Serialize)] +pub struct DeletedEntry { + pub namespace: String, + pub kind: String, + pub name: String, +} + +#[derive(Debug, serde::Serialize)] +pub struct DeleteResult { + pub deleted: Vec, + pub dry_run: bool, +} + +pub struct DeleteParams<'a> { + pub namespace: &'a str, + pub kind: Option<&'a str>, + pub name: Option<&'a str>, + pub dry_run: bool, + pub user_id: Option, +} + +pub async fn run(pool: &PgPool, params: DeleteParams<'_>) -> Result { + match params.name { + Some(name) => { + let kind = params + .kind + .ok_or_else(|| anyhow::anyhow!("--kind is required when --name is specified"))?; + delete_one(pool, params.namespace, kind, name, params.user_id).await + } + None => { + delete_bulk( + pool, + params.namespace, + params.kind, + params.dry_run, + params.user_id, + ) + .await + } + } +} + +async fn delete_one( + pool: &PgPool, + namespace: &str, + kind: &str, + name: &str, + user_id: Option, +) -> Result { + let mut tx = pool.begin().await?; + + let row: Option = if let Some(uid) = user_id { + sqlx::query_as( 
+ "SELECT id, version, tags, metadata FROM entries \ + WHERE user_id = $1 AND namespace = $2 AND kind = $3 AND name = $4 FOR UPDATE", + ) + .bind(uid) + .bind(namespace) + .bind(kind) + .bind(name) + .fetch_optional(&mut *tx) + .await? + } else { + sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ + WHERE user_id IS NULL AND namespace = $1 AND kind = $2 AND name = $3 FOR UPDATE", + ) + .bind(namespace) + .bind(kind) + .bind(name) + .fetch_optional(&mut *tx) + .await? + }; + + let Some(row) = row else { + tx.rollback().await?; + return Ok(DeleteResult { + deleted: vec![], + dry_run: false, + }); + }; + + snapshot_and_delete(&mut tx, namespace, kind, name, &row).await?; + crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await; + tx.commit().await?; + + Ok(DeleteResult { + deleted: vec![DeletedEntry { + namespace: namespace.to_string(), + kind: kind.to_string(), + name: name.to_string(), + }], + dry_run: false, + }) +} + +async fn delete_bulk( + pool: &PgPool, + namespace: &str, + kind: Option<&str>, + dry_run: bool, + user_id: Option, +) -> Result { + #[derive(Debug, sqlx::FromRow)] + struct FullEntryRow { + id: Uuid, + version: i64, + kind: String, + name: String, + metadata: serde_json::Value, + tags: Vec, + } + + let rows: Vec = match (user_id, kind) { + (Some(uid), Some(k)) => { + sqlx::query_as( + "SELECT id, version, kind, name, metadata, tags FROM entries \ + WHERE user_id = $1 AND namespace = $2 AND kind = $3 ORDER BY name", + ) + .bind(uid) + .bind(namespace) + .bind(k) + .fetch_all(pool) + .await? + } + (Some(uid), None) => { + sqlx::query_as( + "SELECT id, version, kind, name, metadata, tags FROM entries \ + WHERE user_id = $1 AND namespace = $2 ORDER BY kind, name", + ) + .bind(uid) + .bind(namespace) + .fetch_all(pool) + .await? 
+ } + (None, Some(k)) => { + sqlx::query_as( + "SELECT id, version, kind, name, metadata, tags FROM entries \ + WHERE user_id IS NULL AND namespace = $1 AND kind = $2 ORDER BY name", + ) + .bind(namespace) + .bind(k) + .fetch_all(pool) + .await? + } + (None, None) => { + sqlx::query_as( + "SELECT id, version, kind, name, metadata, tags FROM entries \ + WHERE user_id IS NULL AND namespace = $1 ORDER BY kind, name", + ) + .bind(namespace) + .fetch_all(pool) + .await? + } + }; + + if dry_run { + let deleted = rows + .iter() + .map(|r| DeletedEntry { + namespace: namespace.to_string(), + kind: r.kind.clone(), + name: r.name.clone(), + }) + .collect(); + return Ok(DeleteResult { + deleted, + dry_run: true, + }); + } + + let mut deleted = Vec::with_capacity(rows.len()); + for row in &rows { + let entry_row = EntryRow { + id: row.id, + version: row.version, + tags: row.tags.clone(), + metadata: row.metadata.clone(), + }; + let mut tx = pool.begin().await?; + snapshot_and_delete(&mut tx, namespace, &row.kind, &row.name, &entry_row).await?; + crate::audit::log_tx( + &mut tx, + "delete", + namespace, + &row.kind, + &row.name, + json!({"bulk": true}), + ) + .await; + tx.commit().await?; + deleted.push(DeletedEntry { + namespace: namespace.to_string(), + kind: row.kind.clone(), + name: row.name.clone(), + }); + } + + Ok(DeleteResult { + deleted, + dry_run: false, + }) +} + +async fn snapshot_and_delete( + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + namespace: &str, + kind: &str, + name: &str, + row: &EntryRow, +) -> Result<()> { + if let Err(e) = db::snapshot_entry_history( + tx, + db::EntrySnapshotParams { + entry_id: row.id, + namespace, + kind, + name, + version: row.version, + action: "delete", + tags: &row.tags, + metadata: &row.metadata, + }, + ) + .await + { + tracing::warn!(error = %e, "failed to snapshot entry history before delete"); + } + + let fields: Vec = + sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1") + .bind(row.id) + 
.fetch_all(&mut **tx) + .await?; + + for f in &fields { + if let Err(e) = db::snapshot_secret_history( + tx, + db::SecretSnapshotParams { + entry_id: row.id, + secret_id: f.id, + entry_version: row.version, + field_name: &f.field_name, + encrypted: &f.encrypted, + action: "delete", + }, + ) + .await + { + tracing::warn!(error = %e, "failed to snapshot secret history before delete"); + } + } + + sqlx::query("DELETE FROM entries WHERE id = $1") + .bind(row.id) + .execute(&mut **tx) + .await?; + + Ok(()) +} diff --git a/crates/secrets-core/src/service/env_map.rs b/crates/secrets-core/src/service/env_map.rs new file mode 100644 index 0000000..264cd57 --- /dev/null +++ b/crates/secrets-core/src/service/env_map.rs @@ -0,0 +1,124 @@ +use anyhow::Result; +use serde_json::Value; +use sqlx::PgPool; +use std::collections::HashMap; +use uuid::Uuid; + +use crate::crypto; +use crate::models::Entry; +use crate::service::search::{fetch_entries, fetch_secrets_for_entries}; + +/// Build an env variable map from entry secrets (for dry-run preview or injection). 
+#[allow(clippy::too_many_arguments)] +pub async fn build_env_map( + pool: &PgPool, + namespace: Option<&str>, + kind: Option<&str>, + name: Option<&str>, + tags: &[String], + only_fields: &[String], + prefix: &str, + master_key: &[u8; 32], + user_id: Option, +) -> Result> { + let entries = fetch_entries(pool, namespace, kind, name, tags, None, user_id).await?; + + let mut combined: HashMap = HashMap::new(); + + for entry in &entries { + let entry_map = build_entry_env_map(pool, entry, only_fields, prefix, master_key).await?; + combined.extend(entry_map); + } + + Ok(combined) +} + +async fn build_entry_env_map( + pool: &PgPool, + entry: &Entry, + only_fields: &[String], + prefix: &str, + master_key: &[u8; 32], +) -> Result> { + let entry_ids = vec![entry.id]; + let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?; + let all_fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]); + + let fields: Vec<_> = if only_fields.is_empty() { + all_fields.iter().collect() + } else { + all_fields + .iter() + .filter(|f| only_fields.contains(&f.field_name)) + .collect() + }; + + let effective_prefix = env_prefix(entry, prefix); + let mut map = HashMap::new(); + + for f in fields { + let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; + let key = format!( + "{}_{}", + effective_prefix, + f.field_name.to_uppercase().replace(['-', '.'], "_") + ); + map.insert(key, json_to_env_string(&decrypted)); + } + + // Resolve key_ref + if let Some(key_ref) = entry.metadata.get("key_ref").and_then(|v| v.as_str()) { + let key_entries = fetch_entries( + pool, + Some(&entry.namespace), + Some("key"), + Some(key_ref), + &[], + None, + None, + ) + .await?; + + if let Some(key_entry) = key_entries.first() { + let key_ids = vec![key_entry.id]; + let key_fields_map = fetch_secrets_for_entries(pool, &key_ids).await?; + let empty = vec![]; + let key_fields = key_fields_map.get(&key_entry.id).unwrap_or(&empty); + let key_prefix = env_prefix(key_entry, prefix); 
+ for f in key_fields { + let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; + let key_var = format!( + "{}_{}", + key_prefix, + f.field_name.to_uppercase().replace(['-', '.'], "_") + ); + map.insert(key_var, json_to_env_string(&decrypted)); + } + } else { + tracing::warn!(key_ref, "key_ref target not found"); + } + } + + Ok(map) +} + +fn env_prefix(entry: &Entry, prefix: &str) -> String { + let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_"); + if prefix.is_empty() { + name_part + } else { + format!( + "{}_{}", + prefix.to_uppercase().replace(['-', '.', ' '], "_"), + name_part + ) + } +} + +fn json_to_env_string(v: &Value) -> String { + match v { + Value::String(s) => s.clone(), + Value::Null => String::new(), + other => other.to_string(), + } +} diff --git a/crates/secrets-core/src/service/export.rs b/crates/secrets-core/src/service/export.rs new file mode 100644 index 0000000..b7bc9eb --- /dev/null +++ b/crates/secrets-core/src/service/export.rs @@ -0,0 +1,139 @@ +use anyhow::Result; +use serde_json::Value; +use sqlx::PgPool; +use std::collections::{BTreeMap, HashMap}; +use uuid::Uuid; + +use crate::crypto; +use crate::models::{ExportData, ExportEntry, ExportFormat}; +use crate::service::search::{fetch_entries, fetch_secrets_for_entries}; + +pub struct ExportParams<'a> { + pub namespace: Option<&'a str>, + pub kind: Option<&'a str>, + pub name: Option<&'a str>, + pub tags: &'a [String], + pub query: Option<&'a str>, + pub no_secrets: bool, + pub user_id: Option, +} + +pub async fn export( + pool: &PgPool, + params: ExportParams<'_>, + master_key: Option<&[u8; 32]>, +) -> Result { + let entries = fetch_entries( + pool, + params.namespace, + params.kind, + params.name, + params.tags, + params.query, + params.user_id, + ) + .await?; + + let entry_ids: Vec = entries.iter().map(|e| e.id).collect(); + let secrets_map: HashMap> = if !params.no_secrets && !entry_ids.is_empty() { + fetch_secrets_for_entries(pool, &entry_ids).await? 
+ } else { + HashMap::new() + }; + + let mut export_entries: Vec = Vec::with_capacity(entries.len()); + for entry in &entries { + let secrets = if params.no_secrets { + None + } else { + let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]); + if fields.is_empty() { + Some(BTreeMap::new()) + } else { + let mk = master_key + .ok_or_else(|| anyhow::anyhow!("master key required to decrypt secrets"))?; + let mut map = BTreeMap::new(); + for f in fields { + let decrypted = crypto::decrypt_json(mk, &f.encrypted)?; + map.insert(f.field_name.clone(), decrypted); + } + Some(map) + } + }; + + export_entries.push(ExportEntry { + namespace: entry.namespace.clone(), + kind: entry.kind.clone(), + name: entry.name.clone(), + tags: entry.tags.clone(), + metadata: entry.metadata.clone(), + secrets, + }); + } + + Ok(ExportData { + version: 1, + exported_at: chrono::Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(), + entries: export_entries, + }) +} + +pub async fn export_to_file( + pool: &PgPool, + params: ExportParams<'_>, + master_key: Option<&[u8; 32]>, + file_path: &str, + format_override: Option<&str>, +) -> Result { + let format = if let Some(f) = format_override { + f.parse::()? 
+ } else { + ExportFormat::from_extension(file_path).unwrap_or(ExportFormat::Json) + }; + + let data = export(pool, params, master_key).await?; + let count = data.entries.len(); + let serialized = format.serialize(&data)?; + std::fs::write(file_path, &serialized)?; + Ok(count) +} + +pub async fn export_to_string( + pool: &PgPool, + params: ExportParams<'_>, + master_key: Option<&[u8; 32]>, + format: &str, +) -> Result { + let fmt = format.parse::()?; + let data = export(pool, params, master_key).await?; + fmt.serialize(&data) +} + +// ── Build helpers for re-encoding values as CLI-style entries ───────────────── + +pub fn build_meta_entries(metadata: &Value) -> Vec { + let mut entries = Vec::new(); + if let Some(obj) = metadata.as_object() { + for (k, v) in obj { + entries.push(value_to_kv_entry(k, v)); + } + } + entries +} + +pub fn build_secret_entries(secrets: Option<&BTreeMap>) -> Vec { + let mut entries = Vec::new(); + if let Some(map) = secrets { + for (k, v) in map { + entries.push(value_to_kv_entry(k, v)); + } + } + entries +} + +pub fn value_to_kv_entry(key: &str, value: &Value) -> String { + match value { + Value::String(s) => format!("{}={}", key, s), + other => format!("{}:={}", key, other), + } +} diff --git a/crates/secrets-core/src/service/get_secret.rs b/crates/secrets-core/src/service/get_secret.rs new file mode 100644 index 0000000..7ddec64 --- /dev/null +++ b/crates/secrets-core/src/service/get_secret.rs @@ -0,0 +1,79 @@ +use anyhow::Result; +use serde_json::Value; +use sqlx::PgPool; +use std::collections::HashMap; +use uuid::Uuid; + +use crate::crypto; +use crate::service::search::{fetch_entries, fetch_secrets_for_entries}; + +/// Decrypt a single named field from an entry. 
+pub async fn get_secret_field( + pool: &PgPool, + namespace: &str, + kind: &str, + name: &str, + field_name: &str, + master_key: &[u8; 32], + user_id: Option, +) -> Result { + let entries = fetch_entries( + pool, + Some(namespace), + Some(kind), + Some(name), + &[], + None, + user_id, + ) + .await?; + let entry = entries + .first() + .ok_or_else(|| anyhow::anyhow!("Not found: [{}/{}] {}", namespace, kind, name))?; + + let entry_ids = vec![entry.id]; + let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?; + let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]); + + let field = fields + .iter() + .find(|f| f.field_name == field_name) + .ok_or_else(|| anyhow::anyhow!("Secret field '{}' not found", field_name))?; + + crypto::decrypt_json(master_key, &field.encrypted) +} + +/// Decrypt all secret fields from an entry. Returns a map field_name → decrypted Value. +pub async fn get_all_secrets( + pool: &PgPool, + namespace: &str, + kind: &str, + name: &str, + master_key: &[u8; 32], + user_id: Option, +) -> Result> { + let entries = fetch_entries( + pool, + Some(namespace), + Some(kind), + Some(name), + &[], + None, + user_id, + ) + .await?; + let entry = entries + .first() + .ok_or_else(|| anyhow::anyhow!("Not found: [{}/{}] {}", namespace, kind, name))?; + + let entry_ids = vec![entry.id]; + let secrets_map = fetch_secrets_for_entries(pool, &entry_ids).await?; + let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]); + + let mut map = HashMap::new(); + for f in fields { + let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; + map.insert(f.field_name.clone(), decrypted); + } + Ok(map) +} diff --git a/crates/secrets-core/src/service/history.rs b/crates/secrets-core/src/service/history.rs new file mode 100644 index 0000000..5908aa1 --- /dev/null +++ b/crates/secrets-core/src/service/history.rs @@ -0,0 +1,63 @@ +use anyhow::Result; +use serde_json::Value; +use sqlx::PgPool; +use uuid::Uuid; + +#[derive(Debug, 
serde::Serialize)] +pub struct HistoryEntry { + pub version: i64, + pub action: String, + pub actor: String, + pub created_at: String, +} + +pub async fn run( + pool: &PgPool, + namespace: &str, + kind: &str, + name: &str, + limit: u32, + _user_id: Option, +) -> Result> { + #[derive(sqlx::FromRow)] + struct Row { + version: i64, + action: String, + actor: String, + created_at: chrono::DateTime, + } + + let rows: Vec = sqlx::query_as( + "SELECT version, action, actor, created_at FROM entries_history \ + WHERE namespace = $1 AND kind = $2 AND name = $3 \ + ORDER BY id DESC LIMIT $4", + ) + .bind(namespace) + .bind(kind) + .bind(name) + .bind(limit as i64) + .fetch_all(pool) + .await?; + + Ok(rows + .into_iter() + .map(|r| HistoryEntry { + version: r.version, + action: r.action, + actor: r.actor, + created_at: r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), + }) + .collect()) +} + +pub async fn run_json( + pool: &PgPool, + namespace: &str, + kind: &str, + name: &str, + limit: u32, + user_id: Option, +) -> Result { + let entries = run(pool, namespace, kind, name, limit, user_id).await?; + Ok(serde_json::to_value(entries)?) 
+} diff --git a/crates/secrets-core/src/service/import.rs b/crates/secrets-core/src/service/import.rs new file mode 100644 index 0000000..dbb890d --- /dev/null +++ b/crates/secrets-core/src/service/import.rs @@ -0,0 +1,123 @@ +use anyhow::Result; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::models::ExportFormat; +use crate::service::add::{AddParams, run as add_run}; +use crate::service::export::{build_meta_entries, build_secret_entries}; + +#[derive(Debug, serde::Serialize)] +pub struct ImportSummary { + pub total: usize, + pub inserted: usize, + pub skipped: usize, + pub failed: usize, + pub dry_run: bool, +} + +pub struct ImportParams<'a> { + pub file: &'a str, + pub force: bool, + pub dry_run: bool, + pub user_id: Option, +} + +pub async fn run( + pool: &PgPool, + params: ImportParams<'_>, + master_key: &[u8; 32], +) -> Result { + let format = ExportFormat::from_extension(params.file)?; + let content = std::fs::read_to_string(params.file) + .map_err(|e| anyhow::anyhow!("Cannot read file '{}': {}", params.file, e))?; + let data = format.deserialize(&content)?; + + if data.version != 1 { + anyhow::bail!( + "Unsupported export version {}. 
Only version 1 is supported.", + data.version + ); + } + + let total = data.entries.len(); + let mut inserted = 0usize; + let mut skipped = 0usize; + let mut failed = 0usize; + + for entry in &data.entries { + let exists: bool = sqlx::query_scalar( + "SELECT EXISTS(SELECT 1 FROM entries \ + WHERE namespace = $1 AND kind = $2 AND name = $3 AND user_id IS NOT DISTINCT FROM $4)", + ) + .bind(&entry.namespace) + .bind(&entry.kind) + .bind(&entry.name) + .bind(params.user_id) + .fetch_one(pool) + .await + .unwrap_or(false); + + if exists && !params.force { + return Err(anyhow::anyhow!( + "Import aborted: conflict on [{}/{}/{}]", + entry.namespace, + entry.kind, + entry.name + )); + } + + if params.dry_run { + if exists { + skipped += 1; + } else { + inserted += 1; + } + continue; + } + + let secret_entries = build_secret_entries(entry.secrets.as_ref()); + let meta_entries = build_meta_entries(&entry.metadata); + + match add_run( + pool, + AddParams { + namespace: &entry.namespace, + kind: &entry.kind, + name: &entry.name, + tags: &entry.tags, + meta_entries: &meta_entries, + secret_entries: &secret_entries, + user_id: params.user_id, + }, + master_key, + ) + .await + { + Ok(_) => { + inserted += 1; + } + Err(e) => { + tracing::error!( + namespace = entry.namespace, + kind = entry.kind, + name = entry.name, + error = %e, + "failed to import entry" + ); + failed += 1; + } + } + } + + if failed > 0 { + return Err(anyhow::anyhow!("{} record(s) failed to import", failed)); + } + + Ok(ImportSummary { + total, + inserted, + skipped, + failed, + dry_run: params.dry_run, + }) +} diff --git a/crates/secrets-core/src/service/mod.rs b/crates/secrets-core/src/service/mod.rs new file mode 100644 index 0000000..5ef4e9e --- /dev/null +++ b/crates/secrets-core/src/service/mod.rs @@ -0,0 +1,12 @@ +pub mod add; +pub mod api_key; +pub mod delete; +pub mod env_map; +pub mod export; +pub mod get_secret; +pub mod history; +pub mod import; +pub mod rollback; +pub mod search; +pub mod update; 
+pub mod user; diff --git a/src/commands/rollback.rs b/crates/secrets-core/src/service/rollback.rs similarity index 51% rename from src/commands/rollback.rs rename to crates/secrets-core/src/service/rollback.rs index a226d9c..1964673 100644 --- a/src/commands/rollback.rs +++ b/crates/secrets-core/src/service/rollback.rs @@ -1,24 +1,29 @@ use anyhow::Result; -use serde_json::{Value, json}; -use sqlx::{FromRow, PgPool}; +use serde_json::Value; +use sqlx::PgPool; use uuid::Uuid; use crate::crypto; use crate::db; -use crate::output::{OutputMode, print_json}; -pub struct RollbackArgs<'a> { - pub namespace: &'a str, - pub kind: &'a str, - pub name: &'a str, - /// Target entry version to restore. None → restore the most recent history entry. - pub to_version: Option, - pub output: OutputMode, +#[derive(Debug, serde::Serialize)] +pub struct RollbackResult { + pub namespace: String, + pub kind: String, + pub name: String, + pub restored_version: i64, } -pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) -> Result<()> { - // ── Find the target entry history snapshot ──────────────────────────────── - #[derive(FromRow)] +pub async fn run( + pool: &PgPool, + namespace: &str, + kind: &str, + name: &str, + to_version: Option, + master_key: &[u8; 32], + _user_id: Option, +) -> Result { + #[derive(sqlx::FromRow)] struct EntryHistoryRow { entry_id: Uuid, version: i64, @@ -27,29 +32,26 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - metadata: Value, } - let snap: Option = if let Some(ver) = args.to_version { + let snap: Option = if let Some(ver) = to_version { sqlx::query_as( - "SELECT entry_id, version, action, tags, metadata \ - FROM entries_history \ + "SELECT entry_id, version, action, tags, metadata FROM entries_history \ WHERE namespace = $1 AND kind = $2 AND name = $3 AND version = $4 \ ORDER BY id DESC LIMIT 1", ) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) + .bind(namespace) + .bind(kind) + 
.bind(name) .bind(ver) .fetch_optional(pool) .await? } else { sqlx::query_as( - "SELECT entry_id, version, action, tags, metadata \ - FROM entries_history \ - WHERE namespace = $1 AND kind = $2 AND name = $3 \ - ORDER BY id DESC LIMIT 1", + "SELECT entry_id, version, action, tags, metadata FROM entries_history \ + WHERE namespace = $1 AND kind = $2 AND name = $3 ORDER BY id DESC LIMIT 1", ) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) + .bind(namespace) + .bind(kind) + .bind(name) .fetch_optional(pool) .await? }; @@ -57,17 +59,16 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - let snap = snap.ok_or_else(|| { anyhow::anyhow!( "No history found for [{}/{}] {}{}.", - args.namespace, - args.kind, - args.name, - args.to_version + namespace, + kind, + name, + to_version .map(|v| format!(" at version {}", v)) .unwrap_or_default() ) })?; - // ── Find the matching secret field snapshots ────────────────────────────── - #[derive(FromRow)] + #[derive(sqlx::FromRow)] struct SecretHistoryRow { secret_id: Uuid, field_name: String, @@ -76,17 +77,14 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - } let field_snaps: Vec = sqlx::query_as( - "SELECT secret_id, field_name, encrypted, action \ - FROM secrets_history \ - WHERE entry_id = $1 AND entry_version = $2 \ - ORDER BY field_name", + "SELECT secret_id, field_name, encrypted, action FROM secrets_history \ + WHERE entry_id = $1 AND entry_version = $2 ORDER BY field_name", ) .bind(snap.entry_id) .bind(snap.version) .fetch_all(pool) .await?; - // Validate: try decrypting all encrypted fields before writing anything. 
for f in &field_snaps { if f.action != "delete" && !f.encrypted.is_empty() { crypto::decrypt_json(master_key, &f.encrypted).map_err(|e| { @@ -101,7 +99,6 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - let mut tx = pool.begin().await?; - // ── Snapshot the current live state before overwriting ──────────────────── #[derive(sqlx::FromRow)] struct LiveEntry { id: Uuid, @@ -113,9 +110,9 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - "SELECT id, version, tags, metadata FROM entries \ WHERE namespace = $1 AND kind = $2 AND name = $3 FOR UPDATE", ) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) + .bind(namespace) + .bind(kind) + .bind(name) .fetch_optional(&mut *tx) .await?; @@ -124,9 +121,9 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - &mut tx, db::EntrySnapshotParams { entry_id: lr.id, - namespace: args.namespace, - kind: args.kind, - name: args.name, + namespace, + kind, + name, version: lr.version, action: "rollback", tags: &lr.tags, @@ -138,20 +135,17 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - tracing::warn!(error = %e, "failed to snapshot entry before rollback"); } - // Snapshot existing secret fields. 
#[derive(sqlx::FromRow)] struct LiveField { id: Uuid, field_name: String, encrypted: Vec, } - let live_fields: Vec = sqlx::query_as( - "SELECT id, field_name, encrypted \ - FROM secrets WHERE entry_id = $1", - ) - .bind(lr.id) - .fetch_all(&mut *tx) - .await?; + let live_fields: Vec = + sqlx::query_as("SELECT id, field_name, encrypted FROM secrets WHERE entry_id = $1") + .bind(lr.id) + .fetch_all(&mut *tx) + .await?; for f in &live_fields { if let Err(e) = db::snapshot_secret_history( @@ -172,29 +166,23 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - } } - // ── Restore entry row ───────────────────────────────────────────────────── sqlx::query( "INSERT INTO entries (id, namespace, kind, name, tags, metadata, version, updated_at) \ VALUES ($1, $2, $3, $4, $5, $6, $7, NOW()) \ - ON CONFLICT (namespace, kind, name) DO UPDATE SET \ - tags = EXCLUDED.tags, \ - metadata = EXCLUDED.metadata, \ - version = entries.version + 1, \ - updated_at = NOW()", + ON CONFLICT (namespace, kind, name) WHERE user_id IS NULL DO UPDATE SET \ + tags = EXCLUDED.tags, metadata = EXCLUDED.metadata, \ + version = entries.version + 1, updated_at = NOW()", ) .bind(snap.entry_id) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) + .bind(namespace) + .bind(kind) + .bind(name) .bind(&snap.tags) .bind(&snap.metadata) .bind(snap.version) .execute(&mut *tx) .await?; - // ── Restore secret fields ───────────────────────────────────────────────── - // Delete all current fields and re-insert from snapshot - // (only non-deleted fields from the snapshot are restored). sqlx::query("DELETE FROM secrets WHERE entry_id = $1") .bind(snap.entry_id) .execute(&mut *tx) @@ -202,16 +190,12 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - for f in &field_snaps { if f.action == "delete" { - // Field was deleted at this snapshot point — don't restore it. 
continue; } sqlx::query( - "INSERT INTO secrets (id, entry_id, field_name, encrypted) \ - VALUES ($1, $2, $3, $4) \ + "INSERT INTO secrets (id, entry_id, field_name, encrypted) VALUES ($1, $2, $3, $4) \ ON CONFLICT (entry_id, field_name) DO UPDATE SET \ - encrypted = EXCLUDED.encrypted, \ - version = secrets.version + 1, \ - updated_at = NOW()", + encrypted = EXCLUDED.encrypted, version = secrets.version + 1, updated_at = NOW()", ) .bind(f.secret_id) .bind(snap.entry_id) @@ -224,10 +208,10 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - crate::audit::log_tx( &mut tx, "rollback", - args.namespace, - args.kind, - args.name, - json!({ + namespace, + kind, + name, + serde_json::json!({ "restored_version": snap.version, "original_action": snap.action, }), @@ -236,21 +220,10 @@ pub async fn run(pool: &PgPool, args: RollbackArgs<'_>, master_key: &[u8; 32]) - tx.commit().await?; - let result_json = json!({ - "action": "rolled_back", - "namespace": args.namespace, - "kind": args.kind, - "name": args.name, - "restored_version": snap.version, - }); - - match args.output { - OutputMode::Text => println!( - "Rolled back: [{}/{}] {} → version {}", - args.namespace, args.kind, args.name, snap.version - ), - ref mode => print_json(&result_json, mode)?, - } - - Ok(()) + Ok(RollbackResult { + namespace: namespace.to_string(), + kind: kind.to_string(), + name: name.to_string(), + restored_version: snap.version, + }) } diff --git a/crates/secrets-core/src/service/search.rs b/crates/secrets-core/src/service/search.rs new file mode 100644 index 0000000..a747068 --- /dev/null +++ b/crates/secrets-core/src/service/search.rs @@ -0,0 +1,241 @@ +use anyhow::Result; +use serde_json::Value; +use sqlx::PgPool; +use std::collections::HashMap; +use uuid::Uuid; + +use crate::models::{Entry, SecretField}; + +pub const FETCH_ALL_LIMIT: u32 = 100_000; + +pub struct SearchParams<'a> { + pub namespace: Option<&'a str>, + pub kind: Option<&'a str>, + pub name: 
Option<&'a str>, + pub tags: &'a [String], + pub query: Option<&'a str>, + pub sort: &'a str, + pub limit: u32, + pub offset: u32, + /// Multi-user: filter by this user_id. None = single-user / no filter. + pub user_id: Option, +} + +#[derive(Debug, serde::Serialize)] +pub struct SearchResult { + pub entries: Vec, + pub secret_schemas: HashMap>, +} + +pub async fn run(pool: &PgPool, params: SearchParams<'_>) -> Result { + let entries = fetch_entries_paged(pool, ¶ms).await?; + let entry_ids: Vec = entries.iter().map(|e| e.id).collect(); + let secret_schemas = if !entry_ids.is_empty() { + fetch_secret_schemas(pool, &entry_ids).await? + } else { + HashMap::new() + }; + Ok(SearchResult { + entries, + secret_schemas, + }) +} + +/// Fetch entries matching the given filters — returns all matching entries up to FETCH_ALL_LIMIT. +pub async fn fetch_entries( + pool: &PgPool, + namespace: Option<&str>, + kind: Option<&str>, + name: Option<&str>, + tags: &[String], + query: Option<&str>, + user_id: Option, +) -> Result> { + let params = SearchParams { + namespace, + kind, + name, + tags, + query, + sort: "name", + limit: FETCH_ALL_LIMIT, + offset: 0, + user_id, + }; + fetch_entries_paged(pool, ¶ms).await +} + +async fn fetch_entries_paged(pool: &PgPool, a: &SearchParams<'_>) -> Result> { + let mut conditions: Vec = Vec::new(); + let mut idx: i32 = 1; + + // user_id filtering — always comes first when present + if a.user_id.is_some() { + conditions.push(format!("user_id = ${}", idx)); + idx += 1; + } else { + conditions.push("user_id IS NULL".to_string()); + } + + if a.namespace.is_some() { + conditions.push(format!("namespace = ${}", idx)); + idx += 1; + } + if a.kind.is_some() { + conditions.push(format!("kind = ${}", idx)); + idx += 1; + } + if a.name.is_some() { + conditions.push(format!("name = ${}", idx)); + idx += 1; + } + if !a.tags.is_empty() { + let placeholders: Vec = a + .tags + .iter() + .map(|_| { + let p = format!("${}", idx); + idx += 1; + p + }) + .collect(); + 
conditions.push(format!( + "tags @> ARRAY[{}]::text[]", + placeholders.join(", ") + )); + } + if a.query.is_some() { + conditions.push(format!( + "(name ILIKE ${i} ESCAPE '\\' OR namespace ILIKE ${i} ESCAPE '\\' \ + OR kind ILIKE ${i} ESCAPE '\\' OR metadata::text ILIKE ${i} ESCAPE '\\' \ + OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))", + i = idx + )); + idx += 1; + } + + let order = match a.sort { + "updated" => "updated_at DESC", + "created" => "created_at DESC", + _ => "name ASC", + }; + + let limit_idx = idx; + idx += 1; + let offset_idx = idx; + + let where_clause = if conditions.is_empty() { + String::new() + } else { + format!("WHERE {}", conditions.join(" AND ")) + }; + + let sql = format!( + "SELECT id, COALESCE(user_id, '00000000-0000-0000-0000-000000000000'::uuid) AS user_id, \ + namespace, kind, name, tags, metadata, version, created_at, updated_at \ + FROM entries {where_clause} ORDER BY {order} LIMIT ${limit_idx} OFFSET ${offset_idx}" + ); + + let mut q = sqlx::query_as::<_, EntryRaw>(&sql); + + if let Some(uid) = a.user_id { + q = q.bind(uid); + } + if let Some(v) = a.namespace { + q = q.bind(v); + } + if let Some(v) = a.kind { + q = q.bind(v); + } + if let Some(v) = a.name { + q = q.bind(v); + } + for tag in a.tags { + q = q.bind(tag); + } + if let Some(v) = a.query { + let pattern = format!("%{}%", v.replace('%', "\\%").replace('_', "\\_")); + q = q.bind(pattern); + } + q = q.bind(a.limit as i64).bind(a.offset as i64); + + let rows = q.fetch_all(pool).await?; + Ok(rows.into_iter().map(Entry::from).collect()) +} + +/// Fetch secret field names for a set of entry ids (no decryption). 
+pub async fn fetch_secret_schemas( + pool: &PgPool, + entry_ids: &[Uuid], +) -> Result>> { + if entry_ids.is_empty() { + return Ok(HashMap::new()); + } + let fields: Vec = sqlx::query_as( + "SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name", + ) + .bind(entry_ids) + .fetch_all(pool) + .await?; + + let mut map: HashMap> = HashMap::new(); + for f in fields { + map.entry(f.entry_id).or_default().push(f); + } + Ok(map) +} + +/// Fetch all secret fields (including encrypted bytes) for a set of entry ids. +pub async fn fetch_secrets_for_entries( + pool: &PgPool, + entry_ids: &[Uuid], +) -> Result>> { + if entry_ids.is_empty() { + return Ok(HashMap::new()); + } + let fields: Vec = sqlx::query_as( + "SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name", + ) + .bind(entry_ids) + .fetch_all(pool) + .await?; + + let mut map: HashMap> = HashMap::new(); + for f in fields { + map.entry(f.entry_id).or_default().push(f); + } + Ok(map) +} + +// ── Internal raw row (because user_id is nullable in DB) ───────────────────── + +#[derive(sqlx::FromRow)] +struct EntryRaw { + id: Uuid, + #[allow(dead_code)] // Selected for row shape; Entry model has no user_id field + user_id: Uuid, + namespace: String, + kind: String, + name: String, + tags: Vec, + metadata: Value, + version: i64, + created_at: chrono::DateTime, + updated_at: chrono::DateTime, +} + +impl From for Entry { + fn from(r: EntryRaw) -> Self { + Entry { + id: r.id, + namespace: r.namespace, + kind: r.kind, + name: r.name, + tags: r.tags, + metadata: r.metadata, + version: r.version, + created_at: r.created_at, + updated_at: r.updated_at, + } + } +} diff --git a/src/commands/update.rs b/crates/secrets-core/src/service/update.rs similarity index 50% rename from src/commands/update.rs rename to crates/secrets-core/src/service/update.rs index 851d86e..7dd0df3 100644 --- a/src/commands/update.rs +++ b/crates/secrets-core/src/service/update.rs @@ -1,18 +1,30 @@ use anyhow::Result; 
-use serde_json::{Map, Value, json}; +use serde_json::{Map, Value}; use sqlx::PgPool; use uuid::Uuid; -use super::add::{ - collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path, - parse_kv, remove_path, -}; use crate::crypto; use crate::db; use crate::models::EntryRow; -use crate::output::{OutputMode, print_json}; +use crate::service::add::{ + collect_field_paths, collect_key_paths, flatten_json_fields, insert_path, parse_key_path, + parse_kv, remove_path, +}; -pub struct UpdateArgs<'a> { +#[derive(Debug, serde::Serialize)] +pub struct UpdateResult { + pub namespace: String, + pub kind: String, + pub name: String, + pub add_tags: Vec, + pub remove_tags: Vec, + pub meta_keys: Vec, + pub remove_meta: Vec, + pub secret_keys: Vec, + pub remove_secrets: Vec, +} + +pub struct UpdateParams<'a> { pub namespace: &'a str, pub kind: &'a str, pub name: &'a str, @@ -22,41 +34,55 @@ pub struct UpdateArgs<'a> { pub remove_meta: &'a [String], pub secret_entries: &'a [String], pub remove_secrets: &'a [String], - pub output: OutputMode, + pub user_id: Option, } -pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> Result<()> { +pub async fn run( + pool: &PgPool, + params: UpdateParams<'_>, + master_key: &[u8; 32], +) -> Result { let mut tx = pool.begin().await?; - let row: Option = sqlx::query_as( - "SELECT id, version, tags, metadata \ - FROM entries \ - WHERE namespace = $1 AND kind = $2 AND name = $3 \ - FOR UPDATE", - ) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) - .fetch_optional(&mut *tx) - .await?; + let row: Option = if let Some(uid) = params.user_id { + sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ + WHERE user_id = $1 AND namespace = $2 AND kind = $3 AND name = $4 FOR UPDATE", + ) + .bind(uid) + .bind(params.namespace) + .bind(params.kind) + .bind(params.name) + .fetch_optional(&mut *tx) + .await? 
+ } else { + sqlx::query_as( + "SELECT id, version, tags, metadata FROM entries \ + WHERE user_id IS NULL AND namespace = $1 AND kind = $2 AND name = $3 FOR UPDATE", + ) + .bind(params.namespace) + .bind(params.kind) + .bind(params.name) + .fetch_optional(&mut *tx) + .await? + }; let row = row.ok_or_else(|| { anyhow::anyhow!( "Not found: [{}/{}] {}. Use `add` to create it first.", - args.namespace, - args.kind, - args.name + params.namespace, + params.kind, + params.name ) })?; - // Snapshot current entry state before modifying. if let Err(e) = db::snapshot_entry_history( &mut tx, db::EntrySnapshotParams { entry_id: row.id, - namespace: args.namespace, - kind: args.kind, - name: args.name, + namespace: params.namespace, + kind: params.kind, + name: params.name, version: row.version, action: "update", tags: &row.tags, @@ -68,34 +94,30 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> tracing::warn!(error = %e, "failed to snapshot entry history before update"); } - // ── Merge tags ──────────────────────────────────────────────────────────── - let mut tags: Vec = row.tags; - for t in args.add_tags { + let mut tags: Vec = row.tags.clone(); + for t in params.add_tags { if !tags.contains(t) { tags.push(t.clone()); } } - tags.retain(|t| !args.remove_tags.contains(t)); + tags.retain(|t| !params.remove_tags.contains(t)); - // ── Merge metadata ──────────────────────────────────────────────────────── - let mut meta_map: Map = match row.metadata { + let mut meta_map: Map = match row.metadata.clone() { Value::Object(m) => m, _ => Map::new(), }; - for entry in args.meta_entries { + for entry in params.meta_entries { let (path, value) = parse_kv(entry)?; insert_path(&mut meta_map, &path, value)?; } - for key in args.remove_meta { + for key in params.remove_meta { let path = parse_key_path(key)?; remove_path(&mut meta_map, &path)?; } let metadata = Value::Object(meta_map); - // CAS update of the entry row. 
let result = sqlx::query( - "UPDATE entries \ - SET tags = $1, metadata = $2, version = version + 1, updated_at = NOW() \ + "UPDATE entries SET tags = $1, metadata = $2, version = version + 1, updated_at = NOW() \ WHERE id = $3 AND version = $4", ) .bind(&tags) @@ -109,20 +131,16 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> tx.rollback().await?; anyhow::bail!( "Concurrent modification detected for [{}/{}] {}. Please retry.", - args.namespace, - args.kind, - args.name + params.namespace, + params.kind, + params.name ); } let new_version = row.version + 1; - // ── Update secret fields ────────────────────────────────────────────────── - for entry in args.secret_entries { + for entry in params.secret_entries { let (path, field_value) = parse_kv(entry)?; - - // For nested paths (e.g. credentials:type), flatten into dot-separated names - // and treat the sub-value as the individual field to store. let flat = flatten_json_fields("", &{ let mut m = Map::new(); insert_path(&mut m, &path, field_value)?; @@ -132,22 +150,20 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> for (field_name, fv) in &flat { let encrypted = crypto::encrypt_json(master_key, fv)?; - // Snapshot existing field before replacing. 
#[derive(sqlx::FromRow)] struct ExistingField { id: Uuid, encrypted: Vec, } - let existing_field: Option = sqlx::query_as( - "SELECT id, encrypted \ - FROM secrets WHERE entry_id = $1 AND field_name = $2", + let ef: Option = sqlx::query_as( + "SELECT id, encrypted FROM secrets WHERE entry_id = $1 AND field_name = $2", ) .bind(row.id) .bind(field_name) .fetch_optional(&mut *tx) .await?; - if let Some(ef) = &existing_field + if let Some(ef) = &ef && let Err(e) = db::snapshot_secret_history( &mut tx, db::SecretSnapshotParams { @@ -165,12 +181,9 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> } sqlx::query( - "INSERT INTO secrets (entry_id, field_name, encrypted) \ - VALUES ($1, $2, $3) \ + "INSERT INTO secrets (entry_id, field_name, encrypted) VALUES ($1, $2, $3) \ ON CONFLICT (entry_id, field_name) DO UPDATE SET \ - encrypted = EXCLUDED.encrypted, \ - version = secrets.version + 1, \ - updated_at = NOW()", + encrypted = EXCLUDED.encrypted, version = secrets.version + 1, updated_at = NOW()", ) .bind(row.id) .bind(field_name) @@ -180,21 +193,17 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> } } - // ── Remove secret fields ────────────────────────────────────────────────── - for key in args.remove_secrets { + for key in params.remove_secrets { let path = parse_key_path(key)?; - // Dot-join the path to match flattened field_name storage. let field_name = path.join("."); - // Snapshot before delete. 
#[derive(sqlx::FromRow)] struct FieldToDelete { id: Uuid, encrypted: Vec, } let field: Option = sqlx::query_as( - "SELECT id, encrypted \ - FROM secrets WHERE entry_id = $1 AND field_name = $2", + "SELECT id, encrypted FROM secrets WHERE entry_id = $1 AND field_name = $2", ) .bind(row.id) .bind(&field_name) @@ -217,7 +226,6 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> { tracing::warn!(error = %e, "failed to snapshot secret field history before delete"); } - sqlx::query("DELETE FROM secrets WHERE id = $1") .bind(f.id) .execute(&mut *tx) @@ -225,20 +233,20 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> } } - let meta_keys = collect_key_paths(args.meta_entries)?; - let remove_meta_keys = collect_field_paths(args.remove_meta)?; - let secret_keys = collect_key_paths(args.secret_entries)?; - let remove_secret_keys = collect_field_paths(args.remove_secrets)?; + let meta_keys = collect_key_paths(params.meta_entries)?; + let remove_meta_keys = collect_field_paths(params.remove_meta)?; + let secret_keys = collect_key_paths(params.secret_entries)?; + let remove_secret_keys = collect_field_paths(params.remove_secrets)?; crate::audit::log_tx( &mut tx, "update", - args.namespace, - args.kind, - args.name, - json!({ - "add_tags": args.add_tags, - "remove_tags": args.remove_tags, + params.namespace, + params.kind, + params.name, + serde_json::json!({ + "add_tags": params.add_tags, + "remove_tags": params.remove_tags, "meta_keys": meta_keys, "remove_meta": remove_meta_keys, "secret_keys": secret_keys, @@ -249,45 +257,15 @@ pub async fn run(pool: &PgPool, args: UpdateArgs<'_>, master_key: &[u8; 32]) -> tx.commit().await?; - let result_json = json!({ - "action": "updated", - "namespace": args.namespace, - "kind": args.kind, - "name": args.name, - "add_tags": args.add_tags, - "remove_tags": args.remove_tags, - "meta_keys": meta_keys, - "remove_meta": remove_meta_keys, - "secret_keys": secret_keys, - 
"remove_secrets": remove_secret_keys, - }); - - match args.output { - OutputMode::Json | OutputMode::JsonCompact => { - print_json(&result_json, &args.output)?; - } - _ => { - println!("Updated: [{}/{}] {}", args.namespace, args.kind, args.name); - if !args.add_tags.is_empty() { - println!(" +tags: {}", args.add_tags.join(", ")); - } - if !args.remove_tags.is_empty() { - println!(" -tags: {}", args.remove_tags.join(", ")); - } - if !args.meta_entries.is_empty() { - println!(" +metadata: {}", meta_keys.join(", ")); - } - if !args.remove_meta.is_empty() { - println!(" -metadata: {}", remove_meta_keys.join(", ")); - } - if !args.secret_entries.is_empty() { - println!(" +secrets: {}", secret_keys.join(", ")); - } - if !args.remove_secrets.is_empty() { - println!(" -secrets: {}", remove_secret_keys.join(", ")); - } - } - } - - Ok(()) + Ok(UpdateResult { + namespace: params.namespace.to_string(), + kind: params.kind.to_string(), + name: params.name.to_string(), + add_tags: params.add_tags.to_vec(), + remove_tags: params.remove_tags.to_vec(), + meta_keys, + remove_meta: remove_meta_keys, + secret_keys, + remove_secrets: remove_secret_keys, + }) } diff --git a/crates/secrets-core/src/service/user.rs b/crates/secrets-core/src/service/user.rs new file mode 100644 index 0000000..40dd443 --- /dev/null +++ b/crates/secrets-core/src/service/user.rs @@ -0,0 +1,213 @@ +use anyhow::Result; +use serde_json::Value; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::models::{OauthAccount, User}; + +pub struct OAuthProfile { + pub provider: String, + pub provider_id: String, + pub email: Option, + pub name: Option, + pub avatar_url: Option, +} + +/// Find or create a user from an OAuth profile. +/// Returns (user, is_new) where is_new indicates first-time registration. 
+pub async fn find_or_create_user(pool: &PgPool, profile: OAuthProfile) -> Result<(User, bool)> { + // Check if this OAuth account already exists + let existing: Option = sqlx::query_as( + "SELECT id, user_id, provider, provider_id, email, name, avatar_url, created_at \ + FROM oauth_accounts WHERE provider = $1 AND provider_id = $2", + ) + .bind(&profile.provider) + .bind(&profile.provider_id) + .fetch_optional(pool) + .await?; + + if let Some(oa) = existing { + let user: User = sqlx::query_as( + "SELECT id, email, name, avatar_url, key_salt, key_check, key_params, api_key, created_at, updated_at \ + FROM users WHERE id = $1", + ) + .bind(oa.user_id) + .fetch_one(pool) + .await?; + return Ok((user, false)); + } + + // New user — create records (no key yet; user sets passphrase on dashboard) + let display_name = profile + .name + .clone() + .unwrap_or_else(|| profile.email.clone().unwrap_or_else(|| "User".to_string())); + + let mut tx = pool.begin().await?; + + let user: User = sqlx::query_as( + "INSERT INTO users (email, name, avatar_url) \ + VALUES ($1, $2, $3) \ + RETURNING id, email, name, avatar_url, key_salt, key_check, key_params, api_key, created_at, updated_at", + ) + .bind(&profile.email) + .bind(&display_name) + .bind(&profile.avatar_url) + .fetch_one(&mut *tx) + .await?; + + sqlx::query( + "INSERT INTO oauth_accounts (user_id, provider, provider_id, email, name, avatar_url) \ + VALUES ($1, $2, $3, $4, $5, $6)", + ) + .bind(user.id) + .bind(&profile.provider) + .bind(&profile.provider_id) + .bind(&profile.email) + .bind(&profile.name) + .bind(&profile.avatar_url) + .execute(&mut *tx) + .await?; + + tx.commit().await?; + + Ok((user, true)) +} + +/// Store the PBKDF2 salt, key_check, and params for a user's passphrase setup. 
+pub async fn update_user_key_setup( + pool: &PgPool, + user_id: Uuid, + key_salt: &[u8], + key_check: &[u8], + key_params: &Value, +) -> Result<()> { + sqlx::query( + "UPDATE users SET key_salt = $1, key_check = $2, key_params = $3, updated_at = NOW() \ + WHERE id = $4", + ) + .bind(key_salt) + .bind(key_check) + .bind(key_params) + .bind(user_id) + .execute(pool) + .await?; + Ok(()) +} + +/// Fetch a user by ID. +pub async fn get_user_by_id(pool: &PgPool, user_id: Uuid) -> Result> { + let user = sqlx::query_as( + "SELECT id, email, name, avatar_url, key_salt, key_check, key_params, api_key, created_at, updated_at \ + FROM users WHERE id = $1", + ) + .bind(user_id) + .fetch_optional(pool) + .await?; + Ok(user) +} + +/// List all OAuth accounts linked to a user. +pub async fn list_oauth_accounts(pool: &PgPool, user_id: Uuid) -> Result> { + let accounts = sqlx::query_as( + "SELECT id, user_id, provider, provider_id, email, name, avatar_url, created_at \ + FROM oauth_accounts WHERE user_id = $1 ORDER BY created_at", + ) + .bind(user_id) + .fetch_all(pool) + .await?; + Ok(accounts) +} + +/// Bind an additional OAuth account to an existing user. 
+pub async fn bind_oauth_account( + pool: &PgPool, + user_id: Uuid, + profile: OAuthProfile, +) -> Result { + // Check if this provider_id is already linked to someone else + let conflict: Option<(Uuid,)> = sqlx::query_as( + "SELECT user_id FROM oauth_accounts WHERE provider = $1 AND provider_id = $2", + ) + .bind(&profile.provider) + .bind(&profile.provider_id) + .fetch_optional(pool) + .await?; + + if let Some((existing_user_id,)) = conflict { + if existing_user_id != user_id { + anyhow::bail!( + "This {} account is already linked to a different user", + profile.provider + ); + } + anyhow::bail!( + "This {} account is already linked to your account", + profile.provider + ); + } + + let existing_provider_for_user: Option<(String,)> = sqlx::query_as( + "SELECT provider_id FROM oauth_accounts WHERE user_id = $1 AND provider = $2", + ) + .bind(user_id) + .bind(&profile.provider) + .fetch_optional(pool) + .await?; + + if existing_provider_for_user.is_some() { + anyhow::bail!( + "You already linked a {} account. Unlink the other provider instead of binding multiple {} accounts.", + profile.provider, + profile.provider + ); + } + + let account: OauthAccount = sqlx::query_as( + "INSERT INTO oauth_accounts (user_id, provider, provider_id, email, name, avatar_url) \ + VALUES ($1, $2, $3, $4, $5, $6) \ + RETURNING id, user_id, provider, provider_id, email, name, avatar_url, created_at", + ) + .bind(user_id) + .bind(&profile.provider) + .bind(&profile.provider_id) + .bind(&profile.email) + .bind(&profile.name) + .bind(&profile.avatar_url) + .fetch_one(pool) + .await?; + + Ok(account) +} + +/// Unbind an OAuth account. Ensures at least one remains and blocks unlinking the current login provider. 
+pub async fn unbind_oauth_account( + pool: &PgPool, + user_id: Uuid, + provider: &str, + current_login_provider: Option<&str>, +) -> Result<()> { + if current_login_provider == Some(provider) { + anyhow::bail!( + "Cannot unlink the {} account you are currently using to sign in", + provider + ); + } + + let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM oauth_accounts WHERE user_id = $1") + .bind(user_id) + .fetch_one(pool) + .await?; + + if count <= 1 { + anyhow::bail!("Cannot unbind the last OAuth account. Please link another account first."); + } + + sqlx::query("DELETE FROM oauth_accounts WHERE user_id = $1 AND provider = $2") + .bind(user_id) + .bind(provider) + .execute(pool) + .await?; + + Ok(()) +} diff --git a/crates/secrets-mcp/Cargo.toml b/crates/secrets-mcp/Cargo.toml new file mode 100644 index 0000000..7ae0f47 --- /dev/null +++ b/crates/secrets-mcp/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "secrets-mcp" +version = "0.1.0" +edition.workspace = true + +[[bin]] +name = "secrets-mcp" +path = "src/main.rs" + +[dependencies] +secrets-core = { path = "../secrets-core" } + +# MCP +rmcp = { version = "1", features = ["server", "macros", "transport-streamable-http-server", "schemars"] } + +# Web framework +axum = "0.8" +axum-extra = { version = "0.10", features = ["typed-header"] } +tower = "0.5" +tower-http = { version = "0.6", features = ["cors"] } +tower-sessions = "0.14" + +# OAuth (manual token exchange via reqwest) +reqwest.workspace = true + +# Templating - render templates manually to avoid integration crate issues +askama = "0.13" + +# Common +anyhow.workspace = true +chrono.workspace = true +serde.workspace = true +serde_json.workspace = true +sha2.workspace = true +rand.workspace = true +sqlx.workspace = true +tokio.workspace = true +tracing.workspace = true +tracing-subscriber.workspace = true +uuid.workspace = true +dotenvy.workspace = true +urlencoding = "2" +schemars = "1" +http = "1" diff --git a/crates/secrets-mcp/src/auth.rs 
b/crates/secrets-mcp/src/auth.rs new file mode 100644 index 0000000..304f05e --- /dev/null +++ b/crates/secrets-mcp/src/auth.rs @@ -0,0 +1,114 @@ +use std::net::SocketAddr; + +use axum::{ + extract::{ConnectInfo, Request, State}, + http::StatusCode, + middleware::Next, + response::Response, +}; +use sqlx::PgPool; +use uuid::Uuid; + +use secrets_core::service::api_key::validate_api_key; + +/// Injected into request extensions after Bearer token validation. +#[derive(Clone, Debug)] +pub struct AuthUser { + pub user_id: Uuid, +} + +fn log_client_ip(req: &Request) -> Option { + if let Some(first) = req + .headers() + .get("x-forwarded-for") + .and_then(|v| v.to_str().ok()) + .and_then(|s| s.split(',').next()) + { + let s = first.trim(); + if !s.is_empty() { + return Some(s.to_string()); + } + } + req.extensions() + .get::>() + .map(|c| c.ip().to_string()) +} + +/// Axum middleware that validates Bearer API keys for the /mcp route. +/// Passes all non-MCP paths through without authentication. 
+pub async fn bearer_auth_middleware( + State(pool): State, + req: Request, + next: Next, +) -> Result { + let path = req.uri().path(); + let method = req.method().as_str(); + let client_ip = log_client_ip(&req); + + // Only authenticate /mcp paths + if !path.starts_with("/mcp") { + return Ok(next.run(req).await); + } + + // Allow OPTIONS (CORS preflight) through + if req.method() == axum::http::Method::OPTIONS { + return Ok(next.run(req).await); + } + + let auth_header = req + .headers() + .get(axum::http::header::AUTHORIZATION) + .and_then(|v| v.to_str().ok()); + + let raw_key = match auth_header { + Some(h) if h.starts_with("Bearer ") => h.trim_start_matches("Bearer ").trim(), + Some(_) => { + tracing::warn!( + method, + path, + client_ip = client_ip.as_deref(), + "invalid Authorization header format on /mcp (expected Bearer …)" + ); + return Err(StatusCode::UNAUTHORIZED); + } + None => { + tracing::warn!( + method, + path, + client_ip = client_ip.as_deref(), + "missing Authorization header on /mcp" + ); + return Err(StatusCode::UNAUTHORIZED); + } + }; + + match validate_api_key(&pool, raw_key).await { + Ok(Some(user_id)) => { + tracing::debug!(?user_id, "api key authenticated"); + let mut req = req; + req.extensions_mut().insert(AuthUser { user_id }); + Ok(next.run(req).await) + } + Ok(None) => { + tracing::warn!( + method, + path, + client_ip = client_ip.as_deref(), + key_prefix = %&raw_key.chars().take(12).collect::(), + key_len = raw_key.len(), + "invalid api key (not found in database — e.g. 
revoked key or DB was reset; update MCP client Bearer token)" + ); + Err(StatusCode::UNAUTHORIZED) + } + Err(e) => { + tracing::error!( + method, + path, + client_ip = client_ip.as_deref(), + error = %e, + "api key validation error" + ); + Err(StatusCode::INTERNAL_SERVER_ERROR) + } + } +} diff --git a/crates/secrets-mcp/src/main.rs b/crates/secrets-mcp/src/main.rs new file mode 100644 index 0000000..529d743 --- /dev/null +++ b/crates/secrets-mcp/src/main.rs @@ -0,0 +1,155 @@ +mod auth; +mod oauth; +mod tools; +mod web; + +use std::net::SocketAddr; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use axum::Router; +use rmcp::transport::streamable_http_server::{ + StreamableHttpService, session::local::LocalSessionManager, +}; +use sqlx::PgPool; +use tower_http::cors::{Any, CorsLayer}; +use tower_sessions::cookie::SameSite; +use tower_sessions::{MemoryStore, SessionManagerLayer}; +use tracing_subscriber::EnvFilter; + +use secrets_core::config::resolve_db_url; +use secrets_core::db::{create_pool, migrate}; + +use crate::oauth::OAuthConfig; +use crate::tools::SecretsService; + +/// Shared application state injected into web routes and middleware. 
+#[derive(Clone)] +pub struct AppState { + pub pool: PgPool, + pub google_config: Option, + pub base_url: String, + pub http_client: reqwest::Client, +} + +fn load_env_var(name: &str) -> Option { + std::env::var(name).ok().filter(|s| !s.is_empty()) +} + +fn load_oauth_config(prefix: &str, base_url: &str, path: &str) -> Option { + let client_id = load_env_var(&format!("{}_CLIENT_ID", prefix))?; + let client_secret = load_env_var(&format!("{}_CLIENT_SECRET", prefix))?; + Some(OAuthConfig { + client_id, + client_secret, + redirect_uri: format!("{}{}", base_url, path), + }) +} + +#[tokio::main] +async fn main() -> Result<()> { + // Load .env if present + let _ = dotenvy::dotenv(); + + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::try_from_default_env().unwrap_or_else(|_| "secrets_mcp=info".into()), + ) + .init(); + + // ── Database ────────────────────────────────────────────────────────────── + let db_url = resolve_db_url("") + .context("Database not configured. Set SECRETS_DATABASE_URL environment variable.")?; + let pool = create_pool(&db_url) + .await + .context("failed to connect to database")?; + migrate(&pool) + .await + .context("failed to run database migrations")?; + tracing::info!("Database connected and migrated"); + + // ── Configuration ───────────────────────────────────────────────────────── + let base_url = load_env_var("BASE_URL").unwrap_or_else(|| "http://localhost:9315".to_string()); + let bind_addr = load_env_var("SECRETS_MCP_BIND").unwrap_or_else(|| "0.0.0.0:9315".to_string()); + + // ── OAuth providers ─────────────────────────────────────────────────────── + let google_config = load_oauth_config("GOOGLE", &base_url, "/auth/google/callback"); + + if google_config.is_none() { + tracing::warn!( + "No OAuth providers configured. Set GOOGLE_CLIENT_ID/GOOGLE_CLIENT_SECRET to enable login." 
+ ); + } + + // ── Session store ───────────────────────────────────────────────────────── + let session_store = MemoryStore::default(); + // Strict would drop the session cookie on redirect from Google → our origin (cross-site nav). + let session_layer = SessionManagerLayer::new(session_store) + .with_secure(base_url.starts_with("https://")) + .with_same_site(SameSite::Lax); + + // ── App state ───────────────────────────────────────────────────────────── + let app_state = AppState { + pool: pool.clone(), + google_config, + base_url: base_url.clone(), + http_client: reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(15)) + .build() + .context("failed to build HTTP client")?, + }; + + // ── MCP service ─────────────────────────────────────────────────────────── + let pool_arc = Arc::new(pool.clone()); + + let mcp_service = StreamableHttpService::new( + move || { + let p = pool_arc.clone(); + Ok(SecretsService::new(p)) + }, + LocalSessionManager::default().into(), + Default::default(), + ); + + // ── Router ──────────────────────────────────────────────────────────────── + let cors = CorsLayer::new() + .allow_origin(Any) + .allow_methods(Any) + .allow_headers(Any); + + let router = Router::new() + .merge(web::web_router()) + .nest_service("/mcp", mcp_service) + .layer(axum::middleware::from_fn_with_state( + pool, + auth::bearer_auth_middleware, + )) + .layer(session_layer) + .layer(cors) + .with_state(app_state); + + // ── Start server ────────────────────────────────────────────────────────── + let listener = tokio::net::TcpListener::bind(&bind_addr) + .await + .with_context(|| format!("failed to bind to {}", bind_addr))?; + + tracing::info!("Secrets MCP Server listening on http://{}", bind_addr); + tracing::info!("MCP endpoint: {}/mcp", base_url); + + axum::serve( + listener, + router.into_make_service_with_connect_info::(), + ) + .with_graceful_shutdown(shutdown_signal()) + .await + .context("server error")?; + + Ok(()) +} + +async fn 
shutdown_signal() { + tokio::signal::ctrl_c() + .await + .expect("failed to install CTRL+C signal handler"); + tracing::info!("Shutting down gracefully..."); +} diff --git a/crates/secrets-mcp/src/oauth/google.rs b/crates/secrets-mcp/src/oauth/google.rs new file mode 100644 index 0000000..4fbc184 --- /dev/null +++ b/crates/secrets-mcp/src/oauth/google.rs @@ -0,0 +1,66 @@ +use anyhow::{Context, Result}; +use serde::Deserialize; + +use super::{OAuthConfig, OAuthUserInfo}; + +#[derive(Deserialize)] +struct TokenResponse { + access_token: String, + #[allow(dead_code)] + token_type: String, + #[allow(dead_code)] + id_token: Option, +} + +#[derive(Deserialize)] +struct UserInfo { + sub: String, + email: Option, + name: Option, + picture: Option, +} + +/// Exchange authorization code for tokens and fetch user profile. +pub async fn exchange_code( + client: &reqwest::Client, + config: &OAuthConfig, + code: &str, +) -> Result { + let token_resp: TokenResponse = client + .post("https://oauth2.googleapis.com/token") + .form(&[ + ("code", code), + ("client_id", &config.client_id), + ("client_secret", &config.client_secret), + ("redirect_uri", &config.redirect_uri), + ("grant_type", "authorization_code"), + ]) + .send() + .await + .context("failed to exchange Google code")? + .error_for_status() + .context("Google token endpoint error")? + .json() + .await + .context("failed to parse Google token response")?; + + let user: UserInfo = client + .get("https://openidconnect.googleapis.com/v1/userinfo") + .bearer_auth(&token_resp.access_token) + .send() + .await + .context("failed to fetch Google userinfo")? + .error_for_status() + .context("Google userinfo endpoint error")? 
+ .json() + .await + .context("failed to parse Google userinfo")?; + + Ok(OAuthUserInfo { + provider: "google".to_string(), + provider_id: user.sub, + email: user.email, + name: user.name, + avatar_url: user.picture, + }) +} diff --git a/crates/secrets-mcp/src/oauth/mod.rs b/crates/secrets-mcp/src/oauth/mod.rs new file mode 100644 index 0000000..982397f --- /dev/null +++ b/crates/secrets-mcp/src/oauth/mod.rs @@ -0,0 +1,45 @@ +pub mod google; +pub mod wechat; // not yet implemented — placeholder for future WeChat integration + +use serde::{Deserialize, Serialize}; + +/// Normalized OAuth user profile from any provider. +#[derive(Debug, Clone)] +pub struct OAuthUserInfo { + pub provider: String, + pub provider_id: String, + pub email: Option, + pub name: Option, + pub avatar_url: Option, +} + +/// OAuth provider configuration. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct OAuthConfig { + pub client_id: String, + pub client_secret: String, + pub redirect_uri: String, +} + +/// Build the Google authorization URL. +pub fn google_auth_url(config: &OAuthConfig, state: &str) -> String { + format!( + "https://accounts.google.com/o/oauth2/v2/auth\ + ?client_id={}\ + &redirect_uri={}\ + &response_type=code\ + &scope=openid%20email%20profile\ + &state={}\ + &access_type=offline", + urlencoding::encode(&config.client_id), + urlencoding::encode(&config.redirect_uri), + urlencoding::encode(state), + ) +} + +pub fn random_state() -> String { + use rand::RngExt; + let mut bytes = [0u8; 16]; + rand::rng().fill(&mut bytes); + bytes.iter().map(|b| format!("{:02x}", b)).collect() +} diff --git a/crates/secrets-mcp/src/oauth/wechat.rs b/crates/secrets-mcp/src/oauth/wechat.rs new file mode 100644 index 0000000..2653e22 --- /dev/null +++ b/crates/secrets-mcp/src/oauth/wechat.rs @@ -0,0 +1,18 @@ +use super::{OAuthConfig, OAuthUserInfo}; +/// WeChat OAuth — not yet implemented. +/// +/// This module is a placeholder for future WeChat Open Platform integration. 
+/// When ready, implement `exchange_code` following the non-standard WeChat OAuth 2.0 flow: +/// - Token exchange uses a GET request (not POST) +/// - Preferred user identifier is `unionid` (cross-app), falling back to `openid` +/// - Docs: https://developers.weixin.qq.com/doc/oplatform/Website_App/WeChat_Login/Wechat_Login.html +use anyhow::{Result, bail}; + +#[allow(dead_code)] +pub async fn exchange_code( + _client: &reqwest::Client, + _config: &OAuthConfig, + _code: &str, +) -> Result { + bail!("WeChat login is not yet implemented") +} diff --git a/crates/secrets-mcp/src/tools.rs b/crates/secrets-mcp/src/tools.rs new file mode 100644 index 0000000..73a1482 --- /dev/null +++ b/crates/secrets-mcp/src/tools.rs @@ -0,0 +1,609 @@ +use std::sync::Arc; + +use anyhow::Result; +use rmcp::{ + RoleServer, ServerHandler, + handler::server::wrapper::Parameters, + model::{ + CallToolResult, Content, Implementation, InitializeResult, ProtocolVersion, + ServerCapabilities, + }, + service::RequestContext, + tool, tool_handler, tool_router, +}; +use schemars::JsonSchema; +use serde::Deserialize; +use sqlx::PgPool; +use uuid::Uuid; + +use secrets_core::service::{ + add::{AddParams, run as svc_add}, + delete::{DeleteParams, run as svc_delete}, + export::{ExportParams, export as svc_export}, + get_secret::{get_all_secrets, get_secret_field}, + history::run as svc_history, + rollback::run as svc_rollback, + search::{SearchParams, run as svc_search}, + update::{UpdateParams, run as svc_update}, +}; + +use crate::auth::AuthUser; + +// ── Shared state ────────────────────────────────────────────────────────────── + +#[derive(Clone)] +pub struct SecretsService { + pub pool: Arc, + pub tool_router: rmcp::handler::server::router::tool::ToolRouter, +} + +impl SecretsService { + pub fn new(pool: Arc) -> Self { + Self { + pool, + tool_router: Self::tool_router(), + } + } + + /// Extract user_id from the HTTP request parts injected by auth middleware. 
+    fn user_id_from_ctx(ctx: &RequestContext<RoleServer>) -> Result<Option<Uuid>, rmcp::ErrorData> {
+        let parts = ctx
+            .extensions
+            // NOTE(review): turbofish type was lost in extraction — rmcp's streamable-HTTP
+            // transport injects `axum::http::request::Parts`; confirm against crate source.
+            .get::<axum::http::request::Parts>()
+            .ok_or_else(|| rmcp::ErrorData::internal_error("Missing HTTP parts", None))?;
+        Ok(parts.extensions.get::<AuthUser>().map(|a| a.user_id))
+    }
+
+    /// Get the authenticated user_id (returns error if not authenticated).
+    fn require_user_id(ctx: &RequestContext<RoleServer>) -> Result<Uuid, rmcp::ErrorData> {
+        let parts = ctx
+            .extensions
+            .get::<axum::http::request::Parts>()
+            .ok_or_else(|| rmcp::ErrorData::internal_error("Missing HTTP parts", None))?;
+        parts
+            .extensions
+            .get::<AuthUser>()
+            .map(|a| a.user_id)
+            .ok_or_else(|| rmcp::ErrorData::invalid_request("Unauthorized: API key required", None))
+    }
+
+    /// Extract the 32-byte encryption key from the X-Encryption-Key request header.
+    /// The header value must be 64 lowercase hex characters (PBKDF2-derived key).
+    fn extract_enc_key(ctx: &RequestContext<RoleServer>) -> Result<[u8; 32], rmcp::ErrorData> {
+        let parts = ctx
+            .extensions
+            .get::<axum::http::request::Parts>()
+            .ok_or_else(|| rmcp::ErrorData::internal_error("Missing HTTP parts", None))?;
+        let hex_str = parts
+            .headers
+            .get("x-encryption-key")
+            .ok_or_else(|| {
+                rmcp::ErrorData::invalid_request(
+                    "Missing X-Encryption-Key header. \
+                     Set this to your 64-char hex encryption key derived from your passphrase.",
+                    None,
+                )
+            })?
+            .to_str()
+            .map_err(|_| {
+                rmcp::ErrorData::invalid_request("Invalid X-Encryption-Key header value", None)
+            })?;
+        secrets_core::crypto::extract_key_from_hex(hex_str)
+            .map_err(|e| rmcp::ErrorData::invalid_request(e.to_string(), None))
+    }
+
+    /// Require both user_id and encryption key.
+    fn require_user_and_key(
+        ctx: &RequestContext<RoleServer>,
+    ) -> Result<(Uuid, [u8; 32]), rmcp::ErrorData> {
+        let user_id = Self::require_user_id(ctx)?;
+        let key = Self::extract_enc_key(ctx)?;
+        Ok((user_id, key))
+    }
+}
+
+// ── Tool parameter types ──────────────────────────────────────────────────────
+
+#[derive(Debug, Deserialize, JsonSchema)]
+struct SearchInput {
+    #[schemars(description = "Namespace filter (e.g.
'refining', 'ricnsmart')")] + namespace: Option, + #[schemars(description = "Kind filter (e.g. 'server', 'service', 'key')")] + kind: Option, + #[schemars(description = "Exact record name")] + name: Option, + #[schemars(description = "Tag filters (all must match)")] + tags: Option>, + #[schemars(description = "Fuzzy search across name, namespace, kind, tags, metadata")] + query: Option, + #[schemars(description = "Return only summary fields (name/tags/desc/updated_at)")] + summary: Option, + #[schemars(description = "Sort order: 'name' (default), 'updated', 'created'")] + sort: Option, + #[schemars(description = "Max results (default 20)")] + limit: Option, + #[schemars(description = "Pagination offset (default 0)")] + offset: Option, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct GetSecretInput { + #[schemars(description = "Namespace of the entry")] + namespace: String, + #[schemars(description = "Kind of the entry (e.g. 'server', 'service')")] + kind: String, + #[schemars(description = "Name of the entry")] + name: String, + #[schemars(description = "Specific field to retrieve. If omitted, returns all fields.")] + field: Option, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct AddInput { + #[schemars(description = "Namespace")] + namespace: String, + #[schemars(description = "Kind (e.g. 
'server', 'service', 'key')")] + kind: String, + #[schemars(description = "Unique name within namespace+kind")] + name: String, + #[schemars(description = "Tags for this entry")] + tags: Option>, + #[schemars(description = "Metadata fields as 'key=value' or 'key:=json' strings")] + meta: Option>, + #[schemars(description = "Secret fields as 'key=value' strings")] + secrets: Option>, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct UpdateInput { + #[schemars(description = "Namespace")] + namespace: String, + #[schemars(description = "Kind")] + kind: String, + #[schemars(description = "Name")] + name: String, + #[schemars(description = "Tags to add")] + add_tags: Option>, + #[schemars(description = "Tags to remove")] + remove_tags: Option>, + #[schemars(description = "Metadata fields to update/add as 'key=value' strings")] + meta: Option>, + #[schemars(description = "Metadata field keys to remove")] + remove_meta: Option>, + #[schemars(description = "Secret fields to update/add as 'key=value' strings")] + secrets: Option>, + #[schemars(description = "Secret field keys to remove")] + remove_secrets: Option>, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct DeleteInput { + #[schemars(description = "Namespace")] + namespace: String, + #[schemars(description = "Kind filter (required for single delete)")] + kind: Option, + #[schemars(description = "Exact name to delete. 
Omit for bulk delete by namespace+kind.")] + name: Option, + #[schemars(description = "Preview deletions without writing")] + dry_run: Option, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct HistoryInput { + #[schemars(description = "Namespace")] + namespace: String, + #[schemars(description = "Kind")] + kind: String, + #[schemars(description = "Name")] + name: String, + #[schemars(description = "Max history entries to return (default 20)")] + limit: Option, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct RollbackInput { + #[schemars(description = "Namespace")] + namespace: String, + #[schemars(description = "Kind")] + kind: String, + #[schemars(description = "Name")] + name: String, + #[schemars(description = "Target version number. Omit to restore the most recent snapshot.")] + to_version: Option, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct ExportInput { + #[schemars(description = "Namespace filter")] + namespace: Option, + #[schemars(description = "Kind filter")] + kind: Option, + #[schemars(description = "Exact name filter")] + name: Option, + #[schemars(description = "Tag filters")] + tags: Option>, + #[schemars(description = "Fuzzy query")] + query: Option, + #[schemars(description = "Export format: 'json' (default), 'toml', 'yaml'")] + format: Option, +} + +#[derive(Debug, Deserialize, JsonSchema)] +struct EnvMapInput { + #[schemars(description = "Namespace filter")] + namespace: Option, + #[schemars(description = "Kind filter")] + kind: Option, + #[schemars(description = "Exact name filter")] + name: Option, + #[schemars(description = "Tag filters")] + tags: Option>, + #[schemars(description = "Only include these secret fields")] + only_fields: Option>, + #[schemars(description = "Environment variable name prefix")] + prefix: Option, +} + +// ── Tool implementations ────────────────────────────────────────────────────── + +#[tool_router] +impl SecretsService { + #[tool( + description = "Search entries in the secrets store. 
Returns entries with metadata and \ + secret field names (not values). Use secrets_get to decrypt secret values." + )] + async fn secrets_search( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let user_id = Self::user_id_from_ctx(&ctx)?; + let tags = input.tags.unwrap_or_default(); + let result = svc_search( + &self.pool, + SearchParams { + namespace: input.namespace.as_deref(), + kind: input.kind.as_deref(), + name: input.name.as_deref(), + tags: &tags, + query: input.query.as_deref(), + sort: input.sort.as_deref().unwrap_or("name"), + limit: input.limit.unwrap_or(20), + offset: input.offset.unwrap_or(0), + user_id, + }, + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let summary = input.summary.unwrap_or(false); + let entries: Vec = result + .entries + .iter() + .map(|e| { + if summary { + serde_json::json!({ + "namespace": e.namespace, + "kind": e.kind, + "name": e.name, + "tags": e.tags, + "desc": e.metadata.get("desc").or_else(|| e.metadata.get("url")) + .and_then(|v| v.as_str()).unwrap_or(""), + "updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), + }) + } else { + let schema: Vec<&str> = result + .secret_schemas + .get(&e.id) + .map(|f| f.iter().map(|s| s.field_name.as_str()).collect()) + .unwrap_or_default(); + serde_json::json!({ + "id": e.id, + "namespace": e.namespace, + "kind": e.kind, + "name": e.name, + "tags": e.tags, + "metadata": e.metadata, + "secret_fields": schema, + "version": e.version, + "updated_at": e.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), + }) + } + }) + .collect(); + + let json = serde_json::to_string_pretty(&entries).unwrap_or_else(|_| "[]".to_string()); + Ok(CallToolResult::success(vec![Content::text(json)])) + } + + #[tool( + description = "Get decrypted secret field values for an entry. Requires your \ + encryption key via X-Encryption-Key header (64 hex chars, PBKDF2-derived). 
\ + Returns all fields, or a specific field if 'field' is provided." + )] + async fn secrets_get( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let (user_id, user_key) = Self::require_user_and_key(&ctx)?; + + if let Some(field_name) = &input.field { + let value = get_secret_field( + &self.pool, + &input.namespace, + &input.kind, + &input.name, + field_name, + &user_key, + Some(user_id), + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let result = serde_json::json!({ field_name: value }); + let json = serde_json::to_string_pretty(&result).unwrap_or_default(); + Ok(CallToolResult::success(vec![Content::text(json)])) + } else { + let secrets = get_all_secrets( + &self.pool, + &input.namespace, + &input.kind, + &input.name, + &user_key, + Some(user_id), + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let json = serde_json::to_string_pretty(&secrets).unwrap_or_default(); + Ok(CallToolResult::success(vec![Content::text(json)])) + } + } + + #[tool( + description = "Add or upsert an entry with metadata and encrypted secret fields. \ + Requires X-Encryption-Key header. \ + Meta and secret values use 'key=value', 'key=@file', or 'key:=' format." 
+ )] + async fn secrets_add( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let (user_id, user_key) = Self::require_user_and_key(&ctx)?; + + let tags = input.tags.unwrap_or_default(); + let meta = input.meta.unwrap_or_default(); + let secrets = input.secrets.unwrap_or_default(); + + let result = svc_add( + &self.pool, + AddParams { + namespace: &input.namespace, + kind: &input.kind, + name: &input.name, + tags: &tags, + meta_entries: &meta, + secret_entries: &secrets, + user_id: Some(user_id), + }, + &user_key, + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let json = serde_json::to_string_pretty(&result).unwrap_or_default(); + Ok(CallToolResult::success(vec![Content::text(json)])) + } + + #[tool( + description = "Incrementally update an existing entry. Requires X-Encryption-Key header. \ + Only the fields you specify are changed; everything else is preserved." + )] + async fn secrets_update( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let (user_id, user_key) = Self::require_user_and_key(&ctx)?; + + let add_tags = input.add_tags.unwrap_or_default(); + let remove_tags = input.remove_tags.unwrap_or_default(); + let meta = input.meta.unwrap_or_default(); + let remove_meta = input.remove_meta.unwrap_or_default(); + let secrets = input.secrets.unwrap_or_default(); + let remove_secrets = input.remove_secrets.unwrap_or_default(); + + let result = svc_update( + &self.pool, + UpdateParams { + namespace: &input.namespace, + kind: &input.kind, + name: &input.name, + add_tags: &add_tags, + remove_tags: &remove_tags, + meta_entries: &meta, + remove_meta: &remove_meta, + secret_entries: &secrets, + remove_secrets: &remove_secrets, + user_id: Some(user_id), + }, + &user_key, + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let json = serde_json::to_string_pretty(&result).unwrap_or_default(); + 
Ok(CallToolResult::success(vec![Content::text(json)])) + } + + #[tool( + description = "Delete one entry (specify namespace+kind+name) or bulk delete all \ + entries matching namespace+kind. Use dry_run=true to preview." + )] + async fn secrets_delete( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let user_id = Self::user_id_from_ctx(&ctx)?; + + let result = svc_delete( + &self.pool, + DeleteParams { + namespace: &input.namespace, + kind: input.kind.as_deref(), + name: input.name.as_deref(), + dry_run: input.dry_run.unwrap_or(false), + user_id, + }, + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let json = serde_json::to_string_pretty(&result).unwrap_or_default(); + Ok(CallToolResult::success(vec![Content::text(json)])) + } + + #[tool( + description = "View change history for an entry. Returns a list of versions with \ + actions and timestamps." + )] + async fn secrets_history( + &self, + Parameters(input): Parameters, + _ctx: RequestContext, + ) -> Result { + let result = svc_history( + &self.pool, + &input.namespace, + &input.kind, + &input.name, + input.limit.unwrap_or(20), + None, + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let json = serde_json::to_string_pretty(&result).unwrap_or_default(); + Ok(CallToolResult::success(vec![Content::text(json)])) + } + + #[tool( + description = "Rollback an entry to a previous version. Requires X-Encryption-Key header. \ + Omit to_version to restore the most recent snapshot." 
+ )] + async fn secrets_rollback( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let (user_id, user_key) = Self::require_user_and_key(&ctx)?; + + let result = svc_rollback( + &self.pool, + &input.namespace, + &input.kind, + &input.name, + input.to_version, + &user_key, + Some(user_id), + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let json = serde_json::to_string_pretty(&result).unwrap_or_default(); + Ok(CallToolResult::success(vec![Content::text(json)])) + } + + #[tool( + description = "Export matching entries with decrypted secrets as JSON/TOML/YAML string. \ + Requires X-Encryption-Key header. Useful for backup or data migration." + )] + async fn secrets_export( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let (user_id, user_key) = Self::require_user_and_key(&ctx)?; + let tags = input.tags.unwrap_or_default(); + let format = input.format.as_deref().unwrap_or("json"); + + let data = svc_export( + &self.pool, + ExportParams { + namespace: input.namespace.as_deref(), + kind: input.kind.as_deref(), + name: input.name.as_deref(), + tags: &tags, + query: input.query.as_deref(), + no_secrets: false, + user_id: Some(user_id), + }, + Some(&user_key), + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let serialized = format + .parse::() + .and_then(|fmt| fmt.serialize(&data)) + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + Ok(CallToolResult::success(vec![Content::text(serialized)])) + } + + #[tool( + description = "Preview the environment variable mapping that would be injected when \ + running a command. Requires X-Encryption-Key header. \ + Shows variable names and sources, useful for debugging." 
+ )] + async fn secrets_env_map( + &self, + Parameters(input): Parameters, + ctx: RequestContext, + ) -> Result { + let (user_id, user_key) = Self::require_user_and_key(&ctx)?; + let tags = input.tags.unwrap_or_default(); + let only_fields = input.only_fields.unwrap_or_default(); + + let env_map = secrets_core::service::env_map::build_env_map( + &self.pool, + input.namespace.as_deref(), + input.kind.as_deref(), + input.name.as_deref(), + &tags, + &only_fields, + input.prefix.as_deref().unwrap_or(""), + &user_key, + Some(user_id), + ) + .await + .map_err(|e| rmcp::ErrorData::internal_error(e.to_string(), None))?; + + let json = serde_json::to_string_pretty(&env_map).unwrap_or_default(); + Ok(CallToolResult::success(vec![Content::text(json)])) + } +} + +// ── ServerHandler ───────────────────────────────────────────────────────────── + +#[tool_handler] +impl ServerHandler for SecretsService { + fn get_info(&self) -> InitializeResult { + let mut info = InitializeResult::new(ServerCapabilities::builder().enable_tools().build()); + info.server_info = Implementation::new("secrets-mcp", env!("CARGO_PKG_VERSION")); + info.protocol_version = ProtocolVersion::V_2025_03_26; + info.instructions = Some( + "Manage cross-device secrets and configuration securely. \ + Data is encrypted with your passphrase-derived key. \ + Include your 64-char hex key in the X-Encryption-Key header for all read/write operations. \ + Use secrets_search to discover entries (no key needed), \ + secrets_get to decrypt secret values, \ + and secrets_add/secrets_update to write encrypted secrets." 
+ .to_string(), + ); + info + } +} diff --git a/crates/secrets-mcp/src/web.rs b/crates/secrets-mcp/src/web.rs new file mode 100644 index 0000000..9137e94 --- /dev/null +++ b/crates/secrets-mcp/src/web.rs @@ -0,0 +1,494 @@ +use askama::Template; +use axum::{ + Json, Router, + extract::{Path, Query, State}, + http::StatusCode, + response::{Html, IntoResponse, Redirect, Response}, + routing::{get, post}, +}; +use serde::{Deserialize, Serialize}; +use tower_sessions::Session; +use uuid::Uuid; + +use secrets_core::crypto::hex; +use secrets_core::service::{ + api_key::{ensure_api_key, regenerate_api_key}, + user::{ + OAuthProfile, bind_oauth_account, find_or_create_user, get_user_by_id, + unbind_oauth_account, update_user_key_setup, + }, +}; + +use crate::AppState; +use crate::oauth::{OAuthConfig, OAuthUserInfo, google_auth_url, random_state}; + +const SESSION_USER_ID: &str = "user_id"; +const SESSION_OAUTH_STATE: &str = "oauth_state"; +const SESSION_OAUTH_BIND_MODE: &str = "oauth_bind_mode"; +const SESSION_LOGIN_PROVIDER: &str = "login_provider"; + +// ── Template types ──────────────────────────────────────────────────────────── + +#[derive(Template)] +#[template(path = "login.html")] +struct LoginTemplate { + has_google: bool, +} + +#[derive(Template)] +#[template(path = "dashboard.html")] +struct DashboardTemplate { + user_name: String, + user_email: String, + has_passphrase: bool, + base_url: String, +} + +// ── App state helpers ───────────────────────────────────────────────────────── + +fn google_cfg(state: &AppState) -> Option<&OAuthConfig> { + state.google_config.as_ref() +} + +async fn current_user_id(session: &Session) -> Option { + session + .get::(SESSION_USER_ID) + .await + .ok() + .flatten() + .and_then(|s| Uuid::parse_str(&s).ok()) +} + +// ── Routes ──────────────────────────────────────────────────────────────────── + +pub fn web_router() -> Router { + Router::new() + .route("/", get(login_page)) + .route("/auth/google", get(auth_google)) + 
.route("/auth/google/callback", get(auth_google_callback)) + .route("/auth/logout", post(auth_logout)) + .route("/dashboard", get(dashboard)) + .route("/account/bind/google", get(account_bind_google)) + .route( + "/account/bind/google/callback", + get(account_bind_google_callback), + ) + .route("/account/unbind/{provider}", post(account_unbind)) + .route("/api/key-salt", get(api_key_salt)) + .route("/api/key-setup", post(api_key_setup)) + .route("/api/apikey", get(api_apikey_get)) + .route("/api/apikey/regenerate", post(api_apikey_regenerate)) +} + +// ── Login page ──────────────────────────────────────────────────────────────── + +async fn login_page( + State(state): State, + session: Session, +) -> Result { + if let Some(_uid) = current_user_id(&session).await { + return Ok(Redirect::to("/dashboard").into_response()); + } + + let tmpl = LoginTemplate { + has_google: state.google_config.is_some(), + }; + render_template(tmpl) +} + +// ── Google OAuth ────────────────────────────────────────────────────────────── + +async fn auth_google( + State(state): State, + session: Session, +) -> Result { + let config = google_cfg(&state).ok_or(StatusCode::SERVICE_UNAVAILABLE)?; + + let oauth_state = random_state(); + session + .insert(SESSION_OAUTH_STATE, &oauth_state) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + + let url = google_auth_url(config, &oauth_state); + Ok(Redirect::to(&url).into_response()) +} + +#[derive(Deserialize)] +struct OAuthCallbackQuery { + code: Option, + state: Option, + error: Option, +} + +async fn auth_google_callback( + State(state): State, + session: Session, + Query(params): Query, +) -> Result { + handle_oauth_callback(&state, &session, params, "google", |s, cfg, code| { + Box::pin(crate::oauth::google::exchange_code( + &s.http_client, + cfg, + code, + )) + }) + .await +} + +// ── Shared OAuth callback handler ───────────────────────────────────────────── + +async fn handle_oauth_callback( + state: &AppState, + session: 
&Session, + params: OAuthCallbackQuery, + provider: &str, + exchange_fn: F, +) -> Result +where + F: for<'a> Fn( + &'a AppState, + &'a OAuthConfig, + &'a str, + ) -> std::pin::Pin< + Box> + Send + 'a>, + >, +{ + if let Some(err) = params.error { + tracing::warn!(provider, error = %err, "OAuth error"); + return Ok(Redirect::to("/?error=oauth_error").into_response()); + } + + let Some(code) = params.code else { + tracing::warn!(provider, "OAuth callback missing code"); + return Ok(Redirect::to("/?error=oauth_missing_code").into_response()); + }; + let Some(returned_state) = params.state.as_deref() else { + tracing::warn!(provider, "OAuth callback missing state"); + return Ok(Redirect::to("/?error=oauth_missing_state").into_response()); + }; + + let expected_state: Option = session + .get(SESSION_OAUTH_STATE) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + if expected_state.as_deref() != Some(returned_state) { + tracing::warn!( + provider, + expected_present = expected_state.is_some(), + "OAuth state mismatch (empty session often means SameSite=Strict or server restart)" + ); + return Ok(Redirect::to("/?error=oauth_state").into_response()); + } + session.remove::(SESSION_OAUTH_STATE).await.ok(); + + let config = match provider { + "google" => state + .google_config + .as_ref() + .ok_or(StatusCode::SERVICE_UNAVAILABLE)?, + _ => return Err(StatusCode::BAD_REQUEST), + }; + + let user_info = exchange_fn(state, config, code.as_str()) + .await + .map_err(|e| { + tracing::error!(provider, error = %e, "failed to exchange OAuth code"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + + let bind_mode: bool = session + .get(SESSION_OAUTH_BIND_MODE) + .await + .unwrap_or(None) + .unwrap_or(false); + + if bind_mode { + let user_id = current_user_id(session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + session.remove::(SESSION_OAUTH_BIND_MODE).await.ok(); + + let profile = OAuthProfile { + provider: user_info.provider, + provider_id: user_info.provider_id, + email: 
user_info.email, + name: user_info.name, + avatar_url: user_info.avatar_url, + }; + + bind_oauth_account(&state.pool, user_id, profile) + .await + .map_err(|e| { + tracing::error!(error = %e, "failed to bind OAuth account"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + + return Ok(Redirect::to("/dashboard?bound=1").into_response()); + } + + let profile = OAuthProfile { + provider: user_info.provider, + provider_id: user_info.provider_id, + email: user_info.email, + name: user_info.name, + avatar_url: user_info.avatar_url, + }; + + let (user, _is_new) = find_or_create_user(&state.pool, profile) + .await + .map_err(|e| { + tracing::error!(error = %e, "failed to find or create user"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + + // Ensure the user has an API key (auto-creates on first login). + if let Err(e) = ensure_api_key(&state.pool, user.id).await { + tracing::warn!(error = %e, "failed to ensure api key for user"); + } + + session + .insert(SESSION_USER_ID, user.id.to_string()) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + session + .insert(SESSION_LOGIN_PROVIDER, &provider) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + + Ok(Redirect::to("/dashboard").into_response()) +} + +// ── Logout ──────────────────────────────────────────────────────────────────── + +async fn auth_logout(session: Session) -> impl IntoResponse { + session.flush().await.ok(); + Redirect::to("/") +} + +// ── Dashboard ───────────────────────────────────────────────────────────────── + +async fn dashboard( + State(state): State, + session: Session, +) -> Result { + let user_id = current_user_id(&session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + + let user = get_user_by_id(&state.pool, user_id) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)? 
+ .ok_or(StatusCode::UNAUTHORIZED)?; + + let tmpl = DashboardTemplate { + user_name: user.name.clone(), + user_email: user.email.clone().unwrap_or_default(), + has_passphrase: user.key_salt.is_some(), + base_url: state.base_url.clone(), + }; + + render_template(tmpl) +} + +// ── Account bind/unbind ─────────────────────────────────────────────────────── + +async fn account_bind_google( + State(state): State, + session: Session, +) -> Result { + let _ = current_user_id(&session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + + session + .insert(SESSION_OAUTH_BIND_MODE, true) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + + let redirect_uri = format!("{}/account/bind/google/callback", state.base_url); + let mut cfg = state + .google_config + .clone() + .ok_or(StatusCode::SERVICE_UNAVAILABLE)?; + cfg.redirect_uri = redirect_uri; + let st = random_state(); + session.insert(SESSION_OAUTH_STATE, &st).await.ok(); + + Ok(Redirect::to(&google_auth_url(&cfg, &st)).into_response()) +} + +async fn account_bind_google_callback( + State(state): State, + session: Session, + Query(params): Query, +) -> Result { + handle_oauth_callback(&state, &session, params, "google", |s, cfg, code| { + Box::pin(crate::oauth::google::exchange_code( + &s.http_client, + cfg, + code, + )) + }) + .await +} + +async fn account_unbind( + State(state): State, + Path(provider): Path, + session: Session, +) -> Result { + let user_id = current_user_id(&session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + + let current_login_provider = session + .get::(SESSION_LOGIN_PROVIDER) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + + unbind_oauth_account( + &state.pool, + user_id, + &provider, + current_login_provider.as_deref(), + ) + .await + .map_err(|e| { + tracing::warn!(error = %e, "failed to unbind oauth account"); + StatusCode::BAD_REQUEST + })?; + + Ok(Redirect::to("/dashboard?unbound=1").into_response()) +} + +// ── Passphrase / Key setup API 
──────────────────────────────────────────────── + +#[derive(Serialize)] +struct KeySaltResponse { + has_passphrase: bool, + #[serde(skip_serializing_if = "Option::is_none")] + salt: Option, + #[serde(skip_serializing_if = "Option::is_none")] + key_check: Option, + #[serde(skip_serializing_if = "Option::is_none")] + params: Option, +} + +async fn api_key_salt( + State(state): State, + session: Session, +) -> Result, StatusCode> { + let user_id = current_user_id(&session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + + let user = get_user_by_id(&state.pool, user_id) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)? + .ok_or(StatusCode::UNAUTHORIZED)?; + + if user.key_salt.is_none() { + return Ok(Json(KeySaltResponse { + has_passphrase: false, + salt: None, + key_check: None, + params: None, + })); + } + + Ok(Json(KeySaltResponse { + has_passphrase: true, + salt: user.key_salt.as_deref().map(hex::encode_hex), + key_check: user.key_check.as_deref().map(hex::encode_hex), + params: user.key_params, + })) +} + +#[derive(Deserialize)] +struct KeySetupRequest { + /// Hex-encoded 32-byte random salt + salt: String, + /// Hex-encoded AES-256-GCM encryption of "secrets-mcp-key-check" with the derived key + key_check: String, + /// Key derivation parameters, e.g. 
{"alg":"pbkdf2-sha256","iterations":600000} + params: serde_json::Value, +} + +#[derive(Serialize)] +struct KeySetupResponse { + ok: bool, +} + +async fn api_key_setup( + State(state): State, + session: Session, + Json(body): Json, +) -> Result, StatusCode> { + let user_id = current_user_id(&session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + + let salt = hex::decode_hex(&body.salt).map_err(|_| StatusCode::BAD_REQUEST)?; + let key_check = hex::decode_hex(&body.key_check).map_err(|_| StatusCode::BAD_REQUEST)?; + + if salt.len() != 32 { + return Err(StatusCode::BAD_REQUEST); + } + + update_user_key_setup(&state.pool, user_id, &salt, &key_check, &body.params) + .await + .map_err(|e| { + tracing::error!(error = %e, "failed to update key setup"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + + Ok(Json(KeySetupResponse { ok: true })) +} + +// ── API Key management ──────────────────────────────────────────────────────── + +#[derive(Serialize)] +struct ApiKeyResponse { + api_key: String, +} + +async fn api_apikey_get( + State(state): State, + session: Session, +) -> Result, StatusCode> { + let user_id = current_user_id(&session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + + let api_key = ensure_api_key(&state.pool, user_id) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + + Ok(Json(ApiKeyResponse { api_key })) +} + +async fn api_apikey_regenerate( + State(state): State, + session: Session, +) -> Result, StatusCode> { + let user_id = current_user_id(&session) + .await + .ok_or(StatusCode::UNAUTHORIZED)?; + + let api_key = regenerate_api_key(&state.pool, user_id) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + + Ok(Json(ApiKeyResponse { api_key })) +} + +// ── Helper ──────────────────────────────────────────────────────────────────── + +fn render_template(tmpl: T) -> Result { + let html = tmpl.render().map_err(|e| { + tracing::error!(error = %e, "template render error"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + 
Ok(Html(html).into_response()) +} diff --git a/crates/secrets-mcp/templates/dashboard.html b/crates/secrets-mcp/templates/dashboard.html new file mode 100644 index 0000000..bf9eae2 --- /dev/null +++ b/crates/secrets-mcp/templates/dashboard.html @@ -0,0 +1,725 @@ + + + + + + Secrets + + + + + + +
+
+ + +
+
获取 MCP 配置
+
输入加密密码,派生密钥后生成完整的 MCP 配置,可直接复制到 AI 客户端。
+
+
说明
+
API Key 用于身份认证,告诉服务端“你是谁”。
+
+
+ Cursor + Claude Code + Codex + Gemini CLI +
+ + +
+
+
+ + + + + + +
+ + + + +
+
+ + + + + + + diff --git a/crates/secrets-mcp/templates/login.html b/crates/secrets-mcp/templates/login.html new file mode 100644 index 0000000..09669e8 --- /dev/null +++ b/crates/secrets-mcp/templates/login.html @@ -0,0 +1,131 @@ + + + + + + Secrets — Sign In + + + +
+
+
+ + + +
+
+ +

登录

+

安全管理你的跨设备 secrets。

+ + {% if has_google %} + + + + + + + + 使用 Google 登录 + + {% endif %} + + {% if !has_google %} +

+ 未配置登录方式,请联系管理员。 +

+ {% endif %} +
+ + + diff --git a/deploy/.env.example b/deploy/.env.example new file mode 100644 index 0000000..e2e8197 --- /dev/null +++ b/deploy/.env.example @@ -0,0 +1,27 @@ +# Secrets MCP Server 环境变量配置 +# 复制此文件为 .env 并填写真实值 + +# ─── 数据库 ─────────────────────────────────────────────────────────── +SECRETS_DATABASE_URL=postgres://postgres:PASSWORD@HOST:PORT/secrets-mcp + +# ─── 服务地址 ───────────────────────────────────────────────────────── +# 内网监听地址(Cloudflare / Nginx 反代时填内网端口) +SECRETS_MCP_BIND=127.0.0.1:9315 + +# 对外 HTTPS 地址(用于 OAuth 回调 URL 拼接) +BASE_URL=https://secrets.example.com + +# ─── Google OAuth ───────────────────────────────────────────────────── +# Google Cloud Console → APIs & Services → Credentials +# 授权回调 URI 须配置为:${BASE_URL}/auth/google/callback +GOOGLE_CLIENT_ID= +GOOGLE_CLIENT_SECRET= + +# ─── 微信登录(暂未开放,预留)─────────────────────────────────────── +# WECHAT_APP_CLIENT_ID= +# WECHAT_APP_CLIENT_SECRET= + +# ─── 注意 ───────────────────────────────────────────────────────────── +# SERVER_MASTER_KEY 已不再需要。 +# 新架构(E2EE)中,加密密钥由用户密码短语在客户端本地派生,服务端不持有原始密钥。 +# 仅在需要迁移旧版 wrapped_key 数据时临时启用。 diff --git a/deploy/secrets-mcp.service b/deploy/secrets-mcp.service new file mode 100644 index 0000000..5f46f2b --- /dev/null +++ b/deploy/secrets-mcp.service @@ -0,0 +1,27 @@ +[Unit] +Description=Secrets MCP Server +After=network.target +Wants=network-online.target + +[Service] +Type=simple +User=secrets-mcp +Group=secrets-mcp +WorkingDirectory=/opt/secrets-mcp +EnvironmentFile=/opt/secrets-mcp/.env +ExecStart=/opt/secrets-mcp/secrets-mcp +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=secrets-mcp + +# 安全加固 +NoNewPrivileges=yes +ProtectSystem=strict +ProtectHome=yes +ReadWritePaths=/opt/secrets-mcp +PrivateTmp=yes + +[Install] +WantedBy=multi-user.target diff --git a/scripts/release-check.sh b/scripts/release-check.sh index f008726..2063a78 100755 --- a/scripts/release-check.sh +++ b/scripts/release-check.sh @@ -5,15 +5,15 @@ set -euo 
pipefail repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" cd "$repo_root" -version="$(grep -m1 '^version' Cargo.toml | sed 's/.*"\(.*\)".*/\1/')" -tag="secrets-${version}" +version="$(grep -m1 '^version' crates/secrets-mcp/Cargo.toml | sed 's/.*"\(.*\)".*/\1/')" +tag="secrets-mcp-${version}" -echo "==> 当前版本: ${version}" +echo "==> 当前 secrets-mcp 版本: ${version}" echo "==> 检查是否已存在 tag: ${tag}" if git rev-parse "refs/tags/${tag}" >/dev/null 2>&1; then echo "错误: 已存在 tag ${tag}" - echo "请先 bump Cargo.toml 中的 version,再执行 cargo build 同步 Cargo.lock。" + echo "请先 bump crates/secrets-mcp/Cargo.toml 中的 version,再执行 cargo build 同步 Cargo.lock。" exit 1 fi diff --git a/scripts/setup-gitea-actions.sh b/scripts/setup-gitea-actions.sh index 085a516..ccfdc25 100755 --- a/scripts/setup-gitea-actions.sh +++ b/scripts/setup-gitea-actions.sh @@ -6,14 +6,21 @@ # 所需配置: # - secrets.RELEASE_TOKEN (必选) Release 上传用,值为 Gitea PAT # - vars.WEBHOOK_URL (可选) 飞书通知 +# - vars.DEPLOY_HOST (可选) 部署目标 SSH 主机(IP 或域名) +# - vars.DEPLOY_USER (可选) SSH 用户名 +# - secrets.DEPLOY_SSH_KEY (可选) SSH 私钥 PEM 全文(原始字符,含 BEGIN/END 行);通过 DEPLOY_SSH_KEY_FILE 写入 API # # 注意: # - Gitea 不允许 secret/variable 名以 GITEA_ 或 GITHUB_ 开头,故使用 RELEASE_TOKEN -# - Secret/Variable 的 data/value 字段需传入原始值,不要使用 base64 编码 +# - Gitea Actions 的 secrets(API 的 data 字段,及网页里粘贴的值)必须是未经 base64 的原始值。 +# 若事先 base64 再写入,工作流里拿到的仍是「一串 base64 文本」,SSH/OpenSSH 无法识别,部署会失败。 +# DEPLOY_SSH_KEY 须与 .pem 文件内容一致:本脚本用 jq --rawfile 按原文上传。 +# - Variables 的 value 字段同样为原始字符串,不要 base64。 # # 用法: # 1. 从 ~/.config/gitea/config.env 读取 GITEA_URL, GITEA_TOKEN, GITEA_WEBHOOK_URL -# 2. 或通过环境变量覆盖: GITEA_TOKEN(作为 RELEASE_TOKEN 的值), WEBHOOK_URL +# 2. 或通过环境变量覆盖: GITEA_TOKEN(作为 RELEASE_TOKEN 的值), WEBHOOK_URL, +# DEPLOY_HOST, DEPLOY_USER, DEPLOY_SSH_KEY_FILE(部署到 ECS) # 3. 或使用 secrets CLI 获取: 需 DATABASE_URL,从 refining/service gitea 读取 # @@ -109,8 +116,7 @@ echo "配置 Gitea Actions: $OWNER/$REPO" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "" -# 1. 
创建 Secret: RELEASE_TOKEN -# 注意: Gitea Actions API 的 data 字段需传入原始值,不要使用 base64 编码 +# 1. 创建 Secret: RELEASE_TOKEN(data = PAT 原文,勿 base64) echo "1. 创建 Secret: RELEASE_TOKEN" secret_payload=$(jq -n --arg t "$GITEA_TOKEN" '{data: $t}') resp=$(curl -s -w "\n%{http_code}" -X PUT \ @@ -129,8 +135,7 @@ else exit 1 fi -# 2. 创建/更新 Variable: WEBHOOK_URL(可选) -# 注意: Secret 和 Variable 均使用原始值,不要 base64 编码 +# 2. 创建/更新 Variable: WEBHOOK_URL(可选,value 为原始 URL 字符串,勿 base64) WEBHOOK_VALUE="${WEBHOOK_URL:-$GITEA_WEBHOOK_URL}" if [[ -n "$WEBHOOK_VALUE" ]]; then echo "" @@ -168,6 +173,68 @@ else echo " 飞书通知将不可用;如需可后续在仓库 Settings → Variables 中添加" fi +# 3. 部署用 Variable + Secret(与 .gitea/workflows/secrets.yml 中 deploy-mcp 一致) +upsert_repo_variable() { + local var_name="$1" var_value="$2" + local var_payload http_code body resp + var_payload=$(jq -n --arg v "$var_value" '{value: $v}') + resp=$(curl -s -w "\n%{http_code}" -X POST \ + -H "Authorization: token $GITEA_TOKEN" \ + -H "Content-Type: application/json" \ + -d "$var_payload" \ + "${API_BASE}/repos/${OWNER}/${REPO}/actions/variables/${var_name}") + http_code=$(echo "$resp" | tail -n1) + if [[ "$http_code" == "200" || "$http_code" == "201" || "$http_code" == "204" ]]; then + return 0 + fi + if [[ "$http_code" == "409" ]]; then + resp=$(curl -s -w "\n%{http_code}" -X PUT \ + -H "Authorization: token $GITEA_TOKEN" \ + -H "Content-Type: application/json" \ + -d "$var_payload" \ + "${API_BASE}/repos/${OWNER}/${REPO}/actions/variables/${var_name}") + http_code=$(echo "$resp" | tail -n1) + [[ "$http_code" == "200" || "$http_code" == "204" ]] + return + fi + body=$(echo "$resp" | sed '$d') + echo " ❌ 变量 ${var_name} 失败 (HTTP $http_code)" >&2 + echo "$body" | jq -r '.message // .' 2>/dev/null || echo "$body" >&2 + return 1 +} + +if [[ -n "$DEPLOY_HOST" && -n "$DEPLOY_USER" && -n "$DEPLOY_SSH_KEY_FILE" ]]; then + echo "" + echo "3. 部署目标: vars.DEPLOY_HOST / vars.DEPLOY_USER + secrets.DEPLOY_SSH_KEY" + if [[ ! 
-f "$DEPLOY_SSH_KEY_FILE" ]]; then + echo " ❌ DEPLOY_SSH_KEY_FILE 不是文件: $DEPLOY_SSH_KEY_FILE" >&2 + exit 1 + fi + upsert_repo_variable DEPLOY_HOST "$DEPLOY_HOST" || exit 1 + echo " ✓ DEPLOY_HOST" + upsert_repo_variable DEPLOY_USER "$DEPLOY_USER" || exit 1 + echo " ✓ DEPLOY_USER" + # PEM 原文写入 secret.data;勿对文件先做 base64,否则 runner 侧 ssh 无法解析密钥 + secret_payload=$(jq -n --rawfile k "$DEPLOY_SSH_KEY_FILE" '{data: $k}') + resp=$(curl -s -w "\n%{http_code}" -X PUT \ + -H "Authorization: token $GITEA_TOKEN" \ + -H "Content-Type: application/json" \ + -d "$secret_payload" \ + "${API_BASE}/repos/${OWNER}/${REPO}/actions/secrets/DEPLOY_SSH_KEY") + http_code=$(echo "$resp" | tail -n1) + body=$(echo "$resp" | sed '$d') + if [[ "$http_code" == "200" || "$http_code" == "201" || "$http_code" == "204" ]]; then + echo " ✓ DEPLOY_SSH_KEY" + else + echo " ❌ DEPLOY_SSH_KEY 失败 (HTTP $http_code)" >&2 + echo "$body" | jq -r '.message // .' 2>/dev/null || echo "$body" >&2 + exit 1 + fi +else + echo "" + echo "3. 跳过部署配置(需同时设置 DEPLOY_HOST、DEPLOY_USER、DEPLOY_SSH_KEY_FILE)" +fi + echo "" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "✓ 配置完成" @@ -176,6 +243,7 @@ echo "" echo "Workflow 将使用:" echo " - secrets.RELEASE_TOKEN 创建 Release 并上传二进制" echo " - vars.WEBHOOK_URL 发送飞书通知(如已配置)" +echo " - vars.DEPLOY_* / secrets.DEPLOY_SSH_KEY deploy-mcp(如已配置)" echo "" echo "推送代码触发构建:" echo " git push origin main" diff --git a/src/commands/add.rs b/src/commands/add.rs deleted file mode 100644 index 10cddff..0000000 --- a/src/commands/add.rs +++ /dev/null @@ -1,459 +0,0 @@ -use anyhow::Result; -use serde_json::{Map, Value, json}; -use sqlx::PgPool; -use std::fs; - -use crate::crypto; -use crate::db; -use crate::models::EntryRow; -use crate::output::{OutputMode, print_json}; - -// ── Key/value parsing helpers (shared with update.rs) ─────────────────────── - -/// Parse secret / metadata entries into a nested key path and JSON value. 
-/// - `key=value` → stores the literal string `value` -/// - `key:=` → parses `` as a typed JSON value -/// - `key=@file` → reads the file content as a string -/// - `a:b=value` → writes nested fields: `{ "a": { "b": "value" } }` -/// - `a:b@./file.txt` → shorthand for nested file reads without manual JSON escaping -pub(crate) fn parse_kv(entry: &str) -> Result<(Vec, Value)> { - // Typed JSON form: key:= - if let Some((key, json_str)) = entry.split_once(":=") { - let val: Value = serde_json::from_str(json_str).map_err(|e| { - anyhow::anyhow!( - "Invalid JSON value for key '{}': {} (use key=value for plain strings)", - key, - e - ) - })?; - return Ok((parse_key_path(key)?, val)); - } - - // Plain string form: key=value or key=@file - if let Some((key, raw_val)) = entry.split_once('=') { - let value = if let Some(path) = raw_val.strip_prefix('@') { - fs::read_to_string(path) - .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))? - } else { - raw_val.to_string() - }; - - return Ok((parse_key_path(key)?, Value::String(value))); - } - - // Shorthand file form: nested:key@file - if let Some((key, path)) = entry.split_once('@') { - let value = fs::read_to_string(path) - .map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", path, e))?; - return Ok((parse_key_path(key)?, Value::String(value))); - } - - anyhow::bail!( - "Invalid format '{}'. 
Expected: key=value, key=@file, nested:key@file, or key:=", - entry - ) -} - -pub(crate) fn build_json(entries: &[String]) -> Result { - let mut map = Map::new(); - for entry in entries { - let (path, value) = parse_kv(entry)?; - insert_path(&mut map, &path, value)?; - } - Ok(Value::Object(map)) -} - -pub(crate) fn key_path_to_string(path: &[String]) -> String { - path.join(":") -} - -pub(crate) fn collect_key_paths(entries: &[String]) -> Result> { - entries - .iter() - .map(|entry| parse_kv(entry).map(|(path, _)| key_path_to_string(&path))) - .collect() -} - -pub(crate) fn collect_field_paths(entries: &[String]) -> Result> { - entries - .iter() - .map(|entry| parse_key_path(entry).map(|path| key_path_to_string(&path))) - .collect() -} - -pub(crate) fn parse_key_path(key: &str) -> Result> { - let path: Vec = key - .split(':') - .map(str::trim) - .map(ToOwned::to_owned) - .collect(); - - if path.is_empty() || path.iter().any(|part| part.is_empty()) { - anyhow::bail!( - "Invalid key path '{}'. 
Use non-empty segments like 'credentials:content'.", - key - ); - } - - Ok(path) -} - -pub(crate) fn insert_path( - map: &mut Map, - path: &[String], - value: Value, -) -> Result<()> { - if path.is_empty() { - anyhow::bail!("Key path cannot be empty"); - } - - if path.len() == 1 { - map.insert(path[0].clone(), value); - return Ok(()); - } - - let head = path[0].clone(); - let tail = &path[1..]; - - match map.entry(head.clone()) { - serde_json::map::Entry::Vacant(entry) => { - let mut child = Map::new(); - insert_path(&mut child, tail, value)?; - entry.insert(Value::Object(child)); - } - serde_json::map::Entry::Occupied(mut entry) => match entry.get_mut() { - Value::Object(child) => insert_path(child, tail, value)?, - _ => { - anyhow::bail!( - "Cannot set nested key '{}' because '{}' is already a non-object value", - key_path_to_string(path), - head - ); - } - }, - } - - Ok(()) -} - -pub(crate) fn remove_path(map: &mut Map, path: &[String]) -> Result { - if path.is_empty() { - anyhow::bail!("Key path cannot be empty"); - } - - if path.len() == 1 { - return Ok(map.remove(&path[0]).is_some()); - } - - let Some(value) = map.get_mut(&path[0]) else { - return Ok(false); - }; - - let Value::Object(child) = value else { - return Ok(false); - }; - - let removed = remove_path(child, &path[1..])?; - if child.is_empty() { - map.remove(&path[0]); - } - - Ok(removed) -} - -/// Flatten a (potentially nested) JSON object into dot-separated field entries. -/// e.g. `{"credentials": {"type": "ssh", "content": "..."}}` → -/// `[("credentials.type", "ssh"), ("credentials.content", "...")]` -/// Top-level non-object values are emitted directly. 
-pub(crate) fn flatten_json_fields(prefix: &str, value: &Value) -> Vec<(String, Value)> { - match value { - Value::Object(map) => { - let mut out = Vec::new(); - for (k, v) in map { - let full_key = if prefix.is_empty() { - k.clone() - } else { - format!("{}.{}", prefix, k) - }; - out.extend(flatten_json_fields(&full_key, v)); - } - out - } - other => vec![(prefix.to_string(), other.clone())], - } -} - -// ── Add command ────────────────────────────────────────────────────────────── - -pub struct AddArgs<'a> { - pub namespace: &'a str, - pub kind: &'a str, - pub name: &'a str, - pub tags: &'a [String], - pub meta_entries: &'a [String], - pub secret_entries: &'a [String], - pub output: OutputMode, -} - -pub async fn run(pool: &PgPool, args: AddArgs<'_>, master_key: &[u8; 32]) -> Result<()> { - let metadata = build_json(args.meta_entries)?; - let secret_json = build_json(args.secret_entries)?; - - tracing::debug!(args.namespace, args.kind, args.name, "upserting entry"); - - let meta_keys = collect_key_paths(args.meta_entries)?; - let secret_keys = collect_key_paths(args.secret_entries)?; - - let mut tx = pool.begin().await?; - - // Upsert the entry row (tags + metadata). - let existing: Option = sqlx::query_as( - "SELECT id, version, tags, metadata FROM entries \ - WHERE namespace = $1 AND kind = $2 AND name = $3", - ) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) - .fetch_optional(&mut *tx) - .await?; - - // Snapshot the current entry state before overwriting. 
- if let Some(ref ex) = existing - && let Err(e) = db::snapshot_entry_history( - &mut tx, - db::EntrySnapshotParams { - entry_id: ex.id, - namespace: args.namespace, - kind: args.kind, - name: args.name, - version: ex.version, - action: "add", - tags: &ex.tags, - metadata: &ex.metadata, - }, - ) - .await - { - tracing::warn!(error = %e, "failed to snapshot entry history before upsert"); - } - - let entry_id: uuid::Uuid = sqlx::query_scalar( - r#" - INSERT INTO entries (namespace, kind, name, tags, metadata, version, updated_at) - VALUES ($1, $2, $3, $4, $5, 1, NOW()) - ON CONFLICT (namespace, kind, name) - DO UPDATE SET - tags = EXCLUDED.tags, - metadata = EXCLUDED.metadata, - version = entries.version + 1, - updated_at = NOW() - RETURNING id - "#, - ) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) - .bind(args.tags) - .bind(&metadata) - .fetch_one(&mut *tx) - .await?; - - let new_entry_version: i64 = sqlx::query_scalar("SELECT version FROM entries WHERE id = $1") - .bind(entry_id) - .fetch_one(&mut *tx) - .await?; - - // Snapshot existing secret fields before replacing. - if existing.is_some() { - #[derive(sqlx::FromRow)] - struct ExistingField { - id: uuid::Uuid, - field_name: String, - encrypted: Vec, - } - let existing_fields: Vec = sqlx::query_as( - "SELECT id, field_name, encrypted \ - FROM secrets WHERE entry_id = $1", - ) - .bind(entry_id) - .fetch_all(&mut *tx) - .await?; - - for f in &existing_fields { - if let Err(e) = db::snapshot_secret_history( - &mut tx, - db::SecretSnapshotParams { - entry_id, - secret_id: f.id, - entry_version: new_entry_version - 1, - field_name: &f.field_name, - encrypted: &f.encrypted, - action: "add", - }, - ) - .await - { - tracing::warn!(error = %e, "failed to snapshot secret field history"); - } - } - - // Delete existing secret fields so we can re-insert the full set. - sqlx::query("DELETE FROM secrets WHERE entry_id = $1") - .bind(entry_id) - .execute(&mut *tx) - .await?; - } - - // Insert new secret fields. 
- let flat_fields = flatten_json_fields("", &secret_json); - for (field_name, field_value) in &flat_fields { - let encrypted = crypto::encrypt_json(master_key, field_value)?; - - sqlx::query( - "INSERT INTO secrets (entry_id, field_name, encrypted) \ - VALUES ($1, $2, $3)", - ) - .bind(entry_id) - .bind(field_name) - .bind(&encrypted) - .execute(&mut *tx) - .await?; - } - - crate::audit::log_tx( - &mut tx, - "add", - args.namespace, - args.kind, - args.name, - json!({ - "tags": args.tags, - "meta_keys": meta_keys, - "secret_keys": secret_keys, - }), - ) - .await; - - tx.commit().await?; - - let result_json = json!({ - "action": "added", - "namespace": args.namespace, - "kind": args.kind, - "name": args.name, - "tags": args.tags, - "meta_keys": meta_keys, - "secret_keys": secret_keys, - }); - - match args.output { - OutputMode::Json | OutputMode::JsonCompact => { - print_json(&result_json, &args.output)?; - } - _ => { - println!("Added: [{}/{}] {}", args.namespace, args.kind, args.name); - if !args.tags.is_empty() { - println!(" tags: {}", args.tags.join(", ")); - } - if !args.meta_entries.is_empty() { - println!(" metadata: {}", meta_keys.join(", ")); - } - if !args.secret_entries.is_empty() { - println!(" secrets: {}", secret_keys.join(", ")); - } - } - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::{build_json, flatten_json_fields, key_path_to_string, parse_kv, remove_path}; - use serde_json::Value; - use std::fs; - use std::path::PathBuf; - use std::time::{SystemTime, UNIX_EPOCH}; - - fn temp_file_path(name: &str) -> PathBuf { - let nanos = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("clock should be after unix epoch") - .as_nanos(); - std::env::temp_dir().join(format!("secrets-{name}-{nanos}.txt")) - } - - #[test] - fn parse_nested_file_shorthand() { - let path = temp_file_path("ssh-key"); - fs::write(&path, "line1\nline2\n").expect("should write temp file"); - - let entry = format!("credentials:content@{}", path.display()); - let 
(path_parts, value) = parse_kv(&entry).expect("should parse nested file shorthand"); - - assert_eq!(key_path_to_string(&path_parts), "credentials:content"); - assert_eq!(value, serde_json::Value::String("line1\nline2\n".into())); - - fs::remove_file(path).expect("should remove temp file"); - } - - #[test] - fn build_nested_json_from_mixed_entries() { - let payload = vec![ - "credentials:type=ssh".to_string(), - "credentials:enabled:=true".to_string(), - "username=root".to_string(), - ]; - - let value = build_json(&payload).expect("should build nested json"); - - assert_eq!( - value, - serde_json::json!({ - "credentials": { - "type": "ssh", - "enabled": true - }, - "username": "root" - }) - ); - } - - #[test] - fn remove_nested_path_prunes_empty_parents() { - let mut value = serde_json::json!({ - "credentials": { - "content": "pem-data" - }, - "username": "root" - }); - - let map = match &mut value { - Value::Object(map) => map, - _ => panic!("expected object"), - }; - - let removed = remove_path(map, &["credentials".to_string(), "content".to_string()]) - .expect("should remove nested field"); - - assert!(removed); - assert_eq!(value, serde_json::json!({ "username": "root" })); - } - - #[test] - fn flatten_json_fields_nested() { - let v = serde_json::json!({ - "username": "root", - "credentials": { - "type": "ssh", - "content": "pem-data" - } - }); - let mut fields = flatten_json_fields("", &v); - fields.sort_by(|a, b| a.0.cmp(&b.0)); - - assert_eq!(fields[0].0, "credentials.content"); - assert_eq!(fields[1].0, "credentials.type"); - assert_eq!(fields[2].0, "username"); - } -} diff --git a/src/commands/config.rs b/src/commands/config.rs deleted file mode 100644 index f696e74..0000000 --- a/src/commands/config.rs +++ /dev/null @@ -1,55 +0,0 @@ -use crate::config::{self, Config, config_path}; -use anyhow::Result; - -pub async fn run(action: crate::ConfigAction) -> Result<()> { - match action { - crate::ConfigAction::SetDb { url } => { - // Verify connection before 
writing config - let pool = crate::db::create_pool(&url) - .await - .map_err(|e| anyhow::anyhow!("Database connection failed: {}", e))?; - drop(pool); - println!("Database connection successful."); - - let cfg = Config { - database_url: Some(url.clone()), - }; - config::save_config(&cfg)?; - println!("Database URL saved to: {}", config_path()?.display()); - println!(" {}", mask_password(&url)); - } - crate::ConfigAction::Show => { - let cfg = config::load_config()?; - match cfg.database_url { - Some(url) => { - println!("database_url = {}", mask_password(&url)); - println!("config file: {}", config_path()?.display()); - } - None => { - println!("Database URL not configured."); - println!("Run: secrets config set-db "); - } - } - } - crate::ConfigAction::Path => { - println!("{}", config_path()?.display()); - } - } - Ok(()) -} - -/// Mask the password in a postgres://user:password@host/db URL. -fn mask_password(url: &str) -> String { - if let Some(at_pos) = url.rfind('@') - && let Some(scheme_end) = url.find("://") - { - let prefix = &url[..scheme_end + 3]; - let credentials = &url[scheme_end + 3..at_pos]; - let rest = &url[at_pos..]; - if let Some(colon_pos) = credentials.find(':') { - let user = &credentials[..colon_pos]; - return format!("{}{}:***{}", prefix, user, rest); - } - } - url.to_string() -} diff --git a/src/commands/delete.rs b/src/commands/delete.rs deleted file mode 100644 index 73d58e8..0000000 --- a/src/commands/delete.rs +++ /dev/null @@ -1,291 +0,0 @@ -use anyhow::Result; -use serde_json::json; -use sqlx::PgPool; -use uuid::Uuid; - -use crate::db; -use crate::models::{EntryRow, SecretFieldRow}; -use crate::output::{OutputMode, print_json}; - -pub struct DeleteArgs<'a> { - pub namespace: &'a str, - /// Kind filter. Required when --name is given; optional for bulk deletes. - pub kind: Option<&'a str>, - /// Exact record name. When None, bulk-delete all matching records. 
- pub name: Option<&'a str>, - /// Preview without writing to the database (bulk mode only). - pub dry_run: bool, - pub output: OutputMode, -} - -// ── Internal row type used for bulk queries ──────────────────────────────── - -#[derive(Debug, sqlx::FromRow)] -struct FullEntryRow { - pub id: Uuid, - pub version: i64, - pub kind: String, - pub name: String, - pub metadata: serde_json::Value, - pub tags: Vec, -} - -// ── Entry point ──────────────────────────────────────────────────────────── - -pub async fn run(pool: &PgPool, args: DeleteArgs<'_>) -> Result<()> { - match args.name { - Some(name) => { - let kind = args - .kind - .ok_or_else(|| anyhow::anyhow!("--kind is required when --name is specified"))?; - delete_one(pool, args.namespace, kind, name, args.output).await - } - None => delete_bulk(pool, args.namespace, args.kind, args.dry_run, args.output).await, - } -} - -// ── Single-record delete (original behaviour) ───────────────────────────── - -async fn delete_one( - pool: &PgPool, - namespace: &str, - kind: &str, - name: &str, - output: OutputMode, -) -> Result<()> { - tracing::debug!(namespace, kind, name, "deleting entry"); - - let mut tx = pool.begin().await?; - - let row: Option = sqlx::query_as( - "SELECT id, version, tags, metadata FROM entries \ - WHERE namespace = $1 AND kind = $2 AND name = $3 \ - FOR UPDATE", - ) - .bind(namespace) - .bind(kind) - .bind(name) - .fetch_optional(&mut *tx) - .await?; - - let Some(row) = row else { - tx.rollback().await?; - tracing::warn!(namespace, kind, name, "entry not found for deletion"); - let v = json!({"action":"not_found","namespace":namespace,"kind":kind,"name":name}); - match output { - OutputMode::Text => println!("Not found: [{}/{}] {}", namespace, kind, name), - ref mode => print_json(&v, mode)?, - } - return Ok(()); - }; - - snapshot_and_delete(&mut tx, namespace, kind, name, &row).await?; - - crate::audit::log_tx(&mut tx, "delete", namespace, kind, name, json!({})).await; - tx.commit().await?; - - let 
v = json!({"action":"deleted","namespace":namespace,"kind":kind,"name":name}); - match output { - OutputMode::Text => println!("Deleted: [{}/{}] {}", namespace, kind, name), - ref mode => print_json(&v, mode)?, - } - Ok(()) -} - -// ── Bulk delete by namespace (+ optional kind filter) ───────────────────── - -async fn delete_bulk( - pool: &PgPool, - namespace: &str, - kind: Option<&str>, - dry_run: bool, - output: OutputMode, -) -> Result<()> { - tracing::debug!(namespace, ?kind, dry_run, "bulk-deleting entries"); - - let rows: Vec = if let Some(k) = kind { - sqlx::query_as( - "SELECT id, version, kind, name, metadata, tags FROM entries \ - WHERE namespace = $1 AND kind = $2 \ - ORDER BY name", - ) - .bind(namespace) - .bind(k) - .fetch_all(pool) - .await? - } else { - sqlx::query_as( - "SELECT id, version, kind, name, metadata, tags FROM entries \ - WHERE namespace = $1 \ - ORDER BY kind, name", - ) - .bind(namespace) - .fetch_all(pool) - .await? - }; - - if rows.is_empty() { - let v = json!({ - "action": "noop", - "namespace": namespace, - "kind": kind, - "deleted": 0, - "dry_run": dry_run - }); - match output { - OutputMode::Text => println!( - "No records found in namespace \"{}\"{}.", - namespace, - kind.map(|k| format!(" with kind \"{}\"", k)) - .unwrap_or_default() - ), - ref mode => print_json(&v, mode)?, - } - return Ok(()); - } - - if dry_run { - let count = rows.len(); - match output { - OutputMode::Text => { - println!( - "dry-run: would delete {} record(s) in namespace \"{}\":", - count, namespace - ); - for r in &rows { - println!(" [{}/{}] {}", namespace, r.kind, r.name); - } - } - ref mode => { - let items: Vec<_> = rows - .iter() - .map(|r| json!({"namespace": namespace, "kind": r.kind, "name": r.name})) - .collect(); - print_json( - &json!({ - "action": "dry_run", - "namespace": namespace, - "kind": kind, - "would_delete": count, - "entries": items - }), - mode, - )?; - } - } - return Ok(()); - } - - let mut deleted = 
Vec::with_capacity(rows.len()); - - for row in &rows { - let entry_row = EntryRow { - id: row.id, - version: row.version, - tags: row.tags.clone(), - metadata: row.metadata.clone(), - }; - let mut tx = pool.begin().await?; - snapshot_and_delete(&mut tx, namespace, &row.kind, &row.name, &entry_row).await?; - crate::audit::log_tx( - &mut tx, - "delete", - namespace, - &row.kind, - &row.name, - json!({"bulk": true}), - ) - .await; - tx.commit().await?; - - deleted.push(json!({"namespace": namespace, "kind": row.kind, "name": row.name})); - tracing::info!(namespace, kind = %row.kind, name = %row.name, "bulk deleted"); - } - - let count = deleted.len(); - match output { - OutputMode::Text => { - for item in &deleted { - println!( - "Deleted: [{}/{}] {}", - item["namespace"].as_str().unwrap_or(""), - item["kind"].as_str().unwrap_or(""), - item["name"].as_str().unwrap_or("") - ); - } - println!("Total: {} record(s) deleted.", count); - } - ref mode => print_json( - &json!({ - "action": "deleted", - "namespace": namespace, - "kind": kind, - "deleted": count, - "entries": deleted - }), - mode, - )?, - } - Ok(()) -} - -// ── Shared helper: snapshot history then DELETE ──────────────────────────── - -async fn snapshot_and_delete( - tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - namespace: &str, - kind: &str, - name: &str, - row: &EntryRow, -) -> Result<()> { - if let Err(e) = db::snapshot_entry_history( - tx, - db::EntrySnapshotParams { - entry_id: row.id, - namespace, - kind, - name, - version: row.version, - action: "delete", - tags: &row.tags, - metadata: &row.metadata, - }, - ) - .await - { - tracing::warn!(error = %e, "failed to snapshot entry history before delete"); - } - - let fields: Vec = sqlx::query_as( - "SELECT id, field_name, encrypted \ - FROM secrets WHERE entry_id = $1", - ) - .bind(row.id) - .fetch_all(&mut **tx) - .await?; - - for f in &fields { - if let Err(e) = db::snapshot_secret_history( - tx, - db::SecretSnapshotParams { - entry_id: row.id, - 
secret_id: f.id, - entry_version: row.version, - field_name: &f.field_name, - encrypted: &f.encrypted, - action: "delete", - }, - ) - .await - { - tracing::warn!(error = %e, "failed to snapshot secret history before delete"); - } - } - - sqlx::query("DELETE FROM entries WHERE id = $1") - .bind(row.id) - .execute(&mut **tx) - .await?; - - Ok(()) -} diff --git a/src/commands/export_cmd.rs b/src/commands/export_cmd.rs deleted file mode 100644 index 6a655ef..0000000 --- a/src/commands/export_cmd.rs +++ /dev/null @@ -1,109 +0,0 @@ -use anyhow::Result; -use sqlx::PgPool; -use std::collections::BTreeMap; -use std::io::Write; - -use crate::commands::search::{fetch_entries, fetch_secrets_for_entries}; -use crate::crypto; -use crate::models::{ExportData, ExportEntry, ExportFormat}; - -pub struct ExportArgs<'a> { - pub namespace: Option<&'a str>, - pub kind: Option<&'a str>, - pub name: Option<&'a str>, - pub tags: &'a [String], - pub query: Option<&'a str>, - /// Output file path. None means write to stdout. - pub file: Option<&'a str>, - /// Explicit format override (e.g. from --format flag). - pub format: Option<&'a str>, - /// When true, secrets are omitted and master_key is not used. - pub no_secrets: bool, -} - -pub async fn run(pool: &PgPool, args: ExportArgs<'_>, master_key: Option<&[u8; 32]>) -> Result<()> { - // Determine output format: --format > file extension > default JSON. - let format = if let Some(fmt_str) = args.format { - ExportFormat::from_str(fmt_str)? - } else if let Some(path) = args.file { - ExportFormat::from_extension(path).unwrap_or(ExportFormat::Json) - } else { - ExportFormat::Json - }; - - let entries = fetch_entries( - pool, - args.namespace, - args.kind, - args.name, - args.tags, - args.query, - ) - .await?; - - let entry_ids: Vec = entries.iter().map(|e| e.id).collect(); - - let secrets_map = if !args.no_secrets && !entry_ids.is_empty() { - fetch_secrets_for_entries(pool, &entry_ids).await? 
- } else { - std::collections::HashMap::new() - }; - - let key = if !args.no_secrets { master_key } else { None }; - - let mut export_entries: Vec = Vec::with_capacity(entries.len()); - for entry in &entries { - let secrets = if args.no_secrets { - None - } else { - let fields = secrets_map.get(&entry.id).map(Vec::as_slice).unwrap_or(&[]); - if fields.is_empty() { - Some(BTreeMap::new()) - } else { - let mk = - key.ok_or_else(|| anyhow::anyhow!("master key required to decrypt secrets"))?; - let mut map = BTreeMap::new(); - for f in fields { - let decrypted = crypto::decrypt_json(mk, &f.encrypted)?; - map.insert(f.field_name.clone(), decrypted); - } - Some(map) - } - }; - - export_entries.push(ExportEntry { - namespace: entry.namespace.clone(), - kind: entry.kind.clone(), - name: entry.name.clone(), - tags: entry.tags.clone(), - metadata: entry.metadata.clone(), - secrets, - }); - } - - let data = ExportData { - version: 1, - exported_at: chrono::Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(), - entries: export_entries, - }; - - let serialized = format.serialize(&data)?; - - if let Some(path) = args.file { - std::fs::write(path, &serialized)?; - println!( - "Exported {} record(s) to {} ({:?})", - data.entries.len(), - path, - format - ); - } else { - std::io::stdout().write_all(serialized.as_bytes())?; - // Ensure trailing newline on stdout. - if !serialized.ends_with('\n') { - println!(); - } - } - - Ok(()) -} diff --git a/src/commands/history.rs b/src/commands/history.rs deleted file mode 100644 index 46d9653..0000000 --- a/src/commands/history.rs +++ /dev/null @@ -1,78 +0,0 @@ -use anyhow::Result; -use serde_json::{Value, json}; -use sqlx::{FromRow, PgPool}; - -use crate::output::{OutputMode, format_local_time, print_json}; - -pub struct HistoryArgs<'a> { - pub namespace: &'a str, - pub kind: &'a str, - pub name: &'a str, - pub limit: u32, - pub output: OutputMode, -} - -/// List history entries for an entry. 
-pub async fn run(pool: &PgPool, args: HistoryArgs<'_>) -> Result<()> { - #[derive(FromRow)] - struct HistorySummary { - version: i64, - action: String, - actor: String, - created_at: chrono::DateTime, - } - - let rows: Vec = sqlx::query_as( - "SELECT version, action, actor, created_at FROM entries_history \ - WHERE namespace = $1 AND kind = $2 AND name = $3 \ - ORDER BY id DESC LIMIT $4", - ) - .bind(args.namespace) - .bind(args.kind) - .bind(args.name) - .bind(args.limit as i64) - .fetch_all(pool) - .await?; - - match args.output { - OutputMode::Json | OutputMode::JsonCompact => { - let arr: Vec = rows - .iter() - .map(|r| { - json!({ - "version": r.version, - "action": r.action, - "actor": r.actor, - "created_at": r.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), - }) - }) - .collect(); - print_json(&Value::Array(arr), &args.output)?; - } - _ => { - if rows.is_empty() { - println!( - "No history found for [{}/{}] {}.", - args.namespace, args.kind, args.name - ); - return Ok(()); - } - println!( - "History for [{}/{}] {}:", - args.namespace, args.kind, args.name - ); - for r in &rows { - println!( - " v{:<4} {:8} {} {}", - r.version, - r.action, - r.actor, - format_local_time(r.created_at) - ); - } - println!(" (use `secrets rollback --to-version ` to restore)"); - } - } - - Ok(()) -} diff --git a/src/commands/import_cmd.rs b/src/commands/import_cmd.rs deleted file mode 100644 index 845dd10..0000000 --- a/src/commands/import_cmd.rs +++ /dev/null @@ -1,217 +0,0 @@ -use anyhow::Result; -use serde_json::Value; -use sqlx::PgPool; -use std::collections::BTreeMap; - -use crate::commands::add::{self, AddArgs}; -use crate::models::ExportFormat; -use crate::output::{OutputMode, print_json}; - -pub struct ImportArgs<'a> { - pub file: &'a str, - /// Overwrite existing records when there is a conflict (upsert). - /// Without this flag, the import aborts on the first conflict. - /// A future `--skip` flag could allow silently skipping conflicts and continuing. 
- pub force: bool, - /// Check and preview operations without writing to the database. - pub dry_run: bool, - pub output: OutputMode, -} - -pub async fn run(pool: &PgPool, args: ImportArgs<'_>, master_key: &[u8; 32]) -> Result<()> { - let format = ExportFormat::from_extension(args.file)?; - let content = std::fs::read_to_string(args.file) - .map_err(|e| anyhow::anyhow!("Cannot read file '{}': {}", args.file, e))?; - let data = format.deserialize(&content)?; - - if data.version != 1 { - anyhow::bail!( - "Unsupported export version {}. Only version 1 is supported.", - data.version - ); - } - - let total = data.entries.len(); - let mut inserted = 0usize; - let mut skipped = 0usize; - let mut failed = 0usize; - - for entry in &data.entries { - // Check if record already exists. - let exists: bool = sqlx::query_scalar( - "SELECT EXISTS(SELECT 1 FROM entries \ - WHERE namespace = $1 AND kind = $2 AND name = $3)", - ) - .bind(&entry.namespace) - .bind(&entry.kind) - .bind(&entry.name) - .fetch_one(pool) - .await - .unwrap_or(false); - - if exists && !args.force { - let v = serde_json::json!({ - "action": "conflict", - "namespace": entry.namespace, - "kind": entry.kind, - "name": entry.name, - }); - match args.output { - OutputMode::Text => eprintln!( - "[{}/{}/{}] conflict — record already exists (use --force to overwrite)", - entry.namespace, entry.kind, entry.name - ), - ref mode => { - // Write conflict notice to stderr so it does not mix with summary JSON. - eprint!( - "{}", - if *mode == OutputMode::Json { - serde_json::to_string_pretty(&v)? - } else { - serde_json::to_string(&v)? 
- } - ); - eprintln!(); - } - } - return Err(anyhow::anyhow!( - "Import aborted: conflict on [{}/{}/{}]", - entry.namespace, - entry.kind, - entry.name - )); - } - - let action = if exists { "upsert" } else { "insert" }; - - if args.dry_run { - let v = serde_json::json!({ - "action": action, - "namespace": entry.namespace, - "kind": entry.kind, - "name": entry.name, - "dry_run": true, - }); - match args.output { - OutputMode::Text => println!( - "[dry-run] {} [{}/{}/{}]", - action, entry.namespace, entry.kind, entry.name - ), - ref mode => print_json(&v, mode)?, - } - if exists { - skipped += 1; - } else { - inserted += 1; - } - continue; - } - - // Build secret_entries: convert BTreeMap to Vec ("key:=json") - let secret_entries = build_secret_entries(entry.secrets.as_ref()); - - // Build meta_entries from metadata JSON object. - let meta_entries = build_meta_entries(&entry.metadata); - - match add::run( - pool, - AddArgs { - namespace: &entry.namespace, - kind: &entry.kind, - name: &entry.name, - tags: &entry.tags, - meta_entries: &meta_entries, - secret_entries: &secret_entries, - output: OutputMode::Text, - }, - master_key, - ) - .await - { - Ok(()) => { - let v = serde_json::json!({ - "action": action, - "namespace": entry.namespace, - "kind": entry.kind, - "name": entry.name, - }); - match args.output { - OutputMode::Text => println!( - "Imported [{}/{}/{}]", - entry.namespace, entry.kind, entry.name - ), - ref mode => print_json(&v, mode)?, - } - inserted += 1; - } - Err(e) => { - eprintln!( - "Error importing [{}/{}/{}]: {}", - entry.namespace, entry.kind, entry.name, e - ); - failed += 1; - } - } - } - - let summary = serde_json::json!({ - "total": total, - "inserted": inserted, - "skipped": skipped, - "failed": failed, - "dry_run": args.dry_run, - }); - match args.output { - OutputMode::Text => { - if args.dry_run { - println!( - "\n[dry-run] {} total: {} would insert, {} would skip, {} would fail", - total, inserted, skipped, failed - ); - } else { - 
println!( - "\nImport done: {} total — {} inserted, {} skipped, {} failed", - total, inserted, skipped, failed - ); - } - } - ref mode => print_json(&summary, mode)?, - } - - if failed > 0 { - anyhow::bail!("{} record(s) failed to import", failed); - } - - Ok(()) -} - -/// Convert metadata JSON object into Vec of "key:=json_value" entries. -fn build_meta_entries(metadata: &Value) -> Vec { - let mut entries = Vec::new(); - if let Some(obj) = metadata.as_object() { - for (k, v) in obj { - entries.push(value_to_kv_entry(k, v)); - } - } - entries -} - -/// Convert a BTreeMap (secrets) into Vec of "key:=json_value" entries. -fn build_secret_entries(secrets: Option<&BTreeMap>) -> Vec { - let mut entries = Vec::new(); - if let Some(map) = secrets { - for (k, v) in map { - entries.push(value_to_kv_entry(k, v)); - } - } - entries -} - -/// Convert a key/value pair to a CLI-style entry string. -/// Strings use `key=value`; everything else uses `key:=`. -fn value_to_kv_entry(key: &str, value: &Value) -> String { - match value { - Value::String(s) => format!("{}={}", key, s), - other => format!("{}:={}", key, other), - } -} diff --git a/src/commands/init.rs b/src/commands/init.rs deleted file mode 100644 index de42986..0000000 --- a/src/commands/init.rs +++ /dev/null @@ -1,70 +0,0 @@ -use anyhow::{Context, Result}; -use rand::RngExt; -use sqlx::PgPool; - -use crate::{crypto, db}; - -const MIN_MASTER_PASSWORD_LEN: usize = 8; - -pub async fn run(pool: &PgPool) -> Result<()> { - println!("Initializing secrets master key..."); - println!(); - - // Read password (no echo) - let password = rpassword::prompt_password(format!( - "Enter master password (at least {} characters): ", - MIN_MASTER_PASSWORD_LEN - )) - .context("failed to read password")?; - if password.chars().count() < MIN_MASTER_PASSWORD_LEN { - anyhow::bail!( - "Master password must be at least {} characters.", - MIN_MASTER_PASSWORD_LEN - ); - } - let confirm = rpassword::prompt_password("Confirm master password: ") - 
.context("failed to read password confirmation")?; - if password != confirm { - anyhow::bail!("Passwords do not match."); - } - - // Get or create Argon2id salt - let salt = match db::load_argon2_salt(pool).await? { - Some(existing) => { - println!("Found existing salt in database (not the first device)."); - existing - } - None => { - println!("Generating new Argon2id salt and storing in database..."); - let mut salt = vec![0u8; 16]; - rand::rng().fill(&mut salt[..]); - db::store_argon2_salt(pool, &salt).await?; - salt - } - }; - - // Derive master key - print!("Deriving master key (Argon2id, this takes a moment)... "); - let master_key = crypto::derive_master_key(&password, &salt)?; - println!("done."); - - // Store in OS Keychain - crypto::store_master_key(&master_key)?; - - // Self-test: encrypt and decrypt a canary value - let canary = b"secrets-cli-canary"; - let enc = crypto::encrypt(&master_key, canary)?; - let dec = crypto::decrypt(&master_key, &enc)?; - if dec != canary { - anyhow::bail!("Self-test failed: encryption roundtrip mismatch"); - } - - println!(); - println!("Master key stored in OS Keychain."); - println!("You can now use `secrets add` / `secrets search` commands."); - println!(); - println!("IMPORTANT: Remember your master password — it is not stored anywhere."); - println!(" On a new device, run `secrets init` with the same password."); - - Ok(()) -} diff --git a/src/commands/mod.rs b/src/commands/mod.rs deleted file mode 100644 index c0fa776..0000000 --- a/src/commands/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -pub mod add; -pub mod config; -pub mod delete; -pub mod export_cmd; -pub mod history; -pub mod import_cmd; -pub mod init; -pub mod rollback; -pub mod run; -pub mod search; -pub mod update; -pub mod upgrade; diff --git a/src/commands/run.rs b/src/commands/run.rs deleted file mode 100644 index 41a9bc5..0000000 --- a/src/commands/run.rs +++ /dev/null @@ -1,248 +0,0 @@ -use anyhow::Result; -use serde_json::json; -use sqlx::PgPool; -use 
std::collections::HashMap; - -use crate::commands::search::{build_injected_env_map, fetch_entries, fetch_secrets_for_entries}; -use crate::output::OutputMode; - -pub struct RunArgs<'a> { - pub namespace: Option<&'a str>, - pub kind: Option<&'a str>, - pub name: Option<&'a str>, - pub tags: &'a [String], - pub secret_fields: &'a [String], - pub prefix: &'a str, - pub dry_run: bool, - pub output: OutputMode, - pub command: &'a [String], -} - -/// A single environment variable with its origin for dry-run display. -pub struct EnvMapping { - pub var_name: String, - pub source: String, - pub field: String, -} - -struct CollectArgs<'a> { - namespace: Option<&'a str>, - kind: Option<&'a str>, - name: Option<&'a str>, - tags: &'a [String], - secret_fields: &'a [String], - prefix: &'a str, -} - -/// Fetch entries matching the filter and build a flat env map (decrypted secrets only, no metadata). -/// If `secret_fields` is non-empty, only those fields are decrypted and included. -async fn collect_env_map( - pool: &PgPool, - args: &CollectArgs<'_>, - master_key: &[u8; 32], -) -> Result> { - if args.namespace.is_none() - && args.kind.is_none() - && args.name.is_none() - && args.tags.is_empty() - { - anyhow::bail!( - "At least one filter (--namespace, --kind, --name, or --tag) is required for run" - ); - } - let entries = - fetch_entries(pool, args.namespace, args.kind, args.name, args.tags, None).await?; - if entries.is_empty() { - anyhow::bail!("No records matched the given filters."); - } - - let entry_ids: Vec = entries.iter().map(|e| e.id).collect(); - let fields_map = fetch_secrets_for_entries(pool, &entry_ids).await?; - - let mut map = HashMap::new(); - for entry in &entries { - let empty = vec![]; - let all_fields = fields_map.get(&entry.id).unwrap_or(&empty); - let filtered_fields: Vec<_> = if args.secret_fields.is_empty() { - all_fields.iter().collect() - } else { - all_fields - .iter() - .filter(|f| args.secret_fields.contains(&f.field_name)) - .collect() - }; - let 
row_map = - build_injected_env_map(pool, entry, args.prefix, master_key, &filtered_fields).await?; - for (k, v) in row_map { - map.insert(k, v); - } - } - Ok(map) -} - -/// Like `collect_env_map` but also returns per-variable source info for dry-run display. -async fn collect_env_map_with_source( - pool: &PgPool, - args: &CollectArgs<'_>, - master_key: &[u8; 32], -) -> Result<(HashMap, Vec)> { - if args.namespace.is_none() - && args.kind.is_none() - && args.name.is_none() - && args.tags.is_empty() - { - anyhow::bail!( - "At least one filter (--namespace, --kind, --name, or --tag) is required for run" - ); - } - let entries = - fetch_entries(pool, args.namespace, args.kind, args.name, args.tags, None).await?; - if entries.is_empty() { - anyhow::bail!("No records matched the given filters."); - } - - let entry_ids: Vec = entries.iter().map(|e| e.id).collect(); - let fields_map = fetch_secrets_for_entries(pool, &entry_ids).await?; - - let mut map = HashMap::new(); - let mut mappings: Vec = Vec::new(); - - for entry in &entries { - let empty = vec![]; - let all_fields = fields_map.get(&entry.id).unwrap_or(&empty); - let filtered_fields: Vec<_> = if args.secret_fields.is_empty() { - all_fields.iter().collect() - } else { - all_fields - .iter() - .filter(|f| args.secret_fields.contains(&f.field_name)) - .collect() - }; - - let row_map = - build_injected_env_map(pool, entry, args.prefix, master_key, &filtered_fields).await?; - - let source = format!("{}/{}/{}", entry.namespace, entry.kind, entry.name); - for field in &filtered_fields { - let var_name = format!( - "{}_{}", - env_prefix_name(&entry.name, args.prefix), - field.field_name.to_uppercase().replace(['-', '.'], "_") - ); - if row_map.contains_key(&var_name) { - mappings.push(EnvMapping { - var_name: var_name.clone(), - source: source.clone(), - field: field.field_name.clone(), - }); - } - } - - for (k, v) in row_map { - map.insert(k, v); - } - } - Ok((map, mappings)) -} - -fn env_prefix_name(entry_name: &str, 
prefix: &str) -> String { - let name_part = entry_name.to_uppercase().replace(['-', '.', ' '], "_"); - if prefix.is_empty() { - name_part - } else { - format!( - "{}_{}", - prefix.to_uppercase().replace(['-', '.', ' '], "_"), - name_part - ) - } -} - -/// `run` command: inject secrets into a child process environment and execute. -/// With `--dry-run`, prints the variable mapping (names and sources only) without executing. -pub async fn run_exec(pool: &PgPool, args: RunArgs<'_>, master_key: &[u8; 32]) -> Result<()> { - if !args.dry_run && args.command.is_empty() { - anyhow::bail!( - "No command specified. Usage: secrets run [filter flags] -- [args]" - ); - } - - let collect = CollectArgs { - namespace: args.namespace, - kind: args.kind, - name: args.name, - tags: args.tags, - secret_fields: args.secret_fields, - prefix: args.prefix, - }; - - if args.dry_run { - let (env_map, mappings) = collect_env_map_with_source(pool, &collect, master_key).await?; - - let total_vars = env_map.len(); - let total_records = { - let mut seen = std::collections::HashSet::new(); - for m in &mappings { - seen.insert(&m.source); - } - seen.len() - }; - - match args.output { - OutputMode::Text => { - for m in &mappings { - println!("{:<40} <- {} :: {}", m.var_name, m.source, m.field); - } - println!("---"); - println!( - "{} variable(s) from {} record(s).", - total_vars, total_records - ); - } - OutputMode::Json | OutputMode::JsonCompact => { - let vars: Vec<_> = mappings - .iter() - .map(|m| { - json!({ - "name": m.var_name, - "source": m.source, - "field": m.field, - }) - }) - .collect(); - let out = json!({ - "variables": vars, - "total_vars": total_vars, - "total_records": total_records, - }); - if args.output == OutputMode::Json { - println!("{}", serde_json::to_string_pretty(&out)?); - } else { - println!("{}", serde_json::to_string(&out)?); - } - } - } - return Ok(()); - } - - let env_map = collect_env_map(pool, &collect, master_key).await?; - - tracing::debug!( - vars = 
env_map.len(), - cmd = args.command[0].as_str(), - "injecting secrets into child process" - ); - - let status = std::process::Command::new(&args.command[0]) - .args(&args.command[1..]) - .envs(&env_map) - .status() - .map_err(|e| anyhow::anyhow!("Failed to execute '{}': {}", args.command[0], e))?; - - if !status.success() { - let code = status.code().unwrap_or(1); - std::process::exit(code); - } - - Ok(()) -} diff --git a/src/commands/search.rs b/src/commands/search.rs deleted file mode 100644 index babf2bd..0000000 --- a/src/commands/search.rs +++ /dev/null @@ -1,568 +0,0 @@ -use anyhow::Result; -use serde_json::{Value, json}; -use sqlx::PgPool; -use std::collections::HashMap; - -use crate::crypto; -use crate::models::{Entry, SecretField}; -use crate::output::{OutputMode, format_local_time}; - -pub struct SearchArgs<'a> { - pub namespace: Option<&'a str>, - pub kind: Option<&'a str>, - pub name: Option<&'a str>, - pub tags: &'a [String], - pub query: Option<&'a str>, - pub fields: &'a [String], - pub summary: bool, - pub limit: u32, - pub offset: u32, - pub sort: &'a str, - pub output: OutputMode, -} - -pub async fn run(pool: &PgPool, args: SearchArgs<'_>) -> Result<()> { - validate_safe_search_args(args.fields)?; - - let rows = fetch_entries_paged( - pool, - PagedFetchArgs { - namespace: args.namespace, - kind: args.kind, - name: args.name, - tags: args.tags, - query: args.query, - sort: args.sort, - limit: args.limit, - offset: args.offset, - }, - ) - .await?; - - // -f/--field: extract specific metadata field values directly - if !args.fields.is_empty() { - return print_fields(&rows, args.fields); - } - - // Fetch secret schemas for all returned entries (no master key needed). - let entry_ids: Vec = rows.iter().map(|r| r.id).collect(); - let schema_map = if !args.summary && !entry_ids.is_empty() { - fetch_secret_schemas(pool, &entry_ids).await? 
- } else { - HashMap::new() - }; - - match args.output { - OutputMode::Json | OutputMode::JsonCompact => { - let arr: Vec = rows - .iter() - .map(|r| to_json(r, args.summary, schema_map.get(&r.id).map(Vec::as_slice))) - .collect(); - let out = if args.output == OutputMode::Json { - serde_json::to_string_pretty(&arr)? - } else { - serde_json::to_string(&arr)? - }; - println!("{}", out); - } - OutputMode::Text => { - if rows.is_empty() { - println!("No records found."); - return Ok(()); - } - for row in &rows { - print_text( - row, - args.summary, - schema_map.get(&row.id).map(Vec::as_slice), - )?; - } - println!("{} record(s) found.", rows.len()); - if rows.len() == args.limit as usize { - println!( - " (showing up to {}; use --offset {} to see more)", - args.limit, - args.offset + args.limit - ); - } - } - } - - Ok(()) -} - -fn validate_safe_search_args(fields: &[String]) -> Result<()> { - if let Some(field) = fields.iter().find(|field| is_secret_field(field)) { - anyhow::bail!( - "Field '{}' is sensitive. `search -f` only supports metadata.* fields; use `secrets run` for secrets.", - field - ); - } - Ok(()) -} - -fn is_secret_field(field: &str) -> bool { - matches!( - field.split_once('.').map(|(section, _)| section), - Some("secret" | "secrets" | "encrypted") - ) -} - -// ── Entry fetching ──────────────────────────────────────────────────────────── - -struct PagedFetchArgs<'a> { - namespace: Option<&'a str>, - kind: Option<&'a str>, - name: Option<&'a str>, - tags: &'a [String], - query: Option<&'a str>, - sort: &'a str, - limit: u32, - offset: u32, -} - -/// A very large limit used when callers need all matching records (export, run). -/// Postgres will stop scanning when this many rows are found; adjust if needed. -pub const FETCH_ALL_LIMIT: u32 = 100_000; - -/// Fetch entries matching the given filters (used by search, run). -/// `limit` caps the result set; pass `FETCH_ALL_LIMIT` when you need all matching records. 
-pub async fn fetch_entries( - pool: &PgPool, - namespace: Option<&str>, - kind: Option<&str>, - name: Option<&str>, - tags: &[String], - query: Option<&str>, -) -> Result> { - fetch_entries_with_limit(pool, namespace, kind, name, tags, query, FETCH_ALL_LIMIT).await -} - -/// Like `fetch_entries` but with an explicit limit. Used internally by `search`. -pub(crate) async fn fetch_entries_with_limit( - pool: &PgPool, - namespace: Option<&str>, - kind: Option<&str>, - name: Option<&str>, - tags: &[String], - query: Option<&str>, - limit: u32, -) -> Result> { - fetch_entries_paged( - pool, - PagedFetchArgs { - namespace, - kind, - name, - tags, - query, - sort: "name", - limit, - offset: 0, - }, - ) - .await -} - -async fn fetch_entries_paged(pool: &PgPool, a: PagedFetchArgs<'_>) -> Result> { - let mut conditions: Vec = Vec::new(); - let mut idx: i32 = 1; - - if a.namespace.is_some() { - conditions.push(format!("namespace = ${}", idx)); - idx += 1; - } - if a.kind.is_some() { - conditions.push(format!("kind = ${}", idx)); - idx += 1; - } - if a.name.is_some() { - conditions.push(format!("name = ${}", idx)); - idx += 1; - } - if !a.tags.is_empty() { - let placeholders: Vec = a - .tags - .iter() - .map(|_| { - let p = format!("${}", idx); - idx += 1; - p - }) - .collect(); - conditions.push(format!("tags @> ARRAY[{}]", placeholders.join(", "))); - } - if a.query.is_some() { - conditions.push(format!( - "(name ILIKE ${i} ESCAPE '\\' OR namespace ILIKE ${i} ESCAPE '\\' OR kind ILIKE ${i} ESCAPE '\\' OR metadata::text ILIKE ${i} ESCAPE '\\' OR EXISTS (SELECT 1 FROM unnest(tags) t WHERE t ILIKE ${i} ESCAPE '\\'))", - i = idx - )); - idx += 1; - } - - let where_clause = if conditions.is_empty() { - String::new() - } else { - format!("WHERE {}", conditions.join(" AND ")) - }; - - let order = match a.sort { - "updated" => "updated_at DESC", - "created" => "created_at DESC", - _ => "namespace, kind, name", - }; - - let sql = format!( - "SELECT * FROM entries {} ORDER BY {} LIMIT 
${} OFFSET ${}", - where_clause, - order, - idx, - idx + 1 - ); - - tracing::debug!(sql, "executing search query"); - - let mut q = sqlx::query_as::<_, Entry>(&sql); - if let Some(v) = a.namespace { - q = q.bind(v); - } - if let Some(v) = a.kind { - q = q.bind(v); - } - if let Some(v) = a.name { - q = q.bind(v); - } - for v in a.tags { - q = q.bind(v.as_str()); - } - if let Some(v) = a.query { - q = q.bind(format!( - "%{}%", - v.replace('\\', "\\\\") - .replace('%', "\\%") - .replace('_', "\\_") - )); - } - q = q.bind(a.limit as i64).bind(a.offset as i64); - - Ok(q.fetch_all(pool).await?) -} - -// ── Secret schema fetching (no master key) ─────────────────────────────────── - -/// Fetch secret field names for a set of entry ids. -/// Returns a map from entry_id to list of SecretField. -async fn fetch_secret_schemas( - pool: &PgPool, - entry_ids: &[uuid::Uuid], -) -> Result>> { - if entry_ids.is_empty() { - return Ok(HashMap::new()); - } - - let fields: Vec = sqlx::query_as( - "SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name", - ) - .bind(entry_ids) - .fetch_all(pool) - .await?; - - let mut map: HashMap> = HashMap::new(); - for f in fields { - map.entry(f.entry_id).or_default().push(f); - } - Ok(map) -} - -/// Fetch all secret fields (including encrypted bytes) for a set of entry ids. 
-pub async fn fetch_secrets_for_entries( - pool: &PgPool, - entry_ids: &[uuid::Uuid], -) -> Result>> { - if entry_ids.is_empty() { - return Ok(HashMap::new()); - } - - let fields: Vec = sqlx::query_as( - "SELECT * FROM secrets WHERE entry_id = ANY($1) ORDER BY entry_id, field_name", - ) - .bind(entry_ids) - .fetch_all(pool) - .await?; - - let mut map: HashMap> = HashMap::new(); - for f in fields { - map.entry(f.entry_id).or_default().push(f); - } - Ok(map) -} - -// ── Display helpers ─────────────────────────────────────────────────────────── - -fn env_prefix(entry: &Entry, prefix: &str) -> String { - let name_part = entry.name.to_uppercase().replace(['-', '.', ' '], "_"); - if prefix.is_empty() { - name_part - } else { - format!( - "{}_{}", - prefix.to_uppercase().replace(['-', '.', ' '], "_"), - name_part - ) - } -} - -/// Build a flat KEY=VALUE map from decrypted secret fields only. -/// Resolves key_ref: if metadata.key_ref is set, merges secret fields from that key entry. -pub async fn build_injected_env_map( - pool: &PgPool, - entry: &Entry, - prefix: &str, - master_key: &[u8; 32], - fields: &[&SecretField], -) -> Result> { - let effective_prefix = env_prefix(entry, prefix); - let mut map = HashMap::new(); - - // Decrypt each secret field and add to env map. - for f in fields { - let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; - let key = format!( - "{}_{}", - effective_prefix, - f.field_name.to_uppercase().replace(['-', '.'], "_") - ); - map.insert(key, json_value_to_env_string(&decrypted)); - } - - // Resolve key_ref: merge secrets from the referenced key entry. 
- if let Some(key_ref) = entry.metadata.get("key_ref").and_then(|v| v.as_str()) { - let key_entries = fetch_entries( - pool, - Some(&entry.namespace), - Some("key"), - Some(key_ref), - &[], - None, - ) - .await?; - - if let Some(key_entry) = key_entries.first() { - let key_ids = vec![key_entry.id]; - let key_fields_map = fetch_secrets_for_entries(pool, &key_ids).await?; - let empty = vec![]; - let key_fields = key_fields_map.get(&key_entry.id).unwrap_or(&empty); - - let key_prefix = env_prefix(key_entry, prefix); - for f in key_fields { - let decrypted = crypto::decrypt_json(master_key, &f.encrypted)?; - let key_var = format!( - "{}_{}", - key_prefix, - f.field_name.to_uppercase().replace(['-', '.'], "_") - ); - map.insert(key_var, json_value_to_env_string(&decrypted)); - } - } else { - tracing::warn!(key_ref, "key_ref target not found"); - } - } - - Ok(map) -} - -fn json_value_to_env_string(v: &Value) -> String { - match v { - Value::String(s) => s.clone(), - Value::Null => String::new(), - other => other.to_string(), - } -} - -fn to_json(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Value { - if summary { - let desc = entry - .metadata - .get("desc") - .or_else(|| entry.metadata.get("url")) - .and_then(|v| v.as_str()) - .unwrap_or("") - .to_string(); - return json!({ - "namespace": entry.namespace, - "kind": entry.kind, - "name": entry.name, - "tags": entry.tags, - "desc": desc, - "updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), - }); - } - - let secrets_val: Value = match schema { - Some(fields) if !fields.is_empty() => { - let schema_arr: Vec = fields - .iter() - .map(|f| { - json!({ - "field_name": f.field_name, - }) - }) - .collect(); - Value::Array(schema_arr) - } - _ => Value::Array(vec![]), - }; - - json!({ - "id": entry.id, - "namespace": entry.namespace, - "kind": entry.kind, - "name": entry.name, - "tags": entry.tags, - "metadata": entry.metadata, - "secrets": secrets_val, - "version": entry.version, - 
"created_at": entry.created_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), - "updated_at": entry.updated_at.format("%Y-%m-%dT%H:%M:%SZ").to_string(), - }) -} - -fn print_text(entry: &Entry, summary: bool, schema: Option<&[SecretField]>) -> Result<()> { - println!("[{}/{}] {}", entry.namespace, entry.kind, entry.name); - if summary { - let desc = entry - .metadata - .get("desc") - .or_else(|| entry.metadata.get("url")) - .and_then(|v| v.as_str()) - .unwrap_or("-"); - if !entry.tags.is_empty() { - println!(" tags: [{}]", entry.tags.join(", ")); - } - println!(" desc: {}", desc); - println!(" updated: {}", format_local_time(entry.updated_at)); - } else { - println!(" id: {}", entry.id); - if !entry.tags.is_empty() { - println!(" tags: [{}]", entry.tags.join(", ")); - } - if entry.metadata.as_object().is_some_and(|m| !m.is_empty()) { - println!( - " metadata: {}", - serde_json::to_string_pretty(&entry.metadata)? - ); - } - match schema { - Some(fields) if !fields.is_empty() => { - let schema_str: Vec = fields.iter().map(|f| f.field_name.clone()).collect(); - println!(" secrets: {}", schema_str.join(", ")); - println!(" (use `secrets run` to get values)"); - } - _ => {} - } - println!(" version: {}", entry.version); - println!(" created: {}", format_local_time(entry.created_at)); - } - println!(); - Ok(()) -} - -/// Extract one or more metadata field paths like `metadata.url`. -fn print_fields(rows: &[Entry], fields: &[String]) -> Result<()> { - for row in rows { - for field in fields { - let val = extract_field(row, field)?; - println!("{}", val); - } - } - Ok(()) -} - -fn extract_field(entry: &Entry, field: &str) -> Result { - let (section, key) = field - .split_once('.') - .ok_or_else(|| anyhow::anyhow!("Invalid field path '{}'. Use metadata..", field))?; - - let obj = match section { - "metadata" | "meta" => &entry.metadata, - other => anyhow::bail!("Unknown field section '{}'. 
Use 'metadata'.", other), - }; - - obj.get(key) - .and_then(|v| { - v.as_str() - .map(|s| s.to_string()) - .or_else(|| Some(v.to_string())) - }) - .ok_or_else(|| { - anyhow::anyhow!( - "Field '{}' not found in record [{}/{}/{}]", - field, - entry.namespace, - entry.kind, - entry.name - ) - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use chrono::Utc; - use serde_json::json; - use uuid::Uuid; - - fn sample_entry() -> Entry { - Entry { - id: Uuid::nil(), - namespace: "refining".to_string(), - kind: "service".to_string(), - name: "gitea.main".to_string(), - tags: vec!["prod".to_string()], - metadata: json!({"url": "https://code.example.com", "enabled": true}), - version: 1, - created_at: Utc::now(), - updated_at: Utc::now(), - } - } - - fn sample_fields() -> Vec { - let key = [0x42u8; 32]; - let enc = crypto::encrypt_json(&key, &json!("abc123")).unwrap(); - vec![SecretField { - id: Uuid::nil(), - entry_id: Uuid::nil(), - field_name: "token".to_string(), - encrypted: enc, - version: 1, - created_at: Utc::now(), - updated_at: Utc::now(), - }] - } - - #[test] - fn rejects_secret_field_extraction() { - let fields = vec!["secret.token".to_string()]; - let err = validate_safe_search_args(&fields).unwrap_err(); - assert!(err.to_string().contains("sensitive")); - } - - #[test] - fn to_json_full_includes_secrets_schema() { - let entry = sample_entry(); - let fields = sample_fields(); - let v = to_json(&entry, false, Some(&fields)); - - let secrets = v.get("secrets").unwrap().as_array().unwrap(); - assert_eq!(secrets.len(), 1); - assert_eq!(secrets[0]["field_name"], "token"); - } - - #[test] - fn to_json_summary_omits_secrets_schema() { - let entry = sample_entry(); - let fields = sample_fields(); - let v = to_json(&entry, true, Some(&fields)); - assert!(v.get("secrets").is_none()); - } -} diff --git a/src/commands/upgrade.rs b/src/commands/upgrade.rs deleted file mode 100644 index c8ce7ad..0000000 --- a/src/commands/upgrade.rs +++ /dev/null @@ -1,411 +0,0 @@ -use 
anyhow::{Context, Result, bail}; -use flate2::read::GzDecoder; -use serde::Deserialize; -use sha2::{Digest, Sha256}; -use std::io::{Cursor, Read, Write}; -use std::time::Duration; - -const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION"); - -/// Build-time config via `option_env!("SECRETS_UPGRADE_URL")`. Set during `cargo build`, e.g.: -/// SECRETS_UPGRADE_URL=https://... cargo build --release -const BUILD_UPGRADE_URL: Option<&'static str> = option_env!("SECRETS_UPGRADE_URL"); - -fn upgrade_api_url() -> Result { - if let Some(url) = BUILD_UPGRADE_URL.filter(|s| !s.trim().is_empty()) { - return Ok(url.to_string()); - } - let url = std::env::var("SECRETS_UPGRADE_URL").context( - "SECRETS_UPGRADE_URL is not set at build or runtime. Set it when building: \ - SECRETS_UPGRADE_URL=https://... cargo build, or export before running secrets upgrade.", - )?; - if url.trim().is_empty() { - anyhow::bail!("SECRETS_UPGRADE_URL is empty."); - } - Ok(url) -} - -#[derive(Debug, Deserialize)] -struct Release { - tag_name: String, - assets: Vec, -} - -#[derive(Debug, Deserialize)] -struct Asset { - name: String, - browser_download_url: String, -} - -fn available_assets(assets: &[Asset]) -> String { - assets - .iter() - .map(|a| a.name.as_str()) - .collect::>() - .join(", ") -} - -fn release_asset_name(tag_name: &str, suffix: &str) -> String { - format!("secrets-{tag_name}-{suffix}") -} - -fn find_asset_by_name<'a>(assets: &'a [Asset], name: &str) -> Result<&'a Asset> { - assets.iter().find(|a| a.name == name).with_context(|| { - format!( - "no matching release asset found: {name}\navailable: {}", - available_assets(assets) - ) - }) -} - -/// Detect the asset suffix for the current platform/arch at compile time. 
-fn platform_asset_suffix() -> Result<&'static str> { - #[cfg(all(target_os = "linux", target_arch = "x86_64"))] - { - Ok("x86_64-linux-musl.tar.gz") - } - - #[cfg(all(target_os = "macos", target_arch = "aarch64"))] - { - Ok("aarch64-macos.tar.gz") - } - - #[cfg(all(target_os = "macos", target_arch = "x86_64"))] - { - Ok("x86_64-macos.tar.gz") - } - - #[cfg(all(target_os = "windows", target_arch = "x86_64"))] - { - Ok("x86_64-windows.zip") - } - - #[cfg(not(any( - all(target_os = "linux", target_arch = "x86_64"), - all(target_os = "macos", target_arch = "aarch64"), - all(target_os = "macos", target_arch = "x86_64"), - all(target_os = "windows", target_arch = "x86_64"), - )))] - bail!( - "Unsupported platform: {}/{}", - std::env::consts::OS, - std::env::consts::ARCH - ) -} - -/// Strip the "secrets-" prefix from the tag and parse as semver. -fn parse_tag_version(tag: &str) -> Result { - let ver_str = tag - .strip_prefix("secrets-") - .with_context(|| format!("unexpected tag format: {tag}"))?; - semver::Version::parse(ver_str) - .with_context(|| format!("failed to parse version from tag: {tag}")) -} - -fn sha256_hex(bytes: &[u8]) -> String { - let digest = Sha256::digest(bytes); - format!("{digest:x}") -} - -fn verify_checksum(asset_name: &str, archive: &[u8], checksum_contents: &str) -> Result { - let expected_checksum = parse_checksum_file(checksum_contents)?; - let actual_checksum = sha256_hex(archive); - - if actual_checksum != expected_checksum { - bail!( - "checksum verification failed for {}: expected {}, got {}", - asset_name, - expected_checksum, - actual_checksum - ); - } - - Ok(actual_checksum) -} - -fn parse_checksum_file(contents: &str) -> Result { - let checksum = contents - .split_whitespace() - .next() - .context("checksum file is empty")? 
- .trim() - .to_ascii_lowercase(); - - if checksum.len() != 64 || !checksum.bytes().all(|b| b.is_ascii_hexdigit()) { - bail!("invalid SHA-256 checksum format") - } - - Ok(checksum) -} - -async fn download_bytes(client: &reqwest::Client, url: &str, context: &str) -> Result> { - Ok(client - .get(url) - .send() - .await - .with_context(|| format!("{context}: request failed"))? - .error_for_status() - .with_context(|| format!("{context}: server returned an error"))? - .bytes() - .await - .with_context(|| format!("{context}: failed to read response body"))? - .to_vec()) -} - -/// Extract the binary from a tar.gz archive (first file whose name == "secrets"). -fn extract_from_targz(bytes: &[u8]) -> Result> { - let gz = GzDecoder::new(Cursor::new(bytes)); - let mut archive = tar::Archive::new(gz); - for entry in archive.entries().context("failed to read tar entries")? { - let mut entry = entry.context("bad tar entry")?; - let path = entry.path().context("bad tar entry path")?.into_owned(); - let fname = path - .file_name() - .and_then(|n| n.to_str()) - .unwrap_or_default(); - if fname == "secrets" || fname == "secrets.exe" { - let mut buf = Vec::new(); - entry.read_to_end(&mut buf).context("read tar entry")?; - return Ok(buf); - } - } - bail!("binary not found inside tar.gz archive") -} - -/// Extract the binary from a zip archive (first file whose name matches). 
-#[cfg(target_os = "windows")] -fn extract_from_zip(bytes: &[u8]) -> Result> { - let reader = Cursor::new(bytes); - let mut archive = zip::ZipArchive::new(reader).context("failed to open zip archive")?; - for i in 0..archive.len() { - let mut file = archive.by_index(i).context("bad zip entry")?; - let fname = file.name().to_owned(); - if fname.ends_with("secrets.exe") || fname.ends_with("secrets") { - let mut buf = Vec::new(); - file.read_to_end(&mut buf).context("read zip entry")?; - return Ok(buf); - } - } - bail!("binary not found inside zip archive") -} - -pub async fn run(check_only: bool) -> Result<()> { - let current = semver::Version::parse(CURRENT_VERSION).context("invalid current version")?; - - println!("Current version: v{current}"); - println!("Checking for updates..."); - - let client = reqwest::Client::builder() - .user_agent(format!("secrets-cli/{CURRENT_VERSION}")) - .connect_timeout(Duration::from_secs(10)) - .timeout(Duration::from_secs(120)) - .build() - .context("failed to build HTTP client")?; - - let api_url = upgrade_api_url()?; - let release: Release = client - .get(&api_url) - .send() - .await - .context("failed to fetch release info")? - .error_for_status() - .context("release API returned an error")? 
- .json() - .await - .context("failed to parse release JSON")?; - - let latest = parse_tag_version(&release.tag_name)?; - - if latest <= current { - println!("Already up to date (v{current})"); - return Ok(()); - } - - println!("New version available: v{latest}"); - - if check_only { - println!("Run `secrets upgrade` to update."); - return Ok(()); - } - - let suffix = platform_asset_suffix()?; - let asset_name = release_asset_name(&release.tag_name, suffix); - let asset = find_asset_by_name(&release.assets, &asset_name)?; - let checksum_name = format!("{}.sha256", asset.name); - let checksum_asset = find_asset_by_name(&release.assets, &checksum_name)?; - - println!("Downloading {}...", asset.name); - - let archive = download_bytes(&client, &asset.browser_download_url, "archive download").await?; - let checksum_contents = download_bytes( - &client, - &checksum_asset.browser_download_url, - "checksum download", - ) - .await?; - let actual_checksum = verify_checksum( - &asset.name, - &archive, - std::str::from_utf8(&checksum_contents).context("checksum file is not valid UTF-8")?, - )?; - - println!("Verified SHA-256: {actual_checksum}"); - - println!("Extracting..."); - - let binary = if suffix.ends_with(".tar.gz") { - extract_from_targz(&archive)? - } else { - #[cfg(target_os = "windows")] - { - extract_from_zip(&archive)? - } - #[cfg(not(target_os = "windows"))] - bail!("zip extraction is only supported on Windows") - }; - - // Write to a temporary file, set executable permission, then atomically replace. 
- let mut tmp = tempfile::NamedTempFile::new().context("failed to create temp file")?; - tmp.write_all(&binary) - .context("failed to write temp binary")?; - - #[cfg(unix)] - { - use std::os::unix::fs::PermissionsExt; - let perms = std::fs::Permissions::from_mode(0o755); - std::fs::set_permissions(tmp.path(), perms).context("failed to chmod temp binary")?; - } - - self_replace::self_replace(tmp.path()).context("failed to replace current binary")?; - - println!("Updated: v{current} → v{latest}"); - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use flate2::Compression; - use flate2::write::GzEncoder; - use tar::Builder; - - #[test] - fn parse_tag_version_accepts_release_tag() { - let version = parse_tag_version("secrets-0.6.1").expect("version should parse"); - assert_eq!(version, semver::Version::new(0, 6, 1)); - } - - #[test] - fn parse_tag_version_rejects_invalid_tag() { - let err = parse_tag_version("v0.6.1").expect_err("tag should be rejected"); - assert!(err.to_string().contains("unexpected tag format")); - } - - #[test] - fn parse_checksum_file_accepts_sha256sum_format() { - let checksum = parse_checksum_file( - "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef secrets.tar.gz", - ) - .expect("checksum should parse"); - assert_eq!( - checksum, - "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - ); - } - - #[test] - fn parse_checksum_file_rejects_invalid_checksum() { - let err = parse_checksum_file("not-a-sha256").expect_err("checksum should be rejected"); - assert!(err.to_string().contains("invalid SHA-256 checksum format")); - } - - #[test] - fn release_asset_name_matches_release_tag() { - assert_eq!( - release_asset_name("secrets-0.7.0", "x86_64-linux-musl.tar.gz"), - "secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz" - ); - } - - #[test] - fn find_asset_by_name_rejects_stale_platform_match() { - let assets = vec![ - Asset { - name: "secrets-secrets-0.6.9-x86_64-linux-musl.tar.gz".into(), - browser_download_url: 
"https://example.invalid/old".into(), - }, - Asset { - name: "secrets-secrets-0.7.0-aarch64-macos.tar.gz".into(), - browser_download_url: "https://example.invalid/other".into(), - }, - ]; - - let err = find_asset_by_name(&assets, "secrets-secrets-0.7.0-x86_64-linux-musl.tar.gz") - .expect_err("stale asset should not match"); - - assert!(err.to_string().contains("no matching release asset found")); - } - - #[test] - fn sha256_hex_matches_known_value() { - assert_eq!( - sha256_hex(b"abc"), - "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad" - ); - } - - #[test] - fn verify_checksum_rejects_mismatch() { - let err = verify_checksum( - "secrets.tar.gz", - b"abc", - "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef secrets.tar.gz", - ) - .expect_err("checksum mismatch should fail"); - - assert!(err.to_string().contains("checksum verification failed")); - } - - #[test] - fn extract_from_targz_reads_binary() { - let payload = b"fake-secrets-binary"; - let archive = make_test_targz("secrets", payload); - let extracted = extract_from_targz(&archive).expect("binary should extract"); - assert_eq!(extracted, payload); - } - - fn make_test_targz(name: &str, payload: &[u8]) -> Vec { - let encoder = GzEncoder::new(Vec::new(), Compression::default()); - let mut builder = Builder::new(encoder); - - let mut header = tar::Header::new_gnu(); - header.set_mode(0o755); - header.set_size(payload.len() as u64); - header.set_cksum(); - builder - .append_data(&mut header, name, payload) - .expect("append tar entry"); - - let encoder = builder.into_inner().expect("finish tar builder"); - encoder.finish().expect("finish gzip") - } - - #[cfg(target_os = "windows")] - #[test] - fn extract_from_zip_reads_binary() { - use zip::write::SimpleFileOptions; - - let cursor = Cursor::new(Vec::::new()); - let mut writer = zip::ZipWriter::new(cursor); - writer - .start_file("secrets.exe", SimpleFileOptions::default()) - .expect("start zip file"); - writer - 
.write_all(b"fake-secrets-binary") - .expect("write zip payload"); - let bytes = writer.finish().expect("finish zip").into_inner(); - - let extracted = extract_from_zip(&bytes).expect("binary should extract"); - assert_eq!(extracted, b"fake-secrets-binary"); - } -} diff --git a/src/config.rs b/src/config.rs deleted file mode 100644 index d54bda9..0000000 --- a/src/config.rs +++ /dev/null @@ -1,77 +0,0 @@ -use anyhow::{Context, Result}; -use serde::{Deserialize, Serialize}; -use std::fs; -use std::path::PathBuf; - -#[derive(Debug, Serialize, Deserialize, Default)] -pub struct Config { - pub database_url: Option, -} - -pub fn config_dir() -> Result { - let dir = dirs::config_dir() - .or_else(|| dirs::home_dir().map(|h| h.join(".config"))) - .context( - "Cannot determine config directory: \ - neither XDG_CONFIG_HOME nor HOME is set", - )? - .join("secrets"); - Ok(dir) -} - -pub fn config_path() -> Result { - Ok(config_dir()?.join("config.toml")) -} - -pub fn load_config() -> Result { - let path = config_path()?; - if !path.exists() { - return Ok(Config::default()); - } - let content = fs::read_to_string(&path) - .with_context(|| format!("failed to read config file: {}", path.display()))?; - let config: Config = toml::from_str(&content) - .with_context(|| format!("failed to parse config file: {}", path.display()))?; - Ok(config) -} - -pub fn save_config(config: &Config) -> Result<()> { - let dir = config_dir()?; - fs::create_dir_all(&dir) - .with_context(|| format!("failed to create config dir: {}", dir.display()))?; - - let path = dir.join("config.toml"); - let content = toml::to_string_pretty(config).context("failed to serialize config")?; - fs::write(&path, &content) - .with_context(|| format!("failed to write config file: {}", path.display()))?; - - // Set file permissions to 0600 (owner read/write only) - #[cfg(unix)] - { - use std::os::unix::fs::PermissionsExt; - let perms = fs::Permissions::from_mode(0o600); - fs::set_permissions(&path, perms) - .with_context(|| 
format!("failed to set file permissions: {}", path.display()))?; - } - - Ok(()) -} - -/// Resolve database URL by priority: -/// 1. --db-url CLI flag (if non-empty) -/// 2. database_url in ~/.config/secrets/config.toml -/// 3. Error with setup instructions -pub fn resolve_db_url(cli_db_url: &str) -> Result { - if !cli_db_url.is_empty() { - return Ok(cli_db_url.to_string()); - } - - let config = load_config()?; - if let Some(url) = config.database_url - && !url.is_empty() - { - return Ok(url); - } - - anyhow::bail!("Database not configured. Run:\n\n secrets config set-db \n") -} diff --git a/src/main.rs b/src/main.rs deleted file mode 100644 index 5ea4867..0000000 --- a/src/main.rs +++ /dev/null @@ -1,853 +0,0 @@ -mod audit; -mod commands; -mod config; -mod crypto; -mod db; -mod models; -mod output; - -use anyhow::Result; - -/// Load .env from current or parent directories (best-effort, no error if missing). -fn load_dotenv() { - let _ = dotenvy::dotenv(); -} -use clap::{Parser, Subcommand}; -use tracing_subscriber::EnvFilter; - -use output::resolve_output_mode; - -#[derive(Parser)] -#[command( - name = "secrets", - version, - about = "Secrets & config manager backed by PostgreSQL — optimised for AI agents", - after_help = "QUICK START: - # 1. Configure database (once per device) - secrets config set-db \"postgres://postgres:@:/secrets\" - - # 2. 
Initialize master key (once per device) - secrets init - - # Discover what namespaces / kinds exist - secrets search --summary --limit 20 - - # Precise lookup (JSON output for easy parsing) - secrets search -n refining --kind service --name gitea -o json - - # Extract a single metadata field directly - secrets search -n refining --kind service --name gitea -f metadata.url - - # Pipe-friendly (non-TTY defaults to json-compact automatically) - secrets search -n refining --kind service | jq '.[].name' - - # Run a command with secrets injected into its child process environment - secrets run -n refining --kind service --name gitea -- printenv" -)] -struct Cli { - /// Database URL, overrides saved config (one-time override) - #[arg(long, global = true, default_value = "")] - db_url: String, - - /// Enable verbose debug output - #[arg(long, short, global = true)] - verbose: bool, - - #[command(subcommand)] - command: Commands, -} - -#[derive(Subcommand)] -enum Commands { - /// Initialize master key on this device (run once per device). - /// - /// Prompts for a master password, derives a key with Argon2id, and stores - /// it in the OS Keychain. Use the same password on every device. - /// - /// NOTE: Run `secrets config set-db ` first if database is not configured. - #[command(after_help = "PREREQUISITE: - Database must be configured first. Run: secrets config set-db - -EXAMPLES: - # First device: generates a new Argon2id salt and stores master key - secrets init - - # Subsequent devices: reuses existing salt from the database - secrets init")] - Init, - - /// Add or update a record (upsert). Use -m for plaintext metadata, -s for secrets. 
- #[command(after_help = "EXAMPLES: - # Add a server - secrets add -n refining --kind server --name my-server \\ - --tag aliyun --tag shanghai \\ - -m ip=10.0.0.1 -m desc=\"Example ECS\" \\ - -s username=root -s ssh_key=@./keys/server.pem - - # Add a service credential - secrets add -n refining --kind service --name gitea \\ - --tag gitea \\ - -m url=https://code.example.com -m default_org=myorg \\ - -s token= - - # Add typed JSON metadata - secrets add -n refining --kind service --name gitea \\ - -m port:=3000 \\ - -m enabled:=true \\ - -m domains:='[\"code.example.com\",\"git.example.com\"]' \\ - -m tls:='{\"enabled\":true,\"redirect_http\":true}' - - # Add with token read from a file - secrets add -n ricnsmart --kind service --name mqtt \\ - -m host=mqtt.example.com -m port=1883 \\ - -s password=@./mqtt_password.txt - - # Add typed JSON secrets - secrets add -n refining --kind service --name deploy-bot \\ - -s enabled:=true \\ - -s retry_count:=3 \\ - -s scopes:='[\"repo\",\"workflow\"]' \\ - -s extra:='{\"region\":\"ap-east-1\",\"verify_tls\":true}' - - # Write a multiline file into a nested secret field - secrets add -n refining --kind server --name my-server \\ - -s credentials:content@./keys/server.pem - - # Shared PEM (key_ref): store key once, reference from multiple servers - secrets add -n refining --kind key --name my-shared-key \\ - --tag aliyun -s content=@./keys/shared.pem - secrets add -n refining --kind server --name i-abc123 \\ - -m ip=10.0.0.1 -m key_ref=my-shared-key -s username=ecs-user")] - Add { - /// Namespace, e.g. refining, ricnsmart - #[arg(short, long)] - namespace: String, - /// Kind of record: server, service, key, ... - #[arg(long)] - kind: String, - /// Human-readable unique name, e.g. gitea, i-example0abcd1234efgh - #[arg(long)] - name: String, - /// Tag for categorization (repeatable), e.g. --tag aliyun --tag hongkong - #[arg(long = "tag")] - tags: Vec, - /// Plaintext metadata: key=value, key:=, key=@file, or nested:path@file. 
- /// Use key_ref= to reference a shared key entry (kind=key); run merges its secrets. - #[arg(long = "meta", short = 'm')] - meta: Vec, - /// Secret entry: key=value, key:=, key=@file, or nested:path@file - #[arg(long = "secret", short = 's')] - secrets: Vec, - /// Output format: text (default on TTY), json, json-compact - #[arg(short, long = "output")] - output: Option, - }, - - /// Search / read records. This is the primary read command for AI agents. - /// - /// Supports fuzzy search (-q), exact lookup (--name), field extraction (-f), - /// summary view (--summary), pagination (--limit / --offset), and structured - /// output (-o json / json-compact). When stdout is not a TTY, output - /// defaults to json-compact automatically. - #[command(after_help = "EXAMPLES: - # Discover all records (summary, safe default limit) - secrets search --summary --limit 20 - - # Filter by namespace and kind - secrets search -n refining --kind service - - # Exact lookup — returns 0 or 1 record - secrets search -n refining --kind service --name gitea - - # Fuzzy keyword search (matches name, namespace, kind, tags, metadata) - secrets search -q mqtt - - # Extract a single metadata field value - secrets search -n refining --kind service --name gitea -f metadata.url - - # Multiple fields at once - secrets search -n refining --kind service --name gitea \\ - -f metadata.url -f metadata.default_org - - # Run a command with decrypted secrets only when needed - secrets run -n refining --kind service --name gitea -- printenv - - # Paginate large result sets - secrets search -n refining --summary --limit 10 --offset 0 - secrets search -n refining --summary --limit 10 --offset 10 - - # Sort by most recently updated - secrets search --sort updated --limit 5 --summary - - # Non-TTY / pipe: output is json-compact by default - secrets search -n refining --kind service | jq '.[].name'")] - Search { - /// Filter by namespace, e.g. 
refining, ricnsmart - #[arg(short, long)] - namespace: Option, - /// Filter by kind, e.g. server, service - #[arg(long)] - kind: Option, - /// Exact name filter, e.g. gitea, i-example0abcd1234efgh - #[arg(long)] - name: Option, - /// Filter by tag, e.g. --tag aliyun (repeatable for AND intersection) - #[arg(long)] - tag: Vec, - /// Fuzzy keyword (matches name, namespace, kind, tags, metadata text) - #[arg(short, long)] - query: Option, - /// Extract metadata field value(s) directly: metadata. (repeatable) - #[arg(short = 'f', long = "field")] - fields: Vec, - /// Return lightweight summary only (namespace, kind, name, tags, desc, updated_at) - #[arg(long)] - summary: bool, - /// Maximum number of records to return [default: 50] - #[arg(long, default_value = "50")] - limit: u32, - /// Skip this many records (for pagination) - #[arg(long, default_value = "0")] - offset: u32, - /// Sort order: name (default), updated, created - #[arg(long, default_value = "name")] - sort: String, - /// Output format: text (default on TTY), json, json-compact - #[arg(short, long = "output")] - output: Option, - }, - - /// Delete one record precisely, or bulk-delete by namespace. - /// - /// With --name: deletes exactly that record (--kind also required). - /// Without --name: bulk-deletes all records matching namespace + optional --kind. - /// Use --dry-run to preview bulk deletes before committing. - #[command(after_help = "EXAMPLES: - # Delete a single record (exact match) - secrets delete -n refining --kind service --name legacy-mqtt - - # Preview what a bulk delete would remove (no writes) - secrets delete -n refining --dry-run - - # Bulk-delete all records in a namespace - secrets delete -n ricnsmart - - # Bulk-delete only server records in a namespace - secrets delete -n ricnsmart --kind server - - # JSON output - secrets delete -n refining --kind service -o json")] - Delete { - /// Namespace, e.g. refining - #[arg(short, long)] - namespace: String, - /// Kind filter, e.g. 
server, service (required with --name; optional for bulk) - #[arg(long)] - kind: Option, - /// Exact name of the record to delete (omit for bulk delete) - #[arg(long)] - name: Option, - /// Preview what would be deleted without making any changes (bulk mode only) - #[arg(long)] - dry_run: bool, - /// Output format: text (default on TTY), json, json-compact - #[arg(short, long = "output")] - output: Option, - }, - - /// Incrementally update an existing record (merge semantics; record must exist). - /// - /// Only the fields you pass are changed — everything else is preserved. - /// Use --add-tag / --remove-tag to modify tags without touching other fields. - #[command(after_help = "EXAMPLES: - # Update a single metadata field (all other fields unchanged) - secrets update -n refining --kind server --name my-server -m ip=10.0.0.1 - - # Rotate a secret token - secrets update -n refining --kind service --name gitea -s token= - - # Update typed JSON metadata - secrets update -n refining --kind service --name gitea \\ - -m deploy:strategy:='{\"type\":\"rolling\",\"batch\":2}' \\ - -m runtime:max_open_conns:=20 - - # Add a tag and rotate password at the same time - secrets update -n refining --kind service --name gitea \\ - --add-tag production -s token= - - # Remove a deprecated metadata field and a stale secret key - secrets update -n refining --kind service --name mqtt \\ - --remove-meta old_port --remove-secret old_password - - # Remove a nested field - secrets update -n refining --kind server --name my-server \\ - --remove-secret credentials:content - - # Remove a tag - secrets update -n refining --kind service --name gitea --remove-tag staging - - # Update a nested secret field from a file - secrets update -n refining --kind server --name my-server \\ - -s credentials:content@./keys/server.pem - - # Update nested typed JSON fields - secrets update -n refining --kind service --name deploy-bot \\ - -s auth:config:='{\"issuer\":\"gitea\",\"rotate\":true}' \\ - -s 
auth:retry:=5 - - # Rotate shared PEM (all servers with key_ref=my-shared-key get the new key) - secrets update -n refining --kind key --name my-shared-key \\ - -s content=@./keys/new-shared.pem")] - Update { - /// Namespace, e.g. refining, ricnsmart - #[arg(short, long)] - namespace: String, - /// Kind of record: server, service, key, ... - #[arg(long)] - kind: String, - /// Human-readable unique name - #[arg(long)] - name: String, - /// Add a tag (repeatable; does not affect existing tags) - #[arg(long = "add-tag")] - add_tags: Vec, - /// Remove a tag (repeatable) - #[arg(long = "remove-tag")] - remove_tags: Vec, - /// Set or overwrite a metadata field: key=value, key:=, key=@file, or nested:path@file. - /// Use key_ref= to reference a shared key entry (kind=key). - #[arg(long = "meta", short = 'm')] - meta: Vec, - /// Delete a metadata field by key or nested path, e.g. old_port or credentials:content - #[arg(long = "remove-meta")] - remove_meta: Vec, - /// Set or overwrite a secret field: key=value, key:=, key=@file, or nested:path@file - #[arg(long = "secret", short = 's')] - secrets: Vec, - /// Delete a secret field by key or nested path, e.g. old_password or credentials:content - #[arg(long = "remove-secret")] - remove_secrets: Vec, - /// Output format: text (default on TTY), json, json-compact - #[arg(short, long = "output")] - output: Option, - }, - - /// Manage CLI configuration (database connection, etc.) - #[command(after_help = "EXAMPLES: - # Configure the database URL (run once per device; persisted to config file) - secrets config set-db \"postgres://postgres:@:/secrets\" - - # Show current config (password is masked) - secrets config show - - # Print path to the config file - secrets config path")] - Config { - #[command(subcommand)] - action: ConfigAction, - }, - - /// Show the change history for a record. 
- #[command(after_help = "EXAMPLES: - # Show last 20 versions for a service record - secrets history -n refining --kind service --name gitea - - # Show last 5 versions - secrets history -n refining --kind service --name gitea --limit 5")] - History { - #[arg(short, long)] - namespace: String, - #[arg(long)] - kind: String, - #[arg(long)] - name: String, - /// Number of history entries to show [default: 20] - #[arg(long, default_value = "20")] - limit: u32, - /// Output format: text (default on TTY), json, json-compact - #[arg(short, long = "output")] - output: Option, - }, - - /// Roll back a record to a previous version. - #[command(after_help = "EXAMPLES: - # Roll back to the most recent snapshot (undo last change) - secrets rollback -n refining --kind service --name gitea - - # Roll back to a specific version number - secrets rollback -n refining --kind service --name gitea --to-version 3")] - Rollback { - #[arg(short, long)] - namespace: String, - #[arg(long)] - kind: String, - #[arg(long)] - name: String, - /// Target version to restore. Omit to restore the most recent snapshot. - #[arg(long)] - to_version: Option, - /// Output format: text (default on TTY), json, json-compact - #[arg(short, long = "output")] - output: Option, - }, - - /// Run a command with secrets injected as environment variables. - /// - /// Secrets are available only to the child process; the current shell - /// environment is not modified. The process exit code is propagated. - /// - /// Use -s/--secret to inject only specific fields. Use --dry-run to preview - /// which variables would be injected without executing the command. 
- #[command(after_help = "EXAMPLES: - # Run a script with a single service's secrets injected - secrets run -n refining --kind service --name gitea -- ./deploy.sh - - # Inject only specific fields (minimal exposure) - secrets run -n refining --kind service --name aliyun \\ - -s access_key_id -s access_key_secret -- aliyun ecs DescribeInstances - - # Run with a tag filter (all matched records merged) - secrets run --tag production -- env | grep GITEA - - # With prefix - secrets run -n refining --kind service --name gitea --prefix GITEA -- printenv - - # Preview which variables would be injected (no command executed) - secrets run -n refining --kind service --name gitea --dry-run - - # Preview with field filter and JSON output - secrets run -n refining --kind service --name gitea -s token --dry-run -o json - - # metadata.key_ref entries get key secrets merged (e.g. server + shared PEM)")] - Run { - #[arg(short, long)] - namespace: Option, - #[arg(long)] - kind: Option, - #[arg(long)] - name: Option, - #[arg(long)] - tag: Vec, - /// Only inject these secret field names (repeatable). Omit to inject all fields. - #[arg(long = "secret", short = 's')] - secret_fields: Vec, - /// Prefix to prepend to every variable name (uppercased automatically) - #[arg(long, default_value = "")] - prefix: String, - /// Preview variables that would be injected without executing the command - #[arg(long)] - dry_run: bool, - /// Output format for --dry-run: json (default), json-compact, text - #[arg(short, long = "output")] - output: Option, - /// Command and arguments to execute with injected environment - #[arg(last = true)] - command: Vec, - }, - - /// Check for a newer version and update the binary in-place. - /// - /// Downloads the latest release and replaces the current binary. No database connection or master key required. - /// Release URL defaults to the upstream server; override via SECRETS_UPGRADE_URL for self-hosted or fork. 
- #[command(after_help = "EXAMPLES: - # Check for updates only (no download) - secrets upgrade --check - - # Download and install the latest version - secrets upgrade")] - Upgrade { - /// Only check if a newer version is available; do not download - #[arg(long)] - check: bool, - }, - - /// Export records to a file (JSON, TOML, or YAML). - /// - /// Decrypts and exports all matched records. Requires master key unless --no-secrets is used. - #[command(after_help = "EXAMPLES: - # Export everything to JSON - secrets export --file backup.json - - # Export a specific namespace to TOML - secrets export -n refining --file refining.toml - - # Export a specific kind - secrets export -n refining --kind service --file services.yaml - - # Export by tag - secrets export --tag production --file prod.json - - # Export schema only (no decryption needed) - secrets export --no-secrets --file schema.json - - # Print to stdout in YAML - secrets export -n refining --format yaml")] - Export { - /// Filter by namespace - #[arg(short, long)] - namespace: Option, - /// Filter by kind, e.g. server, service - #[arg(long)] - kind: Option, - /// Exact name filter - #[arg(long)] - name: Option, - /// Filter by tag (repeatable) - #[arg(long)] - tag: Vec, - /// Fuzzy keyword search - #[arg(short, long)] - query: Option, - /// Output file path (format inferred from extension: .json / .toml / .yaml / .yml) - #[arg(long)] - file: Option, - /// Explicit format: json, toml, or yaml (overrides file extension; required for stdout) - #[arg(long)] - format: Option, - /// Omit secrets from output (no master key required) - #[arg(long)] - no_secrets: bool, - }, - - /// Import records from a file (JSON, TOML, or YAML). - /// - /// Reads an export file and inserts or updates entries. Requires master key to re-encrypt secrets. 
- #[command(after_help = "EXAMPLES: - # Import a JSON backup (conflict = error by default) - secrets import backup.json - - # Import and overwrite existing records - secrets import --force refining.toml - - # Preview what would be imported (no writes) - secrets import --dry-run backup.yaml - - # JSON output for the import summary - secrets import backup.json -o json")] - Import { - /// Input file path (format inferred from extension: .json / .toml / .yaml / .yml) - file: String, - /// Overwrite existing records on conflict (default: error and abort) - #[arg(long)] - force: bool, - /// Preview operations without writing to the database - #[arg(long)] - dry_run: bool, - /// Output format: text (default on TTY), json, json-compact - #[arg(short, long = "output")] - output: Option, - }, -} - -#[derive(Subcommand)] -enum ConfigAction { - /// Save database URL to config file (~/.config/secrets/config.toml) - SetDb { - /// PostgreSQL connection string, e.g. postgres://user:pass@:/dbname - url: String, - }, - /// Show current configuration (password masked) - Show, - /// Print path to the config file - Path, -} - -#[tokio::main] -async fn main() -> Result<()> { - load_dotenv(); - let cli = Cli::parse(); - - let filter = if cli.verbose { - EnvFilter::new("secrets=debug") - } else { - EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("secrets=warn")) - }; - tracing_subscriber::fmt() - .with_env_filter(filter) - .with_target(false) - .init(); - - // config subcommand needs no database or master key - if let Commands::Config { action } = cli.command { - return commands::config::run(action).await; - } - - // upgrade needs no database or master key either - if let Commands::Upgrade { check } = cli.command { - return commands::upgrade::run(check).await; - } - - let db_url = config::resolve_db_url(&cli.db_url)?; - let pool = db::create_pool(&db_url).await?; - db::migrate(&pool).await?; - - // init needs a pool but sets up the master key — handle before loading it 
- if let Commands::Init = cli.command { - return commands::init::run(&pool).await; - } - - // All remaining commands require the master key from the OS Keychain, - // except delete which operates on plaintext metadata only. - - match cli.command { - Commands::Init | Commands::Config { .. } | Commands::Upgrade { .. } => unreachable!(), - - Commands::Add { - namespace, - kind, - name, - tags, - meta, - secrets, - output, - } => { - let master_key = crypto::load_master_key()?; - let _span = - tracing::info_span!("cmd", command = "add", %namespace, %kind, %name).entered(); - let out = resolve_output_mode(output.as_deref())?; - commands::add::run( - &pool, - commands::add::AddArgs { - namespace: &namespace, - kind: &kind, - name: &name, - tags: &tags, - meta_entries: &meta, - secret_entries: &secrets, - output: out, - }, - &master_key, - ) - .await?; - } - - Commands::Search { - namespace, - kind, - name, - tag, - query, - fields, - summary, - limit, - offset, - sort, - output, - } => { - let _span = tracing::info_span!("cmd", command = "search").entered(); - let out = resolve_output_mode(output.as_deref())?; - commands::search::run( - &pool, - commands::search::SearchArgs { - namespace: namespace.as_deref(), - kind: kind.as_deref(), - name: name.as_deref(), - tags: &tag, - query: query.as_deref(), - fields: &fields, - summary, - limit, - offset, - sort: &sort, - output: out, - }, - ) - .await?; - } - - Commands::Delete { - namespace, - kind, - name, - dry_run, - output, - } => { - let _span = - tracing::info_span!("cmd", command = "delete", %namespace, ?kind, ?name).entered(); - let out = resolve_output_mode(output.as_deref())?; - commands::delete::run( - &pool, - commands::delete::DeleteArgs { - namespace: &namespace, - kind: kind.as_deref(), - name: name.as_deref(), - dry_run, - output: out, - }, - ) - .await?; - } - - Commands::Update { - namespace, - kind, - name, - add_tags, - remove_tags, - meta, - remove_meta, - secrets, - remove_secrets, - output, - } => { - 
let master_key = crypto::load_master_key()?; - let _span = - tracing::info_span!("cmd", command = "update", %namespace, %kind, %name).entered(); - let out = resolve_output_mode(output.as_deref())?; - commands::update::run( - &pool, - commands::update::UpdateArgs { - namespace: &namespace, - kind: &kind, - name: &name, - add_tags: &add_tags, - remove_tags: &remove_tags, - meta_entries: &meta, - remove_meta: &remove_meta, - secret_entries: &secrets, - remove_secrets: &remove_secrets, - output: out, - }, - &master_key, - ) - .await?; - } - - Commands::History { - namespace, - kind, - name, - limit, - output, - } => { - let out = resolve_output_mode(output.as_deref())?; - commands::history::run( - &pool, - commands::history::HistoryArgs { - namespace: &namespace, - kind: &kind, - name: &name, - limit, - output: out, - }, - ) - .await?; - } - - Commands::Rollback { - namespace, - kind, - name, - to_version, - output, - } => { - let master_key = crypto::load_master_key()?; - let out = resolve_output_mode(output.as_deref())?; - commands::rollback::run( - &pool, - commands::rollback::RollbackArgs { - namespace: &namespace, - kind: &kind, - name: &name, - to_version, - output: out, - }, - &master_key, - ) - .await?; - } - - Commands::Run { - namespace, - kind, - name, - tag, - secret_fields, - prefix, - dry_run, - output, - command, - } => { - let master_key = crypto::load_master_key()?; - let out = resolve_output_mode(output.as_deref())?; - if !dry_run && command.is_empty() { - anyhow::bail!( - "No command specified. 
Usage: secrets run [filter flags] -- [args]" - ); - } - commands::run::run_exec( - &pool, - commands::run::RunArgs { - namespace: namespace.as_deref(), - kind: kind.as_deref(), - name: name.as_deref(), - tags: &tag, - secret_fields: &secret_fields, - prefix: &prefix, - dry_run, - output: out, - command: &command, - }, - &master_key, - ) - .await?; - } - - Commands::Export { - namespace, - kind, - name, - tag, - query, - file, - format, - no_secrets, - } => { - let master_key = if no_secrets { - None - } else { - Some(crypto::load_master_key()?) - }; - let _span = tracing::info_span!("cmd", command = "export").entered(); - commands::export_cmd::run( - &pool, - commands::export_cmd::ExportArgs { - namespace: namespace.as_deref(), - kind: kind.as_deref(), - name: name.as_deref(), - tags: &tag, - query: query.as_deref(), - file: file.as_deref(), - format: format.as_deref(), - no_secrets, - }, - master_key.as_ref(), - ) - .await?; - } - - Commands::Import { - file, - force, - dry_run, - output, - } => { - let master_key = crypto::load_master_key()?; - let _span = tracing::info_span!("cmd", command = "import").entered(); - let out = resolve_output_mode(output.as_deref())?; - commands::import_cmd::run( - &pool, - commands::import_cmd::ImportArgs { - file: &file, - force, - dry_run, - output: out, - }, - &master_key, - ) - .await?; - } - } - - Ok(()) -} diff --git a/src/output.rs b/src/output.rs deleted file mode 100644 index be3f239..0000000 --- a/src/output.rs +++ /dev/null @@ -1,60 +0,0 @@ -use chrono::{DateTime, Local, Utc}; -use std::str::FromStr; - -/// Output format for all commands. -#[derive(Debug, Clone, Default, PartialEq)] -pub enum OutputMode { - /// Human-readable text (default when stdout is a TTY) - #[default] - Text, - /// Pretty-printed JSON - Json, - /// Single-line JSON (default when stdout is NOT a TTY, e.g. 
piped to jq) - JsonCompact, -} - -impl FromStr for OutputMode { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - match s { - "text" => Ok(Self::Text), - "json" => Ok(Self::Json), - "json-compact" => Ok(Self::JsonCompact), - other => Err(anyhow::anyhow!( - "Unknown output format '{}'. Valid: text, json, json-compact", - other - )), - } - } -} - -/// Resolve the effective output mode. -/// - Explicit value from `--output` takes priority. -/// - Default is always `Json` (AI-first); use `-o text` for human-readable output. -pub fn resolve_output_mode(explicit: Option<&str>) -> anyhow::Result { - if let Some(s) = explicit { - return s.parse(); - } - Ok(OutputMode::Json) -} - -/// Format a UTC timestamp for local human-readable output. -pub fn format_local_time(dt: DateTime) -> String { - dt.with_timezone(&Local) - .format("%Y-%m-%d %H:%M:%S %:z") - .to_string() -} - -/// Print a JSON value to stdout in the requested output mode. -/// - `Json` → pretty-printed -/// - `JsonCompact` → single line -/// - `Text` → no-op (caller is responsible for the text branch) -pub fn print_json(value: &serde_json::Value, mode: &OutputMode) -> anyhow::Result<()> { - match mode { - OutputMode::Json => println!("{}", serde_json::to_string_pretty(value)?), - OutputMode::JsonCompact => println!("{}", serde_json::to_string(value)?), - OutputMode::Text => {} - } - Ok(()) -} diff --git a/test-fixtures/example-key.pem b/test-fixtures/example-key.pem deleted file mode 100644 index d2a80bd..0000000 --- a/test-fixtures/example-key.pem +++ /dev/null @@ -1,3 +0,0 @@ ------BEGIN EXAMPLE KEY PLACEHOLDER----- -This file is for local dev/testing. Replace with a real key when needed. ------END EXAMPLE KEY PLACEHOLDER-----